Diffstat (limited to 'drivers')
1703 files changed, 97919 insertions, 36135 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index acad70a0bb0d..445ce28475b3 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -221,6 +221,9 @@ config ACPI_PROCESSOR_IDLE bool select CPU_IDLE +config ACPI_MCFG + bool + config ACPI_CPPC_LIB bool depends on ACPI_PROCESSOR @@ -454,32 +457,7 @@ config ACPI_REDUCED_HARDWARE_ONLY If you are unsure what to do, do not enable this option. -config ACPI_NFIT - tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" - depends on PHYS_ADDR_T_64BIT - depends on BLK_DEV - depends on ARCH_HAS_MMIO_FLUSH - select LIBNVDIMM - help - Infrastructure to probe ACPI 6 compliant platforms for - NVDIMMs (NFIT) and register a libnvdimm device tree. In - addition to storage devices this also enables libnvdimm to pass - ACPI._DSM messages for platform/dimm configuration. - - To compile this driver as a module, choose M here: - the module will be called nfit. - -config ACPI_NFIT_DEBUG - bool "NFIT DSM debug" - depends on ACPI_NFIT - depends on DYNAMIC_DEBUG - default n - help - Enabling this option causes the nfit driver to dump the - input and output buffers of _DSM operations on the ACPI0012 - device and its children. This can be very verbose, so leave - it disabled unless you are debugging a hardware / firmware - issue. +source "drivers/acpi/nfit/Kconfig" source "drivers/acpi/apei/Kconfig" source "drivers/acpi/dptf/Kconfig" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 88f54f03e3d2..5ae9d85c5159 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -40,6 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-y += pci_root.o pci_link.o pci_irq.o +obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o acpi-y += acpi_lpss.o acpi_apd.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o @@ -69,7 +70,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI) += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o -obj-$(CONFIG_ACPI_NFIT) += nfit.o +obj-$(CONFIG_ACPI_NFIT) += nfit/ obj-$(CONFIG_ACPI) += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o obj-$(CONFIG_ACPI_BATTERY) += battery.o diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig new file mode 100644 index 000000000000..dd0d53c52552 --- /dev/null +++ b/drivers/acpi/nfit/Kconfig @@ -0,0 +1,26 @@ +config ACPI_NFIT + tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" + depends on PHYS_ADDR_T_64BIT + depends on BLK_DEV + depends on ARCH_HAS_MMIO_FLUSH + select LIBNVDIMM + help + Infrastructure to probe ACPI 6 compliant platforms for + NVDIMMs (NFIT) and register a libnvdimm device tree. In + addition to storage devices this also enables libnvdimm to pass + ACPI._DSM messages for platform/dimm configuration. + + To compile this driver as a module, choose M here: + the module will be called nfit. + +config ACPI_NFIT_DEBUG + bool "NFIT DSM debug" + depends on ACPI_NFIT + depends on DYNAMIC_DEBUG + default n + help + Enabling this option causes the nfit driver to dump the + input and output buffers of _DSM operations on the ACPI0012 + device and its children. This can be very verbose, so leave + it disabled unless you are debugging a hardware / firmware + issue. 
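The _DSM buffer dump that ACPI_NFIT_DEBUG enables is additionally gated at runtime by dynamic debug. A simplified sketch of the pattern, with hypothetical helper and parameter names (not the driver's exact code):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative helper: hex-dump a _DSM input payload when
 * CONFIG_ACPI_NFIT_DEBUG is built in and the callsite is enabled via
 * dynamic debug; capped at 256 bytes to bound the log volume. */
static void nfit_dump_dsm_in(struct device *dev, const void *in, u32 len)
{
	if (!IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG))
		return;
	dev_dbg(dev, "_DSM input length: %u\n", len);
	print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
			in, min_t(u32, 256, len), true);
}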
diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile new file mode 100644 index 000000000000..a407e769f103 --- /dev/null +++ b/drivers/acpi/nfit/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ACPI_NFIT) := nfit.o +nfit-y := core.o +nfit-$(CONFIG_X86_MCE) += mce.o diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit/core.c index 1f0e06065ae6..8c234dd9b8bc 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit/core.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/ndctl.h> +#include <linux/sysfs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/acpi.h> @@ -50,6 +51,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO); MODULE_PARM_DESC(disable_vendor_specific, "Limit commands to the publicly specified set\n"); +LIST_HEAD(acpi_descs); +DEFINE_MUTEX(acpi_desc_lock); + static struct workqueue_struct *nfit_wq; struct nfit_table_prev { @@ -360,7 +364,7 @@ static const char *spa_type_name(u16 type) return to_name[type]; } -static int nfit_spa_type(struct acpi_nfit_system_address *spa) +int nfit_spa_type(struct acpi_nfit_system_address *spa) { int i; @@ -374,22 +378,25 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { - size_t length = min_t(size_t, sizeof(*spa), spa->header.length); struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; + if (spa->header.length != sizeof(*spa)) + return false; + list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, length) == 0) { + if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } } - nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL); + nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), + GFP_KERNEL); if (!nfit_spa) return false; INIT_LIST_HEAD(&nfit_spa->list); - nfit_spa->spa = spa; + memcpy(nfit_spa->spa, spa, sizeof(*spa)); list_add_tail(&nfit_spa->list, &acpi_desc->spas); dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, spa->range_index, @@ -401,21 +408,24 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev) { - size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length); struct device *dev = acpi_desc->dev; struct nfit_memdev *nfit_memdev; + if (memdev->header.length != sizeof(*memdev)) + return false; + list_for_each_entry(nfit_memdev, &prev->memdevs, list) - if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { + if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); return true; } - nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL); + nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), + GFP_KERNEL); if (!nfit_memdev) return false; INIT_LIST_HEAD(&nfit_memdev->list); - nfit_memdev->memdev = memdev; + memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", __func__, memdev->device_handle, memdev->range_index, @@ -423,25 +433,42 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, return true; } +/* + * An implementation may provide a truncated control region if no block windows + * are defined. 
+ */ +static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) +{ + if (dcr->header.length < offsetof(struct acpi_nfit_control_region, + window_size)) + return 0; + if (dcr->windows) + return sizeof(*dcr); + return offsetof(struct acpi_nfit_control_region, window_size); +} + static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { - size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length); struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; + if (!sizeof_dcr(dcr)) + return false; + list_for_each_entry(nfit_dcr, &prev->dcrs, list) - if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { + if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } - nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL); + nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), + GFP_KERNEL); if (!nfit_dcr) return false; INIT_LIST_HEAD(&nfit_dcr->list); - nfit_dcr->dcr = dcr; + memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, dcr->region_index, dcr->windows); @@ -452,71 +479,102 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { - size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length); struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; + if (bdw->header.length != sizeof(*bdw)) + return false; list_for_each_entry(nfit_bdw, &prev->bdws, list) - if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { + if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } - nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL); + nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), + GFP_KERNEL); if (!nfit_bdw) return false; INIT_LIST_HEAD(&nfit_bdw->list); - nfit_bdw->bdw = bdw; + memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, bdw->region_index, bdw->windows); return true; } +static size_t sizeof_idt(struct acpi_nfit_interleave *idt) +{ + if (idt->header.length < sizeof(*idt)) + return 0; + return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); +} + static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave *idt) { - size_t length = min_t(size_t, sizeof(*idt), idt->header.length); struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; - list_for_each_entry(nfit_idt, &prev->idts, list) - if (memcmp(nfit_idt->idt, idt, length) == 0) { + if (!sizeof_idt(idt)) + return false; + + list_for_each_entry(nfit_idt, &prev->idts, list) { + if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) + continue; + + if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } + } - nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL); + nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), + GFP_KERNEL); if (!nfit_idt) return false; INIT_LIST_HEAD(&nfit_idt->list); - nfit_idt->idt = idt; + memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); list_add_tail(&nfit_idt->list, &acpi_desc->idts); dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, idt->interleave_index, idt->line_count); return true; } +static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) +{ + if 
(flush->header.length < sizeof(*flush)) + return 0; + return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); +} + static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { - size_t length = min_t(size_t, sizeof(*flush), flush->header.length); struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; - list_for_each_entry(nfit_flush, &prev->flushes, list) - if (memcmp(nfit_flush->flush, flush, length) == 0) { + if (!sizeof_flush(flush)) + return false; + + list_for_each_entry(nfit_flush, &prev->flushes, list) { + if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) + continue; + + if (memcmp(nfit_flush->flush, flush, + sizeof_flush(flush)) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } + } - nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL); + nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) + + sizeof_flush(flush), GFP_KERNEL); if (!nfit_flush) return false; INIT_LIST_HEAD(&nfit_flush->list); - nfit_flush->flush = flush; + memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); list_add_tail(&nfit_flush->list, &acpi_desc->flushes); dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, flush->device_handle, flush->hint_count); @@ -614,7 +672,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, { u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; struct nfit_memdev *nfit_memdev; - struct nfit_flush *nfit_flush; struct nfit_bdw *nfit_bdw; struct nfit_idt *nfit_idt; u16 idt_idx, range_index; @@ -647,14 +704,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, nfit_mem->idt_bdw = nfit_idt->idt; break; } - - list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { - if (nfit_flush->flush->device_handle != - nfit_memdev->memdev->device_handle) - continue; - nfit_mem->nfit_flush = nfit_flush; - break; - } break; } } @@ -675,6 +724,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, } list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct nfit_flush *nfit_flush; struct nfit_dcr *nfit_dcr; u32 device_handle; u16 dcr; @@ -721,6 +771,28 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, break; } + list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { + struct acpi_nfit_flush_address *flush; + u16 i; + + if (nfit_flush->flush->device_handle != device_handle) + continue; + nfit_mem->nfit_flush = nfit_flush; + flush = nfit_flush->flush; + nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, + flush->hint_count + * sizeof(struct resource), GFP_KERNEL); + if (!nfit_mem->flush_wpq) + return -ENOMEM; + for (i = 0; i < flush->hint_count; i++) { + struct resource *res = &nfit_mem->flush_wpq[i]; + + res->start = flush->hint_address[i]; + res->end = res->start + 8 - 1; + } + break; + } + if (dcr && !nfit_mem->dcr) { dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", spa->range_index, dcr); @@ -806,14 +878,85 @@ static ssize_t revision_show(struct device *dev, } static DEVICE_ATTR_RO(revision); +/* + * This shows the number of full Address Range Scrubs that have been + * completed since driver load time. Userspace can wait on this using + * select/poll etc. 
A '+' at the end indicates an ARS is in progress + */ +static ssize_t scrub_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc = -ENXIO; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, + (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); + } + device_unlock(dev); + return rc; +} + +static ssize_t scrub_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc; + long val; + + rc = kstrtol(buf, 0, &val); + if (rc) + return rc; + if (val != 1) + return -EINVAL; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = acpi_nfit_ars_rescan(acpi_desc); + } + device_unlock(dev); + if (rc) + return rc; + return size; +} +static DEVICE_ATTR_RW(scrub); + +static bool ars_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START + | 1 << ND_CMD_ARS_STATUS; + + return (nd_desc->cmd_mask & mask) == mask; +} + +static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + + if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) + return 0; + return a->mode; +} + static struct attribute *acpi_nfit_attributes[] = { &dev_attr_revision.attr, + &dev_attr_scrub.attr, NULL, }; static struct attribute_group acpi_nfit_attribute_group = { .name = "nfit", .attrs = acpi_nfit_attributes, + .is_visible = nfit_visible, }; static const struct attribute_group *acpi_nfit_attribute_groups[] = { @@ -1130,11 +1273,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, } /* - * Until standardization materializes we need to consider up to 3 + * Until standardization materializes we need to consider 4 * different command sets. Note, that checking for function0 (bit0) * tells us if any commands are reachable through this uuid. 
*/ - for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++) + for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) break; @@ -1144,12 +1287,14 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dsm_mask = 0x3fe; if (disable_vendor_specific) dsm_mask &= ~(1 << ND_CMD_VENDOR); - } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { dsm_mask = 0x1c3c76; - else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { dsm_mask = 0x1fe; if (disable_vendor_specific) dsm_mask &= ~(1 << 8); + } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { + dsm_mask = 0xffffffff; } else { dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; @@ -1171,6 +1316,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) int dimm_count = 0; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { + struct acpi_nfit_flush_address *flush; unsigned long flags = 0, cmd_mask; struct nvdimm *nvdimm; u32 device_handle; @@ -1204,9 +1350,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if (nfit_mem->family == NVDIMM_FAMILY_INTEL) cmd_mask |= nfit_mem->dsm_mask; + flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush + : NULL; nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, acpi_nfit_dimm_attribute_groups, - flags, cmd_mask); + flags, cmd_mask, flush ? flush->hint_count : 0, + nfit_mem->flush_wpq); if (!nvdimm) return -ENOMEM; @@ -1374,24 +1523,6 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) return mmio->base_offset + line_offset + table_offset + sub_line_offset; } -static void wmb_blk(struct nfit_blk *nfit_blk) -{ - - if (nfit_blk->nvdimm_flush) { - /* - * The first wmb() is needed to 'sfence' all previous writes - * such that they are architecturally visible for the platform - * buffer flush. Note that we've already arranged for pmem - * writes to avoid the cache via arch_memcpy_to_pmem(). The - * final wmb() ensures ordering for the NVDIMM flush write. - */ - wmb(); - writeq(1, nfit_blk->nvdimm_flush); - wmb(); - } else - wmb_pmem(); -} - static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; @@ -1426,7 +1557,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, offset = to_interleave_offset(offset, mmio); writeq(cmd, mmio->addr.base + offset); - wmb_blk(nfit_blk); + nvdimm_flush(nfit_blk->nd_region); if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) readq(mmio->addr.base + offset); @@ -1477,7 +1608,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, } if (rw) - wmb_blk(nfit_blk); + nvdimm_flush(nfit_blk->nd_region); rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; return rc; @@ -1509,125 +1640,6 @@ static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, return rc; } -static void nfit_spa_mapping_release(struct kref *kref) -{ - struct nfit_spa_mapping *spa_map = to_spa_map(kref); - struct acpi_nfit_system_address *spa = spa_map->spa; - struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); - if (spa_map->type == SPA_MAP_APERTURE) - memunmap((void __force *)spa_map->addr.aperture); - else - iounmap(spa_map->addr.base); - release_mem_region(spa->address, spa->length); - list_del(&spa_map->list); - kfree(spa_map); -} - -static struct nfit_spa_mapping *find_spa_mapping( - struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa) -{ - struct nfit_spa_mapping *spa_map; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - list_for_each_entry(spa_map, &acpi_desc->spa_maps, list) - if (spa_map->spa == spa) - return spa_map; - - return NULL; -} - -static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa) -{ - struct nfit_spa_mapping *spa_map; - - mutex_lock(&acpi_desc->spa_map_mutex); - spa_map = find_spa_mapping(acpi_desc, spa); - - if (spa_map) - kref_put(&spa_map->kref, nfit_spa_mapping_release); - mutex_unlock(&acpi_desc->spa_map_mutex); -} - -static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa, enum spa_map_type type) -{ - resource_size_t start = spa->address; - resource_size_t n = spa->length; - struct nfit_spa_mapping *spa_map; - struct resource *res; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - - spa_map = find_spa_mapping(acpi_desc, spa); - if (spa_map) { - kref_get(&spa_map->kref); - return spa_map->addr.base; - } - - spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); - if (!spa_map) - return NULL; - - INIT_LIST_HEAD(&spa_map->list); - spa_map->spa = spa; - kref_init(&spa_map->kref); - spa_map->acpi_desc = acpi_desc; - - res = request_mem_region(start, n, dev_name(acpi_desc->dev)); - if (!res) - goto err_mem; - - spa_map->type = type; - if (type == SPA_MAP_APERTURE) - spa_map->addr.aperture = (void __pmem *)memremap(start, n, - ARCH_MEMREMAP_PMEM); - else - spa_map->addr.base = ioremap_nocache(start, n); - - - if (!spa_map->addr.base) - goto err_map; - - list_add_tail(&spa_map->list, &acpi_desc->spa_maps); - return spa_map->addr.base; - - err_map: - release_mem_region(start, n); - err_mem: - kfree(spa_map); - return NULL; -} - -/** - * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges - * @nvdimm_bus: NFIT-bus that provided the spa table entry - * @nfit_spa: spa table to map - * @type: aperture or control region - * - * In the case where block-data-window apertures and - * dimm-control-regions are interleaved they will end up sharing a - * single request_mem_region() + ioremap() for the address range. In - * the style of devm nfit_spa_map() mappings are automatically dropped - * when all region devices referencing the same mapping are disabled / - * unbound. 
- */ -static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa, enum spa_map_type type) -{ - void __iomem *iomem; - - mutex_lock(&acpi_desc->spa_map_mutex); - iomem = __nfit_spa_map(acpi_desc, spa, type); - mutex_unlock(&acpi_desc->spa_map_mutex); - - return iomem; -} - static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, struct acpi_nfit_interleave *idt, u16 interleave_ways) { @@ -1669,9 +1681,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, struct device *dev) { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_flush *nfit_flush; struct nfit_blk_mmio *mmio; struct nfit_blk *nfit_blk; struct nfit_mem *nfit_mem; @@ -1697,8 +1707,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, /* map block aperture memory */ nfit_blk->bdw_offset = nfit_mem->bdw->offset; mmio = &nfit_blk->mmio[BDW]; - mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, - SPA_MAP_APERTURE); + mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, + nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, nvdimm_name(nvdimm)); @@ -1720,8 +1730,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; nfit_blk->stat_offset = nfit_mem->dcr->status_offset; mmio = &nfit_blk->mmio[DCR]; - mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, - SPA_MAP_CONTROL); + mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, + nfit_mem->spa_dcr->length); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, nvdimm_name(nvdimm)); @@ -1746,15 +1756,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, return rc; } - nfit_flush = nfit_mem->nfit_flush; - if (nfit_flush && nfit_flush->flush->hint_count != 0) { - nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev, - nfit_flush->flush->hint_address[0], 8); - if (!nfit_blk->nvdimm_flush) - return -ENOMEM; - } - - if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) + if (nvdimm_has_flush(nfit_blk->nd_region) < 0) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (mmio->line_size == 0) @@ -1773,29 +1775,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, return 0; } -static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); - int i; - - if (!nfit_blk) - return; /* never enabled */ - - /* auto-free BLK spa mappings */ - for (i = 0; i < 2; i++) { - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; - - if (mmio->addr.base) - nfit_spa_unmap(acpi_desc, mmio->spa); - } - nd_blk_region_set_provider_data(ndbr, NULL); - /* devm will free nfit_blk */ -} - static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) { @@ -1919,11 +1898,11 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, if (ret) return ret; - ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res); - if (ret) { - remove_resource(res); + ret = 
devm_add_action_or_reset(acpi_desc->dev, + acpi_nfit_remove_resource, + res); + if (ret) return ret; - } return 0; } @@ -1969,7 +1948,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, ndr_desc->num_mappings = blk_valid; ndbr_desc = to_blk_region_desc(ndr_desc); ndbr_desc->enable = acpi_nfit_blk_region_enable; - ndbr_desc->disable = acpi_nfit_blk_region_disable; ndbr_desc->do_io = acpi_desc->blk_do_io; nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc); @@ -1981,6 +1959,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, return 0; } +static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) +{ + return (nfit_spa_type(spa) == NFIT_SPA_VDISK || + nfit_spa_type(spa) == NFIT_SPA_VCD || + nfit_spa_type(spa) == NFIT_SPA_PDISK || + nfit_spa_type(spa) == NFIT_SPA_PCD); +} + static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { @@ -1996,7 +1982,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, if (nfit_spa->nd_region) return 0; - if (spa->range_index == 0) { + if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", __func__); return 0; @@ -2060,6 +2046,11 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; + } else if (nfit_spa_is_virtual(spa)) { + nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + rc = -ENOMEM; } out: @@ -2139,7 +2130,7 @@ static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, unsigned int tmo = scrub_timeout; int rc; - if (nfit_spa->ars_done || !nfit_spa->nd_region) + if (!nfit_spa->ars_required || !nfit_spa->nd_region) return; rc = ars_start(acpi_desc, nfit_spa); @@ -2228,7 +2219,9 @@ static void acpi_nfit_scrub(struct work_struct *work) * firmware initiated scrubs to complete and then we go search for the * affected spa regions to mark them scanned. In the second phase we * initiate a directed scrub for every range that was not scrubbed in - * phase 1. + * phase 1. If we're called for a 'rescan', we harmlessly pass through + * the first phase, but really only care about running phase 2, where + * regions can be notified of new poison. */ /* process platform firmware initiated scrubs */ @@ -2331,14 +2324,17 @@ static void acpi_nfit_scrub(struct work_struct *work) * Flag all the ranges that still need scrubbing, but * register them now to make data available. 
*/ - if (nfit_spa->nd_region) - nfit_spa->ars_done = 1; - else + if (!nfit_spa->nd_region) { + nfit_spa->ars_required = 1; acpi_nfit_register_region(acpi_desc, nfit_spa); + } } list_for_each_entry(nfit_spa, &acpi_desc->spas, list) acpi_nfit_async_scrub(acpi_desc, nfit_spa); + acpi_desc->scrub_count++; + if (acpi_desc->scrub_count_state) + sysfs_notify_dirent(acpi_desc->scrub_count_state); mutex_unlock(&acpi_desc->init_mutex); } @@ -2376,14 +2372,89 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, return 0; } -int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) +static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) +{ + struct device *dev = acpi_desc->dev; + struct kernfs_node *nfit; + struct device *bus_dev; + + if (!ars_supported(acpi_desc->nvdimm_bus)) + return 0; + + bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); + if (!nfit) { + dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); + return -ENODEV; + } + acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); + sysfs_put(nfit); + if (!acpi_desc->scrub_count_state) { + dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); + return -ENODEV; + } + + return 0; +} + +static void acpi_nfit_destruct(void *data) +{ + struct acpi_nfit_desc *acpi_desc = data; + struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + + /* + * Destruct under acpi_desc_lock so that nfit_handle_mce does not + * race teardown + */ + mutex_lock(&acpi_desc_lock); + acpi_desc->cancel = 1; + /* + * Bounce the nvdimm bus lock to make sure any in-flight + * acpi_nfit_ars_rescan() submissions have had a chance to + * either submit or see ->cancel set. + */ + device_lock(bus_dev); + device_unlock(bus_dev); + + flush_workqueue(nfit_wq); + if (acpi_desc->scrub_count_state) + sysfs_put(acpi_desc->scrub_count_state); + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + acpi_desc->nvdimm_bus = NULL; + list_del(&acpi_desc->list); + mutex_unlock(&acpi_desc_lock); +} + +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) { struct device *dev = acpi_desc->dev; struct nfit_table_prev prev; const void *end; - u8 *data; int rc; + if (!acpi_desc->nvdimm_bus) { + acpi_nfit_init_dsms(acpi_desc); + + acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, + &acpi_desc->nd_desc); + if (!acpi_desc->nvdimm_bus) + return -ENOMEM; + + rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, + acpi_desc); + if (rc) + return rc; + + rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); + if (rc) + return rc; + + /* register this acpi_desc for mce notifications */ + mutex_lock(&acpi_desc_lock); + list_add_tail(&acpi_desc->list, &acpi_descs); + mutex_unlock(&acpi_desc_lock); + } + mutex_lock(&acpi_desc->init_mutex); INIT_LIST_HEAD(&prev.spas); @@ -2406,7 +2477,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) list_cut_position(&prev.flushes, &acpi_desc->flushes, acpi_desc->flushes.prev); - data = (u8 *) acpi_desc->nfit; end = data + sz; while (!IS_ERR_OR_NULL(data)) data = add_table(acpi_desc, &prev, data, end); @@ -2422,12 +2492,9 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) if (rc) goto out_unlock; - if (nfit_mem_init(acpi_desc) != 0) { - rc = -ENOMEM; + rc = nfit_mem_init(acpi_desc); + if (rc) goto out_unlock; - } - - acpi_nfit_init_dsms(acpi_desc); rc = acpi_nfit_register_dimms(acpi_desc); if (rc) @@ -2496,6 +2563,33 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } 
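acpi_nfit_init() above walks the NFIT as a sequence of variable-length sub-tables (data = add_table(...) until IS_ERR_OR_NULL), and the reworked add_* helpers now reject under- or over-sized entries instead of clamping with min_t(). A standalone sketch of that bounds discipline, assuming ACPICA's struct acpi_nfit_header (u16 type, u16 length); nfit_walk_one() is an illustrative name, not the driver's add_table():

#include <linux/acpi.h>
#include <linux/err.h>

static void *nfit_walk_one(void *entry, const void *end)
{
	struct acpi_nfit_header *hdr = entry;

	/* a truncated header or a length escaping the table ends the walk */
	if (entry + sizeof(*hdr) > end)
		return ERR_PTR(-ENXIO);
	if (hdr->length < sizeof(*hdr) || entry + hdr->length > end)
		return ERR_PTR(-ENXIO);
	/* dispatch on hdr->type here (SPA, memdev, DCR, BDW, IDT, flush) */
	return entry + hdr->length;
}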
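The scrub attribute pairs with the sysfs_notify_dirent() call in acpi_nfit_scrub(): userspace can poll for POLLPRI/POLLERR and re-read, and a trailing '+' means an ARS is still in flight. A userspace sketch of waiting for completion; the ndbus0 path is an assumed example, as the bus device name varies per system:

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

#define SCRUB_ATTR "/sys/bus/nd/devices/ndbus0/nfit/scrub"	/* assumed path */

static int wait_for_scrub(void)
{
	char buf[16];
	struct pollfd pfd = { .events = POLLERR | POLLPRI };
	int fd = open(SCRUB_ATTR, O_RDONLY);

	if (fd < 0)
		return -1;
	pfd.fd = fd;
	for (;;) {
		ssize_t n;

		/* sysfs requires a fresh read to re-arm the poll */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		if (!strchr(buf, '+'))	/* no ARS in progress */
			break;
		poll(&pfd, 1, -1);
	}
	close(fd);
	return 0;
}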
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) +{ + struct device *dev = acpi_desc->dev; + struct nfit_spa *nfit_spa; + + if (work_busy(&acpi_desc->work)) + return -EBUSY; + + if (acpi_desc->cancel) + return 0; + + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + struct acpi_nfit_system_address *spa = nfit_spa->spa; + + if (nfit_spa_type(spa) != NFIT_SPA_PM) + continue; + + nfit_spa->ars_required = 1; + } + queue_work(nfit_wq, &acpi_desc->work); + dev_dbg(dev, "%s: ars_scan triggered\n", __func__); + mutex_unlock(&acpi_desc->init_mutex); + + return 0; +} + void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) { struct nvdimm_bus_descriptor *nd_desc; @@ -2505,12 +2599,12 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; nd_desc = &acpi_desc->nd_desc; nd_desc->provider_name = "ACPI.NFIT"; + nd_desc->module = THIS_MODULE; nd_desc->ndctl = acpi_nfit_ctl; nd_desc->flush_probe = acpi_nfit_flush_probe; nd_desc->clear_to_send = acpi_nfit_clear_to_send; nd_desc->attr_groups = acpi_nfit_attribute_groups; - INIT_LIST_HEAD(&acpi_desc->spa_maps); INIT_LIST_HEAD(&acpi_desc->spas); INIT_LIST_HEAD(&acpi_desc->dcrs); INIT_LIST_HEAD(&acpi_desc->bdws); @@ -2518,7 +2612,7 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) INIT_LIST_HEAD(&acpi_desc->flushes); INIT_LIST_HEAD(&acpi_desc->memdevs); INIT_LIST_HEAD(&acpi_desc->dimms); - mutex_init(&acpi_desc->spa_map_mutex); + INIT_LIST_HEAD(&acpi_desc->list); mutex_init(&acpi_desc->init_mutex); INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); } @@ -2532,7 +2626,7 @@ static int acpi_nfit_add(struct acpi_device *adev) struct acpi_table_header *tbl; acpi_status status = AE_OK; acpi_size sz; - int rc; + int rc = 0; status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); if (ACPI_FAILURE(status)) { @@ -2545,50 +2639,33 @@ static int acpi_nfit_add(struct acpi_device *adev) if (!acpi_desc) return -ENOMEM; acpi_nfit_desc_init(acpi_desc, &adev->dev); - acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc); - if (!acpi_desc->nvdimm_bus) - return -ENOMEM; - /* - * Save the acpi header for later and then skip it, - * making nfit point to the first nfit table header. 
- */ + /* Save the acpi header for exporting the revision via sysfs */ acpi_desc->acpi_header = *tbl; - acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); - sz -= sizeof(struct acpi_table_nfit); /* Evaluate _FIT and override with that if present */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_SUCCESS(status) && buf.length > 0) { - union acpi_object *obj; - /* - * Adjust for the acpi_object header of the _FIT - */ - obj = buf.pointer; - if (obj->type == ACPI_TYPE_BUFFER) { - acpi_desc->nfit = - (struct acpi_nfit_header *)obj->buffer.pointer; - sz = obj->buffer.length; - } else + union acpi_object *obj = buf.pointer; + + if (obj->type == ACPI_TYPE_BUFFER) + rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); + else dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", __func__, (int) obj->type); - } - - rc = acpi_nfit_init(acpi_desc, sz); - if (rc) { - nvdimm_bus_unregister(acpi_desc->nvdimm_bus); - return rc; - } - return 0; + kfree(buf.pointer); + } else + /* skip over the lead-in header table */ + rc = acpi_nfit_init(acpi_desc, (void *) tbl + + sizeof(struct acpi_table_nfit), + sz - sizeof(struct acpi_table_nfit)); + return rc; } static int acpi_nfit_remove(struct acpi_device *adev) { - struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); - - acpi_desc->cancel = 1; - flush_workqueue(nfit_wq); - nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + /* see acpi_nfit_destruct */ return 0; } @@ -2596,9 +2673,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_nfit_header *nfit_saved; - union acpi_object *obj; struct device *dev = &adev->dev; + union acpi_object *obj; acpi_status status; int ret; @@ -2616,9 +2692,6 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) if (!acpi_desc) goto out_unlock; acpi_nfit_desc_init(acpi_desc, &adev->dev); - acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc); - if (!acpi_desc->nvdimm_bus) - goto out_unlock; } else { /* * Finish previous registration before considering new @@ -2634,21 +2707,14 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) goto out_unlock; } - nfit_saved = acpi_desc->nfit; obj = buf.pointer; if (obj->type == ACPI_TYPE_BUFFER) { - acpi_desc->nfit = - (struct acpi_nfit_header *)obj->buffer.pointer; - ret = acpi_nfit_init(acpi_desc, obj->buffer.length); - if (ret) { - /* Merge failed, restore old nfit, and exit */ - acpi_desc->nfit = nfit_saved; + ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); + if (ret) dev_err(dev, "failed to merge updated NFIT\n"); - } - } else { - /* Bad _FIT, restore old nfit */ + } else dev_err(dev, "Invalid _FIT\n"); - } kfree(buf.pointer); out_unlock: @@ -2693,18 +2759,23 @@ static __init int nfit_init(void) acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); + acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) return -ENOMEM; + nfit_mce_register(); + return acpi_bus_register_driver(&acpi_nfit_driver); } static __exit void nfit_exit(void) { + nfit_mce_unregister(); acpi_bus_unregister_driver(&acpi_nfit_driver); destroy_workqueue(nfit_wq); + WARN_ON(!list_empty(&acpi_descs)); } 
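For reference, the ordering contract that the deleted wmb_blk() encoded inline, and that nvdimm_flush() now provides per-region via the flush_wpq resources assembled in nfit_mem_dcr_init() (each hint is an 8-byte register). A minimal sketch of a single hint write:

#include <linux/io.h>

/* Fence prior stores so they are visible to the platform buffer flush,
 * write any value to the flush hint to drain the DIMM's write-posted
 * queue, then fence again to order the hint write itself. */
static void flush_hint_write(void __iomem *hint)
{
	wmb();
	writeq(1, hint);
	wmb();
}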
module_init(nfit_init); diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c new file mode 100644 index 000000000000..4c745bf389fe --- /dev/null +++ b/drivers/acpi/nfit/mce.c @@ -0,0 +1,89 @@ +/* + * NFIT - Machine Check Handler + * + * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include <linux/notifier.h> +#include <linux/acpi.h> +#include <asm/mce.h> +#include "nfit.h" + +static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + struct acpi_nfit_desc *acpi_desc; + struct nfit_spa *nfit_spa; + + /* We only care about memory errors */ + if (!(mce->status & MCACOD)) + return NOTIFY_DONE; + + /* + * mce->addr contains the physical addr accessed that caused the + * machine check. We need to walk through the list of NFITs, and see + * if any of them matches that address, and only then start a scrub. + */ + mutex_lock(&acpi_desc_lock); + list_for_each_entry(acpi_desc, &acpi_descs, list) { + struct device *dev = acpi_desc->dev; + int found_match = 0; + + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + struct acpi_nfit_system_address *spa = nfit_spa->spa; + + if (nfit_spa_type(spa) == NFIT_SPA_PM) + continue; + /* find the spa that covers the mce addr */ + if (spa->address > mce->addr) + continue; + if ((spa->address + spa->length - 1) < mce->addr) + continue; + found_match = 1; + dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n", + __func__, spa->range_index, spa->address, + spa->length); + /* + * We can break at the first match because we're going + * to rescan all the SPA ranges. There shouldn't be any + * aliasing anyway. 
+ */ + break; + } + mutex_unlock(&acpi_desc->init_mutex); + + /* + * We can ignore an -EBUSY here because if an ARS is already + * in progress, just let that be the last authoritative one + */ + if (found_match) + acpi_nfit_ars_rescan(acpi_desc); + } + + mutex_unlock(&acpi_desc_lock); + return NOTIFY_DONE; +} + +static struct notifier_block nfit_mce_dec = { + .notifier_call = nfit_handle_mce, +}; + +void nfit_mce_register(void) +{ + mce_register_decode_chain(&nfit_mce_dec); +} + +void nfit_mce_unregister(void) +{ + mce_unregister_decode_chain(&nfit_mce_dec); +} diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit/nfit.h index 02b9ea1e8d2e..e894ded24d99 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -16,6 +16,7 @@ #define __NFIT_H__ #include <linux/workqueue.h> #include <linux/libnvdimm.h> +#include <linux/ndctl.h> #include <linux/types.h> #include <linux/uuid.h> #include <linux/acpi.h> @@ -31,6 +32,9 @@ #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" #define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" +/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ +#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" + #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED) @@ -40,6 +44,7 @@ enum nfit_uuids { NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL, NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, + NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, @@ -74,37 +79,37 @@ enum { }; struct nfit_spa { - struct acpi_nfit_system_address *spa; struct list_head list; struct nd_region *nd_region; - unsigned int ars_done:1; + unsigned int ars_required:1; u32 clear_err_unit; u32 max_ars; + struct acpi_nfit_system_address spa[0]; }; struct nfit_dcr { - struct acpi_nfit_control_region *dcr; struct list_head list; + struct acpi_nfit_control_region dcr[0]; }; struct nfit_bdw { - struct acpi_nfit_data_region *bdw; struct list_head list; + struct acpi_nfit_data_region bdw[0]; }; struct nfit_idt { - struct acpi_nfit_interleave *idt; struct list_head list; + struct acpi_nfit_interleave idt[0]; }; struct nfit_flush { - struct acpi_nfit_flush_address *flush; struct list_head list; + struct acpi_nfit_flush_address flush[0]; }; struct nfit_memdev { - struct acpi_nfit_memory_map *memdev; struct list_head list; + struct acpi_nfit_memory_map memdev[0]; }; /* assembled tables for a given dimm/memory-device */ @@ -123,6 +128,7 @@ struct nfit_mem { struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + struct resource *flush_wpq; unsigned long dsm_mask; int family; }; @@ -130,10 +136,7 @@ struct nfit_mem { struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; - struct acpi_nfit_header *nfit; - struct mutex spa_map_mutex; struct mutex init_mutex; - struct list_head spa_maps; struct list_head memdevs; struct list_head flushes; struct list_head dimms; @@ -146,6 +149,9 @@ struct acpi_nfit_desc { struct nd_cmd_ars_status *ars_status; size_t ars_status_size; struct work_struct work; + struct list_head list; + struct kernfs_node *scrub_count_state; + unsigned int scrub_count; unsigned int cancel:1; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; @@ -161,7 +167,7 @@ enum nd_blk_mmio_selector { struct nd_blk_addr { union { void __iomem *base; - void __pmem *aperture; + void *aperture; }; }; @@ 
-180,28 +186,26 @@ struct nfit_blk { u64 bdw_offset; /* post interleave offset */ u64 stat_offset; u64 cmd_offset; - void __iomem *nvdimm_flush; u32 dimm_flags; }; -enum spa_map_type { - SPA_MAP_CONTROL, - SPA_MAP_APERTURE, -}; - -struct nfit_spa_mapping { - struct acpi_nfit_desc *acpi_desc; - struct acpi_nfit_system_address *spa; - struct list_head list; - struct kref kref; - enum spa_map_type type; - struct nd_blk_addr addr; -}; +extern struct list_head acpi_descs; +extern struct mutex acpi_desc_lock; +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); -static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref) +#ifdef CONFIG_X86_MCE +void nfit_mce_register(void); +void nfit_mce_unregister(void); +#else +static inline void nfit_mce_register(void) { - return container_of(kref, struct nfit_spa_mapping, kref); } +static inline void nfit_mce_unregister(void) +{ +} +#endif + +int nfit_spa_type(struct acpi_nfit_system_address *spa); static inline struct acpi_nfit_memory_map *__to_nfit_memdev( struct nfit_mem *nfit_mem) @@ -218,6 +222,6 @@ static inline struct acpi_nfit_desc *to_acpi_desc( } const u8 *to_nfit_uuid(enum nfit_uuids id); -int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); #endif /* __NFIT_H__ */ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b108f1358a32..4305ee9db4b2 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -309,7 +309,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) * During early init (when acpi_gbl_permanent_mmap has not been set yet) this * routine simply calls __acpi_map_table() to get the job done. */ -void __iomem *__init_refok +void __iomem *__ref acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) { struct acpi_ioremap *map; @@ -362,8 +362,7 @@ out: } EXPORT_SYMBOL_GPL(acpi_os_map_iomem); -void *__init_refok -acpi_os_map_memory(acpi_physical_address phys, acpi_size size) +void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) { return (void *)acpi_os_map_iomem(phys, size); } diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c new file mode 100644 index 000000000000..b5b376e081f5 --- /dev/null +++ b/drivers/acpi/pci_mcfg.c @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2016 Broadcom + * Author: Jayachandran C <jchandra@broadcom.com> + * Copyright (C) 2016 Semihalf + * Author: Tomasz Nowicki <tn@semihalf.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. 
+ */ + +#define pr_fmt(fmt) "ACPI: " fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci-acpi.h> + +/* Structure to hold entries from the MCFG table */ +struct mcfg_entry { + struct list_head list; + phys_addr_t addr; + u16 segment; + u8 bus_start; + u8 bus_end; +}; + +/* List to save MCFG entries */ +static LIST_HEAD(pci_mcfg_list); + +phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res) +{ + struct mcfg_entry *e; + + /* + * We expect exact match, unless MCFG entry end bus covers more than + * specified by caller. + */ + list_for_each_entry(e, &pci_mcfg_list, list) { + if (e->segment == seg && e->bus_start == bus_res->start && + e->bus_end >= bus_res->end) + return e->addr; + } + + return 0; +} + +static __init int pci_mcfg_parse(struct acpi_table_header *header) +{ + struct acpi_table_mcfg *mcfg; + struct acpi_mcfg_allocation *mptr; + struct mcfg_entry *e, *arr; + int i, n; + + if (header->length < sizeof(struct acpi_table_mcfg)) + return -EINVAL; + + n = (header->length - sizeof(struct acpi_table_mcfg)) / + sizeof(struct acpi_mcfg_allocation); + mcfg = (struct acpi_table_mcfg *)header; + mptr = (struct acpi_mcfg_allocation *) &mcfg[1]; + + arr = kcalloc(n, sizeof(*arr), GFP_KERNEL); + if (!arr) + return -ENOMEM; + + for (i = 0, e = arr; i < n; i++, mptr++, e++) { + e->segment = mptr->pci_segment; + e->addr = mptr->address; + e->bus_start = mptr->start_bus_number; + e->bus_end = mptr->end_bus_number; + list_add(&e->list, &pci_mcfg_list); + } + + pr_info("MCFG table detected, %d entries\n", n); + return 0; +} + +/* Interface called by ACPI - parse and save MCFG table */ +void __init pci_mmcfg_late_init(void) +{ + int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse); + if (err) + pr_err("Failed to parse MCFG (%d)\n", err); +} diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index ae3fe4e64203..d144168d4ef9 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -720,6 +720,36 @@ next: } } +static void acpi_pci_root_remap_iospace(struct resource_entry *entry) +{ +#ifdef PCI_IOBASE + struct resource *res = entry->res; + resource_size_t cpu_addr = res->start; + resource_size_t pci_addr = cpu_addr - entry->offset; + resource_size_t length = resource_size(res); + unsigned long port; + + if (pci_register_io_range(cpu_addr, length)) + goto err; + + port = pci_address_to_pio(cpu_addr); + if (port == (unsigned long)-1) + goto err; + + res->start = port; + res->end = port + length - 1; + entry->offset = port - pci_addr; + + if (pci_remap_iospace(res, cpu_addr) < 0) + goto err; + + pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res); + return; +err: + res->flags |= IORESOURCE_DISABLED; +#endif +} + int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info) { int ret; @@ -740,6 +770,9 @@ int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info) "no IO and memory resources present in _CRS\n"); else { resource_list_for_each_entry_safe(entry, tmp, list) { + if (entry->res->flags & IORESOURCE_IO) + acpi_pci_root_remap_iospace(entry); + if (entry->res->flags & IORESOURCE_DISABLED) resource_list_destroy_entry(entry); else @@ -811,6 +844,8 @@ static void acpi_pci_root_release_info(struct pci_host_bridge *bridge) resource_list_for_each_entry(entry, &bridge->windows) { res = entry->res; + if (res->flags & IORESOURCE_IO) + pci_unmap_iospace(res); if (res->parent && (res->flags & (IORESOURCE_MEM | IORESOURCE_IO))) release_resource(res); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 
0ca14ac7bb28..0553aeebb228 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, struct acpi_device *device; action &= ~CPU_TASKS_FROZEN; - /* - * CPU_STARTING and CPU_DYING must not sleep. Return here since - * acpi_bus_get_device() may sleep. - */ - if (action == CPU_STARTING || action == CPU_DYING) + switch (action) { + case CPU_ONLINE: + case CPU_DEAD: + break; + default: return NOTIFY_DONE; + } if (!pr || acpi_bus_get_device(pr->handle, &device)) return NOTIFY_DONE; diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 773fc3099769..22d1760a4278 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -46,7 +46,8 @@ MODULE_LICENSE("GPL"); extern struct builtin_fw __start_builtin_fw[]; extern struct builtin_fw __end_builtin_fw[]; -static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) +static bool fw_get_builtin_firmware(struct firmware *fw, const char *name, + void *buf, size_t size) { struct builtin_fw *b_fw; @@ -54,6 +55,9 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) if (strcmp(name, b_fw->name) == 0) { fw->size = b_fw->size; fw->data = b_fw->data; + + if (buf && fw->size <= size) + memcpy(buf, fw->data, fw->size); return true; } } @@ -74,7 +78,9 @@ static bool fw_is_builtin_firmware(const struct firmware *fw) #else /* Module case - no builtin firmware support */ -static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name) +static inline bool fw_get_builtin_firmware(struct firmware *fw, + const char *name, void *buf, + size_t size) { return false; } @@ -112,6 +118,7 @@ static inline long firmware_loading_timeout(void) #define FW_OPT_FALLBACK 0 #endif #define FW_OPT_NO_WARN (1U << 3) +#define FW_OPT_NOCACHE (1U << 4) struct firmware_cache { /* firmware_buf instance will be added into the below list */ @@ -143,6 +150,7 @@ struct firmware_buf { unsigned long status; void *data; size_t size; + size_t allocated_size; #ifdef CONFIG_FW_LOADER_USER_HELPER bool is_paged_buf; bool need_uevent; @@ -178,7 +186,8 @@ static DEFINE_MUTEX(fw_lock); static struct firmware_cache fw_cache; static struct firmware_buf *__allocate_fw_buf(const char *fw_name, - struct firmware_cache *fwc) + struct firmware_cache *fwc, + void *dbuf, size_t size) { struct firmware_buf *buf; @@ -194,6 +203,8 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name, kref_init(&buf->ref); buf->fwc = fwc; + buf->data = dbuf; + buf->allocated_size = size; init_completion(&buf->completion); #ifdef CONFIG_FW_LOADER_USER_HELPER INIT_LIST_HEAD(&buf->pending_list); @@ -217,7 +228,8 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name) static int fw_lookup_and_allocate_buf(const char *fw_name, struct firmware_cache *fwc, - struct firmware_buf **buf) + struct firmware_buf **buf, void *dbuf, + size_t size) { struct firmware_buf *tmp; @@ -229,7 +241,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, *buf = tmp; return 1; } - tmp = __allocate_fw_buf(fw_name, fwc); + tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size); if (tmp) list_add(&tmp->list, &fwc->head); spin_unlock(&fwc->lock); @@ -261,6 +273,7 @@ static void __fw_free_buf(struct kref *ref) vfree(buf->pages); } else #endif + if (!buf->allocated_size) vfree(buf->data); kfree_const(buf->fw_id); kfree(buf); @@ -301,13 +314,21 @@ static void fw_finish_direct_load(struct device *device, mutex_unlock(&fw_lock); } 
-static int fw_get_filesystem_firmware(struct device *device, - struct firmware_buf *buf) +static int +fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf) { loff_t size; int i, len; int rc = -ENOENT; char *path; + enum kernel_read_file_id id = READING_FIRMWARE; + size_t msize = INT_MAX; + + /* Already populated data member means we're loading into a buffer */ + if (buf->data) { + id = READING_FIRMWARE_PREALLOC_BUFFER; + msize = buf->allocated_size; + } path = __getname(); if (!path) @@ -326,8 +347,8 @@ static int fw_get_filesystem_firmware(struct device *device, } buf->size = 0; - rc = kernel_read_file_from_path(path, &buf->data, &size, - INT_MAX, READING_FIRMWARE); + rc = kernel_read_file_from_path(path, &buf->data, &size, msize, + id); if (rc) { if (rc == -ENOENT) dev_dbg(device, "loading %s failed with error %d\n", @@ -691,6 +712,38 @@ out: static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); +static void firmware_rw_buf(struct firmware_buf *buf, char *buffer, + loff_t offset, size_t count, bool read) +{ + if (read) + memcpy(buffer, buf->data + offset, count); + else + memcpy(buf->data + offset, buffer, count); +} + +static void firmware_rw(struct firmware_buf *buf, char *buffer, + loff_t offset, size_t count, bool read) +{ + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE-1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(buf->pages[page_nr]); + + if (read) + memcpy(buffer, page_data + page_ofs, page_cnt); + else + memcpy(page_data + page_ofs, buffer, page_cnt); + + kunmap(buf->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } +} + static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) @@ -715,21 +768,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, ret_count = count; - while (count) { - void *page_data; - int page_nr = offset >> PAGE_SHIFT; - int page_ofs = offset & (PAGE_SIZE-1); - int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - - page_data = kmap(buf->pages[page_nr]); - - memcpy(buffer, page_data + page_ofs, page_cnt); + if (buf->data) + firmware_rw_buf(buf, buffer, offset, count, true); + else + firmware_rw(buf, buffer, offset, count, true); - kunmap(buf->pages[page_nr]); - buffer += page_cnt; - offset += page_cnt; - count -= page_cnt; - } out: mutex_unlock(&fw_lock); return ret_count; @@ -804,29 +847,23 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, goto out; } - retval = fw_realloc_buffer(fw_priv, offset + count); - if (retval) - goto out; - - retval = count; - - while (count) { - void *page_data; - int page_nr = offset >> PAGE_SHIFT; - int page_ofs = offset & (PAGE_SIZE - 1); - int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - - page_data = kmap(buf->pages[page_nr]); - - memcpy(page_data + page_ofs, buffer, page_cnt); + if (buf->data) { + if (offset + count > buf->allocated_size) { + retval = -ENOMEM; + goto out; + } + firmware_rw_buf(buf, buffer, offset, count, false); + retval = count; + } else { + retval = fw_realloc_buffer(fw_priv, offset + count); + if (retval) + goto out; - kunmap(buf->pages[page_nr]); - buffer += page_cnt; - offset += page_cnt; - count -= page_cnt; + retval = count; + firmware_rw(buf, buffer, offset, count, false); } - buf->size = max_t(size_t, offset, buf->size); + buf->size = max_t(size_t, 
offset + count, buf->size); out: mutex_unlock(&fw_lock); return retval; @@ -894,7 +931,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, struct firmware_buf *buf = fw_priv->buf; /* fall back on userspace loading */ - buf->is_paged_buf = true; + if (!buf->data) + buf->is_paged_buf = true; dev_set_uevent_suppress(f_dev, true); @@ -929,7 +967,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, if (is_fw_load_aborted(buf)) retval = -EAGAIN; - else if (!buf->data) + else if (buf->is_paged_buf && !buf->data) retval = -ENOMEM; device_del(f_dev); @@ -1012,7 +1050,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf) */ static int _request_firmware_prepare(struct firmware **firmware_p, const char *name, - struct device *device) + struct device *device, void *dbuf, size_t size) { struct firmware *firmware; struct firmware_buf *buf; @@ -1025,12 +1063,12 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return -ENOMEM; } - if (fw_get_builtin_firmware(firmware, name)) { + if (fw_get_builtin_firmware(firmware, name, dbuf, size)) { dev_dbg(device, "using built-in %s\n", name); return 0; /* assigned */ } - ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf); + ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size); /* * bind with 'buf' now to avoid warning in failure path @@ -1070,14 +1108,16 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, * should be fixed in devres or driver core. */ /* don't cache firmware handled without uevent */ - if (device && (opt_flags & FW_OPT_UEVENT)) + if (device && (opt_flags & FW_OPT_UEVENT) && + !(opt_flags & FW_OPT_NOCACHE)) fw_add_devm_name(device, buf->fw_id); /* * After caching firmware image is started, let it piggyback * on request firmware. 
*/ - if (buf->fwc->state == FW_LOADER_START_CACHE) { + if (!(opt_flags & FW_OPT_NOCACHE) && + buf->fwc->state == FW_LOADER_START_CACHE) { if (fw_cache_piggyback_on_request(buf->fw_id)) kref_get(&buf->ref); } @@ -1091,7 +1131,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, - struct device *device, unsigned int opt_flags) + struct device *device, void *buf, size_t size, + unsigned int opt_flags) { struct firmware *fw = NULL; long timeout; @@ -1105,7 +1146,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } - ret = _request_firmware_prepare(&fw, name, device); + ret = _request_firmware_prepare(&fw, name, device, buf, size); if (ret <= 0) /* error or already assigned */ goto out; @@ -1184,7 +1225,7 @@ request_firmware(const struct firmware **firmware_p, const char *name, /* Need to pin this module until return */ __module_get(THIS_MODULE); - ret = _request_firmware(firmware_p, name, device, + ret = _request_firmware(firmware_p, name, device, NULL, 0, FW_OPT_UEVENT | FW_OPT_FALLBACK); module_put(THIS_MODULE); return ret; @@ -1208,7 +1249,7 @@ int request_firmware_direct(const struct firmware **firmware_p, int ret; __module_get(THIS_MODULE); - ret = _request_firmware(firmware_p, name, device, + ret = _request_firmware(firmware_p, name, device, NULL, 0, FW_OPT_UEVENT | FW_OPT_NO_WARN); module_put(THIS_MODULE); return ret; @@ -1216,6 +1257,36 @@ int request_firmware_direct(const struct firmware **firmware_p, EXPORT_SYMBOL_GPL(request_firmware_direct); /** + * request_firmware_into_buf - load firmware into a previously allocated buffer + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded and DMA region allocated + * @buf: address of buffer to load firmware into + * @size: size of buffer + * + * This function works pretty much like request_firmware(), but it doesn't + * allocate a buffer to hold the firmware data. Instead, the firmware + * is loaded directly into the buffer pointed to by @buf and the @firmware_p + * data member is pointed at @buf. + * + * This function doesn't cache firmware either. 
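Putting the kernel-doc above into practice, a hedged caller sketch (the device pointer, the firmware name "example/fw.bin", and the 64 KiB size cap are hypothetical, and the image is assumed to fit in the buffer):

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/sizes.h>
	#include <linux/slab.h>

	/* Hypothetical caller: load a firmware image into a fixed buffer. */
	static int example_load_firmware(struct device *dev)
	{
		const struct firmware *fw;
		void *buf;
		int ret;

		buf = kmalloc(SZ_64K, GFP_KERNEL);	/* assumed maximum image size */
		if (!buf)
			return -ENOMEM;

		ret = request_firmware_into_buf(&fw, "example/fw.bin", dev,
						buf, SZ_64K);
		if (ret)
			goto out_free;

		/* fw->data now points at buf; fw->size is the bytes actually read. */
		/* ... hand the image to the hardware here ... */

		release_firmware(fw);
	out_free:
		kfree(buf);
		return ret;
	}

Note that the caller keeps ownership of buf throughout: release_firmware() does not free it, since allocated_size is set and FW_OPT_NOCACHE keeps the image out of the cache, per the __fw_free_buf and assign_firmware_buf changes earlier.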
+ */ +int +request_firmware_into_buf(const struct firmware **firmware_p, const char *name, + struct device *device, void *buf, size_t size) +{ + int ret; + + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, buf, size, + FW_OPT_UEVENT | FW_OPT_FALLBACK | + FW_OPT_NOCACHE); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL(request_firmware_into_buf); + +/** * release_firmware: - release the resource associated with a firmware image * @fw: firmware resource to release **/ @@ -1247,7 +1318,7 @@ static void request_firmware_work_func(struct work_struct *work) fw_work = container_of(work, struct firmware_work, work); - _request_firmware(&fw, fw_work->name, fw_work->device, + _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, fw_work->opt_flags); fw_work->cont(fw, fw_work->context); put_device(fw_work->device); /* taken in request_firmware_nowait() */ @@ -1380,7 +1451,7 @@ static int uncache_firmware(const char *fw_name) pr_debug("%s: %s\n", __func__, fw_name); - if (fw_get_builtin_firmware(&fw, fw_name)) + if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0)) return 0; buf = fw_lookup_buf(fw_name); diff --git a/drivers/base/node.c b/drivers/base/node.c index 51c7db2c4ee2..5548f9686016 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -56,6 +56,7 @@ static ssize_t node_read_meminfo(struct device *dev, { int n; int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); struct sysinfo i; si_meminfo_node(&i, nid); @@ -74,16 +75,16 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(i.totalram), nid, K(i.freeram), nid, K(i.totalram - i.freeram), - nid, K(node_page_state(nid, NR_ACTIVE_ANON) + - node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON) + - node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_ACTIVE_ANON)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON)), - nid, K(node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_UNEVICTABLE)), - nid, K(node_page_state(nid, NR_MLOCK))); + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + + node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + + node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_UNEVICTABLE)), + nid, K(sum_zone_node_page_state(nid, NR_MLOCK))); #ifdef CONFIG_HIGHMEM n += sprintf(buf + n, @@ -117,31 +118,30 @@ static ssize_t node_read_meminfo(struct device *dev, "Node %d ShmemPmdMapped: %8lu kB\n" #endif , - nid, K(node_page_state(nid, NR_FILE_DIRTY)), - nid, K(node_page_state(nid, NR_WRITEBACK)), - nid, K(node_page_state(nid, NR_FILE_PAGES)), - nid, K(node_page_state(nid, NR_FILE_MAPPED)), - nid, K(node_page_state(nid, NR_ANON_PAGES)), + nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), + nid, K(node_page_state(pgdat, NR_WRITEBACK)), + nid, K(node_page_state(pgdat, NR_FILE_PAGES)), + nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), + nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), nid, K(i.sharedram), - nid, node_page_state(nid, NR_KERNEL_STACK) * - THREAD_SIZE / 1024, - nid, K(node_page_state(nid, NR_PAGETABLE)), - nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), - nid, K(node_page_state(nid, NR_BOUNCE)), - nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)), - nid, 
K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + - node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), - nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), + nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB), + nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), + nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)), + nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), + nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) + + sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), - nid, K(node_page_state(nid, NR_ANON_THPS) * + nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), - nid, K(node_page_state(nid, NR_SHMEM_THPS) * + nid, K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), - nid, K(node_page_state(nid, NR_SHMEM_PMDMAPPED) * + nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR)); #else - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); + nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); #endif n += hugetlb_report_node_meminfo(nid, buf + n); return n; @@ -160,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev, "interleave_hit %lu\n" "local_node %lu\n" "other_node %lu\n", - node_page_state(dev->id, NUMA_HIT), - node_page_state(dev->id, NUMA_MISS), - node_page_state(dev->id, NUMA_FOREIGN), - node_page_state(dev->id, NUMA_INTERLEAVE_HIT), - node_page_state(dev->id, NUMA_LOCAL), - node_page_state(dev->id, NUMA_OTHER)); + sum_zone_node_page_state(dev->id, NUMA_HIT), + sum_zone_node_page_state(dev->id, NUMA_MISS), + sum_zone_node_page_state(dev->id, NUMA_FOREIGN), + sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_node_page_state(dev->id, NUMA_LOCAL), + sum_zone_node_page_state(dev->id, NUMA_OTHER)); } static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); @@ -173,12 +173,18 @@ static ssize_t node_read_vmstat(struct device *dev, struct device_attribute *attr, char *buf) { int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); int i; int n = 0; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], - node_page_state(nid, i)); + sum_zone_node_page_state(nid, i)); + + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + n += sprintf(buf+n, "%s %lu\n", + vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], + node_page_state(pgdat, i)); return n; } @@ -364,7 +370,7 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE #define page_initialized(page) (page->lru.next) -static int __init_refok get_nid_for_pfn(unsigned long pfn) +static int __ref get_nid_for_pfn(unsigned long pfn) { struct page *page; diff --git a/drivers/base/property.c b/drivers/base/property.c index f38c21de29b7..43a36d68c3fd 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -888,6 +888,34 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, EXPORT_SYMBOL_GPL(device_get_next_child_node); /** + * device_get_named_child_node - Return first matching named child node handle + * @dev: Device to find the named child node for. + * @childname: String to match child node name against. 
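Once the helper below lands, a lookup-and-release sketch (the device and the child node name "sensor" are hypothetical):

	#include <linux/property.h>

	/* Hedged sketch: find a named child node, drop the reference when done. */
	static int example_find_sensor(struct device *dev)
	{
		struct fwnode_handle *child;

		child = device_get_named_child_node(dev, "sensor");
		if (!child)
			return -ENODEV;

		/* ... read properties of the child via the fwnode property API ... */

		fwnode_handle_put(child);	/* release the node reference */
		return 0;
	}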
+ */ +struct fwnode_handle *device_get_named_child_node(struct device *dev, + const char *childname) +{ + struct fwnode_handle *child; + + /* + * Find first matching named child node of this device. + * For ACPI this will be a data only sub-node. + */ + device_for_each_child_node(dev, child) { + if (is_of_node(child)) { + if (!of_node_cmp(to_of_node(child)->name, childname)) + return child; + } else if (is_acpi_data_node(child)) { + if (acpi_data_node_match(child, childname)) + return child; + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(device_get_named_child_node); + +/** * fwnode_handle_put - Drop reference to a device node * @fwnode: Pointer to the device node to drop the reference to. * diff --git a/drivers/block/brd.c b/drivers/block/brd.c index ba5145d384d8..3022dad24071 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -379,7 +379,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, #ifdef CONFIG_BLK_DEV_RAM_DAX static long brd_direct_access(struct block_device *bdev, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct brd_device *brd = bdev->bd_disk->private_data; struct page *page; @@ -389,7 +389,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector, page = brd_insert_page(brd, sector); if (!page) return -ENOSPC; - *kaddr = (void __pmem *)page_address(page); + *kaddr = page_address(page); *pfn = page_to_pfn_t(page); return PAGE_SIZE; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 0a1aaf8c24c4..2d3d50ab74bf 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -27,7 +27,6 @@ #include <linux/crc32c.h> #include <linux/drbd.h> #include <linux/drbd_limits.h> -#include <linux/dynamic_debug.h> #include "drbd_int.h" diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c index be91a8d7c22a..de5c3ee8a790 100644 --- a/drivers/block/drbd/drbd_debugfs.c +++ b/drivers/block/drbd/drbd_debugfs.c @@ -425,9 +425,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo /* Are we still linked, * or has debugfs_remove() already been called? 
*/ parent = file->f_path.dentry->d_parent; - /* not sure if this can happen: */ - if (!parent || d_really_is_negative(parent)) - goto out; /* serialize with d_delete() */ inode_lock(d_inode(parent)); /* Make sure the object is still alive */ @@ -440,7 +437,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo if (ret) kref_put(kref, release); } -out: return ret; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7b54354976a5..4cb8f21ff4ef 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -41,6 +41,7 @@ #include <linux/backing-dev.h> #include <linux/genhd.h> #include <linux/idr.h> +#include <linux/dynamic_debug.h> #include <net/tcp.h> #include <linux/lru_cache.h> #include <linux/prefetch.h> diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 450662055d97..1a04af6d2421 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1937,7 +1937,7 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req->r_callback = rbd_osd_req_callback; osd_req->r_priv = obj_request; - osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); + osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", obj_request->object_name)) goto fail; @@ -1991,7 +1991,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) osd_req->r_callback = rbd_osd_req_callback; osd_req->r_priv = obj_request; - osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); + osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id; if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", obj_request->object_name)) goto fail; @@ -3995,10 +3995,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, /* Initialize the layout used for all rbd requests */ - rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); - rbd_dev->layout.fl_stripe_count = cpu_to_le32(1); - rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); - rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id); + rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER; + rbd_dev->layout.stripe_count = 1; + rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER; + rbd_dev->layout.pool_id = spec->pool_id; + RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); /* * If this is a mapping rbd_dev (as opposed to a parent one), @@ -5187,7 +5188,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); + rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id; if (rbd_dev->image_format == 1) ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", spec->image_name, RBD_SUFFIX); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index c5a7de9bc783..3b205e212337 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -132,6 +132,19 @@ config SUNXI_RSB with various RSB based devices, such as AXP223, AXP8XX PMICs, and AC100/AC200 ICs. +# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence +# the driver will fail to build as a module. However there are patches to +# address that queued for v4.8, so this can be turned into a tristate symbol +# after v4.8-rc1. 
+config TEGRA_ACONNECT + bool "Tegra ACONNECT Bus Driver" + depends on ARCH_TEGRA_210_SOC + depends on OF && PM + select PM_CLK + help + Driver for the Tegra ACONNECT bus which is used to interface with + the devices inside the Audio Processing Engine (APE) for Tegra210. + config UNIPHIER_SYSTEM_BUS tristate "UniPhier System Bus driver" depends on ARCH_UNIPHIER && OF diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index ccff007ee7e8..ac84cc4348e3 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -17,5 +17,6 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o +obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index a49b28378d59..5755907f836f 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -144,12 +144,15 @@ struct cci_pmu { int num_cntrs; atomic_t active_events; struct mutex reserve_mutex; - struct notifier_block cpu_nb; + struct list_head entry; cpumask_t cpus; }; #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) +static DEFINE_MUTEX(cci_pmu_mutex); +static LIST_HEAD(cci_pmu_list); + enum cci_models { #ifdef CONFIG_ARM_CCI400_PMU CCI400_R0, @@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) return perf_pmu_register(&cci_pmu->pmu, name, -1); } -static int cci_pmu_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) +static int cci_pmu_offline_cpu(unsigned int cpu) { - struct cci_pmu *cci_pmu = container_of(self, - struct cci_pmu, cpu_nb); - unsigned int cpu = (long)hcpu; + struct cci_pmu *cci_pmu; unsigned int target; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: + mutex_lock(&cci_pmu_mutex); + list_for_each_entry(cci_pmu, &cci_pmu_list, entry) { if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) - break; + continue; target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) // UP, last CPU - break; + if (target >= nr_cpu_ids) + continue; /* * TODO: migrate context once core races on event->ctx have * been fixed. */ cpumask_set_cpu(target, &cci_pmu->cpus); - default: - break; } - - return NOTIFY_OK; + mutex_unlock(&cci_pmu_mutex); + return 0; } static struct cci_pmu_model cci_pmu_models[] = { @@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev) atomic_set(&cci_pmu->active_events, 0); cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); - cci_pmu->cpu_nb = (struct notifier_block) { - .notifier_call = cci_pmu_cpu_notifier, - /* - * to migrate uncore events, our notifier should be executed - * before perf core's notifier. 
- */ - .priority = CPU_PRI_PERF + 1, - }; - - ret = register_cpu_notifier(&cci_pmu->cpu_nb); + ret = cci_pmu_init(cci_pmu, pdev); if (ret) return ret; - ret = cci_pmu_init(cci_pmu, pdev); - if (ret) { - unregister_cpu_notifier(&cci_pmu->cpu_nb); - return ret; - } + mutex_lock(&cci_pmu_mutex); + list_add(&cci_pmu->entry, &cci_pmu_list); + mutex_unlock(&cci_pmu_mutex); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; @@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void) { int ret; + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, + "AP_PERF_ARM_CCI_ONLINE", NULL, + cci_pmu_offline_cpu); + if (ret) + return ret; + ret = platform_driver_register(&cci_pmu_driver); if (ret) return ret; diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index acc3eb542c74..97a9185af433 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -167,7 +167,7 @@ struct arm_ccn_dt { struct hrtimer hrtimer; cpumask_t cpu; - struct notifier_block cpu_nb; + struct list_head entry; struct pmu pmu; }; @@ -189,6 +189,8 @@ struct arm_ccn { struct arm_ccn_dt dt; }; +static DEFINE_MUTEX(arm_ccn_mutex); +static LIST_HEAD(arm_ccn_list); static int arm_ccn_node_to_xp(int node) { @@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) } -static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, - unsigned long action, void *hcpu) +static int arm_ccn_pmu_offline_cpu(unsigned int cpu) { - struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb); - struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); - unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */ + struct arm_ccn_dt *dt; unsigned int target; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: + mutex_lock(&arm_ccn_mutex); + list_for_each_entry(dt, &arm_ccn_list, entry) { + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); + if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) - break; + continue; target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) - break; + continue; perf_pmu_migrate_context(&dt->pmu, cpu, target); cpumask_set_cpu(target, &dt->cpu); if (ccn->irq) WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); - default: - break; } - - return NOTIFY_OK; + mutex_unlock(&arm_ccn_mutex); + return 0; } @@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Pick one CPU which we will use to collect data from CCN... */ cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); - /* - * ... and change the selection when it goes offline. Priority is - * picked to have a chance to migrate events before perf is notified. 
- */ - ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier; - ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1, - err = register_cpu_notifier(&ccn->dt.cpu_nb); - if (err) - goto error_cpu_notifier; - /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); @@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) if (err) goto error_pmu_register; + mutex_lock(&arm_ccn_mutex); + list_add(&ccn->dt.entry, &arm_ccn_list); + mutex_unlock(&arm_ccn_mutex); return 0; error_pmu_register: error_set_affinity: - unregister_cpu_notifier(&ccn->dt.cpu_nb); -error_cpu_notifier: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); @@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) { int i; + mutex_lock(&arm_ccn_mutex); + list_del(&ccn->dt.entry); + mutex_unlock(&arm_ccn_mutex); + if (ccn->irq) irq_set_affinity_hint(ccn->irq, NULL); - unregister_cpu_notifier(&ccn->dt.cpu_nb); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); @@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); } - static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, int (*callback)(struct arm_ccn *ccn, int region, void __iomem *base, u32 type, u32 id)) @@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = { static int __init arm_ccn_init(void) { - int i; + int i, ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + "AP_PERF_ARM_CCN_ONLINE", NULL, + arm_ccn_pmu_offline_cpu); + if (ret) + return ret; for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; @@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); } diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 1827fc4d15c1..4bd361d64270 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -163,9 +163,8 @@ static int __init weim_parse_dt(struct platform_device *pdev, } if (have_child) - ret = of_platform_populate(pdev->dev.of_node, - of_default_bus_match_table, - NULL, &pdev->dev); + ret = of_platform_default_populate(pdev->dev.of_node, + NULL, &pdev->dev); if (ret) dev_err(&pdev->dev, "%s fail to create devices.\n", pdev->dev.of_node->full_name); diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index ce54a0160faa..c7f396903184 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -117,7 +117,7 @@ struct mvebu_mbus_soc_data { unsigned int (*win_remap_offset)(const int win); void (*setup_cpu_target)(struct mvebu_mbus_state *s); int (*save_cpu_target)(struct mvebu_mbus_state *s, - u32 *store_addr); + u32 __iomem *store_addr); int (*show_cpu_target)(struct mvebu_mbus_state *s, struct seq_file *seq, void *v); }; @@ -728,7 +728,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) static int mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus, - u32 *store_addr) + u32 __iomem *store_addr) { int i; @@ -780,7 +780,7 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus) static int mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus, - u32 *store_addr) + u32 __iomem *store_addr) { int i; @@ -796,7 
+796,7 @@ mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus, return 4; } -int mvebu_mbus_save_cpu_target(u32 *store_addr) +int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr) { return mbus_state.soc->save_cpu_target(&mbus_state, store_addr); } @@ -1089,7 +1089,7 @@ static void mvebu_mbus_resume(void) } } -struct syscore_ops mvebu_mbus_syscore_ops = { +static struct syscore_ops mvebu_mbus_syscore_ops = { .suspend = mvebu_mbus_suspend, .resume = mvebu_mbus_resume, }; diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c new file mode 100644 index 000000000000..7e4104b74fa8 --- /dev/null +++ b/drivers/bus/tegra-aconnect.c @@ -0,0 +1,112 @@ +/* + * Tegra ACONNECT Bus Driver + * + * Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> + +static int tegra_aconnect_add_clock(struct device *dev, char *name) +{ + struct clk *clk; + int ret; + + clk = clk_get(dev, name); + if (IS_ERR(clk)) { + dev_err(dev, "%s clock not found\n", name); + return PTR_ERR(clk); + } + + ret = pm_clk_add_clk(dev, clk); + if (ret) + clk_put(clk); + + return ret; +} + +static int tegra_aconnect_probe(struct platform_device *pdev) +{ + int ret; + + if (!pdev->dev.of_node) + return -EINVAL; + + ret = pm_clk_create(&pdev->dev); + if (ret) + return ret; + + ret = tegra_aconnect_add_clock(&pdev->dev, "ape"); + if (ret) + goto clk_destroy; + + ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape"); + if (ret) + goto clk_destroy; + + pm_runtime_enable(&pdev->dev); + + of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + + dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n"); + + return 0; + +clk_destroy: + pm_clk_destroy(&pdev->dev); + + return ret; +} + +static int tegra_aconnect_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + + pm_clk_destroy(&pdev->dev); + + return 0; +} + +static int tegra_aconnect_runtime_resume(struct device *dev) +{ + return pm_clk_resume(dev); +} + +static int tegra_aconnect_runtime_suspend(struct device *dev) +{ + return pm_clk_suspend(dev); +} + +static const struct dev_pm_ops tegra_aconnect_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend, + tegra_aconnect_runtime_resume, NULL) +}; + +static const struct of_device_id tegra_aconnect_of_match[] = { + { .compatible = "nvidia,tegra210-aconnect", }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match); + +static struct platform_driver tegra_aconnect_driver = { + .probe = tegra_aconnect_probe, + .remove = tegra_aconnect_remove, + .driver = { + .name = "tegra-aconnect", + .of_match_table = tegra_aconnect_of_match, + .pm = &tegra_aconnect_pm_ops, + }, +}; +module_platform_driver(tegra_aconnect_driver); + +MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver"); +MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index 350b7309c26d..1e6e0269edcc 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c @@ -257,8 +257,7 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) uniphier_system_bus_set_reg(priv); /* Now, the bus is configured. 
Populate platform_devices below it */ - return of_platform_populate(dev->of_node, of_default_bus_match_table, - NULL, dev); + return of_platform_default_populate(dev->of_node, NULL, dev); } static const struct of_device_id uniphier_system_bus_match[] = { diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 601f64fcc890..fdb8f3e10b6f 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -178,6 +178,20 @@ config IBM_BSR of threads across a large system which avoids bouncing a cacheline between several cores on a system +config POWERNV_OP_PANEL + tristate "IBM POWERNV Operator Panel Display support" + depends on PPC_POWERNV + default m + help + If you say Y here, a special character device node, /dev/op_panel, + will be created which exposes the operator panel display on IBM + Power Systems machines with FSPs. + + If you don't require access to the operator panel display from user + space, say N. + + If unsure, say M here to build it as a module called powernv-op-panel. + source "drivers/char/ipmi/Kconfig" config DS1620 diff --git a/drivers/char/Makefile b/drivers/char/Makefile index d8a7579300d2..55d16bf3ccc5 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -60,3 +60,4 @@ js-rtc-y = rtc.o obj-$(CONFIG_TILE_SROM) += tile-srom.o obj-$(CONFIG_XILLYBUS) += xillybus/ +obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index aef87fdbd187..44311296ec02 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags) return false; } +void intel_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags) +{ + intel_private.driver->write_entry(addr, pg, flags); +} +EXPORT_SYMBOL(intel_gtt_insert_page); + void intel_gtt_insert_sg_entries(struct sg_table *st, unsigned int pg_start, unsigned int flags) diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c new file mode 100644 index 000000000000..a45dabcc8e10 --- /dev/null +++ b/drivers/char/powernv-op-panel.c @@ -0,0 +1,223 @@ +/* + * OPAL Operator Panel Display Driver + * + * Copyright 2016, Suraj Jitindar Singh, IBM Corporation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/miscdevice.h> + +#include <asm/opal.h> + +/* + * This driver creates a character device (/dev/op_panel) which exposes the + * operator panel (character LCD display) on IBM Power Systems machines + * with FSPs. + * A character buffer written to the device will be displayed on the + * operator panel. 
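In userspace terms, the header comment above amounts to a plain write(2) on the node; a small illustrative program (the message text is arbitrary):

	/* Userspace sketch: put a message on the operator panel LCD. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char msg[] = "System booting";
		int fd = open("/dev/op_panel", O_WRONLY);
		int ret = 0;

		if (fd < 0)
			return 1;
		if (write(fd, msg, strlen(msg)) < 0)	/* pushed to the panel via OPAL */
			ret = 1;
		close(fd);
		return ret;
	}

The driver serializes access with mutex_trylock() in oppanel_open(), so a second concurrent opener gets -EBUSY rather than blocking.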
+ */ + +static DEFINE_MUTEX(oppanel_mutex); + +static u32 num_lines, oppanel_size; +static oppanel_line_t *oppanel_lines; +static char *oppanel_data; + +static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence) +{ + return fixed_size_llseek(filp, offset, whence, oppanel_size); +} + +static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len, + loff_t *f_pos) +{ + return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data, + oppanel_size); +} + +static int __op_panel_update_display(void) +{ + struct opal_msg msg; + int rc, token; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + if (token != -ERESTARTSYS) + pr_debug("Couldn't get OPAL async token [token=%d]\n", + token); + return token; + } + + rc = opal_write_oppanel_async(token, oppanel_lines, num_lines); + switch (rc) { + case OPAL_ASYNC_COMPLETION: + rc = opal_async_wait_response(token, &msg); + if (rc) { + pr_debug("Failed to wait for async response [rc=%d]\n", + rc); + break; + } + rc = opal_get_async_rc(msg); + if (rc != OPAL_SUCCESS) { + pr_debug("OPAL async call returned failed [rc=%d]\n", + rc); + break; + } + case OPAL_SUCCESS: + break; + default: + pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc); + } + + opal_async_release_token(token); + return rc; +} + +static ssize_t oppanel_write(struct file *filp, const char __user *userbuf, + size_t len, loff_t *f_pos) +{ + loff_t f_pos_prev = *f_pos; + ssize_t ret; + int rc; + + if (!*f_pos) + memset(oppanel_data, ' ', oppanel_size); + else if (*f_pos >= oppanel_size) + return -EFBIG; + + ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf, + len); + if (ret > 0) { + rc = __op_panel_update_display(); + if (rc != OPAL_SUCCESS) { + pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n", + rc); + *f_pos = f_pos_prev; + return -EIO; + } + } + return ret; +} + +static int oppanel_open(struct inode *inode, struct file *filp) +{ + if (!mutex_trylock(&oppanel_mutex)) { + pr_debug("Device Busy\n"); + return -EBUSY; + } + return 0; +} + +static int oppanel_release(struct inode *inode, struct file *filp) +{ + mutex_unlock(&oppanel_mutex); + return 0; +} + +static const struct file_operations oppanel_fops = { + .owner = THIS_MODULE, + .llseek = oppanel_llseek, + .read = oppanel_read, + .write = oppanel_write, + .open = oppanel_open, + .release = oppanel_release +}; + +static struct miscdevice oppanel_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "op_panel", + .fops = &oppanel_fops +}; + +static int oppanel_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + u32 line_len; + int rc, i; + + rc = of_property_read_u32(np, "#length", &line_len); + if (rc) { + pr_err_ratelimited("Operator panel length property not found\n"); + return rc; + } + rc = of_property_read_u32(np, "#lines", &num_lines); + if (rc) { + pr_err_ratelimited("Operator panel lines property not found\n"); + return rc; + } + oppanel_size = line_len * num_lines; + + pr_devel("Operator panel of size %u found with %u lines of length %u\n", + oppanel_size, num_lines, line_len); + + oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL); + if (!oppanel_data) + return -ENOMEM; + + oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL); + if (!oppanel_lines) { + rc = -ENOMEM; + goto free_oppanel_data; + } + + memset(oppanel_data, ' ', oppanel_size); + for (i = 0; i < num_lines; i++) { + oppanel_lines[i].line_len = cpu_to_be64(line_len); + oppanel_lines[i].line = 
cpu_to_be64(__pa(&oppanel_data[i * + line_len])); + } + + rc = misc_register(&oppanel_dev); + if (rc) { + pr_err_ratelimited("Failed to register as misc device\n"); + goto free_oppanel; + } + + return 0; + +free_oppanel: + kfree(oppanel_lines); +free_oppanel_data: + kfree(oppanel_data); + return rc; +} + +static int oppanel_remove(struct platform_device *pdev) +{ + misc_deregister(&oppanel_dev); + kfree(oppanel_lines); + kfree(oppanel_data); + return 0; +} + +static const struct of_device_id oppanel_match[] = { + { .compatible = "ibm,opal-oppanel" }, + { }, +}; + +static struct platform_driver oppanel_driver = { + .driver = { + .name = "powernv-op-panel", + .of_match_table = oppanel_match, + }, + .probe = oppanel_probe, + .remove = oppanel_remove, +}; + +module_platform_driver(oppanel_driver); + +MODULE_DEVICE_TABLE(of, oppanel_match); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver"); +MODULE_AUTHOR("Suraj Jitindar Singh <sjitindarsingh@gmail.com>"); diff --git a/drivers/char/random.c b/drivers/char/random.c index 8d0af74f6569..3efb3bf0ab83 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -249,6 +249,7 @@ #include <linux/genhd.h> #include <linux/interrupt.h> #include <linux/mm.h> +#include <linux/nodemask.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/percpu.h> @@ -1656,7 +1657,6 @@ static int rand_initialize(void) { #ifdef CONFIG_NUMA int i; - int num_nodes = num_possible_nodes(); struct crng_state *crng; struct crng_state **pool; #endif @@ -1666,15 +1666,13 @@ static int rand_initialize(void) crng_initialize(&primary_crng); #ifdef CONFIG_NUMA - pool = kmalloc(num_nodes * sizeof(void *), - GFP_KERNEL|__GFP_NOFAIL|__GFP_ZERO); - for (i=0; i < num_nodes; i++) { + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { crng = kmalloc_node(sizeof(struct crng_state), GFP_KERNEL | __GFP_NOFAIL, i); spin_lock_init(&crng->lock); crng_initialize(crng); pool[i] = crng; - } mb(); crng_node_pool = pool; diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 3b84a8b1bfbe..9faa0b1e7766 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -24,9 +24,16 @@ menuconfig TCG_TPM if TCG_TPM +config TCG_TIS_CORE + tristate + ---help--- + TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks + into the TPM kernel APIs. Physical layers will register against it. + config TCG_TIS tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface" depends on X86 + select TCG_TIS_CORE ---help--- If you have a TPM security chip that is compliant with the TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO @@ -34,6 +41,18 @@ config TCG_TIS within Linux. To compile this driver as a module, choose M here; the module will be called tpm_tis. +config TCG_TIS_SPI + tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)" + depends on SPI + select TCG_TIS_CORE + ---help--- + If you have a TPM security chip which is connected to a regular, + non-tcg SPI master (i.e. most embedded platforms) that is compliant with the + TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO + specification (TPM2.0) say Yes and it will be accessible from + within Linux. To compile this driver as a module, choose M here; + the module will be called tpm_tis_spi. 
+ config TCG_TIS_I2C_ATMEL tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" depends on I2C @@ -122,5 +141,16 @@ config TCG_CRB from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_crb. +config TCG_VTPM_PROXY + tristate "VTPM Proxy Interface" + depends on TCG_TPM + select ANON_INODES + ---help--- + This driver proxies for an emulated TPM (vTPM) running in userspace. + A device /dev/vtpmx is provided that creates a device pair + /dev/vtpmX and a server-side file descriptor on which the vTPM + can receive commands. + + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 56e8f1f3dc7e..a385fb8c17de 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -12,7 +12,9 @@ ifdef CONFIG_TCG_IBMVTPM tpm-y += tpm_eventlog.o tpm_of.o endif endif +obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o obj-$(CONFIG_TCG_TIS) += tpm_tis.o +obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o @@ -23,3 +25,4 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o +obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig index 19c007461d1c..e74c6f29fc85 100644 --- a/drivers/char/tpm/st33zp24/Kconfig +++ b/drivers/char/tpm/st33zp24/Kconfig @@ -1,6 +1,5 @@ config TCG_TIS_ST33ZP24 - tristate "STMicroelectronics TPM Interface Specification 1.2 Interface" - depends on GPIOLIB || COMPILE_TEST + tristate ---help--- STMicroelectronics ST33ZP24 core driver. It implements the core TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will @@ -10,9 +9,9 @@ config TCG_TIS_ST33ZP24 tpm_st33zp24. config TCG_TIS_ST33ZP24_I2C - tristate "TPM 1.2 ST33ZP24 I2C support" - depends on TCG_TIS_ST33ZP24 + tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)" depends on I2C + select TCG_TIS_ST33ZP24 ---help--- This module adds support for the STMicroelectronics TPM security chip ST33ZP24 with i2c interface. @@ -20,9 +19,9 @@ config TCG_TIS_ST33ZP24_I2C called tpm_st33zp24_i2c. config TCG_TIS_ST33ZP24_SPI - tristate "TPM 1.2 ST33ZP24 SPI support" - depends on TCG_TIS_ST33ZP24 + tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)" depends on SPI + select TCG_TIS_ST33ZP24 ---help--- This module adds support for the STMicroelectronics TPM security chip ST33ZP24 with spi interface. 
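Circling back to the TCG_VTPM_PROXY option above: the device pair is requested from userspace through /dev/vtpmx. A heavily hedged sketch follows; it assumes the UAPI header <linux/vtpm_proxy.h> with struct vtpm_proxy_new_dev and the VTPM_PROXY_IOC_NEW_DEV ioctl, none of which appear in this patch:

	/* Userspace sketch: ask vtpmx for a client /dev/tpmX plus a server-side fd. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/vtpm_proxy.h>	/* assumed UAPI header */

	int main(void)
	{
		struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
		int fd = open("/dev/vtpmx", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0) {
			close(fd);
			return 1;
		}
		/*
		 * The client side shows up as /dev/tpm<tpm_num>; TPM commands
		 * sent there arrive on new_dev.fd for the emulator to answer.
		 */
		printf("created tpm%u, server fd %u\n", new_dev.tpm_num, new_dev.fd);
		return 0;
	}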
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 309d2767c6a1..028a9cd76b63 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -1,6 +1,6 @@ /* * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 - * Copyright (C) 2009 - 2015 STMicroelectronics + * Copyright (C) 2009 - 2016 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,11 +19,14 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <linux/acpi.h> #include <linux/tpm.h> #include <linux/platform_data/st33zp24.h> +#include "../tpm.h" #include "st33zp24.h" #define TPM_DUMMY_BYTE 0xAA @@ -108,11 +111,40 @@ static const struct st33zp24_phy_ops i2c_phy_ops = { .recv = st33zp24_i2c_recv, }; -#ifdef CONFIG_OF -static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) +static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client) { + struct tpm_chip *chip = i2c_get_clientdata(client); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; + struct gpio_desc *gpiod_lpcpd; + struct device *dev = &client->dev; + + /* Get LPCPD GPIO from ACPI */ + gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1, + GPIOD_OUT_HIGH); + if (IS_ERR(gpiod_lpcpd)) { + dev_err(&client->dev, + "Failed to retrieve lpcpd-gpios from acpi.\n"); + phy->io_lpcpd = -1; + /* + * lpcpd pin is not specified. This is not an issue as + * power management can be also managed by TPM specific + * commands. So leave with a success status code. + */ + return 0; + } + + phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd); + + return 0; +} + +static int st33zp24_i2c_of_request_resources(struct i2c_client *client) +{ + struct tpm_chip *chip = i2c_get_clientdata(client); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; struct device_node *pp; - struct i2c_client *client = phy->client; int gpio; int ret; @@ -146,16 +178,12 @@ static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) return 0; } -#else -static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy) -{ - return -ENODEV; -} -#endif -static int st33zp24_i2c_request_resources(struct i2c_client *client, - struct st33zp24_i2c_phy *phy) +static int st33zp24_i2c_request_resources(struct i2c_client *client) { + struct tpm_chip *chip = i2c_get_clientdata(client); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_i2c_phy *phy = tpm_dev->phy_id; struct st33zp24_platform_data *pdata; int ret; @@ -212,13 +240,18 @@ static int st33zp24_i2c_probe(struct i2c_client *client, return -ENOMEM; phy->client = client; + pdata = client->dev.platform_data; if (!pdata && client->dev.of_node) { - ret = st33zp24_i2c_of_request_resources(phy); + ret = st33zp24_i2c_of_request_resources(client); if (ret) return ret; } else if (pdata) { - ret = st33zp24_i2c_request_resources(client, phy); + ret = st33zp24_i2c_request_resources(client); + if (ret) + return ret; + } else if (ACPI_HANDLE(&client->dev)) { + ret = st33zp24_i2c_acpi_request_resources(client); if (ret) return ret; } @@ -245,13 +278,17 @@ static const struct i2c_device_id st33zp24_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); -#ifdef CONFIG_OF static const struct of_device_id 
of_st33zp24_i2c_match[] = { { .compatible = "st,st33zp24-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match); -#endif + +static const struct acpi_device_id st33zp24_i2c_acpi_match[] = { + {"SMO3324"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match); static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend, st33zp24_pm_resume); @@ -261,6 +298,7 @@ static struct i2c_driver st33zp24_i2c_driver = { .name = TPM_ST33_I2C, .pm = &st33zp24_i2c_ops, .of_match_table = of_match_ptr(of_st33zp24_i2c_match), + .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match), }, .probe = st33zp24_i2c_probe, .remove = st33zp24_i2c_remove, diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index f974c945c97a..9f5a0117098c 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -1,6 +1,6 @@ /* * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24 - * Copyright (C) 2009 - 2015 STMicroelectronics + * Copyright (C) 2009 - 2016 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,11 +19,14 @@ #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <linux/acpi.h> #include <linux/tpm.h> #include <linux/platform_data/st33zp24.h> +#include "../tpm.h" #include "st33zp24.h" #define TPM_DATA_FIFO 0x24 @@ -66,7 +69,7 @@ struct st33zp24_spi_phy { struct spi_device *spi_device; - struct spi_transfer spi_xfer; + u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE]; u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE]; @@ -110,43 +113,39 @@ static int st33zp24_status_to_errno(u8 code) static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { - u8 data = 0; - int total_length = 0, nbr_dummy_bytes = 0, ret = 0; + int total_length = 0, ret = 0; struct st33zp24_spi_phy *phy = phy_id; struct spi_device *dev = phy->spi_device; - u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; - u8 *rx_buf = phy->spi_xfer.rx_buf; + struct spi_transfer spi_xfer = { + .tx_buf = phy->tx_buf, + .rx_buf = phy->rx_buf, + }; /* Pre-Header */ - data = TPM_WRITE_DIRECTION | LOCALITY0; - memcpy(tx_buf + total_length, &data, sizeof(data)); - total_length++; - data = tpm_register; - memcpy(tx_buf + total_length, &data, sizeof(data)); - total_length++; + phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0; + phy->tx_buf[total_length++] = tpm_register; if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) { - tx_buf[total_length++] = tpm_size >> 8; - tx_buf[total_length++] = tpm_size; + phy->tx_buf[total_length++] = tpm_size >> 8; + phy->tx_buf[total_length++] = tpm_size; } - memcpy(&tx_buf[total_length], tpm_data, tpm_size); + memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size); total_length += tpm_size; - nbr_dummy_bytes = phy->latency; - memset(&tx_buf[total_length], TPM_DUMMY_BYTE, nbr_dummy_bytes); + memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency); - phy->spi_xfer.len = total_length + nbr_dummy_bytes; + spi_xfer.len = total_length + phy->latency; - ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); + ret = spi_sync_transfer(dev, &spi_xfer, 1); if (ret == 0) - ret = rx_buf[total_length + nbr_dummy_bytes - 1]; + ret = phy->rx_buf[total_length + phy->latency - 1]; return st33zp24_status_to_errno(ret); } /* st33zp24_spi_send() */ /* - * read8_recv + * st33zp24_spi_read8_recv * Recv byte from the TIS register according to the ST33ZP24 
SPI protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be read @@ -154,40 +153,37 @@ static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, * @param: tpm_size, tpm TPM response size to read. * @return: should be zero if success else a negative error code. */ -static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) +static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, + int tpm_size) { - u8 data = 0; - int total_length = 0, nbr_dummy_bytes, ret; + int total_length = 0, ret; struct st33zp24_spi_phy *phy = phy_id; struct spi_device *dev = phy->spi_device; - u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf; - u8 *rx_buf = phy->spi_xfer.rx_buf; + struct spi_transfer spi_xfer = { + .tx_buf = phy->tx_buf, + .rx_buf = phy->rx_buf, + }; /* Pre-Header */ - data = LOCALITY0; - memcpy(tx_buf + total_length, &data, sizeof(data)); - total_length++; - data = tpm_register; - memcpy(tx_buf + total_length, &data, sizeof(data)); - total_length++; + phy->tx_buf[total_length++] = LOCALITY0; + phy->tx_buf[total_length++] = tpm_register; - nbr_dummy_bytes = phy->latency; - memset(&tx_buf[total_length], TPM_DUMMY_BYTE, - nbr_dummy_bytes + tpm_size); + memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, + phy->latency + tpm_size); - phy->spi_xfer.len = total_length + nbr_dummy_bytes + tpm_size; + spi_xfer.len = total_length + phy->latency + tpm_size; /* header + status byte + size of the data + status byte */ - ret = spi_sync_transfer(dev, &phy->spi_xfer, 1); + ret = spi_sync_transfer(dev, &spi_xfer, 1); if (tpm_size > 0 && ret == 0) { - ret = rx_buf[total_length + nbr_dummy_bytes - 1]; + ret = phy->rx_buf[total_length + phy->latency - 1]; - memcpy(tpm_data, rx_buf + total_length + nbr_dummy_bytes, + memcpy(tpm_data, phy->rx_buf + total_length + phy->latency, tpm_size); } return ret; -} /* read8_reg() */ +} /* st33zp24_spi_read8_reg() */ /* * st33zp24_spi_recv @@ -203,13 +199,13 @@ static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, { int ret; - ret = read8_reg(phy_id, tpm_register, tpm_data, tpm_size); + ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size); if (!st33zp24_status_to_errno(ret)) return tpm_size; return ret; } /* st33zp24_spi_recv() */ -static int evaluate_latency(void *phy_id) +static int st33zp24_spi_evaluate_latency(void *phy_id) { struct st33zp24_spi_phy *phy = phy_id; int latency = 1, status = 0; @@ -217,9 +213,15 @@ static int evaluate_latency(void *phy_id) while (!status && latency < MAX_SPI_LATENCY) { phy->latency = latency; - status = read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1); + status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY, + &data, 1); latency++; } + if (status < 0) + return status; + if (latency == MAX_SPI_LATENCY) + return -ENODEV; + return latency - 1; } /* evaluate_latency() */ @@ -228,24 +230,52 @@ static const struct st33zp24_phy_ops spi_phy_ops = { .recv = st33zp24_spi_recv, }; -#ifdef CONFIG_OF -static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) +static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev) { + struct tpm_chip *chip = spi_get_drvdata(spi_dev); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_spi_phy *phy = tpm_dev->phy_id; + struct gpio_desc *gpiod_lpcpd; + struct device *dev = &spi_dev->dev; + + /* Get LPCPD GPIO from ACPI */ + gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1, + GPIOD_OUT_HIGH); + if 
(IS_ERR(gpiod_lpcpd)) { + dev_err(dev, "Failed to retrieve lpcpd-gpios from acpi.\n"); + phy->io_lpcpd = -1; + /* + * lpcpd pin is not specified. This is not an issue as + * power management can be also managed by TPM specific + * commands. So leave with a success status code. + */ + return 0; + } + + phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd); + + return 0; +} + +static int st33zp24_spi_of_request_resources(struct spi_device *spi_dev) +{ + struct tpm_chip *chip = spi_get_drvdata(spi_dev); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_spi_phy *phy = tpm_dev->phy_id; struct device_node *pp; - struct spi_device *dev = phy->spi_device; int gpio; int ret; - pp = dev->dev.of_node; + pp = spi_dev->dev.of_node; if (!pp) { - dev_err(&dev->dev, "No platform data\n"); + dev_err(&spi_dev->dev, "No platform data\n"); return -ENODEV; } /* Get GPIO from device tree */ gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); if (gpio < 0) { - dev_err(&dev->dev, + dev_err(&spi_dev->dev, "Failed to retrieve lpcpd-gpios from dts.\n"); phy->io_lpcpd = -1; /* @@ -256,26 +286,22 @@ static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) return 0; } /* GPIO request and configuration */ - ret = devm_gpio_request_one(&dev->dev, gpio, + ret = devm_gpio_request_one(&spi_dev->dev, gpio, GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD"); if (ret) { - dev_err(&dev->dev, "Failed to request lpcpd pin\n"); + dev_err(&spi_dev->dev, "Failed to request lpcpd pin\n"); return -ENODEV; } phy->io_lpcpd = gpio; return 0; } -#else -static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy) -{ - return -ENODEV; -} -#endif -static int tpm_stm_spi_request_resources(struct spi_device *dev, - struct st33zp24_spi_phy *phy) +static int st33zp24_spi_request_resources(struct spi_device *dev) { + struct tpm_chip *chip = spi_get_drvdata(dev); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); + struct st33zp24_spi_phy *phy = tpm_dev->phy_id; struct st33zp24_platform_data *pdata; int ret; @@ -303,13 +329,12 @@ static int tpm_stm_spi_request_resources(struct spi_device *dev, } /* - * tpm_st33_spi_probe initialize the TPM device + * st33zp24_spi_probe initialize the TPM device * @param: dev, the spi_device drescription (TPM SPI description). * @return: 0 in case of success. * or a negative value describing the error. */ -static int -tpm_st33_spi_probe(struct spi_device *dev) +static int st33zp24_spi_probe(struct spi_device *dev) { int ret; struct st33zp24_platform_data *pdata; @@ -328,21 +353,23 @@ tpm_st33_spi_probe(struct spi_device *dev) return -ENOMEM; phy->spi_device = dev; + pdata = dev->dev.platform_data; if (!pdata && dev->dev.of_node) { - ret = tpm_stm_spi_of_request_resources(phy); + ret = st33zp24_spi_of_request_resources(dev); if (ret) return ret; } else if (pdata) { - ret = tpm_stm_spi_request_resources(dev, phy); + ret = st33zp24_spi_request_resources(dev); + if (ret) + return ret; + } else if (ACPI_HANDLE(&dev->dev)) { + ret = st33zp24_spi_acpi_request_resources(dev); if (ret) return ret; } - phy->spi_xfer.tx_buf = phy->tx_buf; - phy->spi_xfer.rx_buf = phy->rx_buf; - - phy->latency = evaluate_latency(phy); + phy->latency = st33zp24_spi_evaluate_latency(phy); if (phy->latency <= 0) return -ENODEV; @@ -351,11 +378,11 @@ tpm_st33_spi_probe(struct spi_device *dev) } /* - * tpm_st33_spi_remove remove the TPM device + * st33zp24_spi_remove remove the TPM device * @param: client, the spi_device drescription (TPM SPI description). * @return: 0 in case of success. 
*/ -static int tpm_st33_spi_remove(struct spi_device *dev) +static int st33zp24_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); @@ -368,29 +395,34 @@ static const struct spi_device_id st33zp24_spi_id[] = { }; MODULE_DEVICE_TABLE(spi, st33zp24_spi_id); -#ifdef CONFIG_OF static const struct of_device_id of_st33zp24_spi_match[] = { { .compatible = "st,st33zp24-spi", }, {} }; MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match); -#endif + +static const struct acpi_device_id st33zp24_spi_acpi_match[] = { + {"SMO3324"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match); static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend, st33zp24_pm_resume); -static struct spi_driver tpm_st33_spi_driver = { +static struct spi_driver st33zp24_spi_driver = { .driver = { .name = TPM_ST33_SPI, .pm = &st33zp24_spi_ops, .of_match_table = of_match_ptr(of_st33zp24_spi_match), + .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match), }, - .probe = tpm_st33_spi_probe, - .remove = tpm_st33_spi_remove, + .probe = st33zp24_spi_probe, + .remove = st33zp24_spi_remove, .id_table = st33zp24_spi_id, }; -module_spi_driver(tpm_st33_spi_driver); +module_spi_driver(st33zp24_spi_driver); MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)"); MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 8d626784cd8d..c2ee30451e41 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -1,6 +1,6 @@ /* * STMicroelectronics TPM Linux driver for TPM ST33ZP24 - * Copyright (C) 2009 - 2015 STMicroelectronics + * Copyright (C) 2009 - 2016 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -73,14 +73,6 @@ enum tis_defaults { TIS_LONG_TIMEOUT = 2000, }; -struct st33zp24_dev { - struct tpm_chip *chip; - void *phy_id; - const struct st33zp24_phy_ops *ops; - u32 intrs; - int io_lpcpd; -}; - /* * clear_interruption clear the pending interrupt. * @param: tpm_dev, the tpm device device. 
@@ -102,11 +94,9 @@ static u8 clear_interruption(struct st33zp24_dev *tpm_dev) */ static void st33zp24_cancel(struct tpm_chip *chip) { - struct st33zp24_dev *tpm_dev; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); - data = TPM_STS_COMMAND_READY; tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); } /* st33zp24_cancel() */ @@ -118,11 +108,9 @@ static void st33zp24_cancel(struct tpm_chip *chip) */ static u8 st33zp24_status(struct tpm_chip *chip) { - struct st33zp24_dev *tpm_dev; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); - tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1); return data; } /* st33zp24_status() */ @@ -134,17 +122,15 @@ static u8 st33zp24_status(struct tpm_chip *chip) */ static int check_locality(struct tpm_chip *chip) { - struct st33zp24_dev *tpm_dev; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; u8 status; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); - status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1); if (status && (data & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) - return chip->vendor.locality; + return tpm_dev->locality; return -EACCES; } /* check_locality() */ @@ -156,27 +142,25 @@ static int check_locality(struct tpm_chip *chip) */ static int request_locality(struct tpm_chip *chip) { + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; long ret; - struct st33zp24_dev *tpm_dev; u8 data; - if (check_locality(chip) == chip->vendor.locality) - return chip->vendor.locality; - - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + if (check_locality(chip) == tpm_dev->locality) + return tpm_dev->locality; data = TPM_ACCESS_REQUEST_USE; ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); if (ret < 0) return ret; - stop = jiffies + chip->vendor.timeout_a; + stop = jiffies + chip->timeout_a; /* Request locality is usually effective after the request */ do { if (check_locality(chip) >= 0) - return chip->vendor.locality; + return tpm_dev->locality; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); @@ -190,10 +174,9 @@ static int request_locality(struct tpm_chip *chip) */ static void release_locality(struct tpm_chip *chip) { - struct st33zp24_dev *tpm_dev; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); data = TPM_ACCESS_ACTIVE_LOCALITY; tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); @@ -206,23 +189,21 @@ static void release_locality(struct tpm_chip *chip) */ static int get_burstcount(struct tpm_chip *chip) { + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; int burstcnt, status; - u8 tpm_reg, temp; - struct st33zp24_dev *tpm_dev; - - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + u8 temp; - stop = jiffies + chip->vendor.timeout_d; + stop = jiffies + chip->timeout_d; do { - tpm_reg = TPM_STS + 1; - status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); + status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1, + &temp, 1); if (status < 0) return -EBUSY; - tpm_reg = TPM_STS + 2; burstcnt = temp; - status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1); + status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2, + &temp, 1); if (status < 0) return -EBUSY; @@ -271,15 +252,13 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip 
*chip, u8 mask, static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel) { + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; int ret = 0; bool canceled = false; bool condition; u32 cur_intrs; u8 status; - struct st33zp24_dev *tpm_dev; - - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); /* check current status */ status = st33zp24_status(chip); @@ -288,10 +267,10 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, stop = jiffies + timeout; - if (chip->vendor.irq) { + if (chip->flags & TPM_CHIP_FLAG_IRQ) { cur_intrs = tpm_dev->intrs; clear_interruption(tpm_dev); - enable_irq(chip->vendor.irq); + enable_irq(tpm_dev->irq); do { if (ret == -ERESTARTSYS && freezing(current)) @@ -314,7 +293,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, } } while (ret == -ERESTARTSYS && freezing(current)); - disable_irq_nosync(chip->vendor.irq); + disable_irq_nosync(tpm_dev->irq); } else { do { @@ -337,16 +316,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, */ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); int size = 0, burstcnt, len, ret; - struct st33zp24_dev *tpm_dev; - - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); while (size < count && wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, - chip->vendor.timeout_c, - &chip->vendor.read_queue, true) == 0) { + chip->timeout_c, + &tpm_dev->read_queue, true) == 0) { burstcnt = get_burstcount(chip); if (burstcnt < 0) return burstcnt; @@ -370,13 +347,11 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) { struct tpm_chip *chip = dev_id; - struct st33zp24_dev *tpm_dev; - - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); tpm_dev->intrs++; - wake_up_interruptible(&chip->vendor.read_queue); - disable_irq_nosync(chip->vendor.irq); + wake_up_interruptible(&tpm_dev->read_queue); + disable_irq_nosync(tpm_dev->irq); return IRQ_HANDLED; } /* tpm_ioserirq_handler() */ @@ -393,19 +368,17 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, size_t len) { + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u32 status, i, size, ordinal; int burstcnt = 0; int ret; u8 data; - struct st33zp24_dev *tpm_dev; if (!chip) return -EBUSY; if (len < TPM_HEADER_SIZE) return -EBUSY; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); - ret = request_locality(chip); if (ret < 0) return ret; @@ -414,8 +387,8 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, if ((status & TPM_STS_COMMAND_READY) == 0) { st33zp24_cancel(chip); if (wait_for_stat - (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, - &chip->vendor.read_queue, false) < 0) { + (chip, TPM_STS_COMMAND_READY, chip->timeout_b, + &tpm_dev->read_queue, false) < 0) { ret = -ETIME; goto out_err; } @@ -456,12 +429,12 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, if (ret < 0) goto out_err; - if (chip->vendor.irq) { + if (chip->flags & TPM_CHIP_FLAG_IRQ) { ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, tpm_calc_ordinal_duration(chip, ordinal), - &chip->vendor.read_queue, false); + &tpm_dev->read_queue, 
false); if (ret < 0) goto out_err; } @@ -532,6 +505,7 @@ static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops st33zp24_tpm = { + .flags = TPM_OPS_AUTO_STARTUP, .send = st33zp24_send, .recv = st33zp24_recv, .cancel = st33zp24_cancel, @@ -565,20 +539,20 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, if (!tpm_dev) return -ENOMEM; - TPM_VPRIV(chip) = tpm_dev; tpm_dev->phy_id = phy_id; tpm_dev->ops = ops; + dev_set_drvdata(&chip->dev, tpm_dev); - chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); - chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); + chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - chip->vendor.locality = LOCALITY0; + tpm_dev->locality = LOCALITY0; if (irq) { /* INTERRUPT Setup */ - init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&tpm_dev->read_queue); tpm_dev->intrs = 0; if (request_locality(chip) != LOCALITY0) { @@ -611,16 +585,14 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, if (ret < 0) goto _tpm_clean_answer; - chip->vendor.irq = irq; + tpm_dev->irq = irq; + chip->flags |= TPM_CHIP_FLAG_IRQ; - disable_irq_nosync(chip->vendor.irq); + disable_irq_nosync(tpm_dev->irq); tpm_gen_interrupt(chip); } - tpm_get_timeouts(chip); - tpm_do_selftest(chip); - return tpm_chip_register(chip); _tpm_clean_answer: dev_info(&chip->dev, "TPM initialization fail\n"); @@ -650,10 +622,9 @@ EXPORT_SYMBOL(st33zp24_remove); int st33zp24_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); - struct st33zp24_dev *tpm_dev; - int ret = 0; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); + int ret = 0; if (gpio_is_valid(tpm_dev->io_lpcpd)) gpio_set_value(tpm_dev->io_lpcpd, 0); @@ -672,16 +643,14 @@ EXPORT_SYMBOL(st33zp24_pm_suspend); int st33zp24_pm_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); - struct st33zp24_dev *tpm_dev; + struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); int ret = 0; - tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip); - if (gpio_is_valid(tpm_dev->io_lpcpd)) { gpio_set_value(tpm_dev->io_lpcpd, 1); ret = wait_for_stat(chip, - TPM_STS_VALID, chip->vendor.timeout_b, - &chip->vendor.read_queue, false); + TPM_STS_VALID, chip->timeout_b, + &tpm_dev->read_queue, false); } else { ret = tpm_pm_resume(dev); if (!ret) diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h index c207cebf67dd..6f4a4198af6a 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.h +++ b/drivers/char/tpm/st33zp24/st33zp24.h @@ -1,6 +1,6 @@ /* * STMicroelectronics TPM Linux driver for TPM ST33ZP24 - * Copyright (C) 2009 - 2015 STMicroelectronics + * Copyright (C) 2009 - 2016 STMicroelectronics * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -21,6 +21,18 @@ #define TPM_WRITE_DIRECTION 0x80 #define TPM_BUFSIZE 2048 +struct st33zp24_dev { + struct tpm_chip *chip; + void *phy_id; + const struct st33zp24_phy_ops *ops; + int locality; + int irq; + u32 intrs; + int io_lpcpd; + wait_queue_head_t read_queue; +}; + + struct 
st33zp24_phy_ops { int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 274dd0123237..e5950131bd90 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -29,33 +29,88 @@ #include "tpm.h" #include "tpm_eventlog.h" -static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); -static LIST_HEAD(tpm_chip_list); -static DEFINE_SPINLOCK(driver_lock); +DEFINE_IDR(dev_nums_idr); +static DEFINE_MUTEX(idr_lock); struct class *tpm_class; dev_t tpm_devt; -/* - * tpm_chip_find_get - return tpm_chip for a given chip number - * @chip_num the device number for the chip +/** + * tpm_try_get_ops() - Get a ref to the tpm_chip + * @chip: Chip to ref + * + * The caller must already have some kind of locking to ensure that chip is + * valid. This function will lock the chip so that the ops member can be + * accessed safely. The locking prevents tpm_chip_unregister from + * completing, so it should not be held for long periods. + * + * Returns -ERRNO if the chip could not be got. */ -struct tpm_chip *tpm_chip_find_get(int chip_num) +int tpm_try_get_ops(struct tpm_chip *chip) { - struct tpm_chip *pos, *chip = NULL; + int rc = -EIO; - rcu_read_lock(); - list_for_each_entry_rcu(pos, &tpm_chip_list, list) { - if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) - continue; + get_device(&chip->dev); - if (try_module_get(pos->pdev->driver->owner)) { - chip = pos; - break; - } + down_read(&chip->ops_sem); + if (!chip->ops) + goto out_lock; + + return 0; +out_lock: + up_read(&chip->ops_sem); + put_device(&chip->dev); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_try_get_ops); + +/** + * tpm_put_ops() - Release a ref to the tpm_chip + * @chip: Chip to put + * + * This is the opposite pair to tpm_try_get_ops(). After this returns chip may + * be kfree'd. 
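tpm_try_get_ops()/tpm_put_ops() is the bracketing every caller of the driver callbacks now follows. Condensed from the tpm-dev.c write path further down, assuming the caller already holds a valid chip pointer:

        if (tpm_try_get_ops(chip))
                return -EPIPE;                  /* chip was unregistered under us */
        rc = tpm_transmit(chip, buf, bufsiz);   /* chip->ops pinned non-NULL here */
        tpm_put_ops(chip);                      /* chip may be freed after this   */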
+ */ +void tpm_put_ops(struct tpm_chip *chip) +{ + up_read(&chip->ops_sem); + put_device(&chip->dev); +} +EXPORT_SYMBOL_GPL(tpm_put_ops); + +/** + * tpm_chip_find_get() - return tpm_chip for a given chip number + * @chip_num: id to find + * + * The return'd chip has been tpm_try_get_ops'd and must be released via + * tpm_put_ops + */ +struct tpm_chip *tpm_chip_find_get(int chip_num) +{ + struct tpm_chip *chip, *res = NULL; + int chip_prev; + + mutex_lock(&idr_lock); + + if (chip_num == TPM_ANY_NUM) { + chip_num = 0; + do { + chip_prev = chip_num; + chip = idr_get_next(&dev_nums_idr, &chip_num); + if (chip && !tpm_try_get_ops(chip)) { + res = chip; + break; + } + } while (chip_prev != chip_num); + } else { + chip = idr_find_slowpath(&dev_nums_idr, chip_num); + if (chip && !tpm_try_get_ops(chip)) + res = chip; } - rcu_read_unlock(); - return chip; + + mutex_unlock(&idr_lock); + + return res; } /** @@ -68,24 +123,25 @@ static void tpm_dev_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); - spin_lock(&driver_lock); - clear_bit(chip->dev_num, dev_mask); - spin_unlock(&driver_lock); + mutex_lock(&idr_lock); + idr_remove(&dev_nums_idr, chip->dev_num); + mutex_unlock(&idr_lock); + kfree(chip); } /** - * tpmm_chip_alloc() - allocate a new struct tpm_chip instance - * @dev: device to which the chip is associated + * tpm_chip_alloc() - allocate a new struct tpm_chip instance + * @pdev: device to which the chip is associated + * At this point pdev mst be initialized, but does not have to + * be registered * @ops: struct tpm_class_ops instance * * Allocates a new struct tpm_chip instance and assigns a free - * device number for it. Caller does not have to worry about - * freeing the allocated resources. When the devices is removed - * devres calls tpmm_chip_remove() to do the job. + * device number for it. Must be paired with put_device(&chip->dev). 
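The TPM_ANY_NUM branch is a standard IDR scan: idr_get_next() returns the entry at the lowest populated index >= *id and writes that index back, so the walk ends as soon as the index stops advancing. The same skeleton, generalized (idr_lock is held around the whole loop; usable() is a hypothetical stand-in for a successful tpm_try_get_ops()):

        int id = 0, prev;
        struct tpm_chip *chip;

        do {
                prev = id;
                chip = idr_get_next(&dev_nums_idr, &id); /* NULL when none left */
                if (chip && usable(chip))
                        return chip;                     /* ref held by caller  */
        } while (prev != id);                            /* index stopped: done */
        return NULL;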
*/ -struct tpm_chip *tpmm_chip_alloc(struct device *dev, - const struct tpm_class_ops *ops) +struct tpm_chip *tpm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops) { struct tpm_chip *chip; int rc; @@ -95,53 +151,75 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, return ERR_PTR(-ENOMEM); mutex_init(&chip->tpm_mutex); - INIT_LIST_HEAD(&chip->list); + init_rwsem(&chip->ops_sem); chip->ops = ops; - spin_lock(&driver_lock); - chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); - spin_unlock(&driver_lock); - - if (chip->dev_num >= TPM_NUM_DEVICES) { + mutex_lock(&idr_lock); + rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); + mutex_unlock(&idr_lock); + if (rc < 0) { dev_err(dev, "No available tpm device numbers\n"); kfree(chip); - return ERR_PTR(-ENOMEM); + return ERR_PTR(rc); } + chip->dev_num = rc; - set_bit(chip->dev_num, dev_mask); - - scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); - - chip->pdev = dev; - - dev_set_drvdata(dev, chip); + device_initialize(&chip->dev); chip->dev.class = tpm_class; chip->dev.release = tpm_dev_release; - chip->dev.parent = chip->pdev; -#ifdef CONFIG_ACPI + chip->dev.parent = dev; chip->dev.groups = chip->groups; -#endif if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - dev_set_name(&chip->dev, "%s", chip->devname); + rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); + if (rc) + goto out; - device_initialize(&chip->dev); + if (!dev) + chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - chip->cdev.owner = chip->pdev->driver->owner; + chip->cdev.owner = THIS_MODULE; chip->cdev.kobj.parent = &chip->dev.kobj; - rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev); - if (rc) { - put_device(&chip->dev); + return chip; + +out: + put_device(&chip->dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(tpm_chip_alloc); + +/** + * tpmm_chip_alloc() - allocate a new struct tpm_chip instance + * @pdev: parent device to which the chip is associated + * @ops: struct tpm_class_ops instance + * + * Same as tpm_chip_alloc except devm is used to do the put_device + */ +struct tpm_chip *tpmm_chip_alloc(struct device *pdev, + const struct tpm_class_ops *ops) +{ + struct tpm_chip *chip; + int rc; + + chip = tpm_chip_alloc(pdev, ops); + if (IS_ERR(chip)) + return chip; + + rc = devm_add_action_or_reset(pdev, + (void (*)(void *)) put_device, + &chip->dev); + if (rc) return ERR_PTR(rc); - } + + dev_set_drvdata(pdev, chip); return chip; } @@ -155,7 +233,7 @@ static int tpm_add_char_device(struct tpm_chip *chip) if (rc) { dev_err(&chip->dev, "unable to cdev_add() %s, major %d, minor %d, err=%d\n", - chip->devname, MAJOR(chip->dev.devt), + dev_name(&chip->dev), MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); return rc; @@ -165,13 +243,18 @@ static int tpm_add_char_device(struct tpm_chip *chip) if (rc) { dev_err(&chip->dev, "unable to device_register() %s, major %d, minor %d, err=%d\n", - chip->devname, MAJOR(chip->dev.devt), + dev_name(&chip->dev), MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); cdev_del(&chip->cdev); return rc; } + /* Make the chip available. */ + mutex_lock(&idr_lock); + idr_replace(&dev_nums_idr, chip, chip->dev_num); + mutex_unlock(&idr_lock); + return rc; } @@ -179,20 +262,28 @@ static void tpm_del_char_device(struct tpm_chip *chip) { cdev_del(&chip->cdev); device_del(&chip->dev); + + /* Make the chip unavailable. 
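Allocation, publication and teardown of the device number are deliberately split, all under idr_lock. Reserving the slot with a NULL pointer fixes the number early, so dev_set_name() can build "tpm%d", while keeping lookups blind until the chip is actually operational; the number is only returned from the final release callback. The life cycle, condensed from the hunks above and below (error checks elided):

        /* tpm_chip_alloc(): reserve a number, lookups still miss     */
        chip->dev_num = idr_alloc(&dev_nums_idr, NULL, 0,
                                  TPM_NUM_DEVICES, GFP_KERNEL);
        /* tpm_add_char_device(): publish, tpm_chip_find_get() hits   */
        idr_replace(&dev_nums_idr, chip, chip->dev_num);
        /* tpm_del_char_device(): unpublish during teardown           */
        idr_replace(&dev_nums_idr, NULL, chip->dev_num);
        /* tpm_dev_release(): finally free the number                 */
        idr_remove(&dev_nums_idr, chip->dev_num);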
*/ + mutex_lock(&idr_lock); + idr_replace(&dev_nums_idr, NULL, chip->dev_num); + mutex_unlock(&idr_lock); + + /* Make the driver uncallable. */ + down_write(&chip->ops_sem); + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_shutdown(chip, TPM2_SU_CLEAR); + chip->ops = NULL; + up_write(&chip->ops_sem); } static int tpm1_chip_register(struct tpm_chip *chip) { - int rc; - if (chip->flags & TPM_CHIP_FLAG_TPM2) return 0; - rc = tpm_sysfs_add_device(chip); - if (rc) - return rc; + tpm_sysfs_add_device(chip); - chip->bios_dir = tpm_bios_log_setup(chip->devname); + chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev)); return 0; } @@ -204,10 +295,50 @@ static void tpm1_chip_unregister(struct tpm_chip *chip) if (chip->bios_dir) tpm_bios_log_teardown(chip->bios_dir); +} + +static void tpm_del_legacy_sysfs(struct tpm_chip *chip) +{ + struct attribute **i; + + if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) + return; + + sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); - tpm_sysfs_del_device(chip); + for (i = chip->groups[0]->attrs; *i != NULL; ++i) + sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name); } +/* For compatibility with legacy sysfs paths we provide symlinks from the + * parent dev directory to selected names within the tpm chip directory. Old + * kernel versions created these files directly under the parent. + */ +static int tpm_add_legacy_sysfs(struct tpm_chip *chip) +{ + struct attribute **i; + int rc; + + if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) + return 0; + + rc = __compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); + if (rc && rc != -ENOENT) + return rc; + + /* All the names from tpm-sysfs */ + for (i = chip->groups[0]->attrs; *i != NULL; ++i) { + rc = __compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name); + if (rc) { + tpm_del_legacy_sysfs(chip); + return rc; + } + } + + return 0; +} /* * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. @@ -223,6 +354,15 @@ int tpm_chip_register(struct tpm_chip *chip) { int rc; + if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) { + if (chip->flags & TPM_CHIP_FLAG_TPM2) + rc = tpm2_auto_startup(chip); + else + rc = tpm1_auto_startup(chip); + if (rc) + return rc; + } + rc = tpm1_chip_register(chip); if (rc) return rc; @@ -230,30 +370,20 @@ int tpm_chip_register(struct tpm_chip *chip) tpm_add_ppi(chip); rc = tpm_add_char_device(chip); - if (rc) - goto out_err; - - /* Make the chip available. */ - spin_lock(&driver_lock); - list_add_tail_rcu(&chip->list, &tpm_chip_list); - spin_unlock(&driver_lock); + if (rc) { + tpm1_chip_unregister(chip); + return rc; + } chip->flags |= TPM_CHIP_FLAG_REGISTERED; - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { - rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj, - &chip->dev.kobj, - "ppi"); - if (rc && rc != -ENOENT) { - tpm_chip_unregister(chip); - return rc; - } + rc = tpm_add_legacy_sysfs(chip); + if (rc) { + tpm_chip_unregister(chip); + return rc; } return 0; -out_err: - tpm1_chip_unregister(chip); - return rc; } EXPORT_SYMBOL_GPL(tpm_chip_register); @@ -264,6 +394,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); * Takes the chip first away from the list of available TPM chips and then * cleans up all the resources reserved by tpm_chip_register(). * + * Once this function returns the driver call backs in 'op's will not be + * running and will no longer start. + * * NOTE: This function should be only called before deinitializing chip * resources. 
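That down_write() is what lets individual drivers stop worrying about in-flight commands on removal: unregistration drains every tpm_try_get_ops() reader, issues the final TPM 2.0 shutdown while the callbacks are still valid, and only then clears ops. This is also why crb_acpi_remove() further down can drop its own tpm2_shutdown() call. The write side in miniature:

        down_write(&chip->ops_sem);     /* waits for all readers to drop  */
        if (chip->flags & TPM_CHIP_FLAG_TPM2)
                tpm2_shutdown(chip, TPM2_SU_CLEAR); /* last driver call   */
        chip->ops = NULL;               /* tpm_try_get_ops() now fails    */
        up_write(&chip->ops_sem);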
*/ @@ -272,13 +405,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) return; - spin_lock(&driver_lock); - list_del_rcu(&chip->list); - spin_unlock(&driver_lock); - synchronize_rcu(); - - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - sysfs_remove_link(&chip->pdev->kobj, "ppi"); + tpm_del_legacy_sysfs(chip); tpm1_chip_unregister(chip); tpm_del_char_device(chip); diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index de0337ebd658..f5d452151c6b 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file) * by the check of is_open variable, which is protected * by driver_lock. */ if (test_and_set_bit(0, &chip->is_open)) { - dev_dbg(chip->pdev, "Another process owns this TPM\n"); + dev_dbg(&chip->dev, "Another process owns this TPM\n"); return -EBUSY; } @@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file) INIT_WORK(&priv->work, timeout_work); file->private_data = priv; - get_device(chip->pdev); return 0; } @@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, return -EFAULT; } - /* atomic tpm command send and result receive */ + /* atomic tpm command send and result receive. We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + mutex_unlock(&priv->buffer_mutex); + return -EPIPE; + } out_size = tpm_transmit(priv->chip, priv->data_buffer, sizeof(priv->data_buffer)); + + tpm_put_ops(priv->chip); if (out_size < 0) { mutex_unlock(&priv->buffer_mutex); return out_size; @@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file) file->private_data = NULL; atomic_set(&priv->data_pending, 0); clear_bit(0, &priv->chip->is_open); - put_device(priv->chip->pdev); kfree(priv); return 0; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index e2fa89c88304..1abe2d7a2610 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -319,7 +319,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, duration_idx = tpm_ordinal_duration[ordinal]; if (duration_idx != TPM_UNDEFINED) - duration = chip->vendor.duration[duration_idx]; + duration = chip->duration[duration_idx]; if (duration <= 0) return 2 * 60 * HZ; else @@ -345,7 +345,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, if (count == 0) return -ENODATA; if (count > bufsiz) { - dev_err(chip->pdev, + dev_err(&chip->dev, "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } @@ -354,12 +354,12 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, rc = chip->ops->send(chip, (u8 *) buf, count); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; } - if (chip->vendor.irq) + if (chip->flags & TPM_CHIP_FLAG_IRQ) goto out_recv; if (chip->flags & TPM_CHIP_FLAG_TPM2) @@ -373,7 +373,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, goto out_recv; if (chip->ops->req_canceled(chip, status)) { - dev_err(chip->pdev, "Operation Canceled\n"); + dev_err(&chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; } @@ -383,14 +383,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, } while (time_before(jiffies, stop)); chip->ops->cancel(chip); - dev_err(chip->pdev, "Operation Timed out\n"); + dev_err(&chip->dev, "Operation Timed 
out\n"); rc = -ETIME; goto out; out_recv: rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); if (rc < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %zd\n", rc); out: mutex_unlock(&chip->tpm_mutex); @@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, err = be32_to_cpu(header->return_code); if (err != 0 && desc) - dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, + dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); return err; @@ -432,12 +432,11 @@ static const struct tpm_input_header tpm_getcap_header = { .ordinal = TPM_ORD_GET_CAP }; -ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, +ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap, const char *desc) { struct tpm_cmd_t tpm_cmd; int rc; - struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_getcap_header; if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { @@ -505,15 +504,15 @@ int tpm_get_timeouts(struct tpm_chip *chip) if (chip->flags & TPM_CHIP_FLAG_TPM2) { /* Fixed timeouts for TPM2 */ - chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); - chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); - chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); - chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); - chip->vendor.duration[TPM_SHORT] = + chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); + chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); + chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); + chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); + chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT); - chip->vendor.duration[TPM_MEDIUM] = + chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM); - chip->vendor.duration[TPM_LONG] = + chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG); return 0; } @@ -527,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) if (rc == TPM_ERR_INVALID_POSTINIT) { /* The TPM is not started, we are the first to talk to it. Execute a startup command. */ - dev_info(chip->pdev, "Issuing TPM_STARTUP"); + dev_info(&chip->dev, "Issuing TPM_STARTUP"); if (tpm_startup(chip, TPM_ST_CLEAR)) return rc; @@ -539,7 +538,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) NULL); } if (rc) { - dev_err(chip->pdev, + dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n", rc); goto duration; @@ -561,10 +560,10 @@ int tpm_get_timeouts(struct tpm_chip *chip) * of misreporting. 
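Dropping the dev_get_drvdata() hop from tpm_getcap() ripples through the sysfs code below: an attribute callback now derives the chip straight from the device the attribute hangs off. The new call shape, using the manufacturer query as the example:

        struct tpm_chip *chip = to_tpm_chip(dev); /* container_of, no drvdata */
        cap_t cap;
        ssize_t rc;

        rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
                        "attempting to determine the manufacturer");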
*/ if (chip->ops->update_timeouts != NULL) - chip->vendor.timeout_adjusted = + chip->timeout_adjusted = chip->ops->update_timeouts(chip, new_timeout); - if (!chip->vendor.timeout_adjusted) { + if (!chip->timeout_adjusted) { /* Don't overwrite default if value is 0 */ if (new_timeout[0] != 0 && new_timeout[0] < 1000) { int i; @@ -572,13 +571,13 @@ int tpm_get_timeouts(struct tpm_chip *chip) /* timeouts in msec rather usec */ for (i = 0; i != ARRAY_SIZE(new_timeout); i++) new_timeout[i] *= 1000; - chip->vendor.timeout_adjusted = true; + chip->timeout_adjusted = true; } } /* Report adjusted timeouts */ - if (chip->vendor.timeout_adjusted) { - dev_info(chip->pdev, + if (chip->timeout_adjusted) { + dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", old_timeout[0], new_timeout[0], old_timeout[1], new_timeout[1], @@ -586,10 +585,10 @@ int tpm_get_timeouts(struct tpm_chip *chip) old_timeout[3], new_timeout[3]); } - chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]); - chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]); - chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]); - chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]); + chip->timeout_a = usecs_to_jiffies(new_timeout[0]); + chip->timeout_b = usecs_to_jiffies(new_timeout[1]); + chip->timeout_c = usecs_to_jiffies(new_timeout[2]); + chip->timeout_d = usecs_to_jiffies(new_timeout[3]); duration: tpm_cmd.header.in = tpm_getcap_header; @@ -608,11 +607,11 @@ duration: return -EINVAL; duration_cap = &tpm_cmd.params.getcap_out.cap.duration; - chip->vendor.duration[TPM_SHORT] = + chip->duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); - chip->vendor.duration[TPM_MEDIUM] = + chip->duration[TPM_MEDIUM] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); - chip->vendor.duration[TPM_LONG] = + chip->duration[TPM_LONG] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above @@ -620,12 +619,12 @@ duration: * fix up the resulting too-small TPM_SHORT value to make things work. * We also scale the TPM_MEDIUM and -_LONG values by 1000. 
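The Broadcom fixup below reads naturally once the units are kept straight: the durations were just converted with usecs_to_jiffies(), and HZ/100 is the jiffy count for 10 ms, so a "short" command budget under 10 ms can only mean the chip reported milliseconds in a field the driver parses as microseconds. Everything is therefore rescaled by 1000, with TPM_SHORT pinned to a conservative full second; annotated:

        if (chip->duration[TPM_SHORT] < (HZ / 100)) {   /* < 10 ms: implausible */
                chip->duration[TPM_SHORT] = HZ;         /* pin to a safe 1 s    */
                chip->duration[TPM_MEDIUM] *= 1000;     /* ms misread as us     */
                chip->duration[TPM_LONG]   *= 1000;
                chip->duration_adjusted = true;         /* surfaced in sysfs    */
        }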
*/ - if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) { - chip->vendor.duration[TPM_SHORT] = HZ; - chip->vendor.duration[TPM_MEDIUM] *= 1000; - chip->vendor.duration[TPM_LONG] *= 1000; - chip->vendor.duration_adjusted = true; - dev_info(chip->pdev, "Adjusting TPM timeout parameters."); + if (chip->duration[TPM_SHORT] < (HZ / 100)) { + chip->duration[TPM_SHORT] = HZ; + chip->duration[TPM_MEDIUM] *= 1000; + chip->duration[TPM_LONG] *= 1000; + chip->duration_adjusted = true; + dev_info(&chip->dev, "Adjusting TPM timeout parameters."); } return 0; } @@ -700,7 +699,7 @@ int tpm_is_tpm2(u32 chip_num) rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -729,7 +728,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) rc = tpm2_pcr_read(chip, pcr_idx, res_buf); else rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); @@ -764,7 +763,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_pcr_extend(chip, pcr_idx, hash); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -774,7 +773,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, "attempting extend a PCR value"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); @@ -815,7 +814,9 @@ int tpm_do_selftest(struct tpm_chip *chip) * around 300ms while the self test is ongoing, keep trying * until the self test duration expires. */ if (rc == -ETIME) { - dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); + dev_info( + &chip->dev, HW_ERR + "TPM command timed out during continue self test"); msleep(delay_msec); continue; } @@ -825,7 +826,7 @@ int tpm_do_selftest(struct tpm_chip *chip) rc = be32_to_cpu(cmd.header.out.return_code); if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { - dev_info(chip->pdev, + dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n", rc); /* TPM is disabled and/or deactivated; driver can * proceed and TPM does handle commands for @@ -842,6 +843,33 @@ int tpm_do_selftest(struct tpm_chip *chip) } EXPORT_SYMBOL_GPL(tpm_do_selftest); +/** + * tpm1_auto_startup - Perform the standard automatic TPM initialization + * sequence + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error. 
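With the startup sequence factored out, a driver opts in by flagging its ops and deletes the open-coded tpm_get_timeouts()/tpm_do_selftest() calls from its probe path, exactly as the st33zp24 and crb hunks elsewhere in this diff do. Sketched with a hypothetical driver (my_* names are placeholders):

        static const struct tpm_class_ops my_ops = {
                .flags  = TPM_OPS_AUTO_STARTUP, /* core runs timeouts + selftest */
                .status = my_status,
                .recv   = my_recv,
                .send   = my_send,
                .cancel = my_cancel,
                .req_canceled = my_req_canceled,
        };

        /* probe() then ends with just: */
        return tpm_chip_register(chip); /* dispatches tpm1/tpm2_auto_startup() */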
+ */ +int tpm1_auto_startup(struct tpm_chip *chip) +{ + int rc; + + rc = tpm_get_timeouts(chip); + if (rc) + goto out; + rc = tpm_do_selftest(chip); + if (rc) { + dev_err(&chip->dev, "TPM self test failed\n"); + goto out; + } + + return rc; +out: + if (rc > 0) + rc = -ENODEV; + return rc; +} + int tpm_send(u32 chip_num, void *cmd, size_t buflen) { struct tpm_chip *chip; @@ -853,7 +881,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_send); @@ -888,7 +916,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, stop = jiffies + timeout; - if (chip->vendor.irq) { + if (chip->flags & TPM_CHIP_FLAG_IRQ) { again: timeout = stop - jiffies; if ((long)timeout <= 0) @@ -978,10 +1006,10 @@ int tpm_pm_suspend(struct device *dev) } if (rc) - dev_err(chip->pdev, + dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n", rc); else if (try > 0) - dev_warn(chip->pdev, "TPM savestate took %dms\n", + dev_warn(&chip->dev, "TPM savestate took %dms\n", try * TPM_TIMEOUT_RETRY); return rc; @@ -1035,7 +1063,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) if (chip->flags & TPM_CHIP_FLAG_TPM2) { err = tpm2_get_random(chip, out, max); - tpm_chip_put(chip); + tpm_put_ops(chip); return err; } @@ -1057,7 +1085,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) num_bytes -= recd; } while (retries-- && total < max); - tpm_chip_put(chip); + tpm_put_ops(chip); return total ? total : -EIO; } EXPORT_SYMBOL_GPL(tpm_get_random); @@ -1083,7 +1111,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_seal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_seal_trusted); @@ -1109,7 +1137,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_unseal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); + return rc; } EXPORT_SYMBOL_GPL(tpm_unseal_trusted); @@ -1136,6 +1165,7 @@ static int __init tpm_init(void) static void __exit tpm_exit(void) { + idr_destroy(&dev_nums_idr); class_destroy(tpm_class); unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); } diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index ee66fd4673f3..b46cf70c8b16 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -36,7 +36,7 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, int i, rc; char *str = buf; - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = to_tpm_chip(dev); tpm_cmd.header.in = tpm_readpubek_header; err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, @@ -92,9 +92,9 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, ssize_t rc; int i, j, num_pcrs; char *str = buf; - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = to_tpm_chip(dev); - rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap, + rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap, "attempting to determine the number of PCRS"); if (rc) return 0; @@ -119,8 +119,8 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent enabled state"); + rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent enabled state"); if 
(rc) return 0; @@ -135,8 +135,8 @@ static ssize_t active_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent active state"); + rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent active state"); if (rc) return 0; @@ -151,8 +151,8 @@ static ssize_t owned_show(struct device *dev, struct device_attribute *attr, cap_t cap; ssize_t rc; - rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap, - "attempting to determine the owner state"); + rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, + "attempting to determine the owner state"); if (rc) return 0; @@ -167,8 +167,8 @@ static ssize_t temp_deactivated_show(struct device *dev, cap_t cap; ssize_t rc; - rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap, - "attempting to determine the temporary state"); + rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, + "attempting to determine the temporary state"); if (rc) return 0; @@ -180,11 +180,12 @@ static DEVICE_ATTR_RO(temp_deactivated); static ssize_t caps_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); cap_t cap; ssize_t rc; char *str = buf; - rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, + rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to determine the manufacturer"); if (rc) return 0; @@ -192,8 +193,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, be32_to_cpu(cap.manufacturer_id)); /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ - rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, - "attempting to determine the 1.2 version"); + rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap, + "attempting to determine the 1.2 version"); if (!rc) { str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", @@ -203,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, cap.tpm_version_1_2.revMinor); } else { /* Otherwise just use TPM_STRUCT_VER */ - rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, + rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap, "attempting to determine the 1.1 version"); if (rc) return 0; @@ -222,7 +223,7 @@ static DEVICE_ATTR_RO(caps); static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = to_tpm_chip(dev); if (chip == NULL) return 0; @@ -234,16 +235,16 @@ static DEVICE_ATTR_WO(cancel); static ssize_t durations_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = to_tpm_chip(dev); - if (chip->vendor.duration[TPM_LONG] == 0) + if (chip->duration[TPM_LONG] == 0) return 0; return sprintf(buf, "%d %d %d [%s]\n", - jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), - jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), - jiffies_to_usecs(chip->vendor.duration[TPM_LONG]), - chip->vendor.duration_adjusted + jiffies_to_usecs(chip->duration[TPM_SHORT]), + jiffies_to_usecs(chip->duration[TPM_MEDIUM]), + jiffies_to_usecs(chip->duration[TPM_LONG]), + chip->duration_adjusted ? 
"adjusted" : "original"); } static DEVICE_ATTR_RO(durations); @@ -251,14 +252,14 @@ static DEVICE_ATTR_RO(durations); static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = to_tpm_chip(dev); return sprintf(buf, "%d %d %d %d [%s]\n", - jiffies_to_usecs(chip->vendor.timeout_a), - jiffies_to_usecs(chip->vendor.timeout_b), - jiffies_to_usecs(chip->vendor.timeout_c), - jiffies_to_usecs(chip->vendor.timeout_d), - chip->vendor.timeout_adjusted + jiffies_to_usecs(chip->timeout_a), + jiffies_to_usecs(chip->timeout_b), + jiffies_to_usecs(chip->timeout_c), + jiffies_to_usecs(chip->timeout_d), + chip->timeout_adjusted ? "adjusted" : "original"); } static DEVICE_ATTR_RO(timeouts); @@ -281,19 +282,12 @@ static const struct attribute_group tpm_dev_group = { .attrs = tpm_dev_attrs, }; -int tpm_sysfs_add_device(struct tpm_chip *chip) +void tpm_sysfs_add_device(struct tpm_chip *chip) { - int err; - err = sysfs_create_group(&chip->pdev->kobj, - &tpm_dev_group); - - if (err) - dev_err(chip->pdev, - "failed to create sysfs attributes, %d\n", err); - return err; -} - -void tpm_sysfs_del_device(struct tpm_chip *chip) -{ - sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); + /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del + * is called before ops is null'd and the sysfs core synchronizes this + * removal so that no callbacks are running or can run again + */ + WARN_ON(chip->groups_cnt != 0); + chip->groups[chip->groups_cnt++] = &tpm_dev_group; } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 28b477e8da6a..3e32d5bd2dc6 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -19,6 +19,10 @@ * License. * */ + +#ifndef __TPM_H__ +#define __TPM_H__ + #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> @@ -34,7 +38,7 @@ enum tpm_const { TPM_MINOR = 224, /* officially assigned */ TPM_BUFSIZE = 4096, - TPM_NUM_DEVICES = 256, + TPM_NUM_DEVICES = 65536, TPM_RETRY = 50, /* 5 seconds */ }; @@ -128,33 +132,6 @@ enum tpm2_startup_types { TPM2_SU_STATE = 0x0001, }; -struct tpm_chip; - -struct tpm_vendor_specific { - void __iomem *iobase; /* ioremapped address */ - unsigned long base; /* TPM base address */ - - int irq; - - int region_size; - int have_region; - - struct list_head list; - int locality; - unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ - bool timeout_adjusted; - unsigned long duration[3]; /* jiffies */ - bool duration_adjusted; - void *priv; - - wait_queue_head_t read_queue; - wait_queue_head_t int_queue; - - u16 manufacturer_id; -}; - -#define TPM_VPRIV(c) ((c)->vendor.priv) - #define TPM_VID_INTEL 0x8086 #define TPM_VID_WINBOND 0x1050 #define TPM_VID_STM 0x104A @@ -164,44 +141,48 @@ struct tpm_vendor_specific { enum tpm_chip_flags { TPM_CHIP_FLAG_REGISTERED = BIT(0), TPM_CHIP_FLAG_TPM2 = BIT(1), + TPM_CHIP_FLAG_IRQ = BIT(2), + TPM_CHIP_FLAG_VIRTUAL = BIT(3), }; struct tpm_chip { - struct device *pdev; /* Device stuff */ struct device dev; struct cdev cdev; + /* A driver callback under ops cannot be run unless ops_sem is held + * (sometimes implicitly, eg for the sysfs code). ops becomes null + * when the driver is unregistered, see tpm_try_get_ops. 
+ */ + struct rw_semaphore ops_sem; const struct tpm_class_ops *ops; + unsigned int flags; int dev_num; /* /dev/tpm# */ - char devname[7]; unsigned long is_open; /* only one allowed */ - int time_expired; struct mutex tpm_mutex; /* tpm is processing */ - struct tpm_vendor_specific vendor; + unsigned long timeout_a; /* jiffies */ + unsigned long timeout_b; /* jiffies */ + unsigned long timeout_c; /* jiffies */ + unsigned long timeout_d; /* jiffies */ + bool timeout_adjusted; + unsigned long duration[3]; /* jiffies */ + bool duration_adjusted; struct dentry **bios_dir; -#ifdef CONFIG_ACPI - const struct attribute_group *groups[2]; + const struct attribute_group *groups[3]; unsigned int groups_cnt; +#ifdef CONFIG_ACPI acpi_handle acpi_dev_handle; char ppi_version[TPM_PPI_VERSION_LEN + 1]; #endif /* CONFIG_ACPI */ - - struct list_head list; }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) -static inline void tpm_chip_put(struct tpm_chip *chip) -{ - module_put(chip->pdev->driver->owner); -} - static inline int tpm_read_index(int base, int index) { outb(index, base); @@ -493,14 +474,17 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) extern struct class *tpm_class; extern dev_t tpm_devt; extern const struct file_operations tpm_fops; +extern struct idr dev_nums_idr; -ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *); +ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap, + const char *desc); ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, size_t bufsiz); ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len, const char *desc); extern int tpm_get_timeouts(struct tpm_chip *); extern void tpm_gen_interrupt(struct tpm_chip *); +int tpm1_auto_startup(struct tpm_chip *chip); extern int tpm_do_selftest(struct tpm_chip *); extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32); extern int tpm_pm_suspend(struct device *); @@ -509,13 +493,17 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, wait_queue_head_t *, bool); struct tpm_chip *tpm_chip_find_get(int chip_num); -extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, +__must_check int tpm_try_get_ops(struct tpm_chip *chip); +void tpm_put_ops(struct tpm_chip *chip); + +extern struct tpm_chip *tpm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops); +extern struct tpm_chip *tpmm_chip_alloc(struct device *pdev, const struct tpm_class_ops *ops); extern int tpm_chip_register(struct tpm_chip *chip); extern void tpm_chip_unregister(struct tpm_chip *chip); -int tpm_sysfs_add_device(struct tpm_chip *chip); -void tpm_sysfs_del_device(struct tpm_chip *chip); +void tpm_sysfs_add_device(struct tpm_chip *chip); int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); @@ -539,9 +527,9 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); -extern int tpm2_startup(struct tpm_chip *chip, u16 startup_type); +int tpm2_auto_startup(struct tpm_chip *chip); extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32); -extern int tpm2_do_selftest(struct tpm_chip *chip); extern int tpm2_gen_interrupt(struct tpm_chip *chip); extern int tpm2_probe(struct tpm_chip *chip); +#endif diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index b28e4da3d2cf..08c7e23ed535 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ 
b/drivers/char/tpm/tpm2-cmd.c @@ -597,7 +597,7 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); if (rc) { - dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n", + dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", handle); return; } @@ -606,7 +606,7 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle) rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context"); if (rc) - dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, + dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle, rc); tpm_buf_destroy(&buf); @@ -703,7 +703,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc); if (!rc) - *value = cmd.params.get_tpm_pt_out.value; + *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value); return rc; } @@ -728,7 +728,7 @@ static const struct tpm_input_header tpm2_startup_header = { * returned it remarks a POSIX error code. If a positive number is returned * it remarks a TPM error. */ -int tpm2_startup(struct tpm_chip *chip, u16 startup_type) +static int tpm2_startup(struct tpm_chip *chip, u16 startup_type) { struct tpm2_cmd cmd; @@ -738,7 +738,6 @@ int tpm2_startup(struct tpm_chip *chip, u16 startup_type) return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), "attempting to start the TPM"); } -EXPORT_SYMBOL_GPL(tpm2_startup); #define TPM2_SHUTDOWN_IN_SIZE \ (sizeof(struct tpm_input_header) + \ @@ -770,10 +769,9 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) * except print the error code on a system failure. */ if (rc < 0) - dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", + dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", rc); } -EXPORT_SYMBOL_GPL(tpm2_shutdown); /* * tpm2_calc_ordinal_duration() - maximum duration for a command @@ -793,7 +791,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST]; if (index != TPM_UNDEFINED) - duration = chip->vendor.duration[index]; + duration = chip->duration[index]; if (duration <= 0) duration = 2 * 60 * HZ; @@ -837,7 +835,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) * immediately. This is a workaround for that. */ if (rc == TPM2_RC_TESTING) { - dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); + dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); rc = 0; } @@ -855,7 +853,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) * returned it remarks a POSIX error code. If a positive number is returned * it remarks a TPM error. */ -int tpm2_do_selftest(struct tpm_chip *chip) +static int tpm2_do_selftest(struct tpm_chip *chip) { int rc; unsigned int loops; @@ -895,7 +893,6 @@ int tpm2_do_selftest(struct tpm_chip *chip) return rc; } -EXPORT_SYMBOL_GPL(tpm2_do_selftest); /** * tpm2_gen_interrupt() - generate an interrupt @@ -943,3 +940,43 @@ int tpm2_probe(struct tpm_chip *chip) return 0; } EXPORT_SYMBOL_GPL(tpm2_probe); + +/** + * tpm2_auto_startup - Perform the standard automatic TPM initialization + * sequence + * @chip: TPM chip to use + * + * Returns 0 on success, < 0 in case of fatal error. 
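Two notes on the tpm2-cmd.c hunks. The tpm2_get_tpm_pt() change is pure endianness: the reply field is big-endian on the wire, so without be32_to_cpu() a little-endian host would read the bytes 00 00 01 00 (the value 256) as 0x00010000. The TPM 2.0 auto-startup below then adds one wrinkle over the TPM 1.2 variant: a self test answered with TPM2_RC_INITIALIZE means the TPM has not yet seen TPM2_Startup, so the sequence sends TPM2_Startup(TPM2_SU_CLEAR) and retries. A sketch of the intended decision tree, assuming (as in mainline) that a clean first self test, rc == 0, skips the error path; the error handling is condensed to a hypothetical fail label:

        rc = tpm2_do_selftest(chip);
        if (rc != 0 && rc != TPM2_RC_INITIALIZE)
                goto fail;                      /* genuine self test failure  */
        if (rc == TPM2_RC_INITIALIZE) {         /* TPM2_Startup not yet sent  */
                rc = tpm2_startup(chip, TPM2_SU_CLEAR);
                if (rc)
                        goto fail;
                rc = tpm2_do_selftest(chip);    /* must pass cleanly now      */
                if (rc)
                        goto fail;
        }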
+ */ +int tpm2_auto_startup(struct tpm_chip *chip) +{ + int rc; + + rc = tpm_get_timeouts(chip); + if (rc) + goto out; + + rc = tpm2_do_selftest(chip); + if (rc != TPM2_RC_INITIALIZE) { + dev_err(&chip->dev, "TPM self test failed\n"); + goto out; + } + + if (rc == TPM2_RC_INITIALIZE) { + rc = tpm2_startup(chip, TPM2_SU_CLEAR); + if (rc) + goto out; + + rc = tpm2_do_selftest(chip); + if (rc) { + dev_err(&chip->dev, "TPM self test failed\n"); + goto out; + } + } + + return rc; +out: + if (rc > 0) + rc = -ENODEV; + return rc; +} diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index dfadad0916a1..0d322ab11faa 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -37,6 +37,7 @@ enum tpm_atmel_read_status { static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) { + struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); u8 status, *hdr = buf; u32 size; int i; @@ -47,12 +48,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) return -EIO; for (i = 0; i < 6; i++) { - status = ioread8(chip->vendor.iobase + 1); + status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading header\n"); + dev_err(&chip->dev, "error reading header\n"); return -EIO; } - *buf++ = ioread8(chip->vendor.iobase); + *buf++ = ioread8(priv->iobase); } /* size of the data received */ @@ -60,12 +61,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) size = be32_to_cpu(*native_size); if (count < size) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Recv size(%d) less than available space\n", size); for (; i < size; i++) { /* clear the waiting data anyway */ - status = ioread8(chip->vendor.iobase + 1); + status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } } @@ -74,19 +75,19 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read all the data available */ for (; i < size; i++) { - status = ioread8(chip->vendor.iobase + 1); + status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } - *buf++ = ioread8(chip->vendor.iobase); + *buf++ = ioread8(priv->iobase); } /* make sure data available is gone */ - status = ioread8(chip->vendor.iobase + 1); + status = ioread8(priv->iobase + 1); if (status & ATML_STATUS_DATA_AVAIL) { - dev_err(chip->pdev, "data available is stuck\n"); + dev_err(&chip->dev, "data available is stuck\n"); return -EIO; } @@ -95,12 +96,13 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) { + struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); int i; - dev_dbg(chip->pdev, "tpm_atml_send:\n"); + dev_dbg(&chip->dev, "tpm_atml_send:\n"); for (i = 0; i < count; i++) { - dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); - iowrite8(buf[i], chip->vendor.iobase); + dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); + iowrite8(buf[i], priv->iobase); } return count; @@ -108,12 +110,16 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) static void tpm_atml_cancel(struct tpm_chip *chip) { - iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1); + struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); + + iowrite8(ATML_STATUS_ABORT, 
priv->iobase + 1); } static u8 tpm_atml_status(struct tpm_chip *chip) { - return ioread8(chip->vendor.iobase + 1); + struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); + + return ioread8(priv->iobase + 1); } static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status) @@ -136,13 +142,13 @@ static struct platform_device *pdev; static void atml_plat_remove(void) { struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); + struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); if (chip) { tpm_chip_unregister(chip); - if (chip->vendor.have_region) - atmel_release_region(chip->vendor.base, - chip->vendor.region_size); - atmel_put_base_addr(chip->vendor.iobase); + if (priv->have_region) + atmel_release_region(priv->base, priv->region_size); + atmel_put_base_addr(priv->iobase); platform_device_unregister(pdev); } } @@ -163,6 +169,7 @@ static int __init init_atmel(void) int have_region, region_size; unsigned long base; struct tpm_chip *chip; + struct tpm_atmel_priv *priv; rc = platform_driver_register(&atml_drv); if (rc) @@ -183,16 +190,24 @@ static int __init init_atmel(void) goto err_rel_reg; } + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + rc = -ENOMEM; + goto err_unreg_dev; + } + + priv->iobase = iobase; + priv->base = base; + priv->have_region = have_region; + priv->region_size = region_size; + chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel); if (IS_ERR(chip)) { rc = PTR_ERR(chip); goto err_unreg_dev; } - chip->vendor.iobase = iobase; - chip->vendor.base = base; - chip->vendor.have_region = have_region; - chip->vendor.region_size = region_size; + dev_set_drvdata(&chip->dev, priv); rc = tpm_chip_register(chip); if (rc) diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h index 6c831f9466b7..4f96d80cdce9 100644 --- a/drivers/char/tpm/tpm_atmel.h +++ b/drivers/char/tpm/tpm_atmel.h @@ -22,12 +22,19 @@ * */ +struct tpm_atmel_priv { + int region_size; + int have_region; + unsigned long base; + void __iomem *iobase; +}; + #ifdef CONFIG_PPC64 #include <asm/prom.h> -#define atmel_getb(chip, offset) readb(chip->vendor->iobase + offset); -#define atmel_putb(val, chip, offset) writeb(val, chip->vendor->iobase + offset) +#define atmel_getb(priv, offset) readb(priv->iobase + offset) +#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset) #define atmel_request_region request_mem_region #define atmel_release_region release_mem_region @@ -78,8 +85,9 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) return ioremap(*base, *region_size); } #else -#define atmel_getb(chip, offset) inb(chip->vendor->base + offset) -#define atmel_putb(val, chip, offset) outb(val, chip->vendor->base + offset) +#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset) +#define atmel_putb(val, chip, offset) \ + outb(val, atmel_get_priv(chip)->base + offset) #define atmel_request_region request_region #define atmel_release_region release_region /* Atmel definitions */ diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a12b31940344..018c382554ba 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -77,7 +77,6 @@ enum crb_flags { struct crb_priv { unsigned int flags; - struct resource res; void __iomem *iobase; struct crb_control_area __iomem *cca; u8 __iomem *cmd; @@ -88,7 +87,7 @@ static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume); static u8 crb_status(struct tpm_chip *chip) { - struct crb_priv *priv = chip->vendor.priv; + struct crb_priv *priv = 
dev_get_drvdata(&chip->dev); u8 sts = 0; if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) != @@ -100,7 +99,7 @@ static u8 crb_status(struct tpm_chip *chip) static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct crb_priv *priv = chip->vendor.priv; + struct crb_priv *priv = dev_get_drvdata(&chip->dev); unsigned int expected; /* sanity check */ @@ -140,7 +139,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip) static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) { - struct crb_priv *priv = chip->vendor.priv; + struct crb_priv *priv = dev_get_drvdata(&chip->dev); int rc = 0; if (len > ioread32(&priv->cca->cmd_size)) { @@ -167,7 +166,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) static void crb_cancel(struct tpm_chip *chip) { - struct crb_priv *priv = chip->vendor.priv; + struct crb_priv *priv = dev_get_drvdata(&chip->dev); iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel); @@ -182,13 +181,14 @@ static void crb_cancel(struct tpm_chip *chip) static bool crb_req_canceled(struct tpm_chip *chip, u8 status) { - struct crb_priv *priv = chip->vendor.priv; + struct crb_priv *priv = dev_get_drvdata(&chip->dev); u32 cancel = ioread32(&priv->cca->cancel); return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; } static const struct tpm_class_ops tpm_crb = { + .flags = TPM_OPS_AUTO_STARTUP, .status = crb_status, .recv = crb_recv, .send = crb_send, @@ -201,42 +201,33 @@ static const struct tpm_class_ops tpm_crb = { static int crb_init(struct acpi_device *device, struct crb_priv *priv) { struct tpm_chip *chip; - int rc; chip = tpmm_chip_alloc(&device->dev, &tpm_crb); if (IS_ERR(chip)) return PTR_ERR(chip); - chip->vendor.priv = priv; + dev_set_drvdata(&chip->dev, priv); chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - rc = tpm_get_timeouts(chip); - if (rc) - return rc; - - rc = tpm2_do_selftest(chip); - if (rc) - return rc; - return tpm_chip_register(chip); } static int crb_check_resource(struct acpi_resource *ares, void *data) { - struct crb_priv *priv = data; + struct resource *io_res = data; struct resource res; if (acpi_dev_resource_memory(ares, &res)) { - priv->res = res; - priv->res.name = NULL; + *io_res = res; + io_res->name = NULL; } return 1; } static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, - u64 start, u32 size) + struct resource *io_res, u64 start, u32 size) { struct resource new_res = { .start = start, @@ -246,53 +237,74 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, /* Detect a 64 bit address on a 32 bit system */ if (start != new_res.start) - return ERR_PTR(-EINVAL); + return (void __iomem *) ERR_PTR(-EINVAL); - if (!resource_contains(&priv->res, &new_res)) + if (!resource_contains(io_res, &new_res)) return devm_ioremap_resource(dev, &new_res); - return priv->iobase + (new_res.start - priv->res.start); + return priv->iobase + (new_res.start - io_res->start); } static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { struct list_head resources; + struct resource io_res; struct device *dev = &device->dev; - u64 pa; + u64 cmd_pa; + u32 cmd_size; + u64 rsp_pa; + u32 rsp_size; int ret; INIT_LIST_HEAD(&resources); ret = acpi_dev_get_resources(device, &resources, crb_check_resource, - priv); + &io_res); if (ret < 0) return ret; acpi_dev_free_resource_list(&resources); - if (resource_type(&priv->res) != IORESOURCE_MEM) { + if (resource_type(&io_res) != IORESOURCE_MEM) { dev_err(dev, FW_BUG 
"TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; } - priv->iobase = devm_ioremap_resource(dev, &priv->res); + priv->iobase = devm_ioremap_resource(dev, &io_res); if (IS_ERR(priv->iobase)) return PTR_ERR(priv->iobase); - priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000); + priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address, + sizeof(struct crb_control_area)); if (IS_ERR(priv->cca)) return PTR_ERR(priv->cca); - pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) | - (u64) ioread32(&priv->cca->cmd_pa_low); - priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size)); + cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) | + (u64) ioread32(&priv->cca->cmd_pa_low); + cmd_size = ioread32(&priv->cca->cmd_size); + priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) return PTR_ERR(priv->cmd); - memcpy_fromio(&pa, &priv->cca->rsp_pa, 8); - pa = le64_to_cpu(pa); - priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size)); - return PTR_ERR_OR_ZERO(priv->rsp); + memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8); + rsp_pa = le64_to_cpu(rsp_pa); + rsp_size = ioread32(&priv->cca->rsp_size); + + if (cmd_pa != rsp_pa) { + priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); + return PTR_ERR_OR_ZERO(priv->rsp); + } + + /* According to the PTP specification, overlapping command and response + * buffer sizes must be identical. + */ + if (cmd_size != rsp_size) { + dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical"); + return -EINVAL; + } + + priv->rsp = priv->cmd; + return 0; } static int crb_acpi_add(struct acpi_device *device) @@ -344,9 +356,6 @@ static int crb_acpi_remove(struct acpi_device *device) struct device *dev = &device->dev; struct tpm_chip *chip = dev_get_drvdata(dev); - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_CLEAR); - tpm_chip_unregister(chip); return 0; diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index 4e6940acf639..e7228863290e 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c @@ -403,7 +403,7 @@ static int is_bad(void *p) return 0; } -struct dentry **tpm_bios_log_setup(char *name) +struct dentry **tpm_bios_log_setup(const char *name) { struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file; diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h index 267bfbd1b7bb..8de62b09be51 100644 --- a/drivers/char/tpm/tpm_eventlog.h +++ b/drivers/char/tpm/tpm_eventlog.h @@ -77,10 +77,10 @@ int read_log(struct tpm_bios_log *log); #if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \ defined(CONFIG_ACPI) -extern struct dentry **tpm_bios_log_setup(char *); +extern struct dentry **tpm_bios_log_setup(const char *); extern void tpm_bios_log_teardown(struct dentry **); #else -static inline struct dentry **tpm_bios_log_setup(char *name) +static inline struct dentry **tpm_bios_log_setup(const char *name) { return NULL; } diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 8dfb88b9739c..95ce2e9ccdc6 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -51,8 +51,8 @@ struct priv_data { static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) { - struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct priv_data *priv = dev_get_drvdata(&chip->dev); + struct i2c_client *client = 
to_i2c_client(chip->dev.parent); s32 status; priv->len = 0; @@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) status = i2c_master_send(client, buf, len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); return status; @@ -70,8 +70,8 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct priv_data *priv = dev_get_drvdata(&chip->dev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); struct tpm_output_header *hdr = (struct tpm_output_header *)priv->buffer; u32 expected_len; @@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) return -ENOMEM; if (priv->len >= expected_len) { - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) } rc = i2c_master_recv(client, buf, expected_len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) static void i2c_atmel_cancel(struct tpm_chip *chip) { - dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); + dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); } static u8 i2c_atmel_read_status(struct tpm_chip *chip) { - struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct priv_data *priv = dev_get_drvdata(&chip->dev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); int rc; /* The TPM fails the I2C read until it is ready, so we do the entire @@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip) /* Once the TPM has completed the command the command remains readable * until another command is issued. 
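 *
 * A minimal sketch of the drvdata idiom this series converts every TPM
 * driver to; "foo" is a hypothetical driver and foo_ops stands in for its
 * tpm_class_ops, neither is code from this patch:
 *
 *	struct foo_priv {
 *		u8 buffer[64];
 *	};
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_priv *priv;
 *		struct tpm_chip *chip;
 *
 *		chip = tpmm_chip_alloc(dev, &foo_ops);
 *		if (IS_ERR(chip))
 *			return PTR_ERR(chip);
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		dev_set_drvdata(&chip->dev, priv);
 *		return tpm_chip_register(chip);
 *	}
 *
 * Every tpm_class_ops callback then recovers its state with
 * dev_get_drvdata(&chip->dev) in place of the removed chip->vendor.priv;
 * both allocations are devres-managed, so no explicit free is needed.
 *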
*/ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s: sts=%d", __func__, rc); if (rc <= 0) return 0; @@ -141,6 +141,7 @@ static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops i2c_atmel = { + .flags = TPM_OPS_AUTO_STARTUP, .status = i2c_atmel_read_status, .recv = i2c_atmel_recv, .send = i2c_atmel_send, @@ -155,6 +156,7 @@ static int i2c_atmel_probe(struct i2c_client *client, { struct tpm_chip *chip; struct device *dev = &client->dev; + struct priv_data *priv; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -163,26 +165,21 @@ static int i2c_atmel_probe(struct i2c_client *client, if (IS_ERR(chip)) return PTR_ERR(chip); - chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), - GFP_KERNEL); - if (!chip->vendor.priv) + priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (!priv) return -ENOMEM; /* Default timeouts */ - chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); - chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); - chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); - chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); - chip->vendor.irq = 0; + chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + dev_set_drvdata(&chip->dev, priv); /* There is no known way to probe for this device, and all version * information seems to be read via TPM commands. Thus we rely on the * TPM startup process in the common code to detect the device. */ - if (tpm_get_timeouts(chip)) - return -ENODEV; - - if (tpm_do_selftest(chip)) - return -ENODEV; return tpm_chip_register(chip); } diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 63d5d22e9e60..62ee44e57ddc 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -66,6 +66,7 @@ enum i2c_chip_type { /* Structure to store I2C TPM specific stuff */ struct tpm_inf_dev { struct i2c_client *client; + int locality; u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. 
buffer size + addr */ struct tpm_chip *chip; enum i2c_chip_type chip_type; @@ -288,7 +289,7 @@ static int check_locality(struct tpm_chip *chip, int loc) if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { - chip->vendor.locality = loc; + tpm_dev.locality = loc; return loc; } @@ -320,7 +321,7 @@ static int request_locality(struct tpm_chip *chip, int loc) iic_tpm_write(TPM_ACCESS(loc), &buf, 1); /* wait for burstcount */ - stop = jiffies + chip->vendor.timeout_a; + stop = jiffies + chip->timeout_a; do { if (check_locality(chip, loc) >= 0) return loc; @@ -337,7 +338,7 @@ static u8 tpm_tis_i2c_status(struct tpm_chip *chip) u8 i = 0; do { - if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0) + if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0) return 0; i++; @@ -351,7 +352,7 @@ static void tpm_tis_i2c_ready(struct tpm_chip *chip) { /* this causes the current command to be aborted */ u8 buf = TPM_STS_COMMAND_READY; - iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1); + iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1); } static ssize_t get_burstcount(struct tpm_chip *chip) @@ -362,10 +363,10 @@ static ssize_t get_burstcount(struct tpm_chip *chip) /* wait for burstcount */ /* which timeout value, spec has 2 answers (c & d) */ - stop = jiffies + chip->vendor.timeout_d; + stop = jiffies + chip->timeout_d; do { /* Note: STS is little endian */ - if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0) + if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0) burstcnt = 0; else burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0]; @@ -419,7 +420,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) if (burstcnt > (count - size)) burstcnt = count - size; - rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality), + rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality), &(buf[size]), burstcnt); if (rc == 0) size += burstcnt; @@ -446,7 +447,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read first 10 bytes, including tag, paramsize, and result */ size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); + dev_err(&chip->dev, "Unable to read header\n"); goto out; } @@ -459,14 +460,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); + dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } - wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); + wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ - dev_err(chip->pdev, "Error left over data\n"); + dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } @@ -477,7 +478,7 @@ out: * so we sleep rather than keeping the bus busy */ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); - release_locality(chip, chip->vendor.locality, 0); + release_locality(chip, tpm_dev.locality, 0); return size; } @@ -500,7 +501,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) tpm_tis_i2c_ready(chip); if (wait_for_stat (chip, TPM_STS_COMMAND_READY, - chip->vendor.timeout_b, &status) < 0) { + chip->timeout_b, &status) < 0) { rc = -ETIME; goto out_err; } @@ -516,7 +517,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) if (burstcnt > (len - 1 - count)) burstcnt = len - 1 - count; - rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), + rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), burstcnt); if (rc == 0) count += burstcnt; @@ -530,7 +531,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) } wait_for_stat(chip, TPM_STS_VALID, - chip->vendor.timeout_c, &status); + chip->timeout_c, &status); if ((status & TPM_STS_DATA_EXPECT) == 0) { rc = -EIO; @@ -539,15 +540,15 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) } /* write last byte */ - iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1); - wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); + iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1); + wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); if ((status & TPM_STS_DATA_EXPECT) != 0) { rc = -EIO; goto out_err; } /* go and do it */ - iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1); + iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1); return len; out_err: @@ -556,7 +557,7 @@ out_err: * so we sleep rather than keeping the bus busy */ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); - release_locality(chip, chip->vendor.locality, 0); + release_locality(chip, tpm_dev.locality, 0); return rc; } @@ -566,6 +567,7 @@ static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops tpm_tis_i2c = { + .flags = TPM_OPS_AUTO_STARTUP, .status = tpm_tis_i2c_status, .recv = tpm_tis_i2c_recv, .send = tpm_tis_i2c_send, @@ -585,14 +587,11 @@ static int tpm_tis_i2c_init(struct device *dev) if (IS_ERR(chip)) return PTR_ERR(chip); - /* Disable interrupts */ - chip->vendor.irq = 0; - /* Default timeouts */ - chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); - chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); - chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); + chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); if (request_locality(chip, 0) != 0) { dev_err(dev, "could not request locality\n"); @@ -619,15 +618,11 @@ static int tpm_tis_i2c_init(struct device *dev) dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16); - INIT_LIST_HEAD(&chip->vendor.list); tpm_dev.chip = chip; - tpm_get_timeouts(chip); - tpm_do_selftest(chip); - return tpm_chip_register(chip); out_release: - release_locality(chip, chip->vendor.locality, 1); + release_locality(chip, tpm_dev.locality, 1); tpm_dev.client = NULL; out_err: return rc; @@ -699,7 +694,7 @@ 
static int tpm_tis_i2c_remove(struct i2c_client *client) struct tpm_chip *chip = tpm_dev.chip; tpm_chip_unregister(chip); - release_locality(chip, chip->vendor.locality, 1); + release_locality(chip, tpm_dev.locality, 1); tpm_dev.client = NULL; return 0; diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 847f1597fe9b..e3a9155ee671 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -1,5 +1,5 @@ -/****************************************************************************** - * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, +/****************************************************************************** + * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX, * based on the TCG TPM Interface Spec version 1.2. * Specifications at www.trustedcomputinggroup.org * @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/i2c.h> +#include <linux/of_device.h> #include "tpm.h" /* I2C interface offsets */ @@ -52,10 +53,13 @@ #define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ #define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ -#define I2C_DRIVER_NAME "tpm_i2c_nuvoton" +#define OF_IS_TPM2 ((void *)1) +#define I2C_IS_TPM2 1 struct priv_data { + int irq; unsigned int intrs; + wait_queue_head_t read_queue; }; static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, @@ -96,13 +100,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, /* read TPM_STS register */ static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; u8 data; status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); if (status <= 0) { - dev_err(chip->pdev, "%s() error return %d\n", __func__, + dev_err(&chip->dev, "%s() error return %d\n", __func__, status); data = TPM_STS_ERR_VAL; } @@ -127,13 +131,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) /* write commandReady to TPM_STS register */ static void i2c_nuvoton_ready(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; /* this causes the current command to be aborted */ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); if (status < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to write TPM_STS.commandReady\n", __func__); } @@ -142,7 +146,7 @@ static void i2c_nuvoton_ready(struct tpm_chip *chip) static int i2c_nuvoton_get_burstcount(struct i2c_client *client, struct tpm_chip *chip) { - unsigned long stop = jiffies + chip->vendor.timeout_d; + unsigned long stop = jiffies + chip->timeout_d; s32 status; int burst_count = -1; u8 data; @@ -163,7 +167,7 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client, } /* - * WPCT301/NPCT501 SINT# supports only dataAvail + * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail * any call to this function which is not waiting for dataAvail will * set queue to NULL to avoid waiting for interrupt */ @@ -176,12 +180,12 @@ static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, u32 timeout, wait_queue_head_t *queue) { - if (chip->vendor.irq && queue) { + if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) { s32 rc; - struct priv_data *priv = chip->vendor.priv; + struct
priv_data *priv = dev_get_drvdata(&chip->dev); unsigned int cur_intrs = priv->intrs; - enable_irq(chip->vendor.irq); + enable_irq(priv->irq); rc = wait_event_interruptible_timeout(*queue, cur_intrs != priv->intrs, timeout); @@ -212,7 +216,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, return 0; } while (time_before(jiffies, stop)); } - dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, value); return -ETIMEDOUT; } @@ -231,16 +235,17 @@ static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, static int i2c_nuvoton_recv_data(struct i2c_client *client, struct tpm_chip *chip, u8 *buf, size_t count) { + struct priv_data *priv = dev_get_drvdata(&chip->dev); s32 rc; int burst_count, bytes2read, size = 0; while (size < count && i2c_nuvoton_wait_for_data_avail(chip, - chip->vendor.timeout_c, - &chip->vendor.read_queue) == 0) { + chip->timeout_c, + &priv->read_queue) == 0) { burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to read burstCount=%d\n", __func__, burst_count); return -EIO; @@ -249,12 +254,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, bytes2read, &buf[size]); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail on i2c_nuvoton_read_buf()=%d\n", __func__, rc); return -EIO; } - dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); + dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); size += bytes2read; } @@ -264,7 +269,8 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, /* Read TPM command results */ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct device *dev = chip->pdev; + struct priv_data *priv = dev_get_drvdata(&chip->dev); + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; int expected, status, burst_count, retries, size = 0; @@ -285,7 +291,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * tag, paramsize, and result */ status = i2c_nuvoton_wait_for_data_avail( - chip, chip->vendor.timeout_c, &chip->vendor.read_queue); + chip, chip->timeout_c, &priv->read_queue); if (status != 0) { dev_err(dev, "%s() timeout on dataAvail\n", __func__); size = -ETIMEDOUT; @@ -325,7 +331,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) } if (i2c_nuvoton_wait_for_stat( chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, - TPM_STS_VALID, chip->vendor.timeout_c, + TPM_STS_VALID, chip->timeout_c, NULL)) { dev_err(dev, "%s() error left over data\n", __func__); size = -ETIMEDOUT; @@ -334,7 +340,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) break; } i2c_nuvoton_ready(chip); - dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); + dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); return size; } @@ -347,7 +353,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) */ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) { - struct device *dev = chip->pdev; + struct priv_data *priv = dev_get_drvdata(&chip->dev); + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; size_t count = 0; @@ -357,7 +364,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) i2c_nuvoton_ready(chip); if (i2c_nuvoton_wait_for_stat(chip, 
TPM_STS_COMMAND_READY, TPM_STS_COMMAND_READY, - chip->vendor.timeout_b, NULL)) { + chip->timeout_b, NULL)) { dev_err(dev, "%s() timeout on commandReady\n", __func__); rc = -EIO; @@ -389,7 +396,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) TPM_STS_EXPECT, TPM_STS_VALID | TPM_STS_EXPECT, - chip->vendor.timeout_c, + chip->timeout_c, NULL); if (rc < 0) { dev_err(dev, "%s() timeout on Expect\n", @@ -414,7 +421,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) rc = i2c_nuvoton_wait_for_stat(chip, TPM_STS_VALID | TPM_STS_EXPECT, TPM_STS_VALID, - chip->vendor.timeout_c, NULL); + chip->timeout_c, NULL); if (rc) { dev_err(dev, "%s() timeout on Expect to clear\n", __func__); @@ -439,7 +446,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) rc = i2c_nuvoton_wait_for_data_avail(chip, tpm_calc_ordinal_duration(chip, ordinal), - &chip->vendor.read_queue); + &priv->read_queue); if (rc) { dev_err(dev, "%s() timeout command duration\n", __func__); i2c_nuvoton_ready(chip); @@ -456,6 +463,7 @@ static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) } static const struct tpm_class_ops tpm_i2c = { + .flags = TPM_OPS_AUTO_STARTUP, .status = i2c_nuvoton_read_status, .recv = i2c_nuvoton_recv, .send = i2c_nuvoton_send, @@ -473,11 +481,11 @@ static const struct tpm_class_ops tpm_i2c = { static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) { struct tpm_chip *chip = dev_id; - struct priv_data *priv = chip->vendor.priv; + struct priv_data *priv = dev_get_drvdata(&chip->dev); priv->intrs++; - wake_up(&chip->vendor.read_queue); - disable_irq_nosync(chip->vendor.irq); + wake_up(&priv->read_queue); + disable_irq_nosync(priv->irq); return IRQ_HANDLED; } @@ -521,6 +529,7 @@ static int i2c_nuvoton_probe(struct i2c_client *client, int rc; struct tpm_chip *chip; struct device *dev = &client->dev; + struct priv_data *priv; u32 vid = 0; rc = get_vid(client, &vid); @@ -534,46 +543,56 @@ static int i2c_nuvoton_probe(struct i2c_client *client, if (IS_ERR(chip)) return PTR_ERR(chip); - chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), - GFP_KERNEL); - if (!chip->vendor.priv) + priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (!priv) return -ENOMEM; - init_waitqueue_head(&chip->vendor.read_queue); - init_waitqueue_head(&chip->vendor.int_queue); + if (dev->of_node) { + const struct of_device_id *of_id; + + of_id = of_match_device(dev->driver->of_match_table, dev); + if (of_id && of_id->data == OF_IS_TPM2) + chip->flags |= TPM_CHIP_FLAG_TPM2; + } else + if (id->driver_data == I2C_IS_TPM2) + chip->flags |= TPM_CHIP_FLAG_TPM2; + + init_waitqueue_head(&priv->read_queue); /* Default timeouts */ - chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); - chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); - chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); - chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + dev_set_drvdata(&chip->dev, priv); /* * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to: * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT * The IRQ should be set in the i2c_board_info (which is done * automatically in of_i2c_register_devices, for device tree users */ - 
chip->vendor.irq = client->irq; - - if (chip->vendor.irq) { - dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); - rc = devm_request_irq(dev, chip->vendor.irq, + priv->irq = client->irq; + if (client->irq) { + dev_dbg(dev, "%s() priv->irq\n", __func__); + rc = devm_request_irq(dev, client->irq, i2c_nuvoton_int_handler, IRQF_TRIGGER_LOW, - chip->devname, + dev_name(&chip->dev), chip); if (rc) { dev_err(dev, "%s() Unable to request irq: %d for use\n", - __func__, chip->vendor.irq); - chip->vendor.irq = 0; + __func__, priv->irq); + priv->irq = 0; } else { + chip->flags |= TPM_CHIP_FLAG_IRQ; /* Clear any pending interrupt */ i2c_nuvoton_ready(chip); /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ rc = i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, TPM_STS_COMMAND_READY, - chip->vendor.timeout_b, + chip->timeout_b, NULL); if (rc == 0) { /* @@ -601,25 +620,20 @@ static int i2c_nuvoton_probe(struct i2c_client *client, } } - if (tpm_get_timeouts(chip)) - return -ENODEV; - - if (tpm_do_selftest(chip)) - return -ENODEV; - return tpm_chip_register(chip); } static int i2c_nuvoton_remove(struct i2c_client *client) { - struct device *dev = &(client->dev); - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = i2c_get_clientdata(client); + tpm_chip_unregister(chip); return 0; } static const struct i2c_device_id i2c_nuvoton_id[] = { - {I2C_DRIVER_NAME, 0}, + {"tpm_i2c_nuvoton"}, + {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2}, {} }; MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); @@ -628,6 +642,7 @@ MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); static const struct of_device_id i2c_nuvoton_of_match[] = { {.compatible = "nuvoton,npct501"}, {.compatible = "winbond,wpct301"}, + {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2}, {}, }; MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); @@ -640,7 +655,7 @@ static struct i2c_driver i2c_nuvoton_driver = { .probe = i2c_nuvoton_probe, .remove = i2c_nuvoton_remove, .driver = { - .name = I2C_DRIVER_NAME, + .name = "tpm_i2c_nuvoton", .pm = &i2c_nuvoton_pm_ops, .of_match_table = of_match_ptr(i2c_nuvoton_of_match), }, diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b0a9a9e34241..946025a7413b 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -54,21 +54,6 @@ static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) } /** - * ibmvtpm_get_data - Retrieve ibm vtpm data - * @dev: device struct - * - * Return value: - * vtpm device struct - */ -static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - if (chip) - return (struct ibmvtpm_dev *)TPM_VPRIV(chip); - return NULL; -} - -/** * tpm_ibmvtpm_recv - Receive data after send * @chip: tpm chip struct * @buf: buffer to read @@ -79,12 +64,10 @@ static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev) */ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct ibmvtpm_dev *ibmvtpm; + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); u16 len; int sig; - ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); - if (!ibmvtpm->rtce_buf) { dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); return 0; @@ -122,13 +105,11 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) */ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { - struct ibmvtpm_dev *ibmvtpm; + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); struct ibmvtpm_crq crq; __be64 *word = (__be64 *)&crq; int 
rc, sig; - ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); - if (!ibmvtpm->rtce_buf) { dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); return 0; @@ -289,8 +270,8 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) */ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) { - struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); - struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev); + struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); int rc = 0; tpm_chip_unregister(chip); @@ -327,7 +308,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) */ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) { - struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); + struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); /* ibmvtpm initializes at probe time, so the data we are * asking for may not be set yet. Estimate that 4K required @@ -348,7 +330,8 @@ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) */ static int tpm_ibmvtpm_suspend(struct device *dev) { - struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); struct ibmvtpm_crq crq; u64 *buf = (u64 *) &crq; int rc = 0; @@ -400,7 +383,8 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) */ static int tpm_ibmvtpm_resume(struct device *dev) { - struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); int rc = 0; do { @@ -643,7 +627,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, crq_q->index = 0; - TPM_VPRIV(chip) = (void *)ibmvtpm; + dev_set_drvdata(&chip->dev, ibmvtpm); spin_lock_init(&ibmvtpm->rtce_lock); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 6c488e635fdd..e3cf9f3545c5 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) - dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); if (wait_for_bit == STAT_RDA) - dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); return -EIO; } return 0; @@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) static void tpm_wtx(struct tpm_chip *chip) { number_of_wtx++; - dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", + dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", number_of_wtx, TPM_MAX_WTX_PACKAGES); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX); @@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip) static void tpm_wtx_abort(struct tpm_chip *chip) { - dev_info(chip->pdev, "Aborting WTX\n"); + dev_info(&chip->dev, "Aborting WTX\n"); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX_ABORT); wait_and_send(chip, 0x00); @@ -257,7 +257,7 @@ recv_begin: } if (buf[0] != TPM_VL_VER) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Wrong transport protocol implementation!\n"); return -EIO; } @@ -272,7 +272,7 @@ recv_begin: } if ((size == 0x6D00) && (buf[1] == 0x80)) { - dev_err(chip->pdev, "Error handling on vendor layer!\n"); + dev_err(&chip->dev, "Error handling on vendor layer!\n"); return 
-EIO; } @@ -284,7 +284,7 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX) { - dev_info(chip->pdev, "WTX-package received\n"); + dev_info(&chip->dev, "WTX-package received\n"); if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { tpm_wtx(chip); goto recv_begin; @@ -295,14 +295,14 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { - dev_info(chip->pdev, "WTX-abort acknowledged\n"); + dev_info(&chip->dev, "WTX-abort acknowledged\n"); return size; } if (buf[1] == TPM_CTRL_ERROR) { - dev_err(chip->pdev, "ERROR-package received:\n"); + dev_err(&chip->dev, "ERROR-package received:\n"); if (buf[4] == TPM_INF_NAK) - dev_err(chip->pdev, + dev_err(&chip->dev, "-> Negative acknowledgement" " - retransmit command!\n"); return -EIO; @@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) ret = empty_fifo(chip, 1); if (ret) { - dev_err(chip->pdev, "Timeout while clearing FIFO\n"); + dev_err(&chip->dev, "Timeout while clearing FIFO\n"); return -EIO; } diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 289389ecef84..9ff0e072c476 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -64,15 +64,21 @@ enum tpm_nsc_cmd_mode { NSC_COMMAND_EOC = 0x03, NSC_COMMAND_CANCEL = 0x22 }; + +struct tpm_nsc_priv { + unsigned long base; +}; + /* * Wait for a certain status to appear */ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) { + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); unsigned long stop; /* status immediately available check */ - *data = inb(chip->vendor.base + NSC_STATUS); + *data = inb(priv->base + NSC_STATUS); if ((*data & mask) == val) return 0; @@ -80,7 +86,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) stop = jiffies + 10 * HZ; do { msleep(TPM_TIMEOUT); - *data = inb(chip->vendor.base + 1); + *data = inb(priv->base + 1); if ((*data & mask) == val) return 0; } @@ -91,13 +97,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) static int nsc_wait_for_ready(struct tpm_chip *chip) { + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); int status; unsigned long stop; /* status immediately available check */ - status = inb(chip->vendor.base + NSC_STATUS); + status = inb(priv->base + NSC_STATUS); if (status & NSC_STATUS_OBF) - status = inb(chip->vendor.base + NSC_DATA); + status = inb(priv->base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; @@ -105,21 +112,22 @@ static int nsc_wait_for_ready(struct tpm_chip *chip) stop = jiffies + 100; do { msleep(TPM_TIMEOUT); - status = inb(chip->vendor.base + NSC_STATUS); + status = inb(priv->base + NSC_STATUS); if (status & NSC_STATUS_OBF) - status = inb(chip->vendor.base + NSC_DATA); + status = inb(priv->base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; } while (time_before(jiffies, stop)); - dev_info(chip->pdev, "wait for ready failed\n"); + dev_info(&chip->dev, "wait for ready failed\n"); return -EBUSY; } static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) { + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 *buffer = buf; u8 data, *p; u32 size; @@ -129,12 +137,13 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) return -EIO; if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { - dev_err(chip->pdev, "F0 timeout\n"); + dev_err(&chip->dev, "F0 timeout\n"); return -EIO; } - if ((data = - inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { - dev_err(chip->pdev, "not in normal mode (0x%x)\n", + + data = 
inb(priv->base + NSC_DATA); + if (data != NSC_COMMAND_NORMAL) { + dev_err(&chip->dev, "not in normal mode (0x%x)\n", data); return -EIO; } @@ -143,22 +152,24 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) for (p = buffer; p < &buffer[count]; p++) { if (wait_for_stat (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "OBF timeout (while reading data)\n"); return -EIO; } if (data & NSC_STATUS_F0) break; - *p = inb(chip->vendor.base + NSC_DATA); + *p = inb(priv->base + NSC_DATA); } if ((data & NSC_STATUS_F0) == 0 && (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { - dev_err(chip->pdev, "F0 not set\n"); + dev_err(&chip->dev, "F0 not set\n"); return -EIO; } - if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { - dev_err(chip->pdev, + + data = inb(priv->base + NSC_DATA); + if (data != NSC_COMMAND_EOC) { + dev_err(&chip->dev, "expected end of command(0x%x)\n", data); return -EIO; } @@ -174,6 +185,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) { + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 data; int i; @@ -183,48 +195,52 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) * fix it. Not sure why this is needed, we followed the flow * chart in the manual to the letter. */ - outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); + outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); if (nsc_wait_for_ready(chip) != 0) return -EIO; if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } - outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); + outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND); if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { - dev_err(chip->pdev, "IBR timeout\n"); + dev_err(&chip->dev, "IBR timeout\n"); return -EIO; } for (i = 0; i < count; i++) { if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "IBF timeout (while writing data)\n"); return -EIO; } - outb(buf[i], chip->vendor.base + NSC_DATA); + outb(buf[i], priv->base + NSC_DATA); } if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } - outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); + outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND); return count; } static void tpm_nsc_cancel(struct tpm_chip *chip) { - outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); + + outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); } static u8 tpm_nsc_status(struct tpm_chip *chip) { - return inb(chip->vendor.base + NSC_STATUS); + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); + + return inb(priv->base + NSC_STATUS); } static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status) @@ -247,9 +263,10 @@ static struct platform_device *pdev = NULL; static void tpm_nsc_remove(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); tpm_chip_unregister(chip); - release_region(chip->vendor.base, 2); + release_region(priv->base, 2); } static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); @@ -268,6 +285,7 @@ static int __init init_nsc(void) int nscAddrBase = TPM_ADDR; struct tpm_chip 
*chip; unsigned long base; + struct tpm_nsc_priv *priv; /* verify that it is a National part (SID) */ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { @@ -301,6 +319,14 @@ static int __init init_nsc(void) if ((rc = platform_device_add(pdev)) < 0) goto err_put_dev; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + rc = -ENOMEM; + goto err_del_dev; + } + + priv->base = base; + if (request_region(base, 2, "tpm_nsc0") == NULL ) { rc = -EBUSY; goto err_del_dev; @@ -312,6 +338,8 @@ static int __init init_nsc(void) goto err_rel_reg; } + dev_set_drvdata(&chip->dev, priv); + rc = tpm_chip_register(chip); if (rc) goto err_rel_reg; @@ -349,8 +377,6 @@ static int __init init_nsc(void) "NSC TPM revision %d\n", tpm_read_index(nscAddrBase, 0x27) & 0x1F); - chip->vendor.base = base; - return 0; err_rel_reg: diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index a507006728e0..eaf5730d79eb 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -29,40 +29,7 @@ #include <linux/acpi.h> #include <linux/freezer.h> #include "tpm.h" - -enum tis_access { - TPM_ACCESS_VALID = 0x80, - TPM_ACCESS_ACTIVE_LOCALITY = 0x20, - TPM_ACCESS_REQUEST_PENDING = 0x04, - TPM_ACCESS_REQUEST_USE = 0x02, -}; - -enum tis_status { - TPM_STS_VALID = 0x80, - TPM_STS_COMMAND_READY = 0x40, - TPM_STS_GO = 0x20, - TPM_STS_DATA_AVAIL = 0x10, - TPM_STS_DATA_EXPECT = 0x08, -}; - -enum tis_int_flags { - TPM_GLOBAL_INT_ENABLE = 0x80000000, - TPM_INTF_BURST_COUNT_STATIC = 0x100, - TPM_INTF_CMD_READY_INT = 0x080, - TPM_INTF_INT_EDGE_FALLING = 0x040, - TPM_INTF_INT_EDGE_RISING = 0x020, - TPM_INTF_INT_LEVEL_LOW = 0x010, - TPM_INTF_INT_LEVEL_HIGH = 0x008, - TPM_INTF_LOCALITY_CHANGE_INT = 0x004, - TPM_INTF_STS_VALID_INT = 0x002, - TPM_INTF_DATA_AVAIL_INT = 0x001, -}; - -enum tis_defaults { - TIS_MEM_LEN = 0x5000, - TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ -}; +#include "tpm_tis_core.h" struct tpm_info { struct resource res; @@ -73,30 +40,30 @@ struct tpm_info { int irq; }; -/* Some timeout values are needed before it is known whether the chip is - * TPM 1.0 or TPM 2.0. 
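/*
 * A minimal sketch of the container_of() pattern behind the new phy layer
 * above: each transport embeds the generic struct tpm_tis_data and derives
 * its own wrapper from it. The SPI names are hypothetical, not code from
 * this patch:
 *
 *	struct tpm_tis_spi_phy {
 *		struct tpm_tis_data priv;
 *		struct spi_device *spi;
 *	};
 *
 *	static inline struct tpm_tis_spi_phy *
 *	to_tpm_tis_spi_phy(struct tpm_tis_data *data)
 *	{
 *		return container_of(data, struct tpm_tis_spi_phy, priv);
 *	}
 *
 * The core works only on the embedded tpm_tis_data; each phy callback maps
 * it back to the wrapper to reach its bus handle, so no per-transport void
 * pointer is required.
 */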
- */ -#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A) -#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B) -#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C) -#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D) - -#define TPM_ACCESS(l) (0x0000 | ((l) << 12)) -#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) -#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) -#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) -#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) -#define TPM_STS(l) (0x0018 | ((l) << 12)) -#define TPM_STS3(l) (0x001b | ((l) << 12)) -#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) - -#define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) -#define TPM_RID(l) (0x0F04 | ((l) << 12)) - -struct priv_data { - bool irq_tested; +struct tpm_tis_tcg_phy { + struct tpm_tis_data priv; + void __iomem *iobase; }; +static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data) +{ + return container_of(data, struct tpm_tis_tcg_phy, priv); +} + +static bool interrupts = true; +module_param(interrupts, bool, 0444); +MODULE_PARM_DESC(interrupts, "Enable interrupts"); + +static bool itpm; +module_param(itpm, bool, 0444); +MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); + +static bool force; +#ifdef CONFIG_X86 +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); +#endif + #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int has_hid(struct acpi_device *dev, const char *hid) { @@ -120,744 +87,82 @@ static inline int is_itpm(struct acpi_device *dev) } #endif -/* Before we attempt to access the TPM we must see that the valid bit is set. - * The specification says that this bit is 0 at reset and remains 0 until the - * 'TPM has gone through its self test and initialization and has established - * correct values in the other bits.' 
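/*
 * wait_startup() below is the standard jiffies polling loop. A condensed
 * sketch of the idiom (names simplified, not code from this patch):
 *
 *	unsigned long stop = jiffies + timeout;
 *
 *	do {
 *		if (ioread8(iobase + TPM_ACCESS(0)) & TPM_ACCESS_VALID)
 *			return 0;
 *		msleep(TPM_TIMEOUT);
 *	} while (time_before(jiffies, stop));
 *	return -ENODEV;
 *
 * time_before() compares jiffies values with wraparound handled correctly,
 * which a plain "jiffies < stop" would get wrong near the wrap point.
 */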
*/ -static int wait_startup(struct tpm_chip *chip, int l) -{ - unsigned long stop = jiffies + chip->vendor.timeout_a; - do { - if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & - TPM_ACCESS_VALID) - return 0; - msleep(TPM_TIMEOUT); - } while (time_before(jiffies, stop)); - return -1; -} - -static int check_locality(struct tpm_chip *chip, int l) -{ - if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & - (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == - (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) - return chip->vendor.locality = l; - - return -1; -} - -static void release_locality(struct tpm_chip *chip, int l, int force) -{ - if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & - (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == - (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) - iowrite8(TPM_ACCESS_ACTIVE_LOCALITY, - chip->vendor.iobase + TPM_ACCESS(l)); -} - -static int request_locality(struct tpm_chip *chip, int l) -{ - unsigned long stop, timeout; - long rc; - - if (check_locality(chip, l) >= 0) - return l; - - iowrite8(TPM_ACCESS_REQUEST_USE, - chip->vendor.iobase + TPM_ACCESS(l)); - - stop = jiffies + chip->vendor.timeout_a; - - if (chip->vendor.irq) { -again: - timeout = stop - jiffies; - if ((long)timeout <= 0) - return -1; - rc = wait_event_interruptible_timeout(chip->vendor.int_queue, - (check_locality - (chip, l) >= 0), - timeout); - if (rc > 0) - return l; - if (rc == -ERESTARTSYS && freezing(current)) { - clear_thread_flag(TIF_SIGPENDING); - goto again; - } - } else { - /* wait for burstcount */ - do { - if (check_locality(chip, l) >= 0) - return l; - msleep(TPM_TIMEOUT); - } - while (time_before(jiffies, stop)); - } - return -1; -} - -static u8 tpm_tis_status(struct tpm_chip *chip) -{ - return ioread8(chip->vendor.iobase + - TPM_STS(chip->vendor.locality)); -} - -static void tpm_tis_ready(struct tpm_chip *chip) -{ - /* this causes the current command to be aborted */ - iowrite8(TPM_STS_COMMAND_READY, - chip->vendor.iobase + TPM_STS(chip->vendor.locality)); -} - -static int get_burstcount(struct tpm_chip *chip) -{ - unsigned long stop; - int burstcnt; - - /* wait for burstcount */ - /* which timeout value, spec has 2 answers (c & d) */ - stop = jiffies + chip->vendor.timeout_d; - do { - burstcnt = ioread8(chip->vendor.iobase + - TPM_STS(chip->vendor.locality) + 1); - burstcnt += ioread8(chip->vendor.iobase + - TPM_STS(chip->vendor.locality) + - 2) << 8; - if (burstcnt) - return burstcnt; - msleep(TPM_TIMEOUT); - } while (time_before(jiffies, stop)); - return -EBUSY; -} - -static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, + u8 *result) { - int size = 0, burstcnt; - while (size < count && - wait_for_tpm_stat(chip, - TPM_STS_DATA_AVAIL | TPM_STS_VALID, - chip->vendor.timeout_c, - &chip->vendor.read_queue, true) - == 0) { - burstcnt = get_burstcount(chip); - for (; burstcnt > 0 && size < count; burstcnt--) - buf[size++] = ioread8(chip->vendor.iobase + - TPM_DATA_FIFO(chip->vendor. 
- locality)); - } - return size; -} - -static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) -{ - int size = 0; - int expected, status; - - if (count < TPM_HEADER_SIZE) { - size = -EIO; - goto out; - } - - /* read first 10 bytes, including tag, paramsize, and result */ - if ((size = - recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); - goto out; - } - - expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { - size = -EIO; - goto out; - } - - if ((size += - recv_data(chip, &buf[TPM_HEADER_SIZE], - expected - TPM_HEADER_SIZE)) < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); - size = -ETIME; - goto out; - } - - wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, - &chip->vendor.int_queue, false); - status = tpm_tis_status(chip); - if (status & TPM_STS_DATA_AVAIL) { /* retry? */ - dev_err(chip->pdev, "Error left over data\n"); - size = -EIO; - goto out; - } - -out: - tpm_tis_ready(chip); - release_locality(chip, chip->vendor.locality, 0); - return size; -} - -static bool itpm; -module_param(itpm, bool, 0444); -MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); - -/* - * If interrupts are used (signaled by an irq set in the vendor structure) - * tpm.c can skip polling for the data to be available as the interrupt is - * waited for here - */ -static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) -{ - int rc, status, burstcnt; - size_t count = 0; - - if (request_locality(chip, 0) < 0) - return -EBUSY; - - status = tpm_tis_status(chip); - if ((status & TPM_STS_COMMAND_READY) == 0) { - tpm_tis_ready(chip); - if (wait_for_tpm_stat - (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, - &chip->vendor.int_queue, false) < 0) { - rc = -ETIME; - goto out_err; - } - } - - while (count < len - 1) { - burstcnt = get_burstcount(chip); - for (; burstcnt > 0 && count < len - 1; burstcnt--) { - iowrite8(buf[count], chip->vendor.iobase + - TPM_DATA_FIFO(chip->vendor.locality)); - count++; - } - - wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, - &chip->vendor.int_queue, false); - status = tpm_tis_status(chip); - if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { - rc = -EIO; - goto out_err; - } - } - - /* write last byte */ - iowrite8(buf[count], - chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); - wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, - &chip->vendor.int_queue, false); - status = tpm_tis_status(chip); - if ((status & TPM_STS_DATA_EXPECT) != 0) { - rc = -EIO; - goto out_err; - } + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); + while (len--) + *result++ = ioread8(phy->iobase + addr); return 0; - -out_err: - tpm_tis_ready(chip); - release_locality(chip, chip->vendor.locality, 0); - return rc; -} - -static void disable_interrupts(struct tpm_chip *chip) -{ - u32 intmask; - - intmask = - ioread32(chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor.locality)); - intmask &= ~TPM_GLOBAL_INT_ENABLE; - iowrite32(intmask, - chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor.locality)); - devm_free_irq(chip->pdev, chip->vendor.irq, chip); - chip->vendor.irq = 0; -} - -/* - * If interrupts are used (signaled by an irq set in the vendor structure) - * tpm.c can skip polling for the data to be available as the interrupt is - * waited for here - */ -static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) -{ - int rc; - u32 ordinal; - unsigned long dur; - - rc = 
tpm_tis_send_data(chip, buf, len); - if (rc < 0) - return rc; - - /* go and do it */ - iowrite8(TPM_STS_GO, - chip->vendor.iobase + TPM_STS(chip->vendor.locality)); - - if (chip->vendor.irq) { - ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); - - if (chip->flags & TPM_CHIP_FLAG_TPM2) - dur = tpm2_calc_ordinal_duration(chip, ordinal); - else - dur = tpm_calc_ordinal_duration(chip, ordinal); - - if (wait_for_tpm_stat - (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, - &chip->vendor.read_queue, false) < 0) { - rc = -ETIME; - goto out_err; - } - } - return len; -out_err: - tpm_tis_ready(chip); - release_locality(chip, chip->vendor.locality, 0); - return rc; -} - -static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) -{ - int rc, irq; - struct priv_data *priv = chip->vendor.priv; - - if (!chip->vendor.irq || priv->irq_tested) - return tpm_tis_send_main(chip, buf, len); - - /* Verify receipt of the expected IRQ */ - irq = chip->vendor.irq; - chip->vendor.irq = 0; - rc = tpm_tis_send_main(chip, buf, len); - chip->vendor.irq = irq; - if (!priv->irq_tested) - msleep(1); - if (!priv->irq_tested) - disable_interrupts(chip); - priv->irq_tested = true; - return rc; } -struct tis_vendor_timeout_override { - u32 did_vid; - unsigned long timeout_us[4]; -}; - -static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { - /* Atmel 3204 */ - { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), - (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, -}; - -static bool tpm_tis_update_timeouts(struct tpm_chip *chip, - unsigned long *timeout_cap) +static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, + u8 *value) { - int i; - u32 did_vid; + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); - - for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { - if (vendor_timeout_overrides[i].did_vid != did_vid) - continue; - memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, - sizeof(vendor_timeout_overrides[i].timeout_us)); - return true; - } - - return false; + while (len--) + iowrite8(*value++, phy->iobase + addr); + return 0; } -/* - * Early probing for iTPM with STS_DATA_EXPECT flaw. - * Try sending command without itpm flag set and if that - * fails, repeat with itpm flag set. 
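/*
 * tpm_tis_update_timeouts() above keys vendor quirks off the 32-bit DID/VID
 * register: device id in the high 16 bits, vendor id in the low 16, so
 * 0x32041114 is the Atmel 3204. A condensed sketch of the lookup (structure
 * name shortened, not code from this patch):
 *
 *	static const struct tis_override {
 *		u32 did_vid;
 *		unsigned long timeout_us[4];
 *	} overrides[] = {
 *		{ 0x32041114, { 750000, 2000000, 750000, 750000 } },
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(overrides); i++)
 *		if (overrides[i].did_vid == did_vid) {
 *			memcpy(timeout_cap, overrides[i].timeout_us,
 *			       sizeof(overrides[i].timeout_us));
 *			return true;
 *		}
 *	return false;
 *
 * Returning false leaves the timeouts the chip itself reported in place.
 */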
- */ -static int probe_itpm(struct tpm_chip *chip) +static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result) { - int rc = 0; - u8 cmd_getticks[] = { - 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, - 0x00, 0x00, 0x00, 0xf1 - }; - size_t len = sizeof(cmd_getticks); - bool rem_itpm = itpm; - u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0)); - - /* probe only iTPMS */ - if (vendor != TPM_VID_INTEL) - return 0; - - itpm = false; - - rc = tpm_tis_send_data(chip, cmd_getticks, len); - if (rc == 0) - goto out; - - tpm_tis_ready(chip); - release_locality(chip, chip->vendor.locality, 0); + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - itpm = true; - - rc = tpm_tis_send_data(chip, cmd_getticks, len); - if (rc == 0) { - dev_info(chip->pdev, "Detected an iTPM.\n"); - rc = 1; - } else - rc = -EFAULT; - -out: - itpm = rem_itpm; - tpm_tis_ready(chip); - release_locality(chip, chip->vendor.locality, 0); - - return rc; + *result = ioread16(phy->iobase + addr); + return 0; } -static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) +static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result) { - switch (chip->vendor.manufacturer_id) { - case TPM_VID_WINBOND: - return ((status == TPM_STS_VALID) || - (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); - case TPM_VID_STM: - return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); - default: - return (status == TPM_STS_COMMAND_READY); - } -} - -static const struct tpm_class_ops tpm_tis = { - .status = tpm_tis_status, - .recv = tpm_tis_recv, - .send = tpm_tis_send, - .cancel = tpm_tis_ready, - .update_timeouts = tpm_tis_update_timeouts, - .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, - .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, - .req_canceled = tpm_tis_req_canceled, -}; + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); -static irqreturn_t tis_int_handler(int dummy, void *dev_id) -{ - struct tpm_chip *chip = dev_id; - u32 interrupt; - int i; - - interrupt = ioread32(chip->vendor.iobase + - TPM_INT_STATUS(chip->vendor.locality)); - - if (interrupt == 0) - return IRQ_NONE; - - ((struct priv_data *)chip->vendor.priv)->irq_tested = true; - if (interrupt & TPM_INTF_DATA_AVAIL_INT) - wake_up_interruptible(&chip->vendor.read_queue); - if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) - for (i = 0; i < 5; i++) - if (check_locality(chip, i) >= 0) - break; - if (interrupt & - (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | - TPM_INTF_CMD_READY_INT)) - wake_up_interruptible(&chip->vendor.int_queue); - - /* Clear interrupts handled with TPM_EOI */ - iowrite32(interrupt, - chip->vendor.iobase + - TPM_INT_STATUS(chip->vendor.locality)); - ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); - return IRQ_HANDLED; + *result = ioread32(phy->iobase + addr); + return 0; } -/* Register the IRQ and issue a command that will cause an interrupt. If an - * irq is seen then leave the chip setup for IRQ operation, otherwise reverse - * everything and leave in polling mode. Returns 0 on success. 
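/*
 * The IRQ self-test below follows a request-then-verify shape: claim the
 * line, point the TPM's interrupt vector at it, drive one command through,
 * and fall back to polling if the handler never fired. A condensed sketch
 * (glue omitted, not code from this patch):
 *
 *	priv->irq_tested = false;
 *	if (devm_request_irq(dev, irq, tis_int_handler, flags,
 *			     dev_name(dev), chip))
 *		return -1;
 *	iowrite8(irq, iobase + TPM_INT_VECTOR(0));
 *	iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 *		  iobase + TPM_INT_ENABLE(0));
 *	tpm_gen_interrupt(chip);
 *	if (!priv->irq_tested)
 *		disable_interrupts(chip);
 *
 * tis_int_handler() sets irq_tested, so one command round-trip without an
 * interrupt is enough to demote the chip back to polling mode.
 */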
- */ -static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, - int flags, int irq) +static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) { - struct priv_data *priv = chip->vendor.priv; - u8 original_int_vec; - - if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags, - chip->devname, chip) != 0) { - dev_info(chip->pdev, "Unable to request irq: %d for probe\n", - irq); - return -1; - } - chip->vendor.irq = irq; - - original_int_vec = ioread8(chip->vendor.iobase + - TPM_INT_VECTOR(chip->vendor.locality)); - iowrite8(irq, - chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); - - /* Clear all existing */ - iowrite32(ioread32(chip->vendor.iobase + - TPM_INT_STATUS(chip->vendor.locality)), - chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); - - /* Turn on */ - iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, - chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); - - priv->irq_tested = false; - - /* Generate an interrupt by having the core call through to - * tpm_tis_send - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_gen_interrupt(chip); - else - tpm_gen_interrupt(chip); - - /* tpm_tis_send will either confirm the interrupt is working or it - * will call disable_irq which undoes all of the above. - */ - if (!chip->vendor.irq) { - iowrite8(original_int_vec, - chip->vendor.iobase + - TPM_INT_VECTOR(chip->vendor.locality)); - return 1; - } + struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); + iowrite32(value, phy->iobase + addr); return 0; } -/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that - * do not have ACPI/etc. We typically expect the interrupt to be declared if - * present. - */ -static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask) -{ - u8 original_int_vec; - int i; - - original_int_vec = ioread8(chip->vendor.iobase + - TPM_INT_VECTOR(chip->vendor.locality)); - - if (!original_int_vec) { - if (IS_ENABLED(CONFIG_X86)) - for (i = 3; i <= 15; i++) - if (!tpm_tis_probe_irq_single(chip, intmask, 0, - i)) - return; - } else if (!tpm_tis_probe_irq_single(chip, intmask, 0, - original_int_vec)) - return; -} - -static bool interrupts = true; -module_param(interrupts, bool, 0444); -MODULE_PARM_DESC(interrupts, "Enable interrupts"); - -static void tpm_tis_remove(struct tpm_chip *chip) -{ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_CLEAR); - - iowrite32(~TPM_GLOBAL_INT_ENABLE & - ioread32(chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor. 
- locality)), - chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor.locality)); - release_locality(chip, chip->vendor.locality, 1); -} +static const struct tpm_tis_phy_ops tpm_tcg = { + .read_bytes = tpm_tcg_read_bytes, + .write_bytes = tpm_tcg_write_bytes, + .read16 = tpm_tcg_read16, + .read32 = tpm_tcg_read32, + .write32 = tpm_tcg_write32, +}; static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, acpi_handle acpi_dev_handle) { - u32 vendor, intfcaps, intmask; - int rc, probe; - struct tpm_chip *chip; - struct priv_data *priv; + struct tpm_tis_tcg_phy *phy; + int irq = -1; - priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); - if (priv == NULL) + phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL); + if (phy == NULL) return -ENOMEM; - chip = tpmm_chip_alloc(dev, &tpm_tis); - if (IS_ERR(chip)) - return PTR_ERR(chip); - - chip->vendor.priv = priv; -#ifdef CONFIG_ACPI - chip->acpi_dev_handle = acpi_dev_handle; -#endif + phy->iobase = devm_ioremap_resource(dev, &tpm_info->res); + if (IS_ERR(phy->iobase)) + return PTR_ERR(phy->iobase); - chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res); - if (IS_ERR(chip->vendor.iobase)) - return PTR_ERR(chip->vendor.iobase); - - /* Maximum timeouts */ - chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX; - chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX; - chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX; - chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX; - - if (wait_startup(chip, 0) != 0) { - rc = -ENODEV; - goto out_err; - } - - /* Take control of the TPM's interrupt hardware and shut it off */ - intmask = ioread32(chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor.locality)); - intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | - TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; - intmask &= ~TPM_GLOBAL_INT_ENABLE; - iowrite32(intmask, - chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); - - if (request_locality(chip, 0) != 0) { - rc = -ENODEV; - goto out_err; - } - - rc = tpm2_probe(chip); - if (rc) - goto out_err; - - vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); - chip->vendor.manufacturer_id = vendor; - - dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", - (chip->flags & TPM_CHIP_FLAG_TPM2) ? 
"2.0" : "1.2", - vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); - - if (!itpm) { - probe = probe_itpm(chip); - if (probe < 0) { - rc = -ENODEV; - goto out_err; - } - itpm = !!probe; - } + if (interrupts) + irq = tpm_info->irq; if (itpm) - dev_info(dev, "Intel iTPM workaround enabled\n"); - - - /* Figure out the capabilities */ - intfcaps = - ioread32(chip->vendor.iobase + - TPM_INTF_CAPS(chip->vendor.locality)); - dev_dbg(dev, "TPM interface capabilities (0x%x):\n", - intfcaps); - if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) - dev_dbg(dev, "\tBurst Count Static\n"); - if (intfcaps & TPM_INTF_CMD_READY_INT) - dev_dbg(dev, "\tCommand Ready Int Support\n"); - if (intfcaps & TPM_INTF_INT_EDGE_FALLING) - dev_dbg(dev, "\tInterrupt Edge Falling\n"); - if (intfcaps & TPM_INTF_INT_EDGE_RISING) - dev_dbg(dev, "\tInterrupt Edge Rising\n"); - if (intfcaps & TPM_INTF_INT_LEVEL_LOW) - dev_dbg(dev, "\tInterrupt Level Low\n"); - if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) - dev_dbg(dev, "\tInterrupt Level High\n"); - if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) - dev_dbg(dev, "\tLocality Change Int Support\n"); - if (intfcaps & TPM_INTF_STS_VALID_INT) - dev_dbg(dev, "\tSts Valid Int Support\n"); - if (intfcaps & TPM_INTF_DATA_AVAIL_INT) - dev_dbg(dev, "\tData Avail Int Support\n"); - - /* Very early on issue a command to the TPM in polling mode to make - * sure it works. May as well use that command to set the proper - * timeouts for the driver. - */ - if (tpm_get_timeouts(chip)) { - dev_err(dev, "Could not get TPM timeouts and durations\n"); - rc = -ENODEV; - goto out_err; - } - - /* INTERRUPT Setup */ - init_waitqueue_head(&chip->vendor.read_queue); - init_waitqueue_head(&chip->vendor.int_queue); - if (interrupts && tpm_info->irq != -1) { - if (tpm_info->irq) { - tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, - tpm_info->irq); - if (!chip->vendor.irq) - dev_err(chip->pdev, FW_BUG - "TPM interrupt not working, polling instead\n"); - } else - tpm_tis_probe_irq(chip, intmask); - } + phy->priv.flags |= TPM_TIS_ITPM_POSSIBLE; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = tpm2_do_selftest(chip); - if (rc == TPM2_RC_INITIALIZE) { - dev_warn(dev, "Firmware has not started TPM\n"); - rc = tpm2_startup(chip, TPM2_SU_CLEAR); - if (!rc) - rc = tpm2_do_selftest(chip); - } - - if (rc) { - dev_err(dev, "TPM self test failed\n"); - if (rc > 0) - rc = -ENODEV; - goto out_err; - } - } else { - if (tpm_do_selftest(chip)) { - dev_err(dev, "TPM self test failed\n"); - rc = -ENODEV; - goto out_err; - } - } - - return tpm_chip_register(chip); -out_err: - tpm_tis_remove(chip); - return rc; + return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg, + acpi_dev_handle); } -#ifdef CONFIG_PM_SLEEP -static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) -{ - u32 intmask; - - /* reenable interrupts that device may have lost or - BIOS/firmware may have disabled */ - iowrite8(chip->vendor.irq, chip->vendor.iobase + - TPM_INT_VECTOR(chip->vendor.locality)); - - intmask = - ioread32(chip->vendor.iobase + - TPM_INT_ENABLE(chip->vendor.locality)); - - intmask |= TPM_INTF_CMD_READY_INT - | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT - | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; - - iowrite32(intmask, - chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); -} - -static int tpm_tis_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - int ret; - - if (chip->vendor.irq) - tpm_tis_reenable_interrupts(chip); - - ret = tpm_pm_resume(dev); - if (ret) - return ret; - - /* TPM 
1.2 requires self-test on resume. This function actually returns - * an error code but for unknown reason it isn't handled. - */ - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - tpm_do_selftest(chip); - - return 0; -} -#endif - static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, @@ -1058,12 +363,6 @@ static struct platform_driver tis_drv = { }, }; -static bool force; -#ifdef CONFIG_X86 -module_param(force, bool, 0444); -MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); -#endif - static int tpm_tis_force_device(void) { struct platform_device *pdev; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c new file mode 100644 index 000000000000..d66f51b3648e --- /dev/null +++ b/drivers/char/tpm/tpm_tis_core.c @@ -0,0 +1,835 @@ +/* + * Copyright (C) 2005, 2006 IBM Corporation + * Copyright (C) 2014, 2015 Intel Corporation + * + * Authors: + * Leendert van Doorn <leendert@watson.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * Maintained by: <tpmdd-devel@lists.sourceforge.net> + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This device driver implements the TPM interface as defined in + * the TCG TPM Interface Spec version 1.2, revision 1.0. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pnp.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/acpi.h> +#include <linux/freezer.h> +#include "tpm.h" +#include "tpm_tis_core.h" + +/* Before we attempt to access the TPM we must see that the valid bit is set. + * The specification says that this bit is 0 at reset and remains 0 until the + * 'TPM has gone through its self test and initialization and has established + * correct values in the other bits.' 
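+ *
+ * Concretely, the loop below polls TPM_ACCESS(l) once per
+ * TPM_TIMEOUT milliseconds for TPM_ACCESS_VALID and gives up once
+ * chip->timeout_a has elapsed.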
+ */ +static int wait_startup(struct tpm_chip *chip, int l) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + unsigned long stop = jiffies + chip->timeout_a; + + do { + int rc; + u8 access; + + rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); + if (rc < 0) + return rc; + + if (access & TPM_ACCESS_VALID) + return 0; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + return -1; +} + +static int check_locality(struct tpm_chip *chip, int l) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc; + u8 access; + + rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); + if (rc < 0) + return rc; + + if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) + return priv->locality = l; + + return -1; +} + +static void release_locality(struct tpm_chip *chip, int l, int force) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc; + u8 access; + + rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); + if (rc < 0) + return; + + if (force || (access & + (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == + (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) + tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); + +} + +static int request_locality(struct tpm_chip *chip, int l) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + unsigned long stop, timeout; + long rc; + + if (check_locality(chip, l) >= 0) + return l; + + rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE); + if (rc < 0) + return rc; + + stop = jiffies + chip->timeout_a; + + if (chip->flags & TPM_CHIP_FLAG_IRQ) { +again: + timeout = stop - jiffies; + if ((long)timeout <= 0) + return -1; + rc = wait_event_interruptible_timeout(priv->int_queue, + (check_locality + (chip, l) >= 0), + timeout); + if (rc > 0) + return l; + if (rc == -ERESTARTSYS && freezing(current)) { + clear_thread_flag(TIF_SIGPENDING); + goto again; + } + } else { + /* wait for burstcount */ + do { + if (check_locality(chip, l) >= 0) + return l; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + } + return -1; +} + +static u8 tpm_tis_status(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc; + u8 status; + + rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status); + if (rc < 0) + return 0; + + return status; +} + +static void tpm_tis_ready(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + + /* this causes the current command to be aborted */ + tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY); +} + +static int get_burstcount(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + unsigned long stop; + int burstcnt, rc; + u32 value; + + /* wait for burstcount */ + /* which timeout value, spec has 2 answers (c & d) */ + stop = jiffies + chip->timeout_d; + do { + rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value); + if (rc < 0) + return rc; + + burstcnt = (value >> 8) & 0xFFFF; + if (burstcnt) + return burstcnt; + msleep(TPM_TIMEOUT); + } while (time_before(jiffies, stop)); + return -EBUSY; +} + +static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int size = 0, burstcnt, rc; + + while (size < count && + wait_for_tpm_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + chip->timeout_c, + &priv->read_queue, true) == 0) { + burstcnt = min_t(int, get_burstcount(chip), count - size); + + rc = 
tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality), + burstcnt, buf + size); + if (rc < 0) + return rc; + + size += burstcnt; + } + return size; +} + +static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int size = 0; + int expected, status; + + if (count < TPM_HEADER_SIZE) { + size = -EIO; + goto out; + } + + size = recv_data(chip, buf, TPM_HEADER_SIZE); + /* read first 10 bytes, including tag, paramsize, and result */ + if (size < TPM_HEADER_SIZE) { + dev_err(&chip->dev, "Unable to read header\n"); + goto out; + } + + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + size = -EIO; + goto out; + } + + size += recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (size < expected) { + dev_err(&chip->dev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } + + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, + &priv->int_queue, false); + status = tpm_tis_status(chip); + if (status & TPM_STS_DATA_AVAIL) { /* retry? */ + dev_err(&chip->dev, "Error left over data\n"); + size = -EIO; + goto out; + } + +out: + tpm_tis_ready(chip); + release_locality(chip, priv->locality, 0); + return size; +} + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc, status, burstcnt; + size_t count = 0; + bool itpm = priv->flags & TPM_TIS_ITPM_POSSIBLE; + + if (request_locality(chip, 0) < 0) + return -EBUSY; + + status = tpm_tis_status(chip); + if ((status & TPM_STS_COMMAND_READY) == 0) { + tpm_tis_ready(chip); + if (wait_for_tpm_stat + (chip, TPM_STS_COMMAND_READY, chip->timeout_b, + &priv->int_queue, false) < 0) { + rc = -ETIME; + goto out_err; + } + } + + while (count < len - 1) { + burstcnt = min_t(int, get_burstcount(chip), len - count - 1); + rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality), + burstcnt, buf + count); + if (rc < 0) + goto out_err; + + count += burstcnt; + + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, + &priv->int_queue, false); + status = tpm_tis_status(chip); + if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { + rc = -EIO; + goto out_err; + } + } + + /* write last byte */ + rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]); + if (rc < 0) + goto out_err; + + wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, + &priv->int_queue, false); + status = tpm_tis_status(chip); + if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) { + rc = -EIO; + goto out_err; + } + + return 0; + +out_err: + tpm_tis_ready(chip); + release_locality(chip, priv->locality, 0); + return rc; +} + +static void disable_interrupts(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u32 intmask; + int rc; + + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); + if (rc < 0) + intmask = 0; + + intmask &= ~TPM_GLOBAL_INT_ENABLE; + rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + + devm_free_irq(chip->dev.parent, priv->irq, chip); + priv->irq = 0; + chip->flags &= ~TPM_CHIP_FLAG_IRQ; +} + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int 
tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc; + u32 ordinal; + unsigned long dur; + + rc = tpm_tis_send_data(chip, buf, len); + if (rc < 0) + return rc; + + /* go and do it */ + rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO); + if (rc < 0) + goto out_err; + + if (chip->flags & TPM_CHIP_FLAG_IRQ) { + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + dur = tpm2_calc_ordinal_duration(chip, ordinal); + else + dur = tpm_calc_ordinal_duration(chip, ordinal); + + if (wait_for_tpm_stat + (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, + &priv->read_queue, false) < 0) { + rc = -ETIME; + goto out_err; + } + } + return len; +out_err: + tpm_tis_ready(chip); + release_locality(chip, priv->locality, 0); + return rc; +} + +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc, irq; + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + + if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested) + return tpm_tis_send_main(chip, buf, len); + + /* Verify receipt of the expected IRQ */ + irq = priv->irq; + priv->irq = 0; + chip->flags &= ~TPM_CHIP_FLAG_IRQ; + rc = tpm_tis_send_main(chip, buf, len); + priv->irq = irq; + chip->flags |= TPM_CHIP_FLAG_IRQ; + if (!priv->irq_tested) + msleep(1); + if (!priv->irq_tested) + disable_interrupts(chip); + priv->irq_tested = true; + return rc; +} + +struct tis_vendor_timeout_override { + u32 did_vid; + unsigned long timeout_us[4]; +}; + +static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { + /* Atmel 3204 */ + { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), + (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, +}; + +static bool tpm_tis_update_timeouts(struct tpm_chip *chip, + unsigned long *timeout_cap) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int i, rc; + u32 did_vid; + + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); + if (rc < 0) + return rc; + + for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { + if (vendor_timeout_overrides[i].did_vid != did_vid) + continue; + memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, + sizeof(vendor_timeout_overrides[i].timeout_us)); + return true; + } + + return false; +} + +/* + * Early probing for iTPM with STS_DATA_EXPECT flaw. + * Try sending command without itpm flag set and if that + * fails, repeat with itpm flag set. 
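+ *
+ * Note that tpm_tis_send_data() takes its TPM_STS_DATA_EXPECT
+ * quirk handling from TPM_TIS_ITPM_POSSIBLE in priv->flags; the
+ * local 'itpm' variable below is only ever assigned and no longer
+ * reaches that check.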
+ */ +static int probe_itpm(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + int rc = 0; + u8 cmd_getticks[] = { + 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0xf1 + }; + size_t len = sizeof(cmd_getticks); + bool itpm; + u16 vendor; + + rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor); + if (rc < 0) + return rc; + + /* probe only iTPMS */ + if (vendor != TPM_VID_INTEL) + return 0; + + itpm = false; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) + goto out; + + tpm_tis_ready(chip); + release_locality(chip, priv->locality, 0); + + itpm = true; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) { + dev_info(&chip->dev, "Detected an iTPM.\n"); + rc = 1; + } else + rc = -EFAULT; + +out: + tpm_tis_ready(chip); + release_locality(chip, priv->locality, 0); + + return rc; +} + +static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + + switch (priv->manufacturer_id) { + case TPM_VID_WINBOND: + return ((status == TPM_STS_VALID) || + (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); + case TPM_VID_STM: + return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); + default: + return (status == TPM_STS_COMMAND_READY); + } +} + +static irqreturn_t tis_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u32 interrupt; + int i, rc; + + rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt); + if (rc < 0) + return IRQ_NONE; + + if (interrupt == 0) + return IRQ_NONE; + + priv->irq_tested = true; + if (interrupt & TPM_INTF_DATA_AVAIL_INT) + wake_up_interruptible(&priv->read_queue); + if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) + for (i = 0; i < 5; i++) + if (check_locality(chip, i) >= 0) + break; + if (interrupt & + (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | + TPM_INTF_CMD_READY_INT)) + wake_up_interruptible(&priv->int_queue); + + /* Clear interrupts handled with TPM_EOI */ + rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt); + if (rc < 0) + return IRQ_NONE; + + tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt); + return IRQ_HANDLED; +} + +/* Register the IRQ and issue a command that will cause an interrupt. If an + * irq is seen then leave the chip setup for IRQ operation, otherwise reverse + * everything and leave in polling mode. Returns 0 on success. 
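+ * A positive return value means the original interrupt vector was
+ * restored and the caller should fall back to polling or try the
+ * next candidate IRQ; a negative value means the IRQ could not be
+ * requested or a register access failed.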
+ */ +static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, + int flags, int irq) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u8 original_int_vec; + int rc; + u32 int_status; + + if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags, + dev_name(&chip->dev), chip) != 0) { + dev_info(&chip->dev, "Unable to request irq: %d for probe\n", + irq); + return -1; + } + priv->irq = irq; + + rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), + &original_int_vec); + if (rc < 0) + return rc; + + rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq); + if (rc < 0) + return rc; + + rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status); + if (rc < 0) + return rc; + + /* Clear all existing */ + rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status); + if (rc < 0) + return rc; + + /* Turn on */ + rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), + intmask | TPM_GLOBAL_INT_ENABLE); + if (rc < 0) + return rc; + + priv->irq_tested = false; + + /* Generate an interrupt by having the core call through to + * tpm_tis_send + */ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_gen_interrupt(chip); + else + tpm_gen_interrupt(chip); + + /* tpm_tis_send will either confirm the interrupt is working or it + * will call disable_irq which undoes all of the above. + */ + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { + rc = tpm_tis_write8(priv, original_int_vec, + TPM_INT_VECTOR(priv->locality)); + if (rc < 0) + return rc; + + return 1; + } + + return 0; +} + +/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that + * do not have ACPI/etc. We typically expect the interrupt to be declared if + * present. + */ +static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u8 original_int_vec; + int i, rc; + + rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), + &original_int_vec); + if (rc < 0) + return; + + if (!original_int_vec) { + if (IS_ENABLED(CONFIG_X86)) + for (i = 3; i <= 15; i++) + if (!tpm_tis_probe_irq_single(chip, intmask, 0, + i)) + return; + } else if (!tpm_tis_probe_irq_single(chip, intmask, 0, + original_int_vec)) + return; +} + +void tpm_tis_remove(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u32 reg = TPM_INT_ENABLE(priv->locality); + u32 interrupt; + int rc; + + rc = tpm_tis_read32(priv, reg, &interrupt); + if (rc < 0) + interrupt = 0; + + tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); + release_locality(chip, priv->locality, 1); +} +EXPORT_SYMBOL_GPL(tpm_tis_remove); + +static const struct tpm_class_ops tpm_tis = { + .flags = TPM_OPS_AUTO_STARTUP, + .status = tpm_tis_status, + .recv = tpm_tis_recv, + .send = tpm_tis_send, + .cancel = tpm_tis_ready, + .update_timeouts = tpm_tis_update_timeouts, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = tpm_tis_req_canceled, +}; + +int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, + const struct tpm_tis_phy_ops *phy_ops, + acpi_handle acpi_dev_handle) +{ + u32 vendor, intfcaps, intmask; + u8 rid; + int rc, probe; + struct tpm_chip *chip; + + chip = tpmm_chip_alloc(dev, &tpm_tis); + if (IS_ERR(chip)) + return PTR_ERR(chip); + +#ifdef CONFIG_ACPI + chip->acpi_dev_handle = acpi_dev_handle; +#endif + + /* Maximum timeouts */ + chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX); + chip->timeout_b = 
msecs_to_jiffies(TIS_TIMEOUT_B_MAX); + chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX); + chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX); + priv->phy_ops = phy_ops; + dev_set_drvdata(&chip->dev, priv); + + if (wait_startup(chip, 0) != 0) { + rc = -ENODEV; + goto out_err; + } + + /* Take control of the TPM's interrupt hardware and shut it off */ + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); + if (rc < 0) + goto out_err; + + intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | + TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; + intmask &= ~TPM_GLOBAL_INT_ENABLE; + tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + + if (request_locality(chip, 0) != 0) { + rc = -ENODEV; + goto out_err; + } + + rc = tpm2_probe(chip); + if (rc) + goto out_err; + + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); + if (rc < 0) + goto out_err; + + priv->manufacturer_id = vendor; + + rc = tpm_tis_read8(priv, TPM_RID(0), &rid); + if (rc < 0) + goto out_err; + + dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", + (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2", + vendor >> 16, rid); + + if (!(priv->flags & TPM_TIS_ITPM_POSSIBLE)) { + probe = probe_itpm(chip); + if (probe < 0) { + rc = -ENODEV; + goto out_err; + } + + if (!!probe) + priv->flags |= TPM_TIS_ITPM_POSSIBLE; + } + + /* Figure out the capabilities */ + rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps); + if (rc < 0) + goto out_err; + + dev_dbg(dev, "TPM interface capabilities (0x%x):\n", + intfcaps); + if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) + dev_dbg(dev, "\tBurst Count Static\n"); + if (intfcaps & TPM_INTF_CMD_READY_INT) + dev_dbg(dev, "\tCommand Ready Int Support\n"); + if (intfcaps & TPM_INTF_INT_EDGE_FALLING) + dev_dbg(dev, "\tInterrupt Edge Falling\n"); + if (intfcaps & TPM_INTF_INT_EDGE_RISING) + dev_dbg(dev, "\tInterrupt Edge Rising\n"); + if (intfcaps & TPM_INTF_INT_LEVEL_LOW) + dev_dbg(dev, "\tInterrupt Level Low\n"); + if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) + dev_dbg(dev, "\tInterrupt Level High\n"); + if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) + dev_dbg(dev, "\tLocality Change Int Support\n"); + if (intfcaps & TPM_INTF_STS_VALID_INT) + dev_dbg(dev, "\tSts Valid Int Support\n"); + if (intfcaps & TPM_INTF_DATA_AVAIL_INT) + dev_dbg(dev, "\tData Avail Int Support\n"); + + /* Very early on issue a command to the TPM in polling mode to make + * sure it works. May as well use that command to set the proper + * timeouts for the driver. 
+ */ + if (tpm_get_timeouts(chip)) { + dev_err(dev, "Could not get TPM timeouts and durations\n"); + rc = -ENODEV; + goto out_err; + } + + /* INTERRUPT Setup */ + init_waitqueue_head(&priv->read_queue); + init_waitqueue_head(&priv->int_queue); + if (irq != -1) { + if (irq) { + tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, + irq); + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) + dev_err(&chip->dev, FW_BUG + "TPM interrupt not working, polling instead\n"); + } else { + tpm_tis_probe_irq(chip, intmask); + } + } + + return tpm_chip_register(chip); +out_err: + tpm_tis_remove(chip); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_tis_core_init); + +#ifdef CONFIG_PM_SLEEP +static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) +{ + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); + u32 intmask; + int rc; + + /* reenable interrupts that device may have lost or + * BIOS/firmware may have disabled + */ + rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); + if (rc < 0) + return; + + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); + if (rc < 0) + return; + + intmask |= TPM_INTF_CMD_READY_INT + | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT + | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; + + tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); +} + +int tpm_tis_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + int ret; + + if (chip->flags & TPM_CHIP_FLAG_IRQ) + tpm_tis_reenable_interrupts(chip); + + ret = tpm_pm_resume(dev); + if (ret) + return ret; + + /* TPM 1.2 requires self-test on resume. This function actually returns + * an error code but for unknown reason it isn't handled. + */ + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) + tpm_do_selftest(chip); + + return 0; +} +EXPORT_SYMBOL_GPL(tpm_tis_resume); +#endif + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h new file mode 100644 index 000000000000..9191aabbf9c2 --- /dev/null +++ b/drivers/char/tpm/tpm_tis_core.h @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2005, 2006 IBM Corporation + * Copyright (C) 2014, 2015 Intel Corporation + * + * Authors: + * Leendert van Doorn <leendert@watson.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * Maintained by: <tpmdd-devel@lists.sourceforge.net> + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This device driver implements the TPM interface as defined in + * the TCG TPM Interface Spec version 1.2, revision 1.0. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ */ + +#ifndef __TPM_TIS_CORE_H__ +#define __TPM_TIS_CORE_H__ + +#include "tpm.h" + +enum tis_access { + TPM_ACCESS_VALID = 0x80, + TPM_ACCESS_ACTIVE_LOCALITY = 0x20, + TPM_ACCESS_REQUEST_PENDING = 0x04, + TPM_ACCESS_REQUEST_USE = 0x02, +}; + +enum tis_status { + TPM_STS_VALID = 0x80, + TPM_STS_COMMAND_READY = 0x40, + TPM_STS_GO = 0x20, + TPM_STS_DATA_AVAIL = 0x10, + TPM_STS_DATA_EXPECT = 0x08, +}; + +enum tis_int_flags { + TPM_GLOBAL_INT_ENABLE = 0x80000000, + TPM_INTF_BURST_COUNT_STATIC = 0x100, + TPM_INTF_CMD_READY_INT = 0x080, + TPM_INTF_INT_EDGE_FALLING = 0x040, + TPM_INTF_INT_EDGE_RISING = 0x020, + TPM_INTF_INT_LEVEL_LOW = 0x010, + TPM_INTF_INT_LEVEL_HIGH = 0x008, + TPM_INTF_LOCALITY_CHANGE_INT = 0x004, + TPM_INTF_STS_VALID_INT = 0x002, + TPM_INTF_DATA_AVAIL_INT = 0x001, +}; + +enum tis_defaults { + TIS_MEM_LEN = 0x5000, + TIS_SHORT_TIMEOUT = 750, /* ms */ + TIS_LONG_TIMEOUT = 2000, /* 2 sec */ +}; + +/* Some timeout values are needed before it is known whether the chip is + * TPM 1.0 or TPM 2.0. + */ +#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A) +#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B) +#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C) +#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D) + +#define TPM_ACCESS(l) (0x0000 | ((l) << 12)) +#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) +#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) +#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) +#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) +#define TPM_STS(l) (0x0018 | ((l) << 12)) +#define TPM_STS3(l) (0x001b | ((l) << 12)) +#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) + +#define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) +#define TPM_RID(l) (0x0F04 | ((l) << 12)) + +enum tpm_tis_flags { + TPM_TIS_ITPM_POSSIBLE = BIT(0), +}; + +struct tpm_tis_data { + u16 manufacturer_id; + int locality; + int irq; + bool irq_tested; + unsigned int flags; + wait_queue_head_t int_queue; + wait_queue_head_t read_queue; + const struct tpm_tis_phy_ops *phy_ops; +}; + +struct tpm_tis_phy_ops { + int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, + u8 *result); + int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, + u8 *value); + int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); + int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); + int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); +}; + +static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr, + u16 len, u8 *result) +{ + return data->phy_ops->read_bytes(data, addr, len, result); +} + +static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result) +{ + return data->phy_ops->read_bytes(data, addr, 1, result); +} + +static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr, + u16 *result) +{ + return data->phy_ops->read16(data, addr, result); +} + +static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, + u32 *result) +{ + return data->phy_ops->read32(data, addr, result); +} + +static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, + u16 len, u8 *value) +{ + return data->phy_ops->write_bytes(data, addr, len, value); +} + +static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value) +{ + return data->phy_ops->write_bytes(data, addr, 1, &value); +} + +static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr, + u32 value) +{ + return data->phy_ops->write32(data, addr, value); +} + +void tpm_tis_remove(struct tpm_chip 
*chip); +int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, + const struct tpm_tis_phy_ops *phy_ops, + acpi_handle acpi_dev_handle); + +#ifdef CONFIG_PM_SLEEP +int tpm_tis_resume(struct device *dev); +#endif + +#endif diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c new file mode 100644 index 000000000000..dbaad9c681e3 --- /dev/null +++ b/drivers/char/tpm/tpm_tis_spi.c @@ -0,0 +1,272 @@ +/* + * Copyright (C) 2015 Infineon Technologies AG + * Copyright (C) 2016 STMicroelectronics SAS + * + * Authors: + * Peter Huewe <peter.huewe@infineon.com> + * Christophe Ricard <christophe-h.ricard@st.com> + * + * Maintained by: <tpmdd-devel@lists.sourceforge.net> + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This device driver implements the TPM interface as defined in + * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native + * SPI access_. + * + * It is based on the original tpm_tis device driver from Leendert van + * Dorn and Kyleen Hall and Jarko Sakkinnen. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/acpi.h> +#include <linux/freezer.h> + +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/gpio.h> +#include <linux/of_irq.h> +#include <linux/of_gpio.h> +#include <linux/tpm.h> +#include "tpm.h" +#include "tpm_tis_core.h" + +#define MAX_SPI_FRAMESIZE 64 + +struct tpm_tis_spi_phy { + struct tpm_tis_data priv; + struct spi_device *spi_device; + + u8 tx_buf[MAX_SPI_FRAMESIZE + 4]; + u8 rx_buf[MAX_SPI_FRAMESIZE + 4]; +}; + +static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) +{ + return container_of(data, struct tpm_tis_spi_phy, priv); +} + +static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, + u16 len, u8 *result) +{ + struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); + int ret, i; + struct spi_message m; + struct spi_transfer spi_xfer = { + .tx_buf = phy->tx_buf, + .rx_buf = phy->rx_buf, + .len = 4, + }; + + if (len > MAX_SPI_FRAMESIZE) + return -ENOMEM; + + phy->tx_buf[0] = 0x80 | (len - 1); + phy->tx_buf[1] = 0xd4; + phy->tx_buf[2] = (addr >> 8) & 0xFF; + phy->tx_buf[3] = addr & 0xFF; + + spi_xfer.cs_change = 1; + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + + spi_bus_lock(phy->spi_device->master); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + + memset(phy->tx_buf, 0, len); + + /* According to TCG PTP specification, if there is no TPM present at + * all, then the design has a weak pull-up on MISO. If a TPM is not + * present, a pull-up on MISO means that the SB controller sees a 1, + * and will latch in 0xFF on the read. 
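+ *
+ * The single-byte loop below is also the TIS-over-SPI wait-state
+ * mechanism: the TPM keeps the low bit of the returned byte clear
+ * while it is busy, so we keep clocking out one byte at a time
+ * until that bit reads 1 or TPM_RETRY is exhausted.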
+ */ + for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { + spi_xfer.len = 1; + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + } + + spi_xfer.cs_change = 0; + spi_xfer.len = len; + spi_xfer.rx_buf = result; + + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + ret = spi_sync_locked(phy->spi_device, &m); + +exit: + spi_bus_unlock(phy->spi_device->master); + return ret; +} + +static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, + u16 len, u8 *value) +{ + struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); + int ret, i; + struct spi_message m; + struct spi_transfer spi_xfer = { + .tx_buf = phy->tx_buf, + .rx_buf = phy->rx_buf, + .len = 4, + }; + + if (len > MAX_SPI_FRAMESIZE) + return -ENOMEM; + + phy->tx_buf[0] = len - 1; + phy->tx_buf[1] = 0xd4; + phy->tx_buf[2] = (addr >> 8) & 0xFF; + phy->tx_buf[3] = addr & 0xFF; + + spi_xfer.cs_change = 1; + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + + spi_bus_lock(phy->spi_device->master); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + + memset(phy->tx_buf, 0, len); + + /* According to TCG PTP specification, if there is no TPM present at + * all, then the design has a weak pull-up on MISO. If a TPM is not + * present, a pull-up on MISO means that the SB controller sees a 1, + * and will latch in 0xFF on the read. + */ + for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { + spi_xfer.len = 1; + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + } + + spi_xfer.len = len; + spi_xfer.tx_buf = value; + spi_xfer.cs_change = 0; + spi_xfer.tx_buf = value; + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + ret = spi_sync_locked(phy->spi_device, &m); + +exit: + spi_bus_unlock(phy->spi_device->master); + return ret; +} + +static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) +{ + int rc; + + rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result); + if (!rc) + *result = le16_to_cpu(*result); + return rc; +} + +static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result) +{ + int rc; + + rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result); + if (!rc) + *result = le32_to_cpu(*result); + return rc; +} + +static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value) +{ + value = cpu_to_le32(value); + return data->phy_ops->write_bytes(data, addr, sizeof(u32), + (u8 *)&value); +} + +static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { + .read_bytes = tpm_tis_spi_read_bytes, + .write_bytes = tpm_tis_spi_write_bytes, + .read16 = tpm_tis_spi_read16, + .read32 = tpm_tis_spi_read32, + .write32 = tpm_tis_spi_write32, +}; + +static int tpm_tis_spi_probe(struct spi_device *dev) +{ + struct tpm_tis_spi_phy *phy; + + phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy), + GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->spi_device = dev; + + return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, + NULL); +} + +static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); + +static int tpm_tis_spi_remove(struct spi_device *dev) +{ + struct tpm_chip *chip = spi_get_drvdata(dev); + + tpm_chip_unregister(chip); + tpm_tis_remove(chip); + return 0; +} + +static const struct spi_device_id tpm_tis_spi_id[] = { + {"tpm_tis_spi", 0}, + {} +}; 
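To make the framing used by tpm_tis_spi_read_bytes() and tpm_tis_spi_write_bytes() concrete, here is the four-byte header that a 4-byte read of the status register would generate, worked out from the code above (an illustrative sketch; TPM_STS(0) = 0x0018 comes from the register map in tpm_tis_core.h):

	u8 hdr[4];

	hdr[0] = 0x80 | (4 - 1);	/* 0x83: bit 7 set = read, low bits = len - 1 */
	hdr[1] = 0xd4;			/* TIS registers sit at SPI address 0xD4xxxx  */
	hdr[2] = (0x0018 >> 8) & 0xFF;	/* 0x00: register offset, high byte           */
	hdr[3] = 0x0018 & 0xFF;		/* 0x18: register offset, low byte            */

A write of the same length uses the identical layout with bit 7 of hdr[0] clear (0x03), as tpm_tis_spi_write_bytes() shows.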
+MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id); + +static const struct of_device_id of_tis_spi_match[] = { + { .compatible = "st,st33htpm-spi", }, + { .compatible = "infineon,slb9670", }, + { .compatible = "tcg,tpm_tis-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, of_tis_spi_match); + +static const struct acpi_device_id acpi_tis_spi_match[] = { + {"SMO0768", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match); + +static struct spi_driver tpm_tis_spi_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "tpm_tis_spi", + .pm = &tpm_tis_pm, + .of_match_table = of_match_ptr(of_tis_spi_match), + .acpi_match_table = ACPI_PTR(acpi_tis_spi_match), + }, + .probe = tpm_tis_spi_probe, + .remove = tpm_tis_spi_remove, + .id_table = tpm_tis_spi_id, +}; +module_spi_driver(tpm_tis_spi_driver); + +MODULE_DESCRIPTION("TPM Driver for native SPI access"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c new file mode 100644 index 000000000000..9a940332c157 --- /dev/null +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -0,0 +1,637 @@ +/* + * Copyright (C) 2015, 2016 IBM Corporation + * + * Author: Stefan Berger <stefanb@us.ibm.com> + * + * Maintained by: <tpmdd-devel@lists.sourceforge.net> + * + * Device driver for vTPM (vTPM proxy driver) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/miscdevice.h> +#include <linux/vtpm_proxy.h> +#include <linux/file.h> +#include <linux/anon_inodes.h> +#include <linux/poll.h> +#include <linux/compat.h> + +#include "tpm.h" + +#define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0) + +struct proxy_dev { + struct tpm_chip *chip; + + u32 flags; /* public API flags */ + + wait_queue_head_t wq; + + struct mutex buf_lock; /* protect buffer and flags */ + + long state; /* internal state */ +#define STATE_OPENED_FLAG BIT(0) +#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */ + + size_t req_len; /* length of queued TPM request */ + size_t resp_len; /* length of queued TPM response */ + u8 buffer[TPM_BUFSIZE]; /* request/response buffer */ + + struct work_struct work; /* task that retrieves TPM timeouts */ +}; + +/* all supported flags */ +#define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2) + +static struct workqueue_struct *workqueue; + +static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev); + +/* + * Functions related to 'server side' + */ + +/** + * vtpm_proxy_fops_read - Read TPM commands on 'server side' + * + * Return value: + * Number of bytes read or negative error code + */ +static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + struct proxy_dev *proxy_dev = filp->private_data; + size_t len; + int sig, rc; + + sig = wait_event_interruptible(proxy_dev->wq, + proxy_dev->req_len != 0 || + !(proxy_dev->state & STATE_OPENED_FLAG)); + if (sig) + return -EINTR; + + mutex_lock(&proxy_dev->buf_lock); + + if (!(proxy_dev->state & STATE_OPENED_FLAG)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EPIPE; + } + + len = proxy_dev->req_len; + + if (count < len) { + mutex_unlock(&proxy_dev->buf_lock); + pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n", + count, len); + return -EIO; + } + + rc = copy_to_user(buf, proxy_dev->buffer, len); + 
memset(proxy_dev->buffer, 0, len); + proxy_dev->req_len = 0; + + if (!rc) + proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG; + + mutex_unlock(&proxy_dev->buf_lock); + + if (rc) + return -EFAULT; + + return len; +} + +/** + * vtpm_proxy_fops_write - Write TPM responses on 'server side' + * + * Return value: + * Number of bytes read or negative error value + */ +static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf, + size_t count, loff_t *off) +{ + struct proxy_dev *proxy_dev = filp->private_data; + + mutex_lock(&proxy_dev->buf_lock); + + if (!(proxy_dev->state & STATE_OPENED_FLAG)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EPIPE; + } + + if (count > sizeof(proxy_dev->buffer) || + !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EIO; + } + + proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; + + proxy_dev->req_len = 0; + + if (copy_from_user(proxy_dev->buffer, buf, count)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EFAULT; + } + + proxy_dev->resp_len = count; + + mutex_unlock(&proxy_dev->buf_lock); + + wake_up_interruptible(&proxy_dev->wq); + + return count; +} + +/* + * vtpm_proxy_fops_poll: Poll status on 'server side' + * + * Return value: + * Poll flags + */ +static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait) +{ + struct proxy_dev *proxy_dev = filp->private_data; + unsigned ret; + + poll_wait(filp, &proxy_dev->wq, wait); + + ret = POLLOUT; + + mutex_lock(&proxy_dev->buf_lock); + + if (proxy_dev->req_len) + ret |= POLLIN | POLLRDNORM; + + if (!(proxy_dev->state & STATE_OPENED_FLAG)) + ret |= POLLHUP; + + mutex_unlock(&proxy_dev->buf_lock); + + return ret; +} + +/* + * vtpm_proxy_fops_open - Open vTPM device on 'server side' + * + * Called when setting up the anonymous file descriptor + */ +static void vtpm_proxy_fops_open(struct file *filp) +{ + struct proxy_dev *proxy_dev = filp->private_data; + + proxy_dev->state |= STATE_OPENED_FLAG; +} + +/** + * vtpm_proxy_fops_undo_open - counter-part to vtpm_fops_open + * + * Call to undo vtpm_proxy_fops_open + */ +static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev) +{ + mutex_lock(&proxy_dev->buf_lock); + + proxy_dev->state &= ~STATE_OPENED_FLAG; + + mutex_unlock(&proxy_dev->buf_lock); + + /* no more TPM responses -- wake up anyone waiting for them */ + wake_up_interruptible(&proxy_dev->wq); +} + +/* + * vtpm_proxy_fops_release: Close 'server side' + * + * Return value: + * Always returns 0. + */ +static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) +{ + struct proxy_dev *proxy_dev = filp->private_data; + + filp->private_data = NULL; + + vtpm_proxy_delete_device(proxy_dev); + + return 0; +} + +static const struct file_operations vtpm_proxy_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = vtpm_proxy_fops_read, + .write = vtpm_proxy_fops_write, + .poll = vtpm_proxy_fops_poll, + .release = vtpm_proxy_fops_release, +}; + +/* + * Functions invoked by the core TPM driver to send TPM commands to + * 'server side' and receive responses from there. + */ + +/* + * Called when core TPM driver reads TPM responses from 'server side' + * + * Return value: + * Number of TPM response bytes read, negative error value otherwise + */ +static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); + size_t len; + + /* process gone ? 
*/ + mutex_lock(&proxy_dev->buf_lock); + + if (!(proxy_dev->state & STATE_OPENED_FLAG)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EPIPE; + } + + len = proxy_dev->resp_len; + if (count < len) { + dev_err(&chip->dev, + "Invalid size in recv: count=%zd, resp_len=%zd\n", + count, len); + len = -EIO; + goto out; + } + + memcpy(buf, proxy_dev->buffer, len); + proxy_dev->resp_len = 0; + +out: + mutex_unlock(&proxy_dev->buf_lock); + + return len; +} + +/* + * Called when core TPM driver forwards TPM requests to 'server side'. + * + * Return value: + * 0 in case of success, negative error value otherwise. + */ +static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); + int rc = 0; + + if (count > sizeof(proxy_dev->buffer)) { + dev_err(&chip->dev, + "Invalid size in send: count=%zd, buffer size=%zd\n", + count, sizeof(proxy_dev->buffer)); + return -EIO; + } + + mutex_lock(&proxy_dev->buf_lock); + + if (!(proxy_dev->state & STATE_OPENED_FLAG)) { + mutex_unlock(&proxy_dev->buf_lock); + return -EPIPE; + } + + proxy_dev->resp_len = 0; + + proxy_dev->req_len = count; + memcpy(proxy_dev->buffer, buf, count); + + proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; + + mutex_unlock(&proxy_dev->buf_lock); + + wake_up_interruptible(&proxy_dev->wq); + + return rc; +} + +static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) +{ + /* not supported */ +} + +static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip) +{ + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); + + if (proxy_dev->resp_len) + return VTPM_PROXY_REQ_COMPLETE_FLAG; + + return 0; +} + +static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status) +{ + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); + bool ret; + + mutex_lock(&proxy_dev->buf_lock); + + ret = !(proxy_dev->state & STATE_OPENED_FLAG); + + mutex_unlock(&proxy_dev->buf_lock); + + return ret; +} + +static const struct tpm_class_ops vtpm_proxy_tpm_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = vtpm_proxy_tpm_op_recv, + .send = vtpm_proxy_tpm_op_send, + .cancel = vtpm_proxy_tpm_op_cancel, + .status = vtpm_proxy_tpm_op_status, + .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG, + .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG, + .req_canceled = vtpm_proxy_tpm_req_canceled, +}; + +/* + * Code related to the startup of the TPM 2 and startup of TPM 1.2 + + * retrieval of timeouts and durations. + */ + +static void vtpm_proxy_work(struct work_struct *work) +{ + struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev, + work); + int rc; + + rc = tpm_chip_register(proxy_dev->chip); + if (rc) + goto err; + + return; + +err: + vtpm_proxy_fops_undo_open(proxy_dev); +} + +/* + * vtpm_proxy_work_stop: make sure the work has finished + * + * This function is useful when user space closed the fd + * while the driver still determines timeouts. 
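+ *
+ * Clearing STATE_OPENED_FLAG first makes any command the worker is
+ * still sending fail with -EPIPE, so the flush_work() below should
+ * not block indefinitely.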
+ */ +static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev) +{ + vtpm_proxy_fops_undo_open(proxy_dev); + flush_work(&proxy_dev->work); +} + +/* + * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization + */ +static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev) +{ + queue_work(workqueue, &proxy_dev->work); +} + +/* + * Code related to creation and deletion of device pairs + */ +static struct proxy_dev *vtpm_proxy_create_proxy_dev(void) +{ + struct proxy_dev *proxy_dev; + struct tpm_chip *chip; + int err; + + proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL); + if (proxy_dev == NULL) + return ERR_PTR(-ENOMEM); + + init_waitqueue_head(&proxy_dev->wq); + mutex_init(&proxy_dev->buf_lock); + INIT_WORK(&proxy_dev->work, vtpm_proxy_work); + + chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops); + if (IS_ERR(chip)) { + err = PTR_ERR(chip); + goto err_proxy_dev_free; + } + dev_set_drvdata(&chip->dev, proxy_dev); + + proxy_dev->chip = chip; + + return proxy_dev; + +err_proxy_dev_free: + kfree(proxy_dev); + + return ERR_PTR(err); +} + +/* + * Undo what has been done in vtpm_create_proxy_dev + */ +static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev) +{ + put_device(&proxy_dev->chip->dev); /* frees chip */ + kfree(proxy_dev); +} + +/* + * Create a /dev/tpm%d and 'server side' file descriptor pair + * + * Return value: + * Returns file pointer on success, an error value otherwise + */ +static struct file *vtpm_proxy_create_device( + struct vtpm_proxy_new_dev *vtpm_new_dev) +{ + struct proxy_dev *proxy_dev; + int rc, fd; + struct file *file; + + if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL) + return ERR_PTR(-EOPNOTSUPP); + + proxy_dev = vtpm_proxy_create_proxy_dev(); + if (IS_ERR(proxy_dev)) + return ERR_CAST(proxy_dev); + + proxy_dev->flags = vtpm_new_dev->flags; + + /* setup an anonymous file for the server-side */ + fd = get_unused_fd_flags(O_RDWR); + if (fd < 0) { + rc = fd; + goto err_delete_proxy_dev; + } + + file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev, + O_RDWR); + if (IS_ERR(file)) { + rc = PTR_ERR(file); + goto err_put_unused_fd; + } + + /* from now on we can unwind with put_unused_fd() + fput() */ + /* simulate an open() on the server side */ + vtpm_proxy_fops_open(file); + + if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2) + proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2; + + vtpm_proxy_work_start(proxy_dev); + + vtpm_new_dev->fd = fd; + vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt); + vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt); + vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num; + + return file; + +err_put_unused_fd: + put_unused_fd(fd); + +err_delete_proxy_dev: + vtpm_proxy_delete_proxy_dev(proxy_dev); + + return ERR_PTR(rc); +} + +/* + * Counter part to vtpm_create_device. + */ +static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev) +{ + vtpm_proxy_work_stop(proxy_dev); + + /* + * A client may hold the 'ops' lock, so let it know that the server + * side shuts down before we try to grab the 'ops' lock when + * unregistering the chip. + */ + vtpm_proxy_fops_undo_open(proxy_dev); + + tpm_chip_unregister(proxy_dev->chip); + + vtpm_proxy_delete_proxy_dev(proxy_dev); +} + +/* + * Code related to the control device /dev/vtpmx + */ + +/* + * vtpmx_fops_ioctl: ioctl on /dev/vtpmx + * + * Return value: + * Returns 0 on success, a negative error code otherwise. 
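+ *
+ * A minimal user-space sequence (illustrative sketch, error
+ * handling omitted):
+ *
+ *	vtpmx = open("/dev/vtpmx", O_RDWR);
+ *	memset(&new_dev, 0, sizeof(new_dev));
+ *	ioctl(vtpmx, VTPM_PROXY_IOC_NEW_DEV, &new_dev);
+ *
+ * Leaving new_dev.flags zero requests a TPM 1.2 pair. Clients then
+ * open /dev/tpm<new_dev.tpm_num> while the emulator read()s requests
+ * from and write()s responses to new_dev.fd.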
+ */ +static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct vtpm_proxy_new_dev __user *vtpm_new_dev_p; + struct vtpm_proxy_new_dev vtpm_new_dev; + struct file *file; + + switch (ioctl) { + case VTPM_PROXY_IOC_NEW_DEV: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + vtpm_new_dev_p = argp; + if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p, + sizeof(vtpm_new_dev))) + return -EFAULT; + file = vtpm_proxy_create_device(&vtpm_new_dev); + if (IS_ERR(file)) + return PTR_ERR(file); + if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev, + sizeof(vtpm_new_dev))) { + put_unused_fd(vtpm_new_dev.fd); + fput(file); + return -EFAULT; + } + + fd_install(vtpm_new_dev.fd, file); + return 0; + + default: + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); +} +#endif + +static const struct file_operations vtpmx_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = vtpmx_fops_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vtpmx_fops_compat_ioctl, +#endif + .llseek = noop_llseek, +}; + +static struct miscdevice vtpmx_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vtpmx", + .fops = &vtpmx_fops, +}; + +static int vtpmx_init(void) +{ + return misc_register(&vtpmx_miscdev); +} + +static void vtpmx_cleanup(void) +{ + misc_deregister(&vtpmx_miscdev); +} + +static int __init vtpm_module_init(void) +{ + int rc; + + rc = vtpmx_init(); + if (rc) { + pr_err("couldn't create vtpmx device\n"); + return rc; + } + + workqueue = create_workqueue("tpm-vtpm"); + if (!workqueue) { + pr_err("couldn't create workqueue\n"); + rc = -ENOMEM; + goto err_vtpmx_cleanup; + } + + return 0; + +err_vtpmx_cleanup: + vtpmx_cleanup(); + + return rc; +} + +static void __exit vtpm_module_exit(void) +{ + destroy_workqueue(workqueue); + vtpmx_cleanup(); +} + +module_init(vtpm_module_init); +module_exit(vtpm_module_exit); + +MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)"); +MODULE_DESCRIPTION("vTPM Driver"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 3111f2778079..62028f483bba 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -28,6 +28,8 @@ struct tpm_private { unsigned int evtchn; int ring_ref; domid_t backend_id; + int irq; + wait_queue_head_t read_queue; }; enum status_bits { @@ -39,7 +41,7 @@ enum status_bits { static u8 vtpm_status(struct tpm_chip *chip) { - struct tpm_private *priv = TPM_VPRIV(chip); + struct tpm_private *priv = dev_get_drvdata(&chip->dev); switch (priv->shr->state) { case VTPM_STATE_IDLE: return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; @@ -60,7 +62,7 @@ static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status) static void vtpm_cancel(struct tpm_chip *chip) { - struct tpm_private *priv = TPM_VPRIV(chip); + struct tpm_private *priv = dev_get_drvdata(&chip->dev); priv->shr->state = VTPM_STATE_CANCEL; wmb(); notify_remote_via_evtchn(priv->evtchn); @@ -73,7 +75,7 @@ static unsigned int shr_data_offset(struct vtpm_shared_page *shr) static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { - struct tpm_private *priv = TPM_VPRIV(chip); + struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; unsigned int offset = shr_data_offset(shr); @@ -87,8 +89,8 @@ static int vtpm_send(struct tpm_chip *chip, 
u8 *buf, size_t count) return -EINVAL; /* Wait for completion of any existing command or cancellation */ - if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c, - &chip->vendor.read_queue, true) < 0) { + if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c, + &priv->read_queue, true) < 0) { vtpm_cancel(chip); return -ETIME; } @@ -104,7 +106,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) duration = tpm_calc_ordinal_duration(chip, ordinal); if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, - &chip->vendor.read_queue, true) < 0) { + &priv->read_queue, true) < 0) { /* got a signal or timeout, try to cancel */ vtpm_cancel(chip); return -ETIME; @@ -115,7 +117,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct tpm_private *priv = TPM_VPRIV(chip); + struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; unsigned int offset = shr_data_offset(shr); size_t length = shr->length; @@ -124,8 +126,8 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) return -ECANCELED; /* In theory the wait at the end of _send makes this one unnecessary */ - if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c, - &chip->vendor.read_queue, true) < 0) { + if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c, + &priv->read_queue, true) < 0) { vtpm_cancel(chip); return -ETIME; } @@ -161,7 +163,7 @@ static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) switch (priv->shr->state) { case VTPM_STATE_IDLE: case VTPM_STATE_FINISH: - wake_up_interruptible(&priv->chip->vendor.read_queue); + wake_up_interruptible(&priv->read_queue); break; case VTPM_STATE_SUBMIT: case VTPM_STATE_CANCEL: @@ -179,10 +181,10 @@ static int setup_chip(struct device *dev, struct tpm_private *priv) if (IS_ERR(chip)) return PTR_ERR(chip); - init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&priv->read_queue); priv->chip = chip; - TPM_VPRIV(chip) = priv; + dev_set_drvdata(&chip->dev, priv); return 0; } @@ -217,7 +219,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) xenbus_dev_fatal(dev, rv, "allocating TPM irq"); return rv; } - priv->chip->vendor.irq = rv; + priv->irq = rv; again: rv = xenbus_transaction_start(&xbt); @@ -277,8 +279,8 @@ static void ring_free(struct tpm_private *priv) else free_page((unsigned long)priv->shr); - if (priv->chip && priv->chip->vendor.irq) - unbind_from_irqhandler(priv->chip->vendor.irq, priv); + if (priv->irq) + unbind_from_irqhandler(priv->irq, priv); kfree(priv); } @@ -318,10 +320,10 @@ static int tpmfront_probe(struct xenbus_device *dev, static int tpmfront_remove(struct xenbus_device *dev) { struct tpm_chip *chip = dev_get_drvdata(&dev->dev); - struct tpm_private *priv = TPM_VPRIV(chip); + struct tpm_private *priv = dev_get_drvdata(&chip->dev); tpm_chip_unregister(chip); ring_free(priv); - TPM_VPRIV(chip) = NULL; + dev_set_drvdata(&chip->dev, NULL); return 0; } diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 98efbfcdb503..e2d9bd760c84 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -49,10 +49,10 @@ config COMMON_CLK_MAX77802 This driver supports Maxim 77802 crystal oscillator clock. config COMMON_CLK_RK808 - tristate "Clock driver for RK808" + tristate "Clock driver for RK808/RK818" depends on MFD_RK808 ---help--- - This driver supports RK808 crystal oscillator clock. 
These + This driver supports RK808 and RK818 crystal oscillator clocks. These multi-function devices have two fixed-rate oscillators, clocked at 32KHz each. Clkout1 is always on, Clkout2 can be switched off by a control register. @@ -203,16 +203,19 @@ config COMMON_CLK_PIC32 config COMMON_CLK_OXNAS bool "Clock driver for the OXNAS SoC Family" + depends on ARCH_OXNAS || COMPILE_TEST select MFD_SYSCON ---help--- Support for the OXNAS SoC Family clocks. source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" +source "drivers/clk/meson/Kconfig" source "drivers/clk/mvebu/Kconfig" source "drivers/clk/qcom/Kconfig" source "drivers/clk/renesas/Kconfig" source "drivers/clk/samsung/Kconfig" +source "drivers/clk/sunxi-ng/Kconfig" source "drivers/clk/tegra/Kconfig" source "drivers/clk/ti/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index dcc5e698ff6d..3b6f9cf3464a 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -16,13 +16,14 @@ obj-$(CONFIG_COMMON_CLK) += clk-conf.o endif # hardware specific clock types -# please keep this section sorted lexicographically by file/directory path name +# please keep this section sorted lexicographically by file path name obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o -obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o +obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o +obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o obj-$(CONFIG_MACH_LOONGSON32) += clk-ls1x.o @@ -35,6 +36,7 @@ obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o obj-$(CONFIG_COMMON_CLK_OXNAS) += clk-oxnas.o obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o +obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o @@ -42,7 +44,6 @@ obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o -obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o @@ -50,35 +51,39 @@ obj-$(CONFIG_ARCH_U300) += clk-u300.o obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o -obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o + +# please keep this section sorted lexicographically by directory path name obj-$(CONFIG_COMMON_CLK_AT91) += at91/ obj-$(CONFIG_ARCH_ARTPEC) += axis/ +obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/ obj-y += bcm/ obj-$(CONFIG_ARCH_BERLIN) += berlin/ +obj-$(CONFIG_H8300) += h8300/ obj-$(CONFIG_ARCH_HISI) += hisilicon/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_MACH_INGENIC) += ingenic/ obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ +obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/ obj-$(CONFIG_MACH_PIC32) += microchip/ ifeq ($(CONFIG_COMMON_CLK), y) obj-$(CONFIG_ARCH_MMP) += mmp/ endif obj-y += mvebu/ -obj-$(CONFIG_ARCH_MESON) += meson/ obj-$(CONFIG_ARCH_MXS) += mxs/ -obj-$(CONFIG_MACH_PISTACHIO) += pistachio/ obj-$(CONFIG_COMMON_CLK_NXP) += nxp/ +obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
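Editorial sketch, not part of the patch: the xen-tpmfront hunks above replace the old chip->vendor embedding (TPM_VPRIV, chip->vendor.read_queue, chip->vendor.irq) with driver-private state reached through the chip device's drvdata. A minimal illustration of that pattern follows; struct my_priv, my_attach and to_my_priv are hypothetical names, and struct tpm_chip comes from the in-tree drivers/char/tpm/tpm.h.

#include <linux/device.h>
#include <linux/wait.h>

#include "tpm.h"	/* struct tpm_chip */

struct my_priv {
	wait_queue_head_t read_queue;	/* was chip->vendor.read_queue */
	int irq;			/* was chip->vendor.irq */
};

/* The driver now owns both the allocation and the wait queue. */
static void my_attach(struct tpm_chip *chip, struct my_priv *priv)
{
	init_waitqueue_head(&priv->read_queue);
	dev_set_drvdata(&chip->dev, priv);	/* replaces TPM_VPRIV(chip) = priv */
}

/* Lookup from any chip callback. */
static struct my_priv *to_my_priv(struct tpm_chip *chip)
{
	return dev_get_drvdata(&chip->dev);
}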
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/ obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/ +obj-$(CONFIG_ARCH_RENESAS) += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ -obj-$(CONFIG_ARCH_RENESAS) += renesas/ obj-$(CONFIG_ARCH_SIRF) += sirf/ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-$(CONFIG_ARCH_STI) += st/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ +obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_ARCH_U8500) += ux500/ @@ -86,5 +91,3 @@ obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/ obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_ARCH_ZX) += zte/ obj-$(CONFIG_ARCH_ZYNQ) += zynq/ -obj-$(CONFIG_H8300) += h8300/ -obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/ diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index e1aa210dd7aa..7f6bec8837ea 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -267,7 +267,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char return clk; } -void __init of_sama5d2_clk_generated_setup(struct device_node *np) +static void __init of_sama5d2_clk_generated_setup(struct device_node *np) { int num; u32 id; diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c index a196ee28a17a..d7d628214b85 100644 --- a/drivers/clk/bcm/clk-iproc-armpll.c +++ b/drivers/clk/bcm/clk-iproc-armpll.c @@ -20,6 +20,8 @@ #include <linux/clkdev.h> #include <linux/of_address.h> +#include "clk-iproc.h" + #define IPROC_CLK_MAX_FREQ_POLICY 0x3 #define IPROC_CLK_POLICY_FREQ_OFFSET 0x008 #define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT 8 @@ -242,7 +244,6 @@ static const struct clk_ops iproc_arm_pll_ops = { void __init iproc_armpll_setup(struct device_node *node) { int ret; - struct clk *clk; struct iproc_arm_pll *pll; struct clk_init_data init; const char *parent_name; @@ -263,18 +264,18 @@ void __init iproc_armpll_setup(struct device_node *node) init.num_parents = (parent_name ? 
1 : 0); pll->hw.init = &init; - clk = clk_register(NULL, &pll->hw); - if (WARN_ON(IS_ERR(clk))) + ret = clk_hw_register(NULL, &pll->hw); + if (WARN_ON(ret)) goto err_iounmap; - ret = of_clk_add_provider(node, of_clk_src_simple_get, clk); + ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll->hw); if (WARN_ON(ret)) goto err_clk_unregister; return; err_clk_unregister: - clk_unregister(clk); + clk_hw_unregister(&pll->hw); err_iounmap: iounmap(pll->base); err_free_pll: diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c index f630e1bbdcfe..4360e481368b 100644 --- a/drivers/clk/bcm/clk-iproc-asiu.c +++ b/drivers/clk/bcm/clk-iproc-asiu.c @@ -37,7 +37,7 @@ struct iproc_asiu { void __iomem *div_base; void __iomem *gate_base; - struct clk_onecell_data clk_data; + struct clk_hw_onecell_data *clk_data; struct iproc_asiu_clk *clks; }; @@ -197,11 +197,11 @@ void __init iproc_asiu_setup(struct device_node *node, if (WARN_ON(!asiu)) return; - asiu->clk_data.clk_num = num_clks; - asiu->clk_data.clks = kcalloc(num_clks, sizeof(*asiu->clk_data.clks), - GFP_KERNEL); - if (WARN_ON(!asiu->clk_data.clks)) + asiu->clk_data = kzalloc(sizeof(*asiu->clk_data->hws) * num_clks + + sizeof(*asiu->clk_data), GFP_KERNEL); + if (WARN_ON(!asiu->clk_data)) goto err_clks; + asiu->clk_data->num = num_clks; asiu->clks = kcalloc(num_clks, sizeof(*asiu->clks), GFP_KERNEL); if (WARN_ON(!asiu->clks)) @@ -217,7 +217,6 @@ void __init iproc_asiu_setup(struct device_node *node, for (i = 0; i < num_clks; i++) { struct clk_init_data init; - struct clk *clk; const char *parent_name; struct iproc_asiu_clk *asiu_clk; const char *clk_name; @@ -240,22 +239,22 @@ void __init iproc_asiu_setup(struct device_node *node, init.num_parents = (parent_name ? 1 : 0); asiu_clk->hw.init = &init; - clk = clk_register(NULL, &asiu_clk->hw); - if (WARN_ON(IS_ERR(clk))) + ret = clk_hw_register(NULL, &asiu_clk->hw); + if (WARN_ON(ret)) goto err_clk_register; - asiu->clk_data.clks[i] = clk; + asiu->clk_data->hws[i] = &asiu_clk->hw; } - ret = of_clk_add_provider(node, of_clk_src_onecell_get, - &asiu->clk_data); + ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + asiu->clk_data); if (WARN_ON(ret)) goto err_clk_register; return; err_clk_register: - for (i = 0; i < num_clks; i++) - clk_unregister(asiu->clk_data.clks[i]); + while (--i >= 0) + clk_hw_unregister(asiu->clk_data->hws[i]); iounmap(asiu->gate_base); err_iomap_gate: @@ -265,7 +264,7 @@ err_iomap_div: kfree(asiu->clks); err_asiu_clks: - kfree(asiu->clk_data.clks); + kfree(asiu->clk_data); err_clks: kfree(asiu); diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index fd492a5dad12..e04634c46395 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -89,7 +89,7 @@ struct iproc_pll { const struct iproc_pll_vco_param *vco_param; unsigned int num_vco_entries; - struct clk_onecell_data clk_data; + struct clk_hw_onecell_data *clk_data; struct iproc_clk *clks; }; @@ -625,7 +625,6 @@ void __init iproc_pll_clk_setup(struct device_node *node, unsigned int num_clks) { int i, ret; - struct clk *clk; struct iproc_pll *pll; struct iproc_clk *iclk; struct clk_init_data init; @@ -638,11 +637,11 @@ void __init iproc_pll_clk_setup(struct device_node *node, if (WARN_ON(!pll)) return; - pll->clk_data.clk_num = num_clks; - pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks), - GFP_KERNEL); - if (WARN_ON(!pll->clk_data.clks)) + pll->clk_data = kzalloc(sizeof(*pll->clk_data->hws) * num_clks + + 
sizeof(*pll->clk_data), GFP_KERNEL); + if (WARN_ON(!pll->clk_data)) goto err_clk_data; + pll->clk_data->num = num_clks; pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL); if (WARN_ON(!pll->clks)) @@ -694,11 +693,11 @@ void __init iproc_pll_clk_setup(struct device_node *node, iproc_pll_sw_cfg(pll); - clk = clk_register(NULL, &iclk->hw); - if (WARN_ON(IS_ERR(clk))) + ret = clk_hw_register(NULL, &iclk->hw); + if (WARN_ON(ret)) goto err_pll_register; - pll->clk_data.clks[0] = clk; + pll->clk_data->hws[0] = &iclk->hw; /* now initialize and register all leaf clocks */ for (i = 1; i < num_clks; i++) { @@ -724,22 +723,23 @@ void __init iproc_pll_clk_setup(struct device_node *node, init.num_parents = (parent_name ? 1 : 0); iclk->hw.init = &init; - clk = clk_register(NULL, &iclk->hw); - if (WARN_ON(IS_ERR(clk))) + ret = clk_hw_register(NULL, &iclk->hw); + if (WARN_ON(ret)) goto err_clk_register; - pll->clk_data.clks[i] = clk; + pll->clk_data->hws[i] = &iclk->hw; } - ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data); + ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + pll->clk_data); if (WARN_ON(ret)) goto err_clk_register; return; err_clk_register: - for (i = 0; i < num_clks; i++) - clk_unregister(pll->clk_data.clks[i]); + while (--i >= 0) + clk_hw_unregister(pll->clk_data->hws[i]); err_pll_register: if (pll->status_base != pll->control_base) @@ -759,7 +759,7 @@ err_pll_iomap: kfree(pll->clks); err_clks: - kfree(pll->clk_data.clks); + kfree(pll->clk_data); err_clk_data: kfree(pll); diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index 1f60b02416a7..adaf109f2fe2 100644 --- a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c @@ -184,5 +184,5 @@ static void __init clps711x_clk_init_dt(struct device_node *np) of_clk_add_provider(np, of_clk_src_onecell_get, &clps711x_clk->clk_data); } -CLK_OF_DECLARE(clps711x, "cirrus,clps711x-clk", clps711x_clk_init_dt); +CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt); #endif diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 43a218f35b19..674785d968a3 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -55,7 +55,7 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) } clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { - pr_warn("clk: couldn't get parent clock %d for %s\n", + pr_warn("clk: couldn't get assigned clock %d for %s\n", index, node->full_name); rc = PTR_ERR(clk); goto err; diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 75cd6c792cb8..4db3be214077 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -142,6 +142,11 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw) EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor); #ifdef CONFIG_OF +static const struct of_device_id set_rate_parent_matches[] = { + { .compatible = "allwinner,sun4i-a10-pll3-2x-clk" }, + { /* Sentinel */ }, +}; + /** * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock */ @@ -150,6 +155,7 @@ void __init of_fixed_factor_clk_setup(struct device_node *node) struct clk *clk; const char *clk_name = node->name; const char *parent_name; + unsigned long flags = 0; u32 div, mult; if (of_property_read_u32(node, "clock-div", &div)) { @@ -167,7 +173,10 @@ void __init of_fixed_factor_clk_setup(struct device_node *node) of_property_read_string(node, "clock-output-names", &clk_name); parent_name = of_clk_get_parent_name(node, 0); - clk = 
clk_register_fixed_factor(NULL, clk_name, parent_name, 0, + if (of_match_node(set_rate_parent_matches, node)) + flags |= CLK_SET_RATE_PARENT; + + clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags, mult, div); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index 8e4453eb54e8..2edb39342a02 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -145,6 +145,17 @@ void clk_unregister_fixed_rate(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_unregister_fixed_rate); +void clk_hw_unregister_fixed_rate(struct clk_hw *hw) +{ + struct clk_fixed_rate *fixed; + + fixed = to_clk_fixed_rate(hw); + + clk_hw_unregister(hw); + kfree(fixed); +} +EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate); + #ifdef CONFIG_OF /** * of_fixed_clk_setup() - Setup function for simple fixed rate clock diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c index be3a21abb185..727ed8e1bb72 100644 --- a/drivers/clk/clk-highbank.c +++ b/drivers/clk/clk-highbank.c @@ -275,7 +275,6 @@ static const struct clk_ops periclk_ops = { static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops) { u32 reg; - struct clk *clk; struct hb_clk *hb_clk; const char *clk_name = node->name; const char *parent_name; @@ -308,13 +307,13 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk hb_clk->hw.init = &init; - clk = clk_register(NULL, &hb_clk->hw); - if (WARN_ON(IS_ERR(clk))) { + rc = clk_hw_register(NULL, &hb_clk->hw); + if (WARN_ON(rc)) { kfree(hb_clk); return NULL; } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); - return clk; + rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw); + return hb_clk->hw.clk; } static void __init hb_pll_init(struct device_node *node) diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c index 9e449c7b751c..dc037c957acd 100644 --- a/drivers/clk/clk-multiplier.c +++ b/drivers/clk/clk-multiplier.c @@ -52,14 +52,28 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, u8 width, unsigned long flags) { + struct clk_multiplier *mult = to_clk_multiplier(hw); unsigned long orig_parent_rate = *best_parent_rate; unsigned long parent_rate, current_rate, best_rate = ~0; unsigned int i, bestmult = 0; + unsigned int maxmult = (1 << width) - 1; + + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { + bestmult = rate / orig_parent_rate; + + /* Make sure we don't end up with a 0 multiplier */ + if ((bestmult == 0) && + !(mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)) + bestmult = 1; - if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) - return rate / *best_parent_rate; + /* Make sure we don't overflow the multiplier */ + if (bestmult > maxmult) + bestmult = maxmult; + + return bestmult; + } - for (i = 1; i < ((1 << width) - 1); i++) { + for (i = 1; i < maxmult; i++) { if (rate == orig_parent_rate * i) { /* * This is the best case for us if we have a diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index e4d8a991c58f..71677eb12565 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -253,11 +253,11 @@ static const struct clk_ops pll_clk_ops = { .recalc_rate = pll_clk_recalc_rate, }; -static struct clk * __init +static struct clk_hw * __init pll_clk_register(struct device *dev, const char *name, const char *parent_name, u32 id) { - struct clk *clk; + int ret; struct clk_pll *pll; struct 
clk_init_data init; @@ -281,11 +281,13 @@ pll_clk_register(struct device *dev, const char *name, pr_debug("register PLL1 clock \"%s\"\n", name); - clk = clk_register(dev, &pll->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(dev, &pll->hw); + if (ret) { kfree(pll); + return ERR_PTR(ret); + } - return clk; + return &pll->hw; } /* @@ -345,11 +347,11 @@ static const struct clk_ops src_clk_ops = { .recalc_rate = src_clk_recalc_rate, }; -static struct clk * __init +static struct clk_hw * __init src_clk_register(struct device *dev, const char *name, const char *parent_name, u8 id) { - struct clk *clk; + int ret; struct clk_src *sclk; struct clk_init_data init; @@ -376,11 +378,13 @@ src_clk_register(struct device *dev, const char *name, pr_debug("register clock \"%s\" ID: %d group: %d bits: %08x\n", name, id, sclk->group1, sclk->clkbit); - clk = clk_register(dev, &sclk->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(dev, &sclk->hw); + if (ret) { kfree(sclk); + return ERR_PTR(ret); + } - return clk; + return &sclk->hw; } #ifdef CONFIG_DEBUG_FS @@ -508,7 +512,7 @@ device_initcall(nomadik_src_clk_init_debugfs); static void __init of_nomadik_pll_setup(struct device_node *np) { - struct clk *clk = ERR_PTR(-EINVAL); + struct clk_hw *hw; const char *clk_name = np->name; const char *parent_name; u32 pll_id; @@ -522,16 +526,16 @@ static void __init of_nomadik_pll_setup(struct device_node *np) return; } parent_name = of_clk_get_parent_name(np, 0); - clk = pll_clk_register(NULL, clk_name, parent_name, pll_id); - if (!IS_ERR(clk)) - of_clk_add_provider(np, of_clk_src_simple_get, clk); + hw = pll_clk_register(NULL, clk_name, parent_name, pll_id); + if (!IS_ERR(hw)) + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); } CLK_OF_DECLARE(nomadik_pll_clk, "st,nomadik-pll-clock", of_nomadik_pll_setup); static void __init of_nomadik_hclk_setup(struct device_node *np) { - struct clk *clk = ERR_PTR(-EINVAL); + struct clk_hw *hw; const char *clk_name = np->name; const char *parent_name; @@ -542,20 +546,20 @@ static void __init of_nomadik_hclk_setup(struct device_node *np) /* * The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4. 
*/ - clk = clk_register_divider(NULL, clk_name, parent_name, + hw = clk_hw_register_divider(NULL, clk_name, parent_name, 0, src_base + SRC_CR, 13, 2, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &src_lock); - if (!IS_ERR(clk)) - of_clk_add_provider(np, of_clk_src_simple_get, clk); + if (!IS_ERR(hw)) + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); } CLK_OF_DECLARE(nomadik_hclk_clk, "st,nomadik-hclk-clock", of_nomadik_hclk_setup); static void __init of_nomadik_src_clk_setup(struct device_node *np) { - struct clk *clk = ERR_PTR(-EINVAL); + struct clk_hw *hw; const char *clk_name = np->name; const char *parent_name; u32 clk_id; @@ -569,9 +573,9 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np) return; } parent_name = of_clk_get_parent_name(np, 0); - clk = src_clk_register(NULL, clk_name, parent_name, clk_id); - if (!IS_ERR(clk)) - of_clk_add_provider(np, of_clk_src_simple_get, clk); + hw = src_clk_register(NULL, clk_name, parent_name, clk_id); + if (!IS_ERR(hw)) + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); } CLK_OF_DECLARE(nomadik_src_clk, "st,nomadik-src-clock", of_nomadik_src_clk_setup); diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c index 79bcb2e42060..47649ac5d399 100644 --- a/drivers/clk/clk-oxnas.c +++ b/drivers/clk/clk-oxnas.c @@ -18,7 +18,7 @@ #include <linux/clk-provider.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/stringify.h> @@ -170,26 +170,17 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) clk_oxnas->onecell_data); } -static int oxnas_stdclk_remove(struct platform_device *pdev) -{ - of_clk_del_provider(pdev->dev.of_node); - - return 0; -} - static const struct of_device_id oxnas_stdclk_dt_ids[] = { { .compatible = "oxsemi,ox810se-stdclk" }, { } }; -MODULE_DEVICE_TABLE(of, oxnas_stdclk_dt_ids); static struct platform_driver oxnas_stdclk_driver = { .probe = oxnas_stdclk_probe, - .remove = oxnas_stdclk_remove, .driver = { .name = "oxnas-stdclk", + .suppress_bind_attrs = true, .of_match_table = oxnas_stdclk_dt_ids, }, }; - -module_platform_driver(oxnas_stdclk_driver); +builtin_platform_driver(oxnas_stdclk_driver); diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index f8c83977c7fa..fbaa84a33c46 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -137,7 +137,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) { struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct s2mps11_clk *s2mps11_clks; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; unsigned int s2mps11_reg; int i, ret = 0; enum sec_device_type hwid = platform_get_device_id(pdev)->driver_data; @@ -147,15 +147,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (!s2mps11_clks) return -ENOMEM; - clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL); + clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data) + + sizeof(*clk_data->hws) * S2MPS11_CLKS_NUM, + GFP_KERNEL); if (!clk_data) return -ENOMEM; - clk_data->clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM, - sizeof(struct clk *), GFP_KERNEL); - if (!clk_data->clks) - return -ENOMEM; - switch (hwid) { case S2MPS11X: s2mps11_reg = S2MPS11_REG_RTC_CTRL; @@ -196,18 +193,18 @@ static int s2mps11_clk_probe(struct platform_device *pdev) goto err_reg; } - s2mps11_clks[i].lookup = clkdev_create(s2mps11_clks[i].clk, + s2mps11_clks[i].lookup = clkdev_hw_create(&s2mps11_clks[i].hw, 
s2mps11_clks_init[i].name, NULL); if (!s2mps11_clks[i].lookup) { ret = -ENOMEM; goto err_reg; } - clk_data->clks[i] = s2mps11_clks[i].clk; + clk_data->hws[i] = &s2mps11_clks[i].hw; } - clk_data->clk_num = S2MPS11_CLKS_NUM; - of_clk_add_provider(s2mps11_clks->clk_np, of_clk_src_onecell_get, - clk_data); + clk_data->num = S2MPS11_CLKS_NUM; + of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, + clk_data); platform_set_drvdata(pdev, s2mps11_clks); diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index fd89e771107e..02d681008401 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -136,7 +136,7 @@ static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull, 0x0000000000000001ull, 0x04777f33f6fec9ffull }; -static struct clk *clks[MAX_CLKS]; +static struct clk_hw *clks[MAX_CLKS]; static DEFINE_SPINLOCK(stm32f4_clk_lock); static void __iomem *base; @@ -281,7 +281,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary) (BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0); } -static struct clk * +static struct clk_hw * stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data) { int i = stm32f4_rcc_lookup_clk_idx(clkspec->args[0], clkspec->args[1]); @@ -346,9 +346,9 @@ static void __init stm32f4_rcc_init(struct device_node *np) clk_register_apb_mul(NULL, "apb2_mul", "apb2_div", CLK_SET_RATE_PARENT, 15); - clks[SYSTICK] = clk_register_fixed_factor(NULL, "systick", "ahb_div", + clks[SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick", "ahb_div", 0, 1, 8); - clks[FCLK] = clk_register_fixed_factor(NULL, "fclk", "ahb_div", + clks[FCLK] = clk_hw_register_fixed_factor(NULL, "fclk", "ahb_div", 0, 1, 1); for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) { @@ -360,18 +360,18 @@ static void __init stm32f4_rcc_init(struct device_node *np) if (idx < 0) goto fail; - clks[idx] = clk_register_gate( + clks[idx] = clk_hw_register_gate( NULL, gd->name, gd->parent_name, gd->flags, base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock); - if (IS_ERR(clks[n])) { + if (IS_ERR(clks[idx])) { pr_err("%s: Unable to register leaf clock %s\n", np->full_name, gd->name); goto fail; } } - of_clk_add_provider(np, stm32f4_rcc_lookup_clk, NULL); + of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL); return; fail: iounmap(base); diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c index 95d1742dac30..ec8aafda6e24 100644 --- a/drivers/clk/clk-u300.c +++ b/drivers/clk/clk-u300.c @@ -689,7 +689,7 @@ static const struct clk_ops syscon_clk_ops = { .set_rate = syscon_clk_set_rate, }; -static struct clk * __init +static struct clk_hw * __init syscon_clk_register(struct device *dev, const char *name, const char *parent_name, unsigned long flags, bool hw_ctrld, @@ -697,9 +697,10 @@ syscon_clk_register(struct device *dev, const char *name, void __iomem *en_reg, u8 en_bit, u16 clk_val) { - struct clk *clk; + struct clk_hw *hw; struct clk_syscon *sclk; struct clk_init_data init; + int ret; sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL); if (!sclk) { @@ -722,11 +723,14 @@ syscon_clk_register(struct device *dev, const char *name, sclk->en_bit = en_bit; sclk->clk_val = clk_val; - clk = clk_register(dev, &sclk->hw); - if (IS_ERR(clk)) + hw = &sclk->hw; + ret = clk_hw_register(dev, hw); + if (ret) { kfree(sclk); + hw = ERR_PTR(ret); + } - return clk; + return hw; } #define U300_CLK_TYPE_SLOW 0 @@ -868,7 +872,7 @@ static struct u300_clock const u300_clk_lookup[] __initconst = { static void __init of_u300_syscon_clk_init(struct device_node *np) { 
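Editorial sketch, not part of the patch: the s2mps11 and iproc conversions above settle on the same clk_hw_onecell_data idiom, in which one allocation carries both the header and the trailing hws[] array, and the provider is registered through the *_hw_* helpers. A minimal version, with the hypothetical MY_NR_CLKS and my_clk_init:

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#define MY_NR_CLKS 4	/* hypothetical number of exported clocks */

static struct clk_hw_onecell_data *my_clk_init(struct device_node *np)
{
	struct clk_hw_onecell_data *data;

	/* header plus the flexible hws[] array in a single allocation */
	data = kzalloc(sizeof(*data) + sizeof(*data->hws) * MY_NR_CLKS,
		       GFP_KERNEL);
	if (!data)
		return NULL;
	data->num = MY_NR_CLKS;

	/* ... clk_hw_register() each clock and store it in data->hws[i] ... */

	if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data)) {
		kfree(data);
		return NULL;
	}
	return data;
}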
- struct clk *clk = ERR_PTR(-EINVAL); + struct clk_hw *hw = ERR_PTR(-EINVAL); const char *clk_name = np->name; const char *parent_name; void __iomem *res_reg; @@ -911,16 +915,15 @@ static void __init of_u300_syscon_clk_init(struct device_node *np) const struct u300_clock *u3clk = &u300_clk_lookup[i]; if (u3clk->type == clk_type && u3clk->id == clk_id) - clk = syscon_clk_register(NULL, - clk_name, parent_name, - 0, u3clk->hw_ctrld, - res_reg, u3clk->id, - en_reg, u3clk->id, - u3clk->clk_val); + hw = syscon_clk_register(NULL, clk_name, parent_name, + 0, u3clk->hw_ctrld, + res_reg, u3clk->id, + en_reg, u3clk->id, + u3clk->clk_val); } - if (!IS_ERR(clk)) { - of_clk_add_provider(np, of_clk_src_simple_get, clk); + if (!IS_ERR(hw)) { + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); /* * Some few system clocks - device tree does not @@ -928,11 +931,11 @@ static void __init of_u300_syscon_clk_init(struct device_node *np) * for now we add these three clocks here. */ if (clk_type == U300_CLK_TYPE_REST && clk_id == 5) - clk_register_clkdev(clk, NULL, "pl172"); + clk_hw_register_clkdev(hw, NULL, "pl172"); if (clk_type == U300_CLK_TYPE_REST && clk_id == 9) - clk_register_clkdev(clk, NULL, "semi"); + clk_hw_register_clkdev(hw, NULL, "semi"); if (clk_type == U300_CLK_TYPE_REST && clk_id == 12) - clk_register_clkdev(clk, NULL, "intcon"); + clk_hw_register_clkdev(hw, NULL, "intcon"); } } @@ -1111,13 +1114,14 @@ static const struct clk_ops mclk_ops = { .set_rate = mclk_clk_set_rate, }; -static struct clk * __init +static struct clk_hw * __init mclk_clk_register(struct device *dev, const char *name, const char *parent_name, bool is_mspro) { - struct clk *clk; + struct clk_hw *hw; struct clk_mclk *mclk; struct clk_init_data init; + int ret; mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL); if (!mclk) { @@ -1133,23 +1137,26 @@ mclk_clk_register(struct device *dev, const char *name, mclk->hw.init = &init; mclk->is_mspro = is_mspro; - clk = clk_register(dev, &mclk->hw); - if (IS_ERR(clk)) + hw = &mclk->hw; + ret = clk_hw_register(dev, hw); + if (ret) { kfree(mclk); + hw = ERR_PTR(ret); + } - return clk; + return hw; } static void __init of_u300_syscon_mclk_init(struct device_node *np) { - struct clk *clk = ERR_PTR(-EINVAL); + struct clk_hw *hw; const char *clk_name = np->name; const char *parent_name; parent_name = of_clk_get_parent_name(np, 0); - clk = mclk_clk_register(NULL, clk_name, parent_name, false); - if (!IS_ERR(clk)) - of_clk_add_provider(np, of_clk_src_simple_get, clk); + hw = mclk_clk_register(NULL, clk_name, parent_name, false); + if (!IS_ERR(hw)) + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); } static const struct of_device_id u300_clk_match[] __initconst = { diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c index b0f76a84f1e9..37368a399ff9 100644 --- a/drivers/clk/clk-vt8500.c +++ b/drivers/clk/clk-vt8500.c @@ -383,51 +383,49 @@ static int vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate, return 0; } -static int wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate, - u32 *multiplier, u32 *divisor1, u32 *divisor2) +/* + * M * parent [O1] => / P [O2] => / D [O3] + * Where O1 is 900MHz...3GHz; + * O2 is 600MHz >= (M * parent) / P >= 300MHz; + * M is 36...120 [25MHz parent]; D is 1 or 2 or 4 or 8. 
+ * Possible ranges (O3): + * D = 8: 37,5MHz...75MHz + * D = 4: 75MHz...150MHz + * D = 2: 150MHz...300MHz + * D = 1: 300MHz...600MHz + */ +static int wm8650_find_pll_bits(unsigned long rate, + unsigned long parent_rate, u32 *multiplier, u32 *divisor1, + u32 *divisor2) { - u32 mul, div1; - int div2; - u32 best_mul, best_div1, best_div2; - unsigned long tclk, rate_err, best_err; - - best_err = (unsigned long)-1; + unsigned long O1, min_err, rate_err; - /* Find the closest match (lower or equal to requested) */ - for (div1 = 5; div1 >= 3; div1--) - for (div2 = 3; div2 >= 0; div2--) - for (mul = 3; mul <= 1023; mul++) { - tclk = parent_rate * mul / (div1 * (1 << div2)); - if (tclk > rate) - continue; - /* error will always be +ve */ - rate_err = rate - tclk; - if (rate_err == 0) { - *multiplier = mul; - *divisor1 = div1; - *divisor2 = div2; - return 0; - } + if (!parent_rate || (rate < 37500000) || (rate > 600000000)) + return -EINVAL; - if (rate_err < best_err) { - best_err = rate_err; - best_mul = mul; - best_div1 = div1; - best_div2 = div2; - } - } + *divisor2 = rate <= 75000000 ? 3 : rate <= 150000000 ? 2 : + rate <= 300000000 ? 1 : 0; + /* + * Divisor P cannot be calculated. Test all divisors and find where M + * will be as close as possible to the requested rate. + */ + min_err = ULONG_MAX; + for (*divisor1 = 5; *divisor1 >= 3; (*divisor1)--) { + O1 = rate * *divisor1 * (1 << (*divisor2)); + rate_err = O1 % parent_rate; + if (rate_err < min_err) { + *multiplier = O1 / parent_rate; + if (rate_err == 0) + return 0; + + min_err = rate_err; + } + } - if (best_err == (unsigned long)-1) { - pr_warn("%s: impossible rate %lu\n", __func__, rate); + if ((*multiplier < 3) || (*multiplier > 1023)) return -EINVAL; - } - /* if we got here, it wasn't an exact match */ - pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate, - rate - best_err); - *multiplier = best_mul; - *divisor1 = best_div1; - *divisor2 = best_div2; + pr_warn("%s: rate error is %lu\n", __func__, min_err); return 0; } @@ -464,7 +462,6 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate, { u32 mul; int div1, div2; - u32 best_mul, best_div1, best_div2; unsigned long tclk, rate_err, best_err; best_err = (unsigned long)-1; @@ -488,9 +485,9 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate, if (rate_err < best_err) { best_err = rate_err; - best_mul = mul; - best_div1 = div1; - best_div2 = div2; + *multiplier = mul; + *divisor1 = div1; + *divisor2 = div2; } } @@ -503,10 +500,7 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate, pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate, rate - best_err); - *filter = wm8750_get_filter(parent_rate, best_div1); - *multiplier = best_mul; - *divisor1 = best_div1; - *divisor2 = best_div2; + *filter = wm8750_get_filter(parent_rate, *divisor1); return 0; } @@ -516,7 +510,6 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate, { u32 mul; int div1, div2; - u32 best_mul, best_div1, best_div2; unsigned long tclk, rate_err, best_err; best_err = (unsigned long)-1; @@ -540,9 +533,9 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate, if (rate_err < best_err) { best_err = rate_err; - best_mul = mul; - best_div1 = div1; - best_div2 = div2; + *multiplier = mul; + *divisor1 = div1; + *divisor2 = div2; } } @@ -555,10 +548,6 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate, pr_warn("%s: requested rate %lu, found 
rate %lu\n", __func__, rate, rate - best_err); - *multiplier = best_mul; - *divisor1 = best_div1; - *divisor2 = best_div2; - return 0; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d584004f7af7..820a939fb6bb 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core) return core->ops->is_enabled(core->hw); } -static void clk_unprepare_unused_subtree(struct clk_core *core) -{ - struct clk_core *child; - - lockdep_assert_held(&prepare_lock); - - hlist_for_each_entry(child, &core->children, child_node) - clk_unprepare_unused_subtree(child); - - if (core->prepare_count) - return; - - if (core->flags & CLK_IGNORE_UNUSED) - return; - - if (clk_core_is_prepared(core)) { - trace_clk_unprepare(core); - if (core->ops->unprepare_unused) - core->ops->unprepare_unused(core->hw); - else if (core->ops->unprepare) - core->ops->unprepare(core->hw); - trace_clk_unprepare_complete(core); - } -} - -static void clk_disable_unused_subtree(struct clk_core *core) -{ - struct clk_core *child; - unsigned long flags; - - lockdep_assert_held(&prepare_lock); - - hlist_for_each_entry(child, &core->children, child_node) - clk_disable_unused_subtree(child); - - flags = clk_enable_lock(); - - if (core->enable_count) - goto unlock_out; - - if (core->flags & CLK_IGNORE_UNUSED) - goto unlock_out; - - /* - * some gate clocks have special needs during the disable-unused - * sequence. call .disable_unused if available, otherwise fall - * back to .disable - */ - if (clk_core_is_enabled(core)) { - trace_clk_disable(core); - if (core->ops->disable_unused) - core->ops->disable_unused(core->hw); - else if (core->ops->disable) - core->ops->disable(core->hw); - trace_clk_disable_complete(core); - } - -unlock_out: - clk_enable_unlock(flags); -} - -static bool clk_ignore_unused; -static int __init clk_ignore_unused_setup(char *__unused) -{ - clk_ignore_unused = true; - return 1; -} -__setup("clk_ignore_unused", clk_ignore_unused_setup); - -static int clk_disable_unused(void) -{ - struct clk_core *core; - - if (clk_ignore_unused) { - pr_warn("clk: Not disabling unused clocks\n"); - return 0; - } - - clk_prepare_lock(); - - hlist_for_each_entry(core, &clk_root_list, child_node) - clk_disable_unused_subtree(core); - - hlist_for_each_entry(core, &clk_orphan_list, child_node) - clk_disable_unused_subtree(core); - - hlist_for_each_entry(core, &clk_root_list, child_node) - clk_unprepare_unused_subtree(core); - - hlist_for_each_entry(core, &clk_orphan_list, child_node) - clk_unprepare_unused_subtree(core); - - clk_prepare_unlock(); - - return 0; -} -late_initcall_sync(clk_disable_unused); - /*** helper functions ***/ const char *__clk_get_name(const struct clk *clk) @@ -591,6 +493,13 @@ static void clk_core_unprepare(struct clk_core *core) clk_core_unprepare(core->parent); } +static void clk_core_unprepare_lock(struct clk_core *core) +{ + clk_prepare_lock(); + clk_core_unprepare(core); + clk_prepare_unlock(); +} + /** * clk_unprepare - undo preparation of a clock source * @clk: the clk being unprepared @@ -607,9 +516,7 @@ void clk_unprepare(struct clk *clk) if (IS_ERR_OR_NULL(clk)) return; - clk_prepare_lock(); - clk_core_unprepare(clk->core); - clk_prepare_unlock(); + clk_core_unprepare_lock(clk->core); } EXPORT_SYMBOL_GPL(clk_unprepare); @@ -645,6 +552,17 @@ static int clk_core_prepare(struct clk_core *core) return 0; } +static int clk_core_prepare_lock(struct clk_core *core) +{ + int ret; + + clk_prepare_lock(); + ret = clk_core_prepare(core); + 
clk_prepare_unlock(); + + return ret; +} + /** * clk_prepare - prepare a clock source * @clk: the clk being prepared @@ -659,16 +577,10 @@ static int clk_core_prepare(struct clk_core *core) */ int clk_prepare(struct clk *clk) { - int ret; - if (!clk) return 0; - clk_prepare_lock(); - ret = clk_core_prepare(clk->core); - clk_prepare_unlock(); - - return ret; + return clk_core_prepare_lock(clk->core); } EXPORT_SYMBOL_GPL(clk_prepare); @@ -688,16 +600,25 @@ static void clk_core_disable(struct clk_core *core) if (--core->enable_count > 0) return; - trace_clk_disable(core); + trace_clk_disable_rcuidle(core); if (core->ops->disable) core->ops->disable(core->hw); - trace_clk_disable_complete(core); + trace_clk_disable_complete_rcuidle(core); clk_core_disable(core->parent); } +static void clk_core_disable_lock(struct clk_core *core) +{ + unsigned long flags; + + flags = clk_enable_lock(); + clk_core_disable(core); + clk_enable_unlock(flags); +} + /** * clk_disable - gate a clock * @clk: the clk being gated @@ -712,14 +633,10 @@ static void clk_core_disable(struct clk_core *core) */ void clk_disable(struct clk *clk) { - unsigned long flags; - if (IS_ERR_OR_NULL(clk)) return; - flags = clk_enable_lock(); - clk_core_disable(clk->core); - clk_enable_unlock(flags); + clk_core_disable_lock(clk->core); } EXPORT_SYMBOL_GPL(clk_disable); @@ -741,12 +658,12 @@ static int clk_core_enable(struct clk_core *core) if (ret) return ret; - trace_clk_enable(core); + trace_clk_enable_rcuidle(core); if (core->ops->enable) ret = core->ops->enable(core->hw); - trace_clk_enable_complete(core); + trace_clk_enable_complete_rcuidle(core); if (ret) { clk_core_disable(core->parent); @@ -758,6 +675,18 @@ static int clk_core_enable(struct clk_core *core) return 0; } +static int clk_core_enable_lock(struct clk_core *core) +{ + unsigned long flags; + int ret; + + flags = clk_enable_lock(); + ret = clk_core_enable(core); + clk_enable_unlock(flags); + + return ret; +} + /** * clk_enable - ungate a clock * @clk: the clk being ungated @@ -773,19 +702,136 @@ static int clk_core_enable(struct clk_core *core) */ int clk_enable(struct clk *clk) { - unsigned long flags; - int ret; - if (!clk) return 0; + return clk_core_enable_lock(clk->core); +} +EXPORT_SYMBOL_GPL(clk_enable); + +static int clk_core_prepare_enable(struct clk_core *core) +{ + int ret; + + ret = clk_core_prepare_lock(core); + if (ret) + return ret; + + ret = clk_core_enable_lock(core); + if (ret) + clk_core_unprepare_lock(core); + + return ret; +} + +static void clk_core_disable_unprepare(struct clk_core *core) +{ + clk_core_disable_lock(core); + clk_core_unprepare_lock(core); +} + +static void clk_unprepare_unused_subtree(struct clk_core *core) +{ + struct clk_core *child; + + lockdep_assert_held(&prepare_lock); + + hlist_for_each_entry(child, &core->children, child_node) + clk_unprepare_unused_subtree(child); + + if (core->prepare_count) + return; + + if (core->flags & CLK_IGNORE_UNUSED) + return; + + if (clk_core_is_prepared(core)) { + trace_clk_unprepare(core); + if (core->ops->unprepare_unused) + core->ops->unprepare_unused(core->hw); + else if (core->ops->unprepare) + core->ops->unprepare(core->hw); + trace_clk_unprepare_complete(core); + } +} + +static void clk_disable_unused_subtree(struct clk_core *core) +{ + struct clk_core *child; + unsigned long flags; + + lockdep_assert_held(&prepare_lock); + + hlist_for_each_entry(child, &core->children, child_node) + clk_disable_unused_subtree(child); + + if (core->flags & CLK_OPS_PARENT_ENABLE) + 
clk_core_prepare_enable(core->parent); + flags = clk_enable_lock(); - ret = clk_core_enable(clk->core); + + if (core->enable_count) + goto unlock_out; + + if (core->flags & CLK_IGNORE_UNUSED) + goto unlock_out; + + /* + * some gate clocks have special needs during the disable-unused + * sequence. call .disable_unused if available, otherwise fall + * back to .disable + */ + if (clk_core_is_enabled(core)) { + trace_clk_disable(core); + if (core->ops->disable_unused) + core->ops->disable_unused(core->hw); + else if (core->ops->disable) + core->ops->disable(core->hw); + trace_clk_disable_complete(core); + } + +unlock_out: clk_enable_unlock(flags); + if (core->flags & CLK_OPS_PARENT_ENABLE) + clk_core_disable_unprepare(core->parent); +} - return ret; +static bool clk_ignore_unused; +static int __init clk_ignore_unused_setup(char *__unused) +{ + clk_ignore_unused = true; + return 1; } -EXPORT_SYMBOL_GPL(clk_enable); +__setup("clk_ignore_unused", clk_ignore_unused_setup); + +static int clk_disable_unused(void) +{ + struct clk_core *core; + + if (clk_ignore_unused) { + pr_warn("clk: Not disabling unused clocks\n"); + return 0; + } + + clk_prepare_lock(); + + hlist_for_each_entry(core, &clk_root_list, child_node) + clk_disable_unused_subtree(core); + + hlist_for_each_entry(core, &clk_orphan_list, child_node) + clk_disable_unused_subtree(core); + + hlist_for_each_entry(core, &clk_root_list, child_node) + clk_unprepare_unused_subtree(core); + + hlist_for_each_entry(core, &clk_orphan_list, child_node) + clk_unprepare_unused_subtree(core); + + clk_prepare_unlock(); + + return 0; +} +late_initcall_sync(clk_disable_unused); static int clk_core_round_rate_nolock(struct clk_core *core, struct clk_rate_request *req) @@ -828,9 +874,7 @@ static int clk_core_round_rate_nolock(struct clk_core *core, /** * __clk_determine_rate - get the closest rate actually supported by a clock * @hw: determine the rate of this clock - * @rate: target rate - * @min_rate: returned rate must be greater than this rate - * @max_rate: returned rate must be less than this rate + * @req: target rate request * * Useful for clk_ops such as .set_rate and .determine_rate. */ @@ -1128,7 +1172,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core, struct clk_core *old_parent = core->parent; /* - * Migrate prepare state between parents and prevent race with + * 1. enable parents for CLK_OPS_PARENT_ENABLE clock + * + * 2. Migrate prepare state between parents and prevent race with * clk_enable(). * * If the clock is not prepared, then a race with @@ -1144,12 +1190,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core, * * See also: Comment for clk_set_parent() below. */ + + /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ + if (core->flags & CLK_OPS_PARENT_ENABLE) { + clk_core_prepare_enable(old_parent); + clk_core_prepare_enable(parent); + } + + /* migrate prepare count if > 0 */ if (core->prepare_count) { - clk_core_prepare(parent); - flags = clk_enable_lock(); - clk_core_enable(parent); - clk_core_enable(core); - clk_enable_unlock(flags); + clk_core_prepare_enable(parent); + clk_core_enable_lock(core); } /* update the clk tree topology */ @@ -1164,18 +1215,19 @@ static void __clk_set_parent_after(struct clk_core *core, struct clk_core *parent, struct clk_core *old_parent) { - unsigned long flags; - /* * Finish the migration of prepare state and undo the changes done * for preventing a race with clk_enable(). 
*/ if (core->prepare_count) { - flags = clk_enable_lock(); - clk_core_disable(core); - clk_core_disable(old_parent); - clk_enable_unlock(flags); - clk_core_unprepare(old_parent); + clk_core_disable_lock(core); + clk_core_disable_unprepare(old_parent); + } + + /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ + if (core->flags & CLK_OPS_PARENT_ENABLE) { + clk_core_disable_unprepare(parent); + clk_core_disable_unprepare(old_parent); } } @@ -1422,13 +1474,17 @@ static void clk_change_rate(struct clk_core *core) unsigned long best_parent_rate = 0; bool skip_set_rate = false; struct clk_core *old_parent; + struct clk_core *parent = NULL; old_rate = core->rate; - if (core->new_parent) + if (core->new_parent) { + parent = core->new_parent; best_parent_rate = core->new_parent->rate; - else if (core->parent) + } else if (core->parent) { + parent = core->parent; best_parent_rate = core->parent->rate; + } if (core->flags & CLK_SET_RATE_UNGATE) { unsigned long flags; @@ -1456,6 +1512,9 @@ static void clk_change_rate(struct clk_core *core) __clk_set_parent_after(core, core->new_parent, old_parent); } + if (core->flags & CLK_OPS_PARENT_ENABLE) + clk_core_prepare_enable(parent); + trace_clk_set_rate(core, core->new_rate); if (!skip_set_rate && core->ops->set_rate) @@ -1474,6 +1533,9 @@ static void clk_change_rate(struct clk_core *core) clk_core_unprepare(core); } + if (core->flags & CLK_OPS_PARENT_ENABLE) + clk_core_disable_unprepare(parent); + if (core->notifier_count && old_rate != core->rate) __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); @@ -1501,7 +1563,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core, { struct clk_core *top, *fail_clk; unsigned long rate = req_rate; - int ret = 0; if (!core) return 0; @@ -1532,7 +1593,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, core->req_rate = req_rate; - return ret; + return 0; } /** diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 89cc700fbc37..97ae60fa1584 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -250,7 +250,7 @@ struct clk_lookup_alloc { char con_id[MAX_CON_ID]; }; -static struct clk_lookup * __init_refok +static struct clk_lookup * __ref vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, va_list ap) { @@ -287,7 +287,7 @@ vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt, return cl; } -struct clk_lookup * __init_refok +struct clk_lookup * __ref clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) 
{ struct clk_lookup *cl; diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c index 715c7301a66a..51b173ef1dda 100644 --- a/drivers/clk/hisilicon/clk-hi3519.c +++ b/drivers/clk/hisilicon/clk-hi3519.c @@ -38,6 +38,11 @@ #define HI3519_NR_CLKS 128 +struct hi3519_crg_data { + struct hisi_clock_data *clk_data; + struct hisi_reset_controller *rstc; +}; + static const struct hisi_fixed_rate_clock hi3519_fixed_rate_clks[] = { { HI3519_FIXED_24M, "24m", NULL, 0, 24000000, }, { HI3519_FIXED_50M, "50m", NULL, 0, 50000000, }, @@ -80,33 +85,105 @@ static const struct hisi_gate_clock hi3519_gate_clks[] = { CLK_SET_RATE_PARENT, 0xe4, 18, 0, }, }; -static int hi3519_clk_probe(struct platform_device *pdev) +static struct hisi_clock_data *hi3519_clk_register(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct hisi_clock_data *clk_data; - struct hisi_reset_controller *rstc; + int ret; - rstc = hisi_reset_init(np); - if (!rstc) + clk_data = hisi_clk_alloc(pdev, HI3519_NR_CLKS); + if (!clk_data) + return ERR_PTR(-ENOMEM); + + ret = hisi_clk_register_fixed_rate(hi3519_fixed_rate_clks, + ARRAY_SIZE(hi3519_fixed_rate_clks), + clk_data); + if (ret) + return ERR_PTR(ret); + + ret = hisi_clk_register_mux(hi3519_mux_clks, + ARRAY_SIZE(hi3519_mux_clks), + clk_data); + if (ret) + goto unregister_fixed_rate; + + ret = hisi_clk_register_gate(hi3519_gate_clks, + ARRAY_SIZE(hi3519_gate_clks), + clk_data); + if (ret) + goto unregister_mux; + + ret = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, &clk_data->clk_data); + if (ret) + goto unregister_gate; + + return clk_data; + +unregister_fixed_rate: + hisi_clk_unregister_fixed_rate(hi3519_fixed_rate_clks, + ARRAY_SIZE(hi3519_fixed_rate_clks), + clk_data); + +unregister_mux: + hisi_clk_unregister_mux(hi3519_mux_clks, + ARRAY_SIZE(hi3519_mux_clks), + clk_data); +unregister_gate: + hisi_clk_unregister_gate(hi3519_gate_clks, + ARRAY_SIZE(hi3519_gate_clks), + clk_data); + return ERR_PTR(ret); +} + +static void hi3519_clk_unregister(struct platform_device *pdev) +{ + struct hi3519_crg_data *crg = platform_get_drvdata(pdev); + + of_clk_del_provider(pdev->dev.of_node); + + hisi_clk_unregister_gate(hi3519_gate_clks, + ARRAY_SIZE(hi3519_mux_clks), + crg->clk_data); + hisi_clk_unregister_mux(hi3519_mux_clks, + ARRAY_SIZE(hi3519_mux_clks), + crg->clk_data); + hisi_clk_unregister_fixed_rate(hi3519_fixed_rate_clks, + ARRAY_SIZE(hi3519_fixed_rate_clks), + crg->clk_data); +} + +static int hi3519_clk_probe(struct platform_device *pdev) +{ + struct hi3519_crg_data *crg; + + crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL); + if (!crg) + return -ENOMEM; + + crg->rstc = hisi_reset_init(pdev); + if (!crg->rstc) return -ENOMEM; - clk_data = hisi_clk_init(np, HI3519_NR_CLKS); - if (!clk_data) { - hisi_reset_exit(rstc); - return -ENODEV; + crg->clk_data = hi3519_clk_register(pdev); + if (IS_ERR(crg->clk_data)) { + hisi_reset_exit(crg->rstc); + return PTR_ERR(crg->clk_data); } - hisi_clk_register_fixed_rate(hi3519_fixed_rate_clks, - ARRAY_SIZE(hi3519_fixed_rate_clks), - clk_data); - hisi_clk_register_mux(hi3519_mux_clks, ARRAY_SIZE(hi3519_mux_clks), - clk_data); - hisi_clk_register_gate(hi3519_gate_clks, - ARRAY_SIZE(hi3519_gate_clks), clk_data); + platform_set_drvdata(pdev, crg); + return 0; +} + +static int hi3519_clk_remove(struct platform_device *pdev) +{ + struct hi3519_crg_data *crg = platform_get_drvdata(pdev); + hisi_reset_exit(crg->rstc); + hi3519_clk_unregister(pdev); return 0; } + static const 
struct of_device_id hi3519_clk_match_table[] = { { .compatible = "hisilicon,hi3519-crg" }, { } @@ -115,6 +192,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table); static struct platform_driver hi3519_clk_driver = { .probe = hi3519_clk_probe, + .remove = hi3519_clk_remove, .driver = { .name = "hi3519-clk", .of_match_table = hi3519_clk_match_table, @@ -127,5 +205,11 @@ static int __init hi3519_clk_init(void) } core_initcall(hi3519_clk_init); +static void __exit hi3519_clk_exit(void) +{ + platform_driver_unregister(&hi3519_clk_driver); +} +module_exit(hi3519_clk_exit); + MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("HiSilicon Hi3519 Clock Driver"); diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index f02cb41d40a4..fe364e63f8de 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -34,8 +34,8 @@ static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = { { HI6220_PLL_BBP, "bbppll0", NULL, 0, 245760000, }, { HI6220_PLL_GPU, "gpupll", NULL, 0, 1000000000,}, { HI6220_PLL1_DDR, "ddrpll1", NULL, 0, 1066000000,}, - { HI6220_PLL_SYS, "syspll", NULL, 0, 1200000000,}, - { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1200000000,}, + { HI6220_PLL_SYS, "syspll", NULL, 0, 1190400000,}, + { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1190400000,}, { HI6220_DDR_SRC, "ddr_sel_src", NULL, 0, 1200000000,}, { HI6220_PLL_MEDIA, "media_pll", NULL, 0, 1440000000,}, { HI6220_PLL_DDR, "ddrpll0", NULL, 0, 1600000000,}, @@ -68,6 +68,8 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = { { HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, }, { HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, }, { HI6220_UART0_PCLK, "uart0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, }, + { HI6220_RTC0_PCLK, "rtc0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 25, 0, }, + { HI6220_RTC1_PCLK, "rtc1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 26, 0, }, }; static void __init hi6220_clk_ao_init(struct device_node *np) diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c index 9b15adbfc30c..9ba2d91f4d3a 100644 --- a/drivers/clk/hisilicon/clk.c +++ b/drivers/clk/hisilicon/clk.c @@ -37,6 +37,35 @@ static DEFINE_SPINLOCK(hisi_clk_lock); +struct hisi_clock_data *hisi_clk_alloc(struct platform_device *pdev, + int nr_clks) +{ + struct hisi_clock_data *clk_data; + struct resource *res; + struct clk **clk_table; + + clk_data = devm_kmalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return NULL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + clk_data->base = devm_ioremap(&pdev->dev, + res->start, resource_size(res)); + if (!clk_data->base) + return NULL; + + clk_table = devm_kmalloc(&pdev->dev, sizeof(struct clk *) * nr_clks, + GFP_KERNEL); + if (!clk_table) + return NULL; + + clk_data->clk_data.clks = clk_table; + clk_data->clk_data.clk_num = nr_clks; + + return clk_data; +} +EXPORT_SYMBOL_GPL(hisi_clk_alloc); + struct hisi_clock_data *hisi_clk_init(struct device_node *np, int nr_clks) { @@ -73,7 +102,7 @@ err: } EXPORT_SYMBOL_GPL(hisi_clk_init); -void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks, +int hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks, int nums, struct hisi_clock_data *data) { struct clk *clk; @@ -87,14 +116,22 @@ void hisi_clk_register_fixed_rate(const 
struct hisi_fixed_rate_clock *clks, if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); - continue; + goto err; } data->clk_data.clks[clks[i].id] = clk; } + + return 0; + +err: + while (i--) + clk_unregister_fixed_rate(data->clk_data.clks[clks[i].id]); + + return PTR_ERR(clk); } EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_rate); -void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks, +int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks, int nums, struct hisi_clock_data *data) { @@ -109,14 +146,22 @@ void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks, if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); - continue; + goto err; } data->clk_data.clks[clks[i].id] = clk; } + + return 0; + +err: + while (i--) + clk_unregister_fixed_factor(data->clk_data.clks[clks[i].id]); + + return PTR_ERR(clk); } EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_factor); -void hisi_clk_register_mux(const struct hisi_mux_clock *clks, +int hisi_clk_register_mux(const struct hisi_mux_clock *clks, int nums, struct hisi_clock_data *data) { struct clk *clk; @@ -135,7 +180,7 @@ void hisi_clk_register_mux(const struct hisi_mux_clock *clks, if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); - continue; + goto err; } if (clks[i].alias) @@ -143,10 +188,18 @@ void hisi_clk_register_mux(const struct hisi_mux_clock *clks, data->clk_data.clks[clks[i].id] = clk; } + + return 0; + +err: + while (i--) + clk_unregister_mux(data->clk_data.clks[clks[i].id]); + + return PTR_ERR(clk); } EXPORT_SYMBOL_GPL(hisi_clk_register_mux); -void hisi_clk_register_divider(const struct hisi_divider_clock *clks, +int hisi_clk_register_divider(const struct hisi_divider_clock *clks, int nums, struct hisi_clock_data *data) { struct clk *clk; @@ -165,7 +218,7 @@ void hisi_clk_register_divider(const struct hisi_divider_clock *clks, if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); - continue; + goto err; } if (clks[i].alias) @@ -173,10 +226,18 @@ void hisi_clk_register_divider(const struct hisi_divider_clock *clks, data->clk_data.clks[clks[i].id] = clk; } + + return 0; + +err: + while (i--) + clk_unregister_divider(data->clk_data.clks[clks[i].id]); + + return PTR_ERR(clk); } EXPORT_SYMBOL_GPL(hisi_clk_register_divider); -void hisi_clk_register_gate(const struct hisi_gate_clock *clks, +int hisi_clk_register_gate(const struct hisi_gate_clock *clks, int nums, struct hisi_clock_data *data) { struct clk *clk; @@ -194,7 +255,7 @@ void hisi_clk_register_gate(const struct hisi_gate_clock *clks, if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); - continue; + goto err; } if (clks[i].alias) @@ -202,6 +263,14 @@ void hisi_clk_register_gate(const struct hisi_gate_clock *clks, data->clk_data.clks[clks[i].id] = clk; } + + return 0; + +err: + while (i--) + clk_unregister_gate(data->clk_data.clks[clks[i].id]); + + return PTR_ERR(clk); } EXPORT_SYMBOL_GPL(hisi_clk_register_gate); diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h index 20d64afe4ad8..4e1d1affc6f5 100644 --- a/drivers/clk/hisilicon/clk.h +++ b/drivers/clk/hisilicon/clk.h @@ -30,6 +30,8 @@ #include <linux/io.h> #include <linux/spinlock.h> +struct platform_device; + struct hisi_clock_data { struct clk_onecell_data clk_data; void __iomem *base; @@ -110,19 +112,41 @@ struct clk *hi6220_register_clkdiv(struct device *dev, const char *name, 
const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u32 mask_bit, spinlock_t *lock); +struct hisi_clock_data *hisi_clk_alloc(struct platform_device *, int); struct hisi_clock_data *hisi_clk_init(struct device_node *, int); -void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *, +int hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *, int, struct hisi_clock_data *); -void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *, +int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *, int, struct hisi_clock_data *); -void hisi_clk_register_mux(const struct hisi_mux_clock *, int, +int hisi_clk_register_mux(const struct hisi_mux_clock *, int, struct hisi_clock_data *); -void hisi_clk_register_divider(const struct hisi_divider_clock *, +int hisi_clk_register_divider(const struct hisi_divider_clock *, int, struct hisi_clock_data *); -void hisi_clk_register_gate(const struct hisi_gate_clock *, +int hisi_clk_register_gate(const struct hisi_gate_clock *, int, struct hisi_clock_data *); void hisi_clk_register_gate_sep(const struct hisi_gate_clock *, int, struct hisi_clock_data *); void hi6220_clk_register_divider(const struct hi6220_divider_clock *, int, struct hisi_clock_data *); + +#define hisi_clk_unregister(type) \ +static inline \ +void hisi_clk_unregister_##type(const struct hisi_##type##_clock *clks, \ + int nums, struct hisi_clock_data *data) \ +{ \ + struct clk **clocks = data->clk_data.clks; \ + int i; \ + for (i = 0; i < nums; i++) { \ + int id = clks[i].id; \ + if (clocks[id]) \ + clk_unregister_##type(clocks[id]); \ + } \ +} + +hisi_clk_unregister(fixed_rate) +hisi_clk_unregister(fixed_factor) +hisi_clk_unregister(mux) +hisi_clk_unregister(divider) +hisi_clk_unregister(gate) + #endif /* __HISI_CLK_H */ diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c index 113eee8ed23a..a1c1f684ad58 100644 --- a/drivers/clk/hisilicon/clkdivider-hi6220.c +++ b/drivers/clk/hisilicon/clkdivider-hi6220.c @@ -18,6 +18,8 @@ #include <linux/err.h> #include <linux/spinlock.h> +#include "clk.h" + #define div_mask(width) ((1 << (width)) - 1) /** diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c index 6aa49c2204d0..2a5015c736ce 100644 --- a/drivers/clk/hisilicon/reset.c +++ b/drivers/clk/hisilicon/reset.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/reset-controller.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -98,25 +99,25 @@ static const struct reset_control_ops hisi_reset_ops = { .deassert = hisi_reset_deassert, }; -struct hisi_reset_controller *hisi_reset_init(struct device_node *np) +struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) { struct hisi_reset_controller *rstc; + struct resource *res; - rstc = kzalloc(sizeof(*rstc), GFP_KERNEL); + rstc = devm_kmalloc(&pdev->dev, sizeof(*rstc), GFP_KERNEL); if (!rstc) return NULL; - rstc->membase = of_iomap(np, 0); - if (!rstc->membase) { - kfree(rstc); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rstc->membase = devm_ioremap(&pdev->dev, + res->start, resource_size(res)); + if (!rstc->membase) return NULL; - } spin_lock_init(&rstc->lock); - rstc->rcdev.owner = THIS_MODULE; rstc->rcdev.ops = &hisi_reset_ops; - rstc->rcdev.of_node = np; + rstc->rcdev.of_node = pdev->dev.of_node; rstc->rcdev.of_reset_n_cells = 2; rstc->rcdev.of_xlate = hisi_reset_of_xlate; 
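/*
 * Editorial aside, not part of the patch: the hisi_clk_unregister()
 * generator defined in clk.h above expands mechanically; for instance,
 * hisi_clk_unregister(gate) yields the helper below.
 */
static inline
void hisi_clk_unregister_gate(const struct hisi_gate_clock *clks,
			      int nums, struct hisi_clock_data *data)
{
	struct clk **clocks = data->clk_data.clks;
	int i;

	for (i = 0; i < nums; i++) {
		int id = clks[i].id;

		if (clocks[id])
			clk_unregister_gate(clocks[id]);
	}
}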
reset_controller_register(&rstc->rcdev); @@ -128,7 +129,5 @@ EXPORT_SYMBOL_GPL(hisi_reset_init); void hisi_reset_exit(struct hisi_reset_controller *rstc) { reset_controller_unregister(&rstc->rcdev); - iounmap(rstc->membase); - kfree(rstc); } EXPORT_SYMBOL_GPL(hisi_reset_exit); diff --git a/drivers/clk/hisilicon/reset.h b/drivers/clk/hisilicon/reset.h index 677d773ed27c..9a69374a0b32 100644 --- a/drivers/clk/hisilicon/reset.h +++ b/drivers/clk/hisilicon/reset.h @@ -22,10 +22,11 @@ struct device_node; struct hisi_reset_controller; #ifdef CONFIG_RESET_CONTROLLER -struct hisi_reset_controller *hisi_reset_init(struct device_node *np); +struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev); void hisi_reset_exit(struct hisi_reset_controller *rstc); #else -static inline hisi_reset_controller *hisi_reset_init(struct device_node *np) +static inline +struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) { return 0; } diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 2beb396fe652..ba1c1ae72ac2 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -192,13 +192,13 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clk[IMX6QDL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); - clk[IMX6QDL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); - clk[IMX6QDL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); - clk[IMX6QDL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); - clk[IMX6QDL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); - clk[IMX6QDL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); - clk[IMX6QDL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + clk[IMX6QDL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + clk[IMX6QDL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + clk[IMX6QDL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + clk[IMX6QDL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + clk[IMX6QDL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + clk[IMX6QDL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + clk[IMX6QDL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); clk[IMX6QDL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); clk[IMX6QDL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index 1be6230a07af..5fd4ddac1bf1 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -218,13 +218,13 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) clks[IMX6SL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clks[IMX6SL_CLK_PLL1] = 
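/*
 * With the reset.c/reset.h hunks above, hisi_reset_init() now takes the
 * platform device and allocates everything through devm_* APIs, so
 * hisi_reset_exit() only unregisters the controller and no longer frees or
 * unmaps anything by hand. A hedged sketch of a caller (hypothetical probe
 * function, not part of this patch):
 */
static int hisi_example_probe(struct platform_device *pdev)
{
	struct hisi_reset_controller *rstc;

	rstc = hisi_reset_init(pdev);	/* devm-managed; NULL on failure */
	if (!rstc)
		return -ENOMEM;

	/* ... register clocks, etc. ... */

	return 0;
	/* on removal: hisi_reset_exit(rstc); memory is freed by devm */
}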
imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); - clks[IMX6SL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); - clks[IMX6SL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); - clks[IMX6SL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); - clks[IMX6SL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); - clks[IMX6SL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); - clks[IMX6SL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + clks[IMX6SL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + clks[IMX6SL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + clks[IMX6SL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + clks[IMX6SL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + clks[IMX6SL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + clks[IMX6SL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + clks[IMX6SL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); clks[IMX6SL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index 97e742a8be17..b5c96de41ccf 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -174,13 +174,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) clks[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); - clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); - clks[IMX6SX_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); - clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); - clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); - clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); - clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + clks[IMX6SX_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); clks[IMX6SX_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), 
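/*
 * The i.MX6Q/6SL/6SX hunks here (and the 6UL/7D hunks below) reparent the
 * pllX clocks from "pllX_bypass_src" to "osc": the PLL block itself is
 * always fed by the 24 MHz oscillator, while the pllX_bypass_src mux only
 * selects the bypass path of the separate pllX_bypass mux. Resulting
 * topology (a sketch, assuming the i.MX6Q clock names above):
 *
 *   osc ---------------------> pll1 -------+
 *                                          +--> pll1_bypass --> consumers
 *   pll1_bypass_src (mux) -----------------+
 */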
CLK_SET_RATE_PARENT); clks[IMX6SX_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 0f1f17a8f3ed..d1d7787ce211 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -130,13 +130,13 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); - clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); - clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); - clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); - clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); - clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); - clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); @@ -305,8 +305,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16); clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20); - clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24); - clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x68, 26); + clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt2_bus", "perclk", base + 0x68, 24); + clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26); clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28); clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28); clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 522996800d5b..6ed4f8fa0667 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -65,7 +65,7 @@ static const char *dram_phym_sel[] = { 
"pll_dram_main_clk", "dram_phym_alt_clk", }; static const char *dram_sel[] = { "pll_dram_main_clk", - "dram_alt_clk", }; + "dram_alt_root_clk", }; static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk", "pll_sys_main_clk", "pll_enet_500m_clk", @@ -361,6 +361,14 @@ static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", }; static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", }; +static int const clks_init_on[] __initconst = { + IMX7D_ARM_A7_ROOT_CLK, IMX7D_MAIN_AXI_ROOT_CLK, + IMX7D_PLL_SYS_MAIN_480M_CLK, IMX7D_NAND_USDHC_BUS_ROOT_CLK, + IMX7D_DRAM_PHYM_ROOT_CLK, IMX7D_DRAM_ROOT_CLK, + IMX7D_DRAM_PHYM_ALT_ROOT_CLK, IMX7D_DRAM_ALT_ROOT_CLK, + IMX7D_AHB_CHANNEL_ROOT_CLK, +}; + static struct clk_onecell_data clk_data; static struct clk ** const uart_clks[] __initconst = { @@ -395,12 +403,12 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_PLL_AUDIO_MAIN_SRC] = imx_clk_mux("pll_audio_main_src", base + 0xf0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); clks[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "pll_arm_main_src", base + 0x60, 0x7f); - clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_dram_main", "pll_dram_main_src", base + 0x70, 0x7f); - clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "pll_sys_main_src", base + 0xb0, 0x1); - clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "pll_enet_main_src", base + 0xe0, 0x0); - clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "pll_audio_main_src", base + 0xf0, 0x7f); - clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "pll_video_main_src", base + 0x130, 0x7f); + clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f); + clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_dram_main", "osc", base + 0x70, 0x7f); + clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1); + clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0); + clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f); + clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "osc", base + 0x130, 0x7f); clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT); clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT); @@ -474,363 +482,363 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) base = of_iomap(np, 0); WARN_ON(!base); - clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel)); - clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel)); - clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel)); - clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux("axi_src", base + 0x8800, 24, 3, axi_sel, 
ARRAY_SIZE(axi_sel)); - clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel)); - clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel)); - clks[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_mux("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel)); - clks[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_mux("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel)); - clks[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_mux("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel)); - clks[IMX7D_DRAM_ROOT_SRC] = imx_clk_mux("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel)); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_mux("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel)); - clks[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_mux("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel)); - clks[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_mux("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel)); - clks[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_mux("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel)); - clks[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_mux("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel)); - clks[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_mux("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel)); - clks[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_mux("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel)); - clks[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_mux("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel)); - clks[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_mux("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel)); - clks[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_mux("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel)); - clks[IMX7D_SAI1_ROOT_SRC] = imx_clk_mux("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel)); - clks[IMX7D_SAI2_ROOT_SRC] = imx_clk_mux("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel)); - clks[IMX7D_SAI3_ROOT_SRC] = imx_clk_mux("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel)); - clks[IMX7D_SPDIF_ROOT_SRC] = imx_clk_mux("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel)); - clks[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_mux("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel)); - clks[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_mux("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel)); - clks[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_mux("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel)); - clks[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_mux("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel)); - clks[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_mux("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel)); - clks[IMX7D_EIM_ROOT_SRC] = imx_clk_mux("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel)); - clks[IMX7D_NAND_ROOT_SRC] = imx_clk_mux("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel)); - clks[IMX7D_QSPI_ROOT_SRC] = imx_clk_mux("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel)); - clks[IMX7D_USDHC1_ROOT_SRC] = 
imx_clk_mux("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel)); - clks[IMX7D_USDHC2_ROOT_SRC] = imx_clk_mux("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel)); - clks[IMX7D_USDHC3_ROOT_SRC] = imx_clk_mux("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel)); - clks[IMX7D_CAN1_ROOT_SRC] = imx_clk_mux("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel)); - clks[IMX7D_CAN2_ROOT_SRC] = imx_clk_mux("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel)); - clks[IMX7D_I2C1_ROOT_SRC] = imx_clk_mux("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel)); - clks[IMX7D_I2C2_ROOT_SRC] = imx_clk_mux("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel)); - clks[IMX7D_I2C3_ROOT_SRC] = imx_clk_mux("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel)); - clks[IMX7D_I2C4_ROOT_SRC] = imx_clk_mux("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel)); - clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel)); - clks[IMX7D_UART2_ROOT_SRC] = imx_clk_mux("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel)); - clks[IMX7D_UART3_ROOT_SRC] = imx_clk_mux("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel)); - clks[IMX7D_UART4_ROOT_SRC] = imx_clk_mux("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel)); - clks[IMX7D_UART5_ROOT_SRC] = imx_clk_mux("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel)); - clks[IMX7D_UART6_ROOT_SRC] = imx_clk_mux("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel)); - clks[IMX7D_UART7_ROOT_SRC] = imx_clk_mux("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel)); - clks[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_mux("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel)); - clks[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_mux("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel)); - clks[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_mux("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel)); - clks[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_mux("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel)); - clks[IMX7D_PWM1_ROOT_SRC] = imx_clk_mux("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel)); - clks[IMX7D_PWM2_ROOT_SRC] = imx_clk_mux("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel)); - clks[IMX7D_PWM3_ROOT_SRC] = imx_clk_mux("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel)); - clks[IMX7D_PWM4_ROOT_SRC] = imx_clk_mux("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel)); - clks[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_mux("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel)); - clks[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_mux("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel)); - clks[IMX7D_SIM1_ROOT_SRC] = imx_clk_mux("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel)); - clks[IMX7D_SIM2_ROOT_SRC] = imx_clk_mux("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel)); - clks[IMX7D_GPT1_ROOT_SRC] = imx_clk_mux("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel)); - clks[IMX7D_GPT2_ROOT_SRC] = imx_clk_mux("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel)); - clks[IMX7D_GPT3_ROOT_SRC] = imx_clk_mux("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel)); - clks[IMX7D_GPT4_ROOT_SRC] = imx_clk_mux("gpt4_src", base + 0xba80, 24, 3, 
gpt4_sel, ARRAY_SIZE(gpt4_sel)); - clks[IMX7D_TRACE_ROOT_SRC] = imx_clk_mux("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel)); - clks[IMX7D_WDOG_ROOT_SRC] = imx_clk_mux("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel)); - clks[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_mux("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel)); - clks[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_mux("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel)); - clks[IMX7D_WRCLK_ROOT_SRC] = imx_clk_mux("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel)); - clks[IMX7D_CLKO1_ROOT_SRC] = imx_clk_mux("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel)); - clks[IMX7D_CLKO2_ROOT_SRC] = imx_clk_mux("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel)); - - clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate("arm_a7_cg", "arm_a7_src", base + 0x8000, 28); - clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); - clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate("arm_m0_cg", "arm_m0_src", base + 0x8100, 28); - clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate("axi_cg", "axi_src", base + 0x8800, 28); - clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate("disp_axi_cg", "disp_axi_src", base + 0x8880, 28); - clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate("enet_axi_cg", "enet_axi_src", base + 0x8900, 28); - clks[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_gate("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28); - clks[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_gate("ahb_cg", "ahb_src", base + 0x9000, 28); - clks[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_gate("dram_phym_cg", "dram_phym_src", base + 0x9800, 28); - clks[IMX7D_DRAM_ROOT_CG] = imx_clk_gate("dram_cg", "dram_src", base + 0x9880, 28); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_gate("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28); - clks[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_gate("dram_alt_cg", "dram_alt_src", base + 0xa080, 28); - clks[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_gate("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28); - clks[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_gate("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28); - clks[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_gate("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28); - clks[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_gate("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28); - clks[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_gate("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28); - clks[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_gate("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28); - clks[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_gate("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28); - clks[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_gate("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28); - clks[IMX7D_SAI1_ROOT_CG] = imx_clk_gate("sai1_cg", "sai1_src", base + 0xa500, 28); - clks[IMX7D_SAI2_ROOT_CG] = imx_clk_gate("sai2_cg", "sai2_src", base + 0xa580, 28); - clks[IMX7D_SAI3_ROOT_CG] = imx_clk_gate("sai3_cg", "sai3_src", base + 0xa600, 28); - clks[IMX7D_SPDIF_ROOT_CG] = imx_clk_gate("spdif_cg", "spdif_src", base + 0xa680, 28); - clks[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_gate("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28); - clks[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_gate("enet1_time_cg", "enet1_time_src", base + 0xa780, 28); - clks[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_gate("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28); - clks[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_gate("enet2_time_cg", "enet2_time_src", base + 0xa880, 28); - 
clks[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_gate("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28); - clks[IMX7D_EIM_ROOT_CG] = imx_clk_gate("eim_cg", "eim_src", base + 0xa980, 28); - clks[IMX7D_NAND_ROOT_CG] = imx_clk_gate("nand_cg", "nand_src", base + 0xaa00, 28); - clks[IMX7D_QSPI_ROOT_CG] = imx_clk_gate("qspi_cg", "qspi_src", base + 0xaa80, 28); - clks[IMX7D_USDHC1_ROOT_CG] = imx_clk_gate("usdhc1_cg", "usdhc1_src", base + 0xab00, 28); - clks[IMX7D_USDHC2_ROOT_CG] = imx_clk_gate("usdhc2_cg", "usdhc2_src", base + 0xab80, 28); - clks[IMX7D_USDHC3_ROOT_CG] = imx_clk_gate("usdhc3_cg", "usdhc3_src", base + 0xac00, 28); - clks[IMX7D_CAN1_ROOT_CG] = imx_clk_gate("can1_cg", "can1_src", base + 0xac80, 28); - clks[IMX7D_CAN2_ROOT_CG] = imx_clk_gate("can2_cg", "can2_src", base + 0xad00, 28); - clks[IMX7D_I2C1_ROOT_CG] = imx_clk_gate("i2c1_cg", "i2c1_src", base + 0xad80, 28); - clks[IMX7D_I2C2_ROOT_CG] = imx_clk_gate("i2c2_cg", "i2c2_src", base + 0xae00, 28); - clks[IMX7D_I2C3_ROOT_CG] = imx_clk_gate("i2c3_cg", "i2c3_src", base + 0xae80, 28); - clks[IMX7D_I2C4_ROOT_CG] = imx_clk_gate("i2c4_cg", "i2c4_src", base + 0xaf00, 28); - clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate("uart1_cg", "uart1_src", base + 0xaf80, 28); - clks[IMX7D_UART2_ROOT_CG] = imx_clk_gate("uart2_cg", "uart2_src", base + 0xb000, 28); - clks[IMX7D_UART3_ROOT_CG] = imx_clk_gate("uart3_cg", "uart3_src", base + 0xb080, 28); - clks[IMX7D_UART4_ROOT_CG] = imx_clk_gate("uart4_cg", "uart4_src", base + 0xb100, 28); - clks[IMX7D_UART5_ROOT_CG] = imx_clk_gate("uart5_cg", "uart5_src", base + 0xb180, 28); - clks[IMX7D_UART6_ROOT_CG] = imx_clk_gate("uart6_cg", "uart6_src", base + 0xb200, 28); - clks[IMX7D_UART7_ROOT_CG] = imx_clk_gate("uart7_cg", "uart7_src", base + 0xb280, 28); - clks[IMX7D_ECSPI1_ROOT_CG] = imx_clk_gate("ecspi1_cg", "ecspi1_src", base + 0xb300, 28); - clks[IMX7D_ECSPI2_ROOT_CG] = imx_clk_gate("ecspi2_cg", "ecspi2_src", base + 0xb380, 28); - clks[IMX7D_ECSPI3_ROOT_CG] = imx_clk_gate("ecspi3_cg", "ecspi3_src", base + 0xb400, 28); - clks[IMX7D_ECSPI4_ROOT_CG] = imx_clk_gate("ecspi4_cg", "ecspi4_src", base + 0xb480, 28); - clks[IMX7D_PWM1_ROOT_CG] = imx_clk_gate("pwm1_cg", "pwm1_src", base + 0xb500, 28); - clks[IMX7D_PWM2_ROOT_CG] = imx_clk_gate("pwm2_cg", "pwm2_src", base + 0xb580, 28); - clks[IMX7D_PWM3_ROOT_CG] = imx_clk_gate("pwm3_cg", "pwm3_src", base + 0xb600, 28); - clks[IMX7D_PWM4_ROOT_CG] = imx_clk_gate("pwm4_cg", "pwm4_src", base + 0xb680, 28); - clks[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_gate("flextimer1_cg", "flextimer1_src", base + 0xb700, 28); - clks[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_gate("flextimer2_cg", "flextimer2_src", base + 0xb780, 28); - clks[IMX7D_SIM1_ROOT_CG] = imx_clk_gate("sim1_cg", "sim1_src", base + 0xb800, 28); - clks[IMX7D_SIM2_ROOT_CG] = imx_clk_gate("sim2_cg", "sim2_src", base + 0xb880, 28); - clks[IMX7D_GPT1_ROOT_CG] = imx_clk_gate("gpt1_cg", "gpt1_src", base + 0xb900, 28); - clks[IMX7D_GPT2_ROOT_CG] = imx_clk_gate("gpt2_cg", "gpt2_src", base + 0xb980, 28); - clks[IMX7D_GPT3_ROOT_CG] = imx_clk_gate("gpt3_cg", "gpt3_src", base + 0xbA00, 28); - clks[IMX7D_GPT4_ROOT_CG] = imx_clk_gate("gpt4_cg", "gpt4_src", base + 0xbA80, 28); - clks[IMX7D_TRACE_ROOT_CG] = imx_clk_gate("trace_cg", "trace_src", base + 0xbb00, 28); - clks[IMX7D_WDOG_ROOT_CG] = imx_clk_gate("wdog_cg", "wdog_src", base + 0xbb80, 28); - clks[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_gate("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28); - clks[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_gate("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28); - 
clks[IMX7D_WRCLK_ROOT_CG] = imx_clk_gate("wrclk_cg", "wrclk_src", base + 0xbd00, 28); - clks[IMX7D_CLKO1_ROOT_CG] = imx_clk_gate("clko1_cg", "clko1_src", base + 0xbd80, 28); - clks[IMX7D_CLKO2_ROOT_CG] = imx_clk_gate("clko2_cg", "clko2_src", base + 0xbe00, 28); - - clks[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_divider("axi_pre_div", "axi_cg", base + 0x8800, 16, 3); - clks[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_divider("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3); - clks[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_divider("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3); - clks[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_divider("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3); - clks[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_divider("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_divider("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3); - clks[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_divider("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3); - clks[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_divider("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3); - clks[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_divider("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3); - clks[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_divider("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3); - clks[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_divider("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3); - clks[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_divider("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3); - clks[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_divider("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3); - clks[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_divider("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3); - clks[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_divider("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3); - clks[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_divider("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3); - clks[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_divider("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3); - clks[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_divider("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3); - clks[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_divider("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3); - clks[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_divider("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3); - clks[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_divider("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3); - clks[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_divider("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3); - clks[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_divider("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3); - clks[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_divider("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3); - clks[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_divider("eim_pre_div", "eim_cg", base + 0xa980, 16, 3); - clks[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_divider("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3); - clks[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_divider("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3); - clks[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_divider("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3); - clks[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_divider("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3); - clks[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_divider("usdhc3_pre_div", 
"usdhc3_cg", base + 0xac00, 16, 3); - clks[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_divider("can1_pre_div", "can1_cg", base + 0xac80, 16, 3); - clks[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_divider("can2_pre_div", "can2_cg", base + 0xad00, 16, 3); - clks[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_divider("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3); - clks[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_divider("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3); - clks[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_divider("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3); - clks[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_divider("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3); - clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3); - clks[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_divider("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3); - clks[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_divider("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3); - clks[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_divider("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3); - clks[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_divider("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3); - clks[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_divider("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3); - clks[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_divider("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3); - clks[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_divider("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3); - clks[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_divider("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3); - clks[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_divider("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3); - clks[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_divider("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3); - clks[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_divider("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3); - clks[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_divider("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3); - clks[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_divider("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3); - clks[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_divider("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3); - clks[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_divider("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3); - clks[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_divider("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3); - clks[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_divider("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3); - clks[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_divider("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3); - clks[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_divider("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3); - clks[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_divider("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3); - clks[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_divider("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3); - clks[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_divider("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3); - clks[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_divider("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3); - clks[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_divider("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3); - clks[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_divider("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3); - clks[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_divider("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3); - clks[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_divider("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3); - 
clks[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_divider("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3); - clks[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_divider("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3); - - clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3); - clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); - clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3); - clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6); - clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6); - clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); - clks[IMX7D_NAND_USDHC_BUS_ROOT_DIV] = imx_clk_divider("nand_usdhc_post_div", "nand_usdhc_pre_div", base + 0x8980, 0, 6); - clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider("ahb_post_div", "ahb_pre_div", base + 0x9000, 0, 6); - clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider("dram_post_div", "dram_cg", base + 0x9880, 0, 3); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); - clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); - clks[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_divider("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6); - clks[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_divider("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6); - clks[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_divider("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6); - clks[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_divider("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6); - clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6); - clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6); - clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6); - clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider("mipi_dphy_post_div", "mipi_csi_dphy_div", base + 0xa480, 0, 6); - clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6); - clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6); - clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6); - clks[IMX7D_SPDIF_ROOT_DIV] = imx_clk_divider("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6); - clks[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_divider("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6); - clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6); - clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6); - clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6); - clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6); - clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6); - clks[IMX7D_NAND_ROOT_DIV] = imx_clk_divider("nand_post_div", "nand_pre_div", base + 0xaa00, 0, 6); - clks[IMX7D_QSPI_ROOT_DIV] = 
imx_clk_divider("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6); - clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6); - clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6); - clks[IMX7D_USDHC3_ROOT_DIV] = imx_clk_divider("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6); - clks[IMX7D_CAN1_ROOT_DIV] = imx_clk_divider("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6); - clks[IMX7D_CAN2_ROOT_DIV] = imx_clk_divider("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6); - clks[IMX7D_I2C1_ROOT_DIV] = imx_clk_divider("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6); - clks[IMX7D_I2C2_ROOT_DIV] = imx_clk_divider("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6); - clks[IMX7D_I2C3_ROOT_DIV] = imx_clk_divider("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6); - clks[IMX7D_I2C4_ROOT_DIV] = imx_clk_divider("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6); - clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6); - clks[IMX7D_UART2_ROOT_DIV] = imx_clk_divider("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6); - clks[IMX7D_UART3_ROOT_DIV] = imx_clk_divider("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6); - clks[IMX7D_UART4_ROOT_DIV] = imx_clk_divider("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6); - clks[IMX7D_UART5_ROOT_DIV] = imx_clk_divider("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6); - clks[IMX7D_UART6_ROOT_DIV] = imx_clk_divider("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6); - clks[IMX7D_UART7_ROOT_DIV] = imx_clk_divider("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6); - clks[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_divider("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6); - clks[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_divider("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6); - clks[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_divider("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6); - clks[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_divider("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6); - clks[IMX7D_PWM1_ROOT_DIV] = imx_clk_divider("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6); - clks[IMX7D_PWM2_ROOT_DIV] = imx_clk_divider("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6); - clks[IMX7D_PWM3_ROOT_DIV] = imx_clk_divider("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6); - clks[IMX7D_PWM4_ROOT_DIV] = imx_clk_divider("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6); - clks[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_divider("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6); - clks[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_divider("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6); - clks[IMX7D_SIM1_ROOT_DIV] = imx_clk_divider("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6); - clks[IMX7D_SIM2_ROOT_DIV] = imx_clk_divider("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6); - clks[IMX7D_GPT1_ROOT_DIV] = imx_clk_divider("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6); - clks[IMX7D_GPT2_ROOT_DIV] = imx_clk_divider("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6); - clks[IMX7D_GPT3_ROOT_DIV] = imx_clk_divider("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6); - clks[IMX7D_GPT4_ROOT_DIV] = imx_clk_divider("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6); - clks[IMX7D_TRACE_ROOT_DIV] = imx_clk_divider("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6); - clks[IMX7D_WDOG_ROOT_DIV] = 
imx_clk_divider("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6); - clks[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_divider("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6); - clks[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_divider("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6); - clks[IMX7D_WRCLK_ROOT_DIV] = imx_clk_divider("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6); - clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6); - clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6); - - clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate2("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0); - clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate2("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0); - clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate2("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0); - clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate2("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); - clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate2("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); - clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate2("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); - clks[IMX7D_OCRAM_CLK] = imx_clk_gate2("ocram_clk", "axi_post_div", base + 0x4110, 0); - clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate2("ocram_s_clk", "ahb_post_div", base + 0x4120, 0); - clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_gate2("nand_usdhc_root_clk", "nand_usdhc_post_div", base + 0x4130, 0); - clks[IMX7D_AHB_CHANNEL_ROOT_CLK] = imx_clk_gate2("ahb_root_clk", "ahb_post_div", base + 0x4200, 0); - clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate2("dram_root_clk", "dram_post_div", base + 0x4130, 0); - clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate2("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate2("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0); - clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate2("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0); - clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate2("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0); - clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate2("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0); - clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate2("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0); - clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate2("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0); - clks[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_gate2("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0); - clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate2("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0); - clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate2("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0); - clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate2("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0); - clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0); - clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0); - clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0); - clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate2("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0); - clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate2("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0); - clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate2("enet1_time_root_clk", 
"enet1_time_post_div", base + 0x44f0, 0); - clks[IMX7D_ENET2_REF_ROOT_CLK] = imx_clk_gate2("enet2_ref_root_clk", "enet2_ref_post_div", base + 0x4500, 0); - clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0); - clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate2("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0); - clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate2("eim_root_clk", "eim_post_div", base + 0x4160, 0); - clks[IMX7D_NAND_ROOT_CLK] = imx_clk_gate2("nand_root_clk", "nand_post_div", base + 0x4140, 0); - clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate2("qspi_root_clk", "qspi_post_div", base + 0x4150, 0); - clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate2("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0); - clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate2("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0); - clks[IMX7D_USDHC3_ROOT_CLK] = imx_clk_gate2("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0); - clks[IMX7D_CAN1_ROOT_CLK] = imx_clk_gate2("can1_root_clk", "can1_post_div", base + 0x4740, 0); - clks[IMX7D_CAN2_ROOT_CLK] = imx_clk_gate2("can2_root_clk", "can2_post_div", base + 0x4750, 0); - clks[IMX7D_I2C1_ROOT_CLK] = imx_clk_gate2("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0); - clks[IMX7D_I2C2_ROOT_CLK] = imx_clk_gate2("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0); - clks[IMX7D_I2C3_ROOT_CLK] = imx_clk_gate2("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0); - clks[IMX7D_I2C4_ROOT_CLK] = imx_clk_gate2("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0); - clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate2("uart1_root_clk", "uart1_post_div", base + 0x4940, 0); - clks[IMX7D_UART2_ROOT_CLK] = imx_clk_gate2("uart2_root_clk", "uart2_post_div", base + 0x4950, 0); - clks[IMX7D_UART3_ROOT_CLK] = imx_clk_gate2("uart3_root_clk", "uart3_post_div", base + 0x4960, 0); - clks[IMX7D_UART4_ROOT_CLK] = imx_clk_gate2("uart4_root_clk", "uart4_post_div", base + 0x4970, 0); - clks[IMX7D_UART5_ROOT_CLK] = imx_clk_gate2("uart5_root_clk", "uart5_post_div", base + 0x4980, 0); - clks[IMX7D_UART6_ROOT_CLK] = imx_clk_gate2("uart6_root_clk", "uart6_post_div", base + 0x4990, 0); - clks[IMX7D_UART7_ROOT_CLK] = imx_clk_gate2("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0); - clks[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_gate2("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0); - clks[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_gate2("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0); - clks[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_gate2("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0); - clks[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_gate2("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0); - clks[IMX7D_PWM1_ROOT_CLK] = imx_clk_gate2("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0); - clks[IMX7D_PWM2_ROOT_CLK] = imx_clk_gate2("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0); - clks[IMX7D_PWM3_ROOT_CLK] = imx_clk_gate2("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0); - clks[IMX7D_PWM4_ROOT_CLK] = imx_clk_gate2("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0); - clks[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_gate2("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0); - clks[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_gate2("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0); - clks[IMX7D_SIM1_ROOT_CLK] = imx_clk_gate2("sim1_root_clk", "sim1_post_div", base + 0x4900, 0); - clks[IMX7D_SIM2_ROOT_CLK] = imx_clk_gate2("sim2_root_clk", "sim2_post_div", base + 0x4910, 0); - clks[IMX7D_GPT1_ROOT_CLK] = imx_clk_gate2("gpt1_root_clk", 
"gpt1_post_div", base + 0x47c0, 0); - clks[IMX7D_GPT2_ROOT_CLK] = imx_clk_gate2("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0); - clks[IMX7D_GPT3_ROOT_CLK] = imx_clk_gate2("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0); - clks[IMX7D_GPT4_ROOT_CLK] = imx_clk_gate2("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0); - clks[IMX7D_TRACE_ROOT_CLK] = imx_clk_gate2("trace_root_clk", "trace_post_div", base + 0x4300, 0); - clks[IMX7D_WDOG1_ROOT_CLK] = imx_clk_gate2("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0); - clks[IMX7D_WDOG2_ROOT_CLK] = imx_clk_gate2("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0); - clks[IMX7D_WDOG3_ROOT_CLK] = imx_clk_gate2("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0); - clks[IMX7D_WDOG4_ROOT_CLK] = imx_clk_gate2("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); - clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate2("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); - clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate2("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); - clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate2("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); - clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate2("adc_root_clk", "ipg_root_clk", base + 0x4200, 0); + clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel)); + clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel)); + clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux2("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel)); + clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel)); + clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel)); + clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel)); + clks[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_mux2("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel)); + clks[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel)); + clks[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_mux2("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel)); + clks[IMX7D_DRAM_ROOT_SRC] = imx_clk_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel)); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_mux2("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel)); + clks[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel)); + clks[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_mux2("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel)); + clks[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_mux2("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel)); + clks[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_mux2("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel)); + clks[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_mux2("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel)); + clks[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_mux2("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel)); + clks[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_mux2("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel)); + 
clks[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_mux2("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel)); + clks[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_mux2("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel)); + clks[IMX7D_SAI1_ROOT_SRC] = imx_clk_mux2("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel)); + clks[IMX7D_SAI2_ROOT_SRC] = imx_clk_mux2("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel)); + clks[IMX7D_SAI3_ROOT_SRC] = imx_clk_mux2("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel)); + clks[IMX7D_SPDIF_ROOT_SRC] = imx_clk_mux2("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel)); + clks[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_mux2("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel)); + clks[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_mux2("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel)); + clks[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_mux2("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel)); + clks[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_mux2("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel)); + clks[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_mux2("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel)); + clks[IMX7D_EIM_ROOT_SRC] = imx_clk_mux2("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel)); + clks[IMX7D_NAND_ROOT_SRC] = imx_clk_mux2("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel)); + clks[IMX7D_QSPI_ROOT_SRC] = imx_clk_mux2("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel)); + clks[IMX7D_USDHC1_ROOT_SRC] = imx_clk_mux2("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel)); + clks[IMX7D_USDHC2_ROOT_SRC] = imx_clk_mux2("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel)); + clks[IMX7D_USDHC3_ROOT_SRC] = imx_clk_mux2("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel)); + clks[IMX7D_CAN1_ROOT_SRC] = imx_clk_mux2("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel)); + clks[IMX7D_CAN2_ROOT_SRC] = imx_clk_mux2("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel)); + clks[IMX7D_I2C1_ROOT_SRC] = imx_clk_mux2("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel)); + clks[IMX7D_I2C2_ROOT_SRC] = imx_clk_mux2("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel)); + clks[IMX7D_I2C3_ROOT_SRC] = imx_clk_mux2("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel)); + clks[IMX7D_I2C4_ROOT_SRC] = imx_clk_mux2("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel)); + clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux2("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel)); + clks[IMX7D_UART2_ROOT_SRC] = imx_clk_mux2("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel)); + clks[IMX7D_UART3_ROOT_SRC] = imx_clk_mux2("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel)); + clks[IMX7D_UART4_ROOT_SRC] = imx_clk_mux2("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel)); + clks[IMX7D_UART5_ROOT_SRC] = imx_clk_mux2("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel)); + clks[IMX7D_UART6_ROOT_SRC] = imx_clk_mux2("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel)); + clks[IMX7D_UART7_ROOT_SRC] = imx_clk_mux2("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel)); + clks[IMX7D_ECSPI1_ROOT_SRC] = 
imx_clk_mux2("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel)); + clks[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_mux2("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel)); + clks[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_mux2("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel)); + clks[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_mux2("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel)); + clks[IMX7D_PWM1_ROOT_SRC] = imx_clk_mux2("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel)); + clks[IMX7D_PWM2_ROOT_SRC] = imx_clk_mux2("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel)); + clks[IMX7D_PWM3_ROOT_SRC] = imx_clk_mux2("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel)); + clks[IMX7D_PWM4_ROOT_SRC] = imx_clk_mux2("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel)); + clks[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_mux2("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel)); + clks[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_mux2("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel)); + clks[IMX7D_SIM1_ROOT_SRC] = imx_clk_mux2("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel)); + clks[IMX7D_SIM2_ROOT_SRC] = imx_clk_mux2("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel)); + clks[IMX7D_GPT1_ROOT_SRC] = imx_clk_mux2("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel)); + clks[IMX7D_GPT2_ROOT_SRC] = imx_clk_mux2("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel)); + clks[IMX7D_GPT3_ROOT_SRC] = imx_clk_mux2("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel)); + clks[IMX7D_GPT4_ROOT_SRC] = imx_clk_mux2("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel)); + clks[IMX7D_TRACE_ROOT_SRC] = imx_clk_mux2("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel)); + clks[IMX7D_WDOG_ROOT_SRC] = imx_clk_mux2("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel)); + clks[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_mux2("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel)); + clks[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_mux2("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel)); + clks[IMX7D_WRCLK_ROOT_SRC] = imx_clk_mux2("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel)); + clks[IMX7D_CLKO1_ROOT_SRC] = imx_clk_mux2("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel)); + clks[IMX7D_CLKO2_ROOT_SRC] = imx_clk_mux2("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel)); + + clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28); + clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); + clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate3("arm_m0_cg", "arm_m0_src", base + 0x8100, 28); + clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28); + clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28); + clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28); + clks[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_gate3("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28); + clks[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_gate3("ahb_cg", "ahb_src", base + 0x9000, 28); + clks[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_gate3("dram_phym_cg", "dram_phym_src", base + 0x9800, 28); + clks[IMX7D_DRAM_ROOT_CG] = imx_clk_gate3("dram_cg", 
"dram_src", base + 0x9880, 28); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_gate3("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28); + clks[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_gate3("dram_alt_cg", "dram_alt_src", base + 0xa080, 28); + clks[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_gate3("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28); + clks[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_gate3("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28); + clks[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_gate3("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28); + clks[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_gate3("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28); + clks[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_gate3("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28); + clks[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_gate3("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28); + clks[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_gate3("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28); + clks[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_gate3("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28); + clks[IMX7D_SAI1_ROOT_CG] = imx_clk_gate3("sai1_cg", "sai1_src", base + 0xa500, 28); + clks[IMX7D_SAI2_ROOT_CG] = imx_clk_gate3("sai2_cg", "sai2_src", base + 0xa580, 28); + clks[IMX7D_SAI3_ROOT_CG] = imx_clk_gate3("sai3_cg", "sai3_src", base + 0xa600, 28); + clks[IMX7D_SPDIF_ROOT_CG] = imx_clk_gate3("spdif_cg", "spdif_src", base + 0xa680, 28); + clks[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_gate3("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28); + clks[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_gate3("enet1_time_cg", "enet1_time_src", base + 0xa780, 28); + clks[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_gate3("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28); + clks[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_gate3("enet2_time_cg", "enet2_time_src", base + 0xa880, 28); + clks[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_gate3("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28); + clks[IMX7D_EIM_ROOT_CG] = imx_clk_gate3("eim_cg", "eim_src", base + 0xa980, 28); + clks[IMX7D_NAND_ROOT_CG] = imx_clk_gate3("nand_cg", "nand_src", base + 0xaa00, 28); + clks[IMX7D_QSPI_ROOT_CG] = imx_clk_gate3("qspi_cg", "qspi_src", base + 0xaa80, 28); + clks[IMX7D_USDHC1_ROOT_CG] = imx_clk_gate3("usdhc1_cg", "usdhc1_src", base + 0xab00, 28); + clks[IMX7D_USDHC2_ROOT_CG] = imx_clk_gate3("usdhc2_cg", "usdhc2_src", base + 0xab80, 28); + clks[IMX7D_USDHC3_ROOT_CG] = imx_clk_gate3("usdhc3_cg", "usdhc3_src", base + 0xac00, 28); + clks[IMX7D_CAN1_ROOT_CG] = imx_clk_gate3("can1_cg", "can1_src", base + 0xac80, 28); + clks[IMX7D_CAN2_ROOT_CG] = imx_clk_gate3("can2_cg", "can2_src", base + 0xad00, 28); + clks[IMX7D_I2C1_ROOT_CG] = imx_clk_gate3("i2c1_cg", "i2c1_src", base + 0xad80, 28); + clks[IMX7D_I2C2_ROOT_CG] = imx_clk_gate3("i2c2_cg", "i2c2_src", base + 0xae00, 28); + clks[IMX7D_I2C3_ROOT_CG] = imx_clk_gate3("i2c3_cg", "i2c3_src", base + 0xae80, 28); + clks[IMX7D_I2C4_ROOT_CG] = imx_clk_gate3("i2c4_cg", "i2c4_src", base + 0xaf00, 28); + clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate3("uart1_cg", "uart1_src", base + 0xaf80, 28); + clks[IMX7D_UART2_ROOT_CG] = imx_clk_gate3("uart2_cg", "uart2_src", base + 0xb000, 28); + clks[IMX7D_UART3_ROOT_CG] = imx_clk_gate3("uart3_cg", "uart3_src", base + 0xb080, 28); + clks[IMX7D_UART4_ROOT_CG] = imx_clk_gate3("uart4_cg", "uart4_src", base + 0xb100, 28); + clks[IMX7D_UART5_ROOT_CG] = imx_clk_gate3("uart5_cg", "uart5_src", base + 0xb180, 28); + clks[IMX7D_UART6_ROOT_CG] = imx_clk_gate3("uart6_cg", "uart6_src", base + 0xb200, 28); + clks[IMX7D_UART7_ROOT_CG] = imx_clk_gate3("uart7_cg", 
"uart7_src", base + 0xb280, 28); + clks[IMX7D_ECSPI1_ROOT_CG] = imx_clk_gate3("ecspi1_cg", "ecspi1_src", base + 0xb300, 28); + clks[IMX7D_ECSPI2_ROOT_CG] = imx_clk_gate3("ecspi2_cg", "ecspi2_src", base + 0xb380, 28); + clks[IMX7D_ECSPI3_ROOT_CG] = imx_clk_gate3("ecspi3_cg", "ecspi3_src", base + 0xb400, 28); + clks[IMX7D_ECSPI4_ROOT_CG] = imx_clk_gate3("ecspi4_cg", "ecspi4_src", base + 0xb480, 28); + clks[IMX7D_PWM1_ROOT_CG] = imx_clk_gate3("pwm1_cg", "pwm1_src", base + 0xb500, 28); + clks[IMX7D_PWM2_ROOT_CG] = imx_clk_gate3("pwm2_cg", "pwm2_src", base + 0xb580, 28); + clks[IMX7D_PWM3_ROOT_CG] = imx_clk_gate3("pwm3_cg", "pwm3_src", base + 0xb600, 28); + clks[IMX7D_PWM4_ROOT_CG] = imx_clk_gate3("pwm4_cg", "pwm4_src", base + 0xb680, 28); + clks[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_gate3("flextimer1_cg", "flextimer1_src", base + 0xb700, 28); + clks[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_gate3("flextimer2_cg", "flextimer2_src", base + 0xb780, 28); + clks[IMX7D_SIM1_ROOT_CG] = imx_clk_gate3("sim1_cg", "sim1_src", base + 0xb800, 28); + clks[IMX7D_SIM2_ROOT_CG] = imx_clk_gate3("sim2_cg", "sim2_src", base + 0xb880, 28); + clks[IMX7D_GPT1_ROOT_CG] = imx_clk_gate3("gpt1_cg", "gpt1_src", base + 0xb900, 28); + clks[IMX7D_GPT2_ROOT_CG] = imx_clk_gate3("gpt2_cg", "gpt2_src", base + 0xb980, 28); + clks[IMX7D_GPT3_ROOT_CG] = imx_clk_gate3("gpt3_cg", "gpt3_src", base + 0xbA00, 28); + clks[IMX7D_GPT4_ROOT_CG] = imx_clk_gate3("gpt4_cg", "gpt4_src", base + 0xbA80, 28); + clks[IMX7D_TRACE_ROOT_CG] = imx_clk_gate3("trace_cg", "trace_src", base + 0xbb00, 28); + clks[IMX7D_WDOG_ROOT_CG] = imx_clk_gate3("wdog_cg", "wdog_src", base + 0xbb80, 28); + clks[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_gate3("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28); + clks[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_gate3("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28); + clks[IMX7D_WRCLK_ROOT_CG] = imx_clk_gate3("wrclk_cg", "wrclk_src", base + 0xbd00, 28); + clks[IMX7D_CLKO1_ROOT_CG] = imx_clk_gate3("clko1_cg", "clko1_src", base + 0xbd80, 28); + clks[IMX7D_CLKO2_ROOT_CG] = imx_clk_gate3("clko2_cg", "clko2_src", base + 0xbe00, 28); + + clks[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_divider2("axi_pre_div", "axi_cg", base + 0x8800, 16, 3); + clks[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_divider2("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3); + clks[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_divider2("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3); + clks[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_divider2("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3); + clks[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_divider2("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3); + clks[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3); + clks[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_divider2("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3); + clks[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_divider2("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3); + clks[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_divider2("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3); + clks[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3); + clks[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3); + clks[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = 
imx_clk_divider2("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3); + clks[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_divider2("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3); + clks[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_divider2("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3); + clks[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_divider2("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3); + clks[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_divider2("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3); + clks[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_divider2("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3); + clks[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_divider2("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3); + clks[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3); + clks[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3); + clks[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3); + clks[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3); + clks[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3); + clks[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_divider2("eim_pre_div", "eim_cg", base + 0xa980, 16, 3); + clks[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_divider2("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3); + clks[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_divider2("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3); + clks[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_divider2("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3); + clks[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_divider2("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3); + clks[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_divider2("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3); + clks[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_divider2("can1_pre_div", "can1_cg", base + 0xac80, 16, 3); + clks[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_divider2("can2_pre_div", "can2_cg", base + 0xad00, 16, 3); + clks[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_divider2("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3); + clks[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_divider2("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3); + clks[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_divider2("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3); + clks[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_divider2("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3); + clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider2("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3); + clks[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_divider2("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3); + clks[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_divider2("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3); + clks[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_divider2("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3); + clks[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_divider2("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3); + clks[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_divider2("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3); + clks[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_divider2("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3); + clks[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_divider2("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3); + clks[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_divider2("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3); + clks[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_divider2("ecspi3_pre_div", 
"ecspi3_cg", base + 0xb400, 16, 3); + clks[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_divider2("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3); + clks[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_divider2("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3); + clks[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_divider2("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3); + clks[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_divider2("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3); + clks[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_divider2("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3); + clks[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_divider2("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3); + clks[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_divider2("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3); + clks[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_divider2("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3); + clks[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_divider2("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3); + clks[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_divider2("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3); + clks[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_divider2("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3); + clks[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_divider2("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3); + clks[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_divider2("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3); + clks[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_divider2("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3); + clks[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_divider2("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3); + clks[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3); + clks[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3); + clks[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_divider2("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3); + clks[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_divider2("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3); + clks[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_divider2("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3); + + clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3); + clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); + clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider2("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3); + clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6); + clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6); + clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); + clks[IMX7D_NAND_USDHC_BUS_ROOT_DIV] = imx_clk_divider2("nand_usdhc_post_div", "nand_usdhc_pre_div", base + 0x8980, 0, 6); + clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_post_div", "ahb_pre_div", base + 0x9000, 0, 6); + clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); + clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); + clks[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_divider2("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6); + clks[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_divider2("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 
0xa180, 0, 6); + clks[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_divider2("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6); + clks[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_divider2("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6); + clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6); + clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6); + clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6); + clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6); + clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6); + clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6); + clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6); + clks[IMX7D_SPDIF_ROOT_DIV] = imx_clk_divider2("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6); + clks[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_divider2("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6); + clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6); + clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6); + clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6); + clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider2("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6); + clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6); + clks[IMX7D_NAND_ROOT_DIV] = imx_clk_divider2("nand_post_div", "nand_pre_div", base + 0xaa00, 0, 6); + clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6); + clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6); + clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6); + clks[IMX7D_USDHC3_ROOT_DIV] = imx_clk_divider2("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6); + clks[IMX7D_CAN1_ROOT_DIV] = imx_clk_divider2("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6); + clks[IMX7D_CAN2_ROOT_DIV] = imx_clk_divider2("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6); + clks[IMX7D_I2C1_ROOT_DIV] = imx_clk_divider2("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6); + clks[IMX7D_I2C2_ROOT_DIV] = imx_clk_divider2("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6); + clks[IMX7D_I2C3_ROOT_DIV] = imx_clk_divider2("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6); + clks[IMX7D_I2C4_ROOT_DIV] = imx_clk_divider2("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6); + clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider2("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6); + clks[IMX7D_UART2_ROOT_DIV] = imx_clk_divider2("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6); + clks[IMX7D_UART3_ROOT_DIV] = imx_clk_divider2("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6); + clks[IMX7D_UART4_ROOT_DIV] = imx_clk_divider2("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6); + clks[IMX7D_UART5_ROOT_DIV] = imx_clk_divider2("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6); + clks[IMX7D_UART6_ROOT_DIV] =
imx_clk_divider2("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6); + clks[IMX7D_UART7_ROOT_DIV] = imx_clk_divider2("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6); + clks[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_divider2("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6); + clks[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_divider2("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6); + clks[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_divider2("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6); + clks[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_divider2("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6); + clks[IMX7D_PWM1_ROOT_DIV] = imx_clk_divider2("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6); + clks[IMX7D_PWM2_ROOT_DIV] = imx_clk_divider2("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6); + clks[IMX7D_PWM3_ROOT_DIV] = imx_clk_divider2("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6); + clks[IMX7D_PWM4_ROOT_DIV] = imx_clk_divider2("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6); + clks[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_divider2("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6); + clks[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_divider2("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6); + clks[IMX7D_SIM1_ROOT_DIV] = imx_clk_divider2("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6); + clks[IMX7D_SIM2_ROOT_DIV] = imx_clk_divider2("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6); + clks[IMX7D_GPT1_ROOT_DIV] = imx_clk_divider2("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6); + clks[IMX7D_GPT2_ROOT_DIV] = imx_clk_divider2("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6); + clks[IMX7D_GPT3_ROOT_DIV] = imx_clk_divider2("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6); + clks[IMX7D_GPT4_ROOT_DIV] = imx_clk_divider2("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6); + clks[IMX7D_TRACE_ROOT_DIV] = imx_clk_divider2("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6); + clks[IMX7D_WDOG_ROOT_DIV] = imx_clk_divider2("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6); + clks[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_divider2("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6); + clks[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_divider2("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6); + clks[IMX7D_WRCLK_ROOT_DIV] = imx_clk_divider2("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6); + clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6); + clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6); + + clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0); + clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0); + clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate4("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0); + clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); + clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); + clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); + clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0); + clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_post_div", base + 0x4120, 0); + clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_gate4("nand_usdhc_root_clk", "nand_usdhc_post_div", base + 0x4130, 0); + 
clks[IMX7D_AHB_CHANNEL_ROOT_CLK] = imx_clk_gate4("ahb_root_clk", "ahb_post_div", base + 0x4200, 0); + clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); + clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0); + clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0); + clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0); + clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0); + clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0); + clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0); + clks[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0); + clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0); + clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0); + clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0); + clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate4("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0); + clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate4("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0); + clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate4("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0); + clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0); + clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate4("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0); + clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate4("enet1_time_root_clk", "enet1_time_post_div", base + 0x44f0, 0); + clks[IMX7D_ENET2_REF_ROOT_CLK] = imx_clk_gate4("enet2_ref_root_clk", "enet2_ref_post_div", base + 0x4500, 0); + clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate4("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0); + clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate4("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0); + clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0); + clks[IMX7D_NAND_ROOT_CLK] = imx_clk_gate4("nand_root_clk", "nand_post_div", base + 0x4140, 0); + clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0); + clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0); + clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0); + clks[IMX7D_USDHC3_ROOT_CLK] = imx_clk_gate4("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0); + clks[IMX7D_CAN1_ROOT_CLK] = imx_clk_gate4("can1_root_clk", "can1_post_div", base + 0x4740, 0); + clks[IMX7D_CAN2_ROOT_CLK] = imx_clk_gate4("can2_root_clk", "can2_post_div", base + 0x4750, 0); + clks[IMX7D_I2C1_ROOT_CLK] = imx_clk_gate4("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0); + clks[IMX7D_I2C2_ROOT_CLK] = imx_clk_gate4("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0); + clks[IMX7D_I2C3_ROOT_CLK] = imx_clk_gate4("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0); + clks[IMX7D_I2C4_ROOT_CLK] = imx_clk_gate4("i2c4_root_clk", "i2c4_post_div", 
base + 0x48b0, 0); + clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate4("uart1_root_clk", "uart1_post_div", base + 0x4940, 0); + clks[IMX7D_UART2_ROOT_CLK] = imx_clk_gate4("uart2_root_clk", "uart2_post_div", base + 0x4950, 0); + clks[IMX7D_UART3_ROOT_CLK] = imx_clk_gate4("uart3_root_clk", "uart3_post_div", base + 0x4960, 0); + clks[IMX7D_UART4_ROOT_CLK] = imx_clk_gate4("uart4_root_clk", "uart4_post_div", base + 0x4970, 0); + clks[IMX7D_UART5_ROOT_CLK] = imx_clk_gate4("uart5_root_clk", "uart5_post_div", base + 0x4980, 0); + clks[IMX7D_UART6_ROOT_CLK] = imx_clk_gate4("uart6_root_clk", "uart6_post_div", base + 0x4990, 0); + clks[IMX7D_UART7_ROOT_CLK] = imx_clk_gate4("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0); + clks[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_gate4("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0); + clks[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_gate4("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0); + clks[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_gate4("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0); + clks[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_gate4("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0); + clks[IMX7D_PWM1_ROOT_CLK] = imx_clk_gate4("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0); + clks[IMX7D_PWM2_ROOT_CLK] = imx_clk_gate4("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0); + clks[IMX7D_PWM3_ROOT_CLK] = imx_clk_gate4("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0); + clks[IMX7D_PWM4_ROOT_CLK] = imx_clk_gate4("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0); + clks[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_gate4("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0); + clks[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_gate4("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0); + clks[IMX7D_SIM1_ROOT_CLK] = imx_clk_gate4("sim1_root_clk", "sim1_post_div", base + 0x4900, 0); + clks[IMX7D_SIM2_ROOT_CLK] = imx_clk_gate4("sim2_root_clk", "sim2_post_div", base + 0x4910, 0); + clks[IMX7D_GPT1_ROOT_CLK] = imx_clk_gate4("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0); + clks[IMX7D_GPT2_ROOT_CLK] = imx_clk_gate4("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0); + clks[IMX7D_GPT3_ROOT_CLK] = imx_clk_gate4("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0); + clks[IMX7D_GPT4_ROOT_CLK] = imx_clk_gate4("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0); + clks[IMX7D_TRACE_ROOT_CLK] = imx_clk_gate4("trace_root_clk", "trace_post_div", base + 0x4300, 0); + clks[IMX7D_WDOG1_ROOT_CLK] = imx_clk_gate4("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0); + clks[IMX7D_WDOG2_ROOT_CLK] = imx_clk_gate4("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0); + clks[IMX7D_WDOG3_ROOT_CLK] = imx_clk_gate4("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0); + clks[IMX7D_WDOG4_ROOT_CLK] = imx_clk_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); + clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); + clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); + clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); + clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0); clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); @@ -846,28 +854,13 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); - /* TO BE FIXED LATER - * Enable all clock to bring 
up imx7, otherwise system will be halt and block - the other part upstream Because imx7d clock design changed, clock framework - need do a little modify. - Dong Aisheng is working on this. After that, this part need be changed. - */ - for (i = 0; i < IMX7D_CLK_END; i++) - clk_prepare_enable(clks[i]); + for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) + clk_prepare_enable(clks[clks_init_on[i]]); /* use old gpt clk setting, gpt1 root clk must be twice as gpt counter freq */ clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]); - /* - * init enet clock source: - * AXI clock source is 250MHz - * Phy refrence clock is 25MHz - * 1588 time clock source is 100MHz - */ clk_set_parent(clks[IMX7D_ENET_AXI_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_250M_CLK]); - clk_set_parent(clks[IMX7D_ENET_PHY_REF_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_25M_CLK]); - clk_set_parent(clks[IMX7D_ENET1_TIME_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_100M_CLK]); - clk_set_parent(clks[IMX7D_ENET2_TIME_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_100M_CLK]); /* set uart module clock's parent clock source that must be greater than 80MHz */ clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
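Each i.MX7D clock root registered in the hunks above decomposes one CCM root-slice register into the same small chain of basic clocks: a 3-bit parent mux at bit 24 (imx_clk_mux2), a slice enable gate at bit 28 (imx_clk_gate3), a 3-bit pre-divider at bit 16 and a 6-bit post-divider at bit 0 (imx_clk_divider2), followed by a consumer-facing leaf gate in the separate CCGR block at offsets 0x4xxx (imx_clk_gate4). The new "2"/"3"/"4" helper variants, defined in the clk.h hunk further below, carry CLK_OPS_PARENT_ENABLE because these registers can only be safely accessed while the slice's parent clock is running. A minimal sketch of one such slice (UART1), pulled out of the tables above for readability; the wrapper function name is made up, and uart1_sel is the parent-name array defined earlier in clk-imx7d.c (not shown in this diff):

/* Sketch: how one i.MX7D root slice chains together, stage by stage. */
static void imx7d_register_uart1_slice(struct clk **clks, void __iomem *base)
{
	/* root slice register at base + 0xaf80, bits 26..24: parent select */
	clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux2("uart1_src", base + 0xaf80,
						  24, 3, uart1_sel,
						  ARRAY_SIZE(uart1_sel));
	/* same register, bit 28: slice enable */
	clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate3("uart1_cg", "uart1_src",
						  base + 0xaf80, 28);
	/* bits 18..16: pre divider (divide by 1..8) */
	clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider2("uart1_pre_div",
						  "uart1_cg", base + 0xaf80,
						  16, 3);
	/* bits 5..0: post divider (divide by 1..64) */
	clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider2("uart1_post_div",
						  "uart1_pre_div",
						  base + 0xaf80, 0, 6);
	/* leaf gate in the CCGR block, offset 0x4940 in the hunk above */
	clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate4("uart1_root_clk",
						   "uart1_post_div",
						   base + 0x4940, 0);
}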
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 4826b3c9e19e..19f9b622981a 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -29,8 +29,8 @@ * struct clk_pllv3 - IMX PLL clock version 3 * @clk_hw: clock source * @base: base address of PLL registers - * @powerup_set: set POWER bit to power up the PLL - * @powerdown: pll powerdown offset bit + * @power_bit: pll power bit mask + * @powerup_set: set power_bit to power up the PLL * @div_mask: mask of divider bits * @div_shift: shift of divider bits * @@ -40,8 +40,8 @@ struct clk_pllv3 { struct clk_hw hw; void __iomem *base; + u32 power_bit; bool powerup_set; - u32 powerdown; u32 div_mask; u32 div_shift; unsigned long ref_clock; @@ -52,7 +52,7 @@ struct clk_pllv3 { static int clk_pllv3_wait_lock(struct clk_pllv3 *pll) { unsigned long timeout = jiffies + msecs_to_jiffies(10); - u32 val = readl_relaxed(pll->base) & pll->powerdown; + u32 val = readl_relaxed(pll->base) & pll->power_bit; /* No need to wait for lock when pll is not powered up */ if ((pll->powerup_set && !val) || (!pll->powerup_set && val)) @@ -77,9 +77,9 @@ static int clk_pllv3_prepare(struct clk_hw *hw) val = readl_relaxed(pll->base); if (pll->powerup_set) - val |= BM_PLL_POWER; + val |= pll->power_bit; else - val &= ~BM_PLL_POWER; + val &= ~pll->power_bit; writel_relaxed(val, pll->base); return clk_pllv3_wait_lock(pll); @@ -92,9 +92,9 @@ static void clk_pllv3_unprepare(struct clk_hw *hw) val = readl_relaxed(pll->base); if (pll->powerup_set) - val &= ~BM_PLL_POWER; + val &= ~pll->power_bit; else - val |= BM_PLL_POWER; + val |= pll->power_bit; writel_relaxed(val, pll->base); } @@ -218,8 +218,12 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw, u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET); u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET); u32 div = readl_relaxed(pll->base) & pll->div_mask; + u64 temp64 = (u64)parent_rate; - return (parent_rate * div) + ((parent_rate / mfd) * mfn); + temp64 *= mfn; + do_div(temp64, mfd); + + return (parent_rate * div) + (u32)temp64; } static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, @@ -243,7 +247,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, do_div(temp64, parent_rate); mfn = temp64; - return parent_rate * div + parent_rate / mfd * mfn; + return parent_rate * div + parent_rate * mfn / mfd; }
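The two clk_pllv3_av hunks above fix an integer-math precision problem in the fractional part of the AV PLL rate. The old expression (parent_rate / mfd) * mfn divides first, so the remainder of parent_rate / mfd is discarded and the result can come out low by up to nearly mfn Hz; the new code widens to 64 bits, multiplies first, and only then divides (do_div() in the kernel), which also avoids overflowing 32-bit arithmetic when parent_rate * mfn is large. A standalone user-space illustration, with made-up mfn/mfd values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t parent_rate = 24000000, mfn = 16383, mfd = 16384;

	/* old: divide first; the remainder of parent_rate / mfd is lost */
	uint32_t old_frac = (parent_rate / mfd) * mfn;

	/* new: widen, multiply, then divide (do_div() in the kernel) */
	uint64_t temp64 = (uint64_t)parent_rate * mfn;
	uint32_t new_frac = (uint32_t)(temp64 / mfd);

	/* prints: old=23984712 new=23998535, i.e. off by 13823 Hz */
	printf("old=%u new=%u\n", old_frac, new_frac);
	return 0;
}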
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, @@ -312,7 +316,7 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, if (!pll) return ERR_PTR(-ENOMEM); - pll->powerdown = BM_PLL_POWER; + pll->power_bit = BM_PLL_POWER; switch (type) { case IMX_PLLV3_SYS: @@ -328,7 +332,7 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, ops = &clk_pllv3_av_ops; break; case IMX_PLLV3_ENET_IMX7: - pll->powerdown = IMX7_ENET_PLL_POWER; + pll->power_bit = IMX7_ENET_PLL_POWER; pll->ref_clock = 1000000000; ops = &clk_pllv3_enet_ops; break; diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index 3a1f24475ee4..0476353ab423 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -315,12 +315,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7)); - clk[VF610_CLK_UART0] = imx_clk_gate2("uart0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(7)); - clk[VF610_CLK_UART1] = imx_clk_gate2("uart1", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(8)); - clk[VF610_CLK_UART2] = imx_clk_gate2("uart2", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(9)); - clk[VF610_CLK_UART3] = imx_clk_gate2("uart3", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(10)); - clk[VF610_CLK_UART4] = imx_clk_gate2("uart4", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(9)); - clk[VF610_CLK_UART5] = imx_clk_gate2("uart5", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(10)); + clk[VF610_CLK_UART0] = imx_clk_gate2_cgr("uart0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(7), 0x2); + clk[VF610_CLK_UART1] = imx_clk_gate2_cgr("uart1", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(8), 0x2); + clk[VF610_CLK_UART2] = imx_clk_gate2_cgr("uart2", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(9), 0x2); + clk[VF610_CLK_UART3] = imx_clk_gate2_cgr("uart3", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(10), 0x2); + clk[VF610_CLK_UART4] = imx_clk_gate2_cgr("uart4", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(9), 0x2); + clk[VF610_CLK_UART5] = imx_clk_gate2_cgr("uart5", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(10), 0x2); clk[VF610_CLK_I2C0] = imx_clk_gate2("i2c0", "ipg_bus", CCM_CCGR4, CCM_CCGRx_CGn(6)); clk[VF610_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_bus", CCM_CCGR4, CCM_CCGRx_CGn(7)); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 508d0fad84cf..a81c0385ed64 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -51,28 -51,6 @@ struct clk * imx_obtain_fixed_clock( struct clk *imx_clk_gate_exclusive(const char *name, const char *parent, void __iomem *reg, u8 shift, u32 exclusive_mask); -static inline struct clk *imx_clk_gate2(const char *name, const char *parent, - void __iomem *reg, u8 shift) -{ - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, - shift, 0x3, 0, &imx_ccm_lock, NULL); -} - -static inline struct clk *imx_clk_gate2_shared(const char *name, - const char *parent, void __iomem *reg, u8 shift, - unsigned int *share_count) -{ - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, - shift, 0x3, 0, &imx_ccm_lock, share_count); -} - -static inline struct clk *imx_clk_gate2_cgr(const char *name, const char *parent, - void __iomem *reg, u8 shift, u8 cgr_val) -{ - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, - shift, cgr_val, 0, &imx_ccm_lock, NULL); -} - struct clk *imx_clk_pfd(const char *name, const char *parent_name, void __iomem *reg, u8 idx); @@ -97,6 +75,13 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate) return clk_register_fixed_rate(NULL, name, NULL, 0, rate); } +static inline struct clk *imx_clk_fixed_factor(const char *name, + const char *parent, unsigned int mult, unsigned int div) +{ + return clk_register_fixed_factor(NULL, name, parent, + CLK_SET_RATE_PARENT, mult, div); +} + static inline struct clk *imx_clk_divider(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width) { @@ -112,6 +97,14 @@ static inline struct clk *imx_clk_divider_flags(const char *name, reg, shift, width, 0, &imx_ccm_lock); } +static inline struct clk *imx_clk_divider2(const char *name, const char *parent, + void __iomem *reg, u8 shift, u8 width) +{ + return clk_register_divider(NULL, name, parent, + CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, + reg, shift, width, 0, &imx_ccm_lock); +} + static inline struct clk *imx_clk_gate(const char *name, const char *parent, void __iomem *reg, u8 shift) { @@ -126,6 +119,44 @@ static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent, shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock); } +static inline struct clk *imx_clk_gate2(const char *name, const char *parent, + void __iomem *reg, u8 shift) +{ + return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + shift, 0x3, 0, &imx_ccm_lock, NULL); +} + +static inline struct clk *imx_clk_gate2_shared(const char *name, + const char *parent, void __iomem *reg, u8 shift, + unsigned int *share_count) +{ + return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + shift, 0x3, 0, &imx_ccm_lock, share_count); +} + +static inline struct clk *imx_clk_gate2_cgr(const char *name, + const char *parent, void __iomem *reg, u8 shift, u8 cgr_val) +{ + return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + shift, cgr_val, 0, &imx_ccm_lock, NULL); +} + +static inline struct clk *imx_clk_gate3(const char *name, const char *parent, + void __iomem *reg, u8 shift) +{ + return clk_register_gate(NULL, name, parent, + CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, + reg, shift, 0, &imx_ccm_lock); +} + +static inline struct clk *imx_clk_gate4(const char *name, const char *parent, + void __iomem *reg, u8 shift) +{ + return clk_register_gate2(NULL, name, parent, + CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, + reg, shift, 0x3, 0, &imx_ccm_lock, NULL); +} + static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char **parents, int num_parents) { @@ -134,6 +165,14 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, width, 0, &imx_ccm_lock); } +static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, + u8 shift, u8 width, const char **parents, int num_parents) +{ + return clk_register_mux(NULL, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE, + reg, shift, width, 0, &imx_ccm_lock); +} + static inline struct clk *imx_clk_mux_flags(const char *name, void __iomem *reg, u8 shift, u8 width, const char **parents, int num_parents, unsigned long flags) @@ -143,13 +182,6 @@ static inline struct clk *imx_clk_mux_flags(const char *name, &imx_ccm_lock); } -static inline struct clk *imx_clk_fixed_factor(const char *name, - const char *parent, unsigned int mult, unsigned int div) -{ - return clk_register_fixed_factor(NULL, name, parent, - CLK_SET_RATE_PARENT, mult, div); -} - struct clk *imx_clk_cpu(const char *name, const char *parent_name, struct clk *div, struct clk *mux, struct clk *pll, struct clk *step); diff --git a/drivers/clk/meson/Kconfig
b/drivers/clk/meson/Kconfig new file mode 100644 index 000000000000..19480bcc7046 --- /dev/null +++ b/drivers/clk/meson/Kconfig @@ -0,0 +1,19 @@ +config COMMON_CLK_AMLOGIC + bool + depends on OF + depends on ARCH_MESON || COMPILE_TEST + +config COMMON_CLK_MESON8B + bool + depends on COMMON_CLK_AMLOGIC + help + Support for the clock controller on AmLogic S805 devices, aka + meson8b. Say Y if you want peripherals and CPU frequency scaling to + work. + +config COMMON_CLK_GXBB + bool + depends on COMMON_CLK_AMLOGIC + help + Support for the clock controller on AmLogic S905 devices, aka gxbb. + Say Y if you want peripherals and CPU frequency scaling to work. diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile index 6d45531df9ab..197e40175166 100644 --- a/drivers/clk/meson/Makefile +++ b/drivers/clk/meson/Makefile @@ -2,5 +2,6 @@ # Makefile for Meson specific clk # -obj-y += clkc.o clk-pll.o clk-cpu.o -obj-y += meson8b-clkc.o +obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o +obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b-clkc.o +obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c index f7c30ea54ca8..f8b2b7efd016 100644 --- a/drivers/clk/meson/clk-cpu.c +++ b/drivers/clk/meson/clk-cpu.c @@ -51,13 +51,6 @@ #include "clkc.h" -struct meson_clk_cpu { - struct notifier_block clk_nb; - const struct clk_div_table *div_table; - struct clk_hw hw; - void __iomem *base; - u16 reg_off; -}; #define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw) #define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb) @@ -119,6 +112,7 @@ static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw, return parent_rate / div; } +/* FIXME MUX1 & MUX2 should be struct clk_hw objects */ static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu, struct clk_notifier_data *ndata) { @@ -140,6 +134,7 @@ static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu, return 0; } +/* FIXME MUX1 & MUX2 should be struct clk_hw objects */ static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu, struct clk_notifier_data *ndata) { @@ -161,7 +156,7 @@ static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu, * PLL clock is to be changed. We use the xtal input as temporary parent * while the PLL frequency is stabilized. 
*/ -static int meson_clk_cpu_notifier_cb(struct notifier_block *nb, +int meson_clk_cpu_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { struct clk_notifier_data *ndata = data; @@ -176,68 +171,8 @@ static int meson_clk_cpu_notifier_cb(struct notifier_block *nb, return notifier_from_errno(ret); } -static const struct clk_ops meson_clk_cpu_ops = { +const struct clk_ops meson_clk_cpu_ops = { .recalc_rate = meson_clk_cpu_recalc_rate, .round_rate = meson_clk_cpu_round_rate, .set_rate = meson_clk_cpu_set_rate, }; - -struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf, - void __iomem *reg_base, - spinlock_t *lock) -{ - struct clk *clk; - struct clk *pclk; - struct meson_clk_cpu *clk_cpu; - struct clk_init_data init; - int ret; - - clk_cpu = kzalloc(sizeof(*clk_cpu), GFP_KERNEL); - if (!clk_cpu) - return ERR_PTR(-ENOMEM); - - clk_cpu->base = reg_base; - clk_cpu->reg_off = clk_conf->reg_off; - clk_cpu->div_table = clk_conf->conf.div_table; - clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb; - - init.name = clk_conf->clk_name; - init.ops = &meson_clk_cpu_ops; - init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE; - init.flags |= CLK_SET_RATE_PARENT; - init.parent_names = clk_conf->clks_parent; - init.num_parents = 1; - - clk_cpu->hw.init = &init; - - pclk = __clk_lookup(clk_conf->clks_parent[0]); - if (!pclk) { - pr_err("%s: could not lookup parent clock %s\n", - __func__, clk_conf->clks_parent[0]); - ret = -EINVAL; - goto free_clk; - } - - ret = clk_notifier_register(pclk, &clk_cpu->clk_nb); - if (ret) { - pr_err("%s: failed to register clock notifier for %s\n", - __func__, clk_conf->clk_name); - goto free_clk; - } - - clk = clk_register(NULL, &clk_cpu->hw); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto unregister_clk_nb; - } - - return clk; - -unregister_clk_nb: - clk_notifier_unregister(pclk, &clk_cpu->clk_nb); -free_clk: - kfree(clk_cpu); - - return ERR_PTR(ret); -} - diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c new file mode 100644 index 000000000000..03af79005ddb --- /dev/null +++ b/drivers/clk/meson/clk-mpll.c @@ -0,0 +1,94 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 AmLogic, Inc. + * Author: Michael Turquette <mturquette@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * BSD LICENSE + * + * Copyright (c) 2016 AmLogic, Inc. 
+ * Author: Michael Turquette <mturquette@baylibre.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * MultiPhase Locked Loops are outputs from a PLL with additional frequency + * scaling capabilities. MPLL rates are calculated as: + * + * f(N2_integer, SDM_IN) = 2.0G/(N2_integer + SDM_IN/16384) + */ + +#include <linux/clk-provider.h> +#include "clkc.h" + +#define SDM_MAX 16384 + +#define to_meson_clk_mpll(_hw) container_of(_hw, struct meson_clk_mpll, hw) + +static unsigned long mpll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw); + struct parm *p; + unsigned long rate = 0; + unsigned long reg, sdm, n2; + + p = &mpll->sdm; + reg = readl(mpll->base + p->reg_off); + sdm = PARM_GET(p->width, p->shift, reg); + + p = &mpll->n2; + reg = readl(mpll->base + p->reg_off); + n2 = PARM_GET(p->width, p->shift, reg); + + rate = (parent_rate * SDM_MAX) / ((SDM_MAX * n2) + sdm); + + return rate; +}; + +const struct clk_ops meson_clk_mpll_ro_ops = { + .recalc_rate = mpll_recalc_rate, +};
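The comment in clk-mpll.c gives the MPLL rate relation only symbolically; here is a small worked instance mirroring the integer expression in mpll_recalc_rate() above. The n2/sdm register values are made up for illustration, 16384 is the SDM_MAX defined in that file, and parent_rate is the fixed 2 GHz source the comment names:

/*
 * Worked example (hypothetical divider settings):
 *   f(N2, SDM) = 2.0 GHz / (N2 + SDM/16384)
 * 64-bit math is required, since parent_rate * 16384 exceeds 32 bits.
 */
static unsigned long long mpll_rate_example(void)
{
	unsigned long long parent_rate = 2000000000ULL;	/* 2.0 GHz */
	unsigned long long n2 = 5, sdm = 8192;		/* N2 + SDM/16384 = 5.5 */

	/* 2.0 GHz / 5.5 = 363636363 Hz, truncated toward zero */
	return (parent_rate * 16384) / ((16384 * n2) + sdm);
}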
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 664edf0708ea..4adc1e89212c --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -44,13 +44,6 @@ #define MESON_PLL_RESET BIT(29) #define MESON_PLL_LOCK BIT(31) -struct meson_clk_pll { - struct clk_hw hw; - void __iomem *base; - struct pll_conf *conf; - unsigned int rate_count; - spinlock_t *lock; -}; #define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw) static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, @@ -60,22 +53,36 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, struct parm *p; unsigned long parent_rate_mhz = parent_rate / 1000000; unsigned long rate_mhz; - u16 n, m, od; + u16 n, m, frac = 0, od, od2 = 0; u32 reg; - p = &pll->conf->n; + p = &pll->n; reg = readl(pll->base + p->reg_off); n = PARM_GET(p->width, p->shift, reg); - p = &pll->conf->m; + p = &pll->m; reg = readl(pll->base + p->reg_off); m = PARM_GET(p->width, p->shift, reg); - p = &pll->conf->od; + p = &pll->od; reg = readl(pll->base + p->reg_off); od = PARM_GET(p->width, p->shift, reg); - rate_mhz = (parent_rate_mhz * m / n) >> od; + p = &pll->od2; + if (p->width) { + reg = readl(pll->base + p->reg_off); + od2 = PARM_GET(p->width, p->shift, reg); + } + + p = &pll->frac; + if (p->width) { + reg = readl(pll->base + p->reg_off); + frac = PARM_GET(p->width, p->shift, reg); + rate_mhz = (parent_rate_mhz * m + \ + (parent_rate_mhz * frac >> 12)) * 2 / n; + rate_mhz = rate_mhz >> od >> od2; + } else + rate_mhz = (parent_rate_mhz * m / n) >> od >> od2; return rate_mhz * 1000000; } @@ -84,7 +91,7 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct meson_clk_pll *pll = to_meson_clk_pll(hw); - const struct pll_rate_table *rate_table = pll->conf->rate_table; + const struct pll_rate_table *rate_table = pll->rate_table; int i; for (i = 0; i < pll->rate_count; i++) { @@ -99,7 +106,7 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll, unsigned long rate) { - const struct pll_rate_table *rate_table = pll->conf->rate_table; + const struct pll_rate_table *rate_table = pll->rate_table; int i; for (i = 0; i < pll->rate_count; i++) { @@ -145,24 +152,38 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, return -EINVAL; /* PLL reset */ - p = &pll->conf->n; + p = &pll->n; reg = readl(pll->base + p->reg_off); writel(reg | MESON_PLL_RESET, pll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, rate_set->n); writel(reg, pll->base + p->reg_off); - p = &pll->conf->m; + p = &pll->m; reg = readl(pll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, rate_set->m); writel(reg, pll->base + p->reg_off); - p = &pll->conf->od; + p = &pll->od; reg = readl(pll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, rate_set->od); writel(reg, pll->base + p->reg_off); - p = &pll->conf->n; + p = &pll->od2; + if (p->width) { + reg = readl(pll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, rate_set->od2); + writel(reg, pll->base + p->reg_off); + } + + p = &pll->frac; + if (p->width) { + reg = readl(pll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, rate_set->frac); + writel(reg, pll->base + p->reg_off); + } + + p = &pll->n; ret = meson_clk_pll_wait_lock(pll, p); if (ret) { pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", @@ -173,55 +194,12 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, return ret; } -static const struct clk_ops meson_clk_pll_ops = { +const struct clk_ops meson_clk_pll_ops = { .recalc_rate = meson_clk_pll_recalc_rate, .round_rate = meson_clk_pll_round_rate, .set_rate = meson_clk_pll_set_rate, }; -static const struct clk_ops meson_clk_pll_ro_ops = { +const struct clk_ops meson_clk_pll_ro_ops = { .recalc_rate = meson_clk_pll_recalc_rate, }; -struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf, - void __iomem *reg_base, - spinlock_t *lock) -{ - struct clk *clk; - struct meson_clk_pll *clk_pll; - struct clk_init_data init; - - clk_pll = kzalloc(sizeof(*clk_pll), GFP_KERNEL); - if (!clk_pll) - return ERR_PTR(-ENOMEM); - - clk_pll->base = reg_base + clk_conf->reg_off; - clk_pll->lock = lock; - clk_pll->conf = clk_conf->conf.pll; - - init.name = clk_conf->clk_name; - init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE; - - init.parent_names = &clk_conf->clks_parent[0]; -
init.num_parents = 1; - init.ops = &meson_clk_pll_ro_ops; - - /* If no rate_table is specified we assume the PLL is read-only */ - if (clk_pll->conf->rate_table) { - int len; - - for (len = 0; clk_pll->conf->rate_table[len].rate != 0; ) - len++; - - clk_pll->rate_count = len; - init.ops = &meson_clk_pll_ops; - } - - clk_pll->hw.init = &init; - - clk = clk_register(NULL, &clk_pll->hw); - if (IS_ERR(clk)) - kfree(clk_pll); - - return clk; -} diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c deleted file mode 100644 index d920d410b51d..000000000000 --- a/drivers/clk/meson/clkc.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (c) 2015 Endless Mobile, Inc. - * Author: Carlo Caione <carlo@endlessm.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/clk-provider.h> -#include <linux/mfd/syscon.h> -#include <linux/slab.h> - -#include "clkc.h" - -static DEFINE_SPINLOCK(clk_lock); - -static struct clk **clks; -static struct clk_onecell_data clk_data; - -struct clk ** __init meson_clk_init(struct device_node *np, - unsigned long nr_clks) -{ - clks = kcalloc(nr_clks, sizeof(*clks), GFP_KERNEL); - if (!clks) - return ERR_PTR(-ENOMEM); - - clk_data.clks = clks; - clk_data.clk_num = nr_clks; - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); - - return clks; -} - -static void meson_clk_add_lookup(struct clk *clk, unsigned int id) -{ - if (clks && id) - clks[id] = clk; -} - -static struct clk * __init -meson_clk_register_composite(const struct clk_conf *clk_conf, - void __iomem *clk_base) -{ - struct clk *clk; - struct clk_mux *mux = NULL; - struct clk_divider *div = NULL; - struct clk_gate *gate = NULL; - const struct clk_ops *mux_ops = NULL; - const struct composite_conf *composite_conf; - - composite_conf = clk_conf->conf.composite; - - if (clk_conf->num_parents > 1) { - mux = kzalloc(sizeof(*mux), GFP_KERNEL); - if (!mux) - return ERR_PTR(-ENOMEM); - - mux->reg = clk_base + clk_conf->reg_off - + composite_conf->mux_parm.reg_off; - mux->shift = composite_conf->mux_parm.shift; - mux->mask = BIT(composite_conf->mux_parm.width) - 1; - mux->flags = composite_conf->mux_flags; - mux->lock = &clk_lock; - mux->table = composite_conf->mux_table; - mux_ops = (composite_conf->mux_flags & CLK_MUX_READ_ONLY) ? 
- &clk_mux_ro_ops : &clk_mux_ops; - } - - if (MESON_PARM_APPLICABLE(&composite_conf->div_parm)) { - div = kzalloc(sizeof(*div), GFP_KERNEL); - if (!div) { - clk = ERR_PTR(-ENOMEM); - goto error; - } - - div->reg = clk_base + clk_conf->reg_off - + composite_conf->div_parm.reg_off; - div->shift = composite_conf->div_parm.shift; - div->width = composite_conf->div_parm.width; - div->lock = &clk_lock; - div->flags = composite_conf->div_flags; - div->table = composite_conf->div_table; - } - - if (MESON_PARM_APPLICABLE(&composite_conf->gate_parm)) { - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) { - clk = ERR_PTR(-ENOMEM); - goto error; - } - - gate->reg = clk_base + clk_conf->reg_off - + composite_conf->div_parm.reg_off; - gate->bit_idx = composite_conf->gate_parm.shift; - gate->flags = composite_conf->gate_flags; - gate->lock = &clk_lock; - } - - clk = clk_register_composite(NULL, clk_conf->clk_name, - clk_conf->clks_parent, - clk_conf->num_parents, - mux ? &mux->hw : NULL, mux_ops, - div ? &div->hw : NULL, &clk_divider_ops, - gate ? &gate->hw : NULL, &clk_gate_ops, - clk_conf->flags); - if (IS_ERR(clk)) - goto error; - - return clk; - -error: - kfree(gate); - kfree(div); - kfree(mux); - - return clk; -} - -static struct clk * __init -meson_clk_register_fixed_factor(const struct clk_conf *clk_conf, - void __iomem *clk_base) -{ - struct clk *clk; - const struct fixed_fact_conf *fixed_fact_conf; - const struct parm *p; - unsigned int mult, div; - u32 reg; - - fixed_fact_conf = &clk_conf->conf.fixed_fact; - - mult = clk_conf->conf.fixed_fact.mult; - div = clk_conf->conf.fixed_fact.div; - - if (!mult) { - mult = 1; - p = &fixed_fact_conf->mult_parm; - if (MESON_PARM_APPLICABLE(p)) { - reg = readl(clk_base + clk_conf->reg_off + p->reg_off); - mult = PARM_GET(p->width, p->shift, reg); - } - } - - if (!div) { - div = 1; - p = &fixed_fact_conf->div_parm; - if (MESON_PARM_APPLICABLE(p)) { - reg = readl(clk_base + clk_conf->reg_off + p->reg_off); - mult = PARM_GET(p->width, p->shift, reg); - } - } - - clk = clk_register_fixed_factor(NULL, - clk_conf->clk_name, - clk_conf->clks_parent[0], - clk_conf->flags, - mult, div); - - return clk; -} - -static struct clk * __init -meson_clk_register_fixed_rate(const struct clk_conf *clk_conf, - void __iomem *clk_base) -{ - struct clk *clk; - const struct fixed_rate_conf *fixed_rate_conf; - const struct parm *r; - unsigned long rate; - u32 reg; - - fixed_rate_conf = &clk_conf->conf.fixed_rate; - rate = fixed_rate_conf->rate; - - if (!rate) { - r = &fixed_rate_conf->rate_parm; - reg = readl(clk_base + clk_conf->reg_off + r->reg_off); - rate = PARM_GET(r->width, r->shift, reg); - } - - rate *= 1000000; - - clk = clk_register_fixed_rate(NULL, - clk_conf->clk_name, - clk_conf->num_parents - ? 
clk_conf->clks_parent[0] : NULL, - clk_conf->flags, rate); - - return clk; -} - -void __init meson_clk_register_clks(const struct clk_conf *clk_confs, - unsigned int nr_confs, - void __iomem *clk_base) -{ - unsigned int i; - struct clk *clk = NULL; - - for (i = 0; i < nr_confs; i++) { - const struct clk_conf *clk_conf = &clk_confs[i]; - - switch (clk_conf->clk_type) { - case CLK_FIXED_RATE: - clk = meson_clk_register_fixed_rate(clk_conf, - clk_base); - break; - case CLK_FIXED_FACTOR: - clk = meson_clk_register_fixed_factor(clk_conf, - clk_base); - break; - case CLK_COMPOSITE: - clk = meson_clk_register_composite(clk_conf, - clk_base); - break; - case CLK_CPU: - clk = meson_clk_register_cpu(clk_conf, clk_base, - &clk_lock); - break; - case CLK_PLL: - clk = meson_clk_register_pll(clk_conf, clk_base, - &clk_lock); - break; - default: - clk = NULL; - } - - if (!clk) { - pr_err("%s: unknown clock type %d\n", __func__, - clk_conf->clk_type); - continue; - } - - if (IS_ERR(clk)) { - pr_warn("%s: Unable to create %s clock\n", __func__, - clk_conf->clk_name); - continue; - } - - meson_clk_add_lookup(clk, clk_conf->clk_id); - } -} diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index 609ae92cc13f..53326c32e853 100644 --- a/drivers/clk/meson/clkc.h +++ b/drivers/clk/meson/clkc.h @@ -34,19 +34,16 @@ struct parm { u8 shift; u8 width; }; -#define PARM(_r, _s, _w) \ - { \ - .reg_off = (_r), \ - .shift = (_s), \ - .width = (_w), \ - } \ struct pll_rate_table { unsigned long rate; u16 m; u16 n; u16 od; + u16 od2; + u16 frac; }; + #define PLL_RATE(_r, _m, _n, _od) \ { \ .rate = (_r), \ @@ -55,133 +52,69 @@ struct pll_rate_table { .od = (_od), \ } \ -struct pll_conf { - const struct pll_rate_table *rate_table; - struct parm m; - struct parm n; - struct parm od; -}; +#define PLL_FRAC_RATE(_r, _m, _n, _od, _od2, _frac) \ + { \ + .rate = (_r), \ + .m = (_m), \ + .n = (_n), \ + .od = (_od), \ + .od2 = (_od2), \ + .frac = (_frac), \ + } \ -struct fixed_fact_conf { - unsigned int div; - unsigned int mult; - struct parm div_parm; - struct parm mult_parm; +struct meson_clk_pll { + struct clk_hw hw; + void __iomem *base; + struct parm m; + struct parm n; + struct parm frac; + struct parm od; + struct parm od2; + const struct pll_rate_table *rate_table; + unsigned int rate_count; + spinlock_t *lock; }; -struct fixed_rate_conf { - unsigned long rate; - struct parm rate_parm; -}; +#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw) -struct composite_conf { - struct parm mux_parm; - struct parm div_parm; - struct parm gate_parm; - struct clk_div_table *div_table; - u32 *mux_table; - u8 mux_flags; - u8 div_flags; - u8 gate_flags; +struct meson_clk_cpu { + struct clk_hw hw; + void __iomem *base; + u16 reg_off; + struct notifier_block clk_nb; + const struct clk_div_table *div_table; }; -#define PNAME(x) static const char *x[] +int meson_clk_cpu_notifier_cb(struct notifier_block *nb, unsigned long event, + void *data); -enum clk_type { - CLK_FIXED_FACTOR, - CLK_FIXED_RATE, - CLK_COMPOSITE, - CLK_CPU, - CLK_PLL, +struct meson_clk_mpll { + struct clk_hw hw; + void __iomem *base; + struct parm sdm; + struct parm n2; + /* FIXME ssen gate control? 
*/ + spinlock_t *lock; }; -struct clk_conf { - u16 reg_off; - enum clk_type clk_type; - unsigned int clk_id; - const char *clk_name; - const char **clks_parent; - int num_parents; - unsigned long flags; - union { - struct fixed_fact_conf fixed_fact; - struct fixed_rate_conf fixed_rate; - const struct composite_conf *composite; - struct pll_conf *pll; - const struct clk_div_table *div_table; - } conf; +#define MESON_GATE(_name, _reg, _bit) \ +struct clk_gate gxbb_##_name = { \ + .reg = (void __iomem *) _reg, \ + .bit_idx = (_bit), \ + .lock = &clk_lock, \ + .hw.init = &(struct clk_init_data) { \ + .name = #_name, \ + .ops = &clk_gate_ops, \ + .parent_names = (const char *[]){ "clk81" }, \ + .num_parents = 1, \ + .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \ + }, \ }; -#define FIXED_RATE_P(_ro, _ci, _cn, _f, _c) \ - { \ - .reg_off = (_ro), \ - .clk_type = CLK_FIXED_RATE, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .flags = (_f), \ - .conf.fixed_rate.rate_parm = _c, \ - } \ - -#define FIXED_RATE(_ci, _cn, _f, _r) \ - { \ - .clk_type = CLK_FIXED_RATE, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .flags = (_f), \ - .conf.fixed_rate.rate = (_r), \ - } \ - -#define PLL(_ro, _ci, _cn, _cp, _f, _c) \ - { \ - .reg_off = (_ro), \ - .clk_type = CLK_PLL, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .clks_parent = (_cp), \ - .num_parents = ARRAY_SIZE(_cp), \ - .flags = (_f), \ - .conf.pll = (_c), \ - } \ - -#define FIXED_FACTOR_DIV(_ci, _cn, _cp, _f, _d) \ - { \ - .clk_type = CLK_FIXED_FACTOR, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .clks_parent = (_cp), \ - .num_parents = ARRAY_SIZE(_cp), \ - .conf.fixed_fact.div = (_d), \ - } \ - -#define CPU(_ro, _ci, _cn, _cp, _dt) \ - { \ - .reg_off = (_ro), \ - .clk_type = CLK_CPU, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .clks_parent = (_cp), \ - .num_parents = ARRAY_SIZE(_cp), \ - .conf.div_table = (_dt), \ - } \ - -#define COMPOSITE(_ro, _ci, _cn, _cp, _f, _c) \ - { \ - .reg_off = (_ro), \ - .clk_type = CLK_COMPOSITE, \ - .clk_id = (_ci), \ - .clk_name = (_cn), \ - .clks_parent = (_cp), \ - .num_parents = ARRAY_SIZE(_cp), \ - .flags = (_f), \ - .conf.composite = (_c), \ - } \ - -struct clk **meson_clk_init(struct device_node *np, unsigned long nr_clks); -void meson_clk_register_clks(const struct clk_conf *clk_confs, - unsigned int nr_confs, void __iomem *clk_base); -struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf, - void __iomem *reg_base, spinlock_t *lock); -struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf, - void __iomem *reg_base, spinlock_t *lock); +/* clk_ops */ +extern const struct clk_ops meson_clk_pll_ro_ops; +extern const struct clk_ops meson_clk_pll_ops; +extern const struct clk_ops meson_clk_cpu_ops; +extern const struct clk_ops meson_clk_mpll_ro_ops; #endif /* __CLKC_H */ diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c new file mode 100644 index 000000000000..a4c6684b3019 --- /dev/null +++ b/drivers/clk/meson/gxbb.c @@ -0,0 +1,944 @@ +/* + * AmLogic S905 / GXBB Clock Controller Driver + * + * Copyright (c) 2016 AmLogic, Inc. + * Michael Turquette <mturquette@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
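For orientation before the driver body: the MESON_GATE() helper defined in clkc.h above packs a complete clk_gate behind one line, so "static MESON_GATE(eth, HHI_GCLK_MPEG1, 3)" later in this file should expand to roughly the following (a sketch of the macro expansion; note that .reg deliberately holds a bare register offset until probe rebases it onto the ioremapped controller):

	static struct clk_gate gxbb_eth = {
		.reg = (void __iomem *) HHI_GCLK_MPEG1,	/* offset 0x144; base added in probe */
		.bit_idx = 3,
		.lock = &clk_lock,
		.hw.init = &(struct clk_init_data) {
			.name = "eth",
			.ops = &clk_gate_ops,
			.parent_names = (const char *[]){ "clk81" },
			.num_parents = 1,
			.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
		},
	};

And a note on reading the sys_pll and gp0_pll rate tables just below: assuming the usual Amlogic PLL relationship rate = (xtal * m / n) >> od with the 24 MHz xtal parent, the rows check out, e.g. PLL_RATE(960000000, 80, 1, 1) gives 24 MHz * 80 / 1 = 1920 MHz, halved by od = 1 to 960 MHz, and PLL_RATE(96000000, 32, 1, 3) in the gp0 table gives 768 MHz / 2^3 = 96 MHz.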
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/init.h> + +#include "clkc.h" +#include "gxbb.h" + +static DEFINE_SPINLOCK(clk_lock); + +static const struct pll_rate_table sys_pll_rate_table[] = { + PLL_RATE(24000000, 56, 1, 2), + PLL_RATE(48000000, 64, 1, 2), + PLL_RATE(72000000, 72, 1, 2), + PLL_RATE(96000000, 64, 1, 2), + PLL_RATE(120000000, 80, 1, 2), + PLL_RATE(144000000, 96, 1, 2), + PLL_RATE(168000000, 56, 1, 1), + PLL_RATE(192000000, 64, 1, 1), + PLL_RATE(216000000, 72, 1, 1), + PLL_RATE(240000000, 80, 1, 1), + PLL_RATE(264000000, 88, 1, 1), + PLL_RATE(288000000, 96, 1, 1), + PLL_RATE(312000000, 52, 1, 2), + PLL_RATE(336000000, 56, 1, 2), + PLL_RATE(360000000, 60, 1, 2), + PLL_RATE(384000000, 64, 1, 2), + PLL_RATE(408000000, 68, 1, 2), + PLL_RATE(432000000, 72, 1, 2), + PLL_RATE(456000000, 76, 1, 2), + PLL_RATE(480000000, 80, 1, 2), + PLL_RATE(504000000, 84, 1, 2), + PLL_RATE(528000000, 88, 1, 2), + PLL_RATE(552000000, 92, 1, 2), + PLL_RATE(576000000, 96, 1, 2), + PLL_RATE(600000000, 50, 1, 1), + PLL_RATE(624000000, 52, 1, 1), + PLL_RATE(648000000, 54, 1, 1), + PLL_RATE(672000000, 56, 1, 1), + PLL_RATE(696000000, 58, 1, 1), + PLL_RATE(720000000, 60, 1, 1), + PLL_RATE(744000000, 62, 1, 1), + PLL_RATE(768000000, 64, 1, 1), + PLL_RATE(792000000, 66, 1, 1), + PLL_RATE(816000000, 68, 1, 1), + PLL_RATE(840000000, 70, 1, 1), + PLL_RATE(864000000, 72, 1, 1), + PLL_RATE(888000000, 74, 1, 1), + PLL_RATE(912000000, 76, 1, 1), + PLL_RATE(936000000, 78, 1, 1), + PLL_RATE(960000000, 80, 1, 1), + PLL_RATE(984000000, 82, 1, 1), + PLL_RATE(1008000000, 84, 1, 1), + PLL_RATE(1032000000, 86, 1, 1), + PLL_RATE(1056000000, 88, 1, 1), + PLL_RATE(1080000000, 90, 1, 1), + PLL_RATE(1104000000, 92, 1, 1), + PLL_RATE(1128000000, 94, 1, 1), + PLL_RATE(1152000000, 96, 1, 1), + PLL_RATE(1176000000, 98, 1, 1), + PLL_RATE(1200000000, 50, 1, 0), + PLL_RATE(1224000000, 51, 1, 0), + PLL_RATE(1248000000, 52, 1, 0), + PLL_RATE(1272000000, 53, 1, 0), + PLL_RATE(1296000000, 54, 1, 0), + PLL_RATE(1320000000, 55, 1, 0), + PLL_RATE(1344000000, 56, 1, 0), + PLL_RATE(1368000000, 57, 1, 0), + PLL_RATE(1392000000, 58, 1, 0), + PLL_RATE(1416000000, 59, 1, 0), + PLL_RATE(1440000000, 60, 1, 0), + PLL_RATE(1464000000, 61, 1, 0), + PLL_RATE(1488000000, 62, 1, 0), + PLL_RATE(1512000000, 63, 1, 0), + PLL_RATE(1536000000, 64, 1, 0), + PLL_RATE(1560000000, 65, 1, 0), + PLL_RATE(1584000000, 66, 1, 0), + PLL_RATE(1608000000, 67, 1, 0), + PLL_RATE(1632000000, 68, 1, 0), + PLL_RATE(1656000000, 68, 1, 0), + PLL_RATE(1680000000, 68, 1, 0), + PLL_RATE(1704000000, 68, 1, 0), + PLL_RATE(1728000000, 69, 1, 0), + PLL_RATE(1752000000, 69, 1, 0), + PLL_RATE(1776000000, 69, 1, 0), + PLL_RATE(1800000000, 69, 1, 0), + PLL_RATE(1824000000, 70, 1, 0), + PLL_RATE(1848000000, 70, 1, 0), + PLL_RATE(1872000000, 70, 1, 0), + PLL_RATE(1896000000, 70, 1, 0), + PLL_RATE(1920000000, 71, 1, 0), + PLL_RATE(1944000000, 71, 1, 0), + PLL_RATE(1968000000, 71, 1, 0), + PLL_RATE(1992000000, 71, 1, 0), + PLL_RATE(2016000000, 72, 1, 0), + PLL_RATE(2040000000, 72, 1, 0), + 
PLL_RATE(2064000000, 72, 1, 0), + PLL_RATE(2088000000, 72, 1, 0), + PLL_RATE(2112000000, 73, 1, 0), + { /* sentinel */ }, +}; + +static const struct pll_rate_table gp0_pll_rate_table[] = { + PLL_RATE(96000000, 32, 1, 3), + PLL_RATE(99000000, 33, 1, 3), + PLL_RATE(102000000, 34, 1, 3), + PLL_RATE(105000000, 35, 1, 3), + PLL_RATE(108000000, 36, 1, 3), + PLL_RATE(111000000, 37, 1, 3), + PLL_RATE(114000000, 38, 1, 3), + PLL_RATE(117000000, 39, 1, 3), + PLL_RATE(120000000, 40, 1, 3), + PLL_RATE(123000000, 41, 1, 3), + PLL_RATE(126000000, 42, 1, 3), + PLL_RATE(129000000, 43, 1, 3), + PLL_RATE(132000000, 44, 1, 3), + PLL_RATE(135000000, 45, 1, 3), + PLL_RATE(138000000, 46, 1, 3), + PLL_RATE(141000000, 47, 1, 3), + PLL_RATE(144000000, 48, 1, 3), + PLL_RATE(147000000, 49, 1, 3), + PLL_RATE(150000000, 50, 1, 3), + PLL_RATE(153000000, 51, 1, 3), + PLL_RATE(156000000, 52, 1, 3), + PLL_RATE(159000000, 53, 1, 3), + PLL_RATE(162000000, 54, 1, 3), + PLL_RATE(165000000, 55, 1, 3), + PLL_RATE(168000000, 56, 1, 3), + PLL_RATE(171000000, 57, 1, 3), + PLL_RATE(174000000, 58, 1, 3), + PLL_RATE(177000000, 59, 1, 3), + PLL_RATE(180000000, 60, 1, 3), + PLL_RATE(183000000, 61, 1, 3), + PLL_RATE(186000000, 62, 1, 3), + PLL_RATE(192000000, 32, 1, 2), + PLL_RATE(198000000, 33, 1, 2), + PLL_RATE(204000000, 34, 1, 2), + PLL_RATE(210000000, 35, 1, 2), + PLL_RATE(216000000, 36, 1, 2), + PLL_RATE(222000000, 37, 1, 2), + PLL_RATE(228000000, 38, 1, 2), + PLL_RATE(234000000, 39, 1, 2), + PLL_RATE(240000000, 40, 1, 2), + PLL_RATE(246000000, 41, 1, 2), + PLL_RATE(252000000, 42, 1, 2), + PLL_RATE(258000000, 43, 1, 2), + PLL_RATE(264000000, 44, 1, 2), + PLL_RATE(270000000, 45, 1, 2), + PLL_RATE(276000000, 46, 1, 2), + PLL_RATE(282000000, 47, 1, 2), + PLL_RATE(288000000, 48, 1, 2), + PLL_RATE(294000000, 49, 1, 2), + PLL_RATE(300000000, 50, 1, 2), + PLL_RATE(306000000, 51, 1, 2), + PLL_RATE(312000000, 52, 1, 2), + PLL_RATE(318000000, 53, 1, 2), + PLL_RATE(324000000, 54, 1, 2), + PLL_RATE(330000000, 55, 1, 2), + PLL_RATE(336000000, 56, 1, 2), + PLL_RATE(342000000, 57, 1, 2), + PLL_RATE(348000000, 58, 1, 2), + PLL_RATE(354000000, 59, 1, 2), + PLL_RATE(360000000, 60, 1, 2), + PLL_RATE(366000000, 61, 1, 2), + PLL_RATE(372000000, 62, 1, 2), + PLL_RATE(384000000, 32, 1, 1), + PLL_RATE(396000000, 33, 1, 1), + PLL_RATE(408000000, 34, 1, 1), + PLL_RATE(420000000, 35, 1, 1), + PLL_RATE(432000000, 36, 1, 1), + PLL_RATE(444000000, 37, 1, 1), + PLL_RATE(456000000, 38, 1, 1), + PLL_RATE(468000000, 39, 1, 1), + PLL_RATE(480000000, 40, 1, 1), + PLL_RATE(492000000, 41, 1, 1), + PLL_RATE(504000000, 42, 1, 1), + PLL_RATE(516000000, 43, 1, 1), + PLL_RATE(528000000, 44, 1, 1), + PLL_RATE(540000000, 45, 1, 1), + PLL_RATE(552000000, 46, 1, 1), + PLL_RATE(564000000, 47, 1, 1), + PLL_RATE(576000000, 48, 1, 1), + PLL_RATE(588000000, 49, 1, 1), + PLL_RATE(600000000, 50, 1, 1), + PLL_RATE(612000000, 51, 1, 1), + PLL_RATE(624000000, 52, 1, 1), + PLL_RATE(636000000, 53, 1, 1), + PLL_RATE(648000000, 54, 1, 1), + PLL_RATE(660000000, 55, 1, 1), + PLL_RATE(672000000, 56, 1, 1), + PLL_RATE(684000000, 57, 1, 1), + PLL_RATE(696000000, 58, 1, 1), + PLL_RATE(708000000, 59, 1, 1), + PLL_RATE(720000000, 60, 1, 1), + PLL_RATE(732000000, 61, 1, 1), + PLL_RATE(744000000, 62, 1, 1), + PLL_RATE(768000000, 32, 1, 0), + PLL_RATE(792000000, 33, 1, 0), + PLL_RATE(816000000, 34, 1, 0), + PLL_RATE(840000000, 35, 1, 0), + PLL_RATE(864000000, 36, 1, 0), + PLL_RATE(888000000, 37, 1, 0), + PLL_RATE(912000000, 38, 1, 0), + PLL_RATE(936000000, 39, 1, 0), + PLL_RATE(960000000, 40, 1, 0), + 
PLL_RATE(984000000, 41, 1, 0), + PLL_RATE(1008000000, 42, 1, 0), + PLL_RATE(1032000000, 43, 1, 0), + PLL_RATE(1056000000, 44, 1, 0), + PLL_RATE(1080000000, 45, 1, 0), + PLL_RATE(1104000000, 46, 1, 0), + PLL_RATE(1128000000, 47, 1, 0), + PLL_RATE(1152000000, 48, 1, 0), + PLL_RATE(1176000000, 49, 1, 0), + PLL_RATE(1200000000, 50, 1, 0), + PLL_RATE(1224000000, 51, 1, 0), + PLL_RATE(1248000000, 52, 1, 0), + PLL_RATE(1272000000, 53, 1, 0), + PLL_RATE(1296000000, 54, 1, 0), + PLL_RATE(1320000000, 55, 1, 0), + PLL_RATE(1344000000, 56, 1, 0), + PLL_RATE(1368000000, 57, 1, 0), + PLL_RATE(1392000000, 58, 1, 0), + PLL_RATE(1416000000, 59, 1, 0), + PLL_RATE(1440000000, 60, 1, 0), + PLL_RATE(1464000000, 61, 1, 0), + PLL_RATE(1488000000, 62, 1, 0), + { /* sentinel */ }, +}; + +static const struct clk_div_table cpu_div_table[] = { + { .val = 1, .div = 1 }, + { .val = 2, .div = 2 }, + { .val = 3, .div = 3 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, + { .val = 4, .div = 8 }, + { .val = 5, .div = 10 }, + { .val = 6, .div = 12 }, + { .val = 7, .div = 14 }, + { .val = 8, .div = 16 }, + { /* sentinel */ }, +}; + +static struct meson_clk_pll gxbb_fixed_pll = { + .m = { + .reg_off = HHI_MPLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_MPLL_CNTL, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = HHI_MPLL_CNTL, + .shift = 16, + .width = 2, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "fixed_pll", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct meson_clk_pll gxbb_hdmi_pll = { + .m = { + .reg_off = HHI_HDMI_PLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_HDMI_PLL_CNTL, + .shift = 9, + .width = 5, + }, + .frac = { + .reg_off = HHI_HDMI_PLL_CNTL2, + .shift = 0, + .width = 12, + }, + .od = { + .reg_off = HHI_HDMI_PLL_CNTL2, + .shift = 16, + .width = 2, + }, + .od2 = { + .reg_off = HHI_HDMI_PLL_CNTL2, + .shift = 22, + .width = 2, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct meson_clk_pll gxbb_sys_pll = { + .m = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 10, + .width = 2, + }, + .rate_table = sys_pll_rate_table, + .rate_count = ARRAY_SIZE(sys_pll_rate_table), + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct meson_clk_pll gxbb_gp0_pll = { + .m = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 16, + .width = 2, + }, + .rate_table = gp0_pll_rate_table, + .rate_count = ARRAY_SIZE(gp0_pll_rate_table), + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "gp0_pll", + .ops = &meson_clk_pll_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_fixed_factor gxbb_fclk_div2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div2", + .ops = 
&clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor gxbb_fclk_div3 = { + .mult = 1, + .div = 3, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div3", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor gxbb_fclk_div4 = { + .mult = 1, + .div = 4, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div4", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor gxbb_fclk_div5 = { + .mult = 1, + .div = 5, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div5", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor gxbb_fclk_div7 = { + .mult = 1, + .div = 7, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div7", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct meson_clk_mpll gxbb_mpll0 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 0, + .width = 14, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 16, + .width = 9, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll0", + .ops = &meson_clk_mpll_ro_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct meson_clk_mpll gxbb_mpll1 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 0, + .width = 14, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL8, + .shift = 16, + .width = 9, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll1", + .ops = &meson_clk_mpll_ro_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct meson_clk_mpll gxbb_mpll2 = { + .sdm = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 0, + .width = 14, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL9, + .shift = 16, + .width = 9, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpll2", + .ops = &meson_clk_mpll_ro_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +/* + * FIXME cpu clocks and the legacy composite clocks (e.g. 
clk81) are both PLL + * post-dividers and should be modeled with their respective PLLs via the + * forthcoming coordinated clock rates feature + */ +static struct meson_clk_cpu gxbb_cpu_clk = { + .reg_off = HHI_SYS_CPU_CLK_CNTL1, + .div_table = cpu_div_table, + .clk_nb.notifier_call = meson_clk_cpu_notifier_cb, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk", + .ops = &meson_clk_cpu_ops, + .parent_names = (const char *[]){ "sys_pll" }, + .num_parents = 1, + }, +}; + +static u32 mux_table_clk81[] = { 6, 5, 7 }; + +static struct clk_mux gxbb_mpeg_clk_sel = { + .reg = (void *)HHI_MPEG_CLK_CNTL, + .mask = 0x7, + .shift = 12, + .flags = CLK_MUX_READ_ONLY, + .table = mux_table_clk81, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpeg_clk_sel", + .ops = &clk_mux_ro_ops, + /* + * FIXME bits 14:12 selects from 8 possible parents: + * xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2, + * fclk_div4, fclk_div3, fclk_div5 + */ + .parent_names = (const char *[]){ "fclk_div3", "fclk_div4", + "fclk_div5" }, + .num_parents = 3, + .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED), + }, +}; + +static struct clk_divider gxbb_mpeg_clk_div = { + .reg = (void *)HHI_MPEG_CLK_CNTL, + .shift = 0, + .width = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpeg_clk_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "mpeg_clk_sel" }, + .num_parents = 1, + .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), + }, +}; + +/* the mother of dragons^W gates */ +static struct clk_gate gxbb_clk81 = { + .reg = (void *)HHI_MPEG_CLK_CNTL, + .bit_idx = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "clk81", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "mpeg_clk_div" }, + .num_parents = 1, + .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL), + }, +}; + +/* Everything Else (EE) domain gates */ +static MESON_GATE(ddr, HHI_GCLK_MPEG0, 0); +static MESON_GATE(dos, HHI_GCLK_MPEG0, 1); +static MESON_GATE(isa, HHI_GCLK_MPEG0, 5); +static MESON_GATE(pl301, HHI_GCLK_MPEG0, 6); +static MESON_GATE(periphs, HHI_GCLK_MPEG0, 7); +static MESON_GATE(spicc, HHI_GCLK_MPEG0, 8); +static MESON_GATE(i2c, HHI_GCLK_MPEG0, 9); +static MESON_GATE(sar_adc, HHI_GCLK_MPEG0, 10); +static MESON_GATE(smart_card, HHI_GCLK_MPEG0, 11); +static MESON_GATE(rng0, HHI_GCLK_MPEG0, 12); +static MESON_GATE(uart0, HHI_GCLK_MPEG0, 13); +static MESON_GATE(sdhc, HHI_GCLK_MPEG0, 14); +static MESON_GATE(stream, HHI_GCLK_MPEG0, 15); +static MESON_GATE(async_fifo, HHI_GCLK_MPEG0, 16); +static MESON_GATE(sdio, HHI_GCLK_MPEG0, 17); +static MESON_GATE(abuf, HHI_GCLK_MPEG0, 18); +static MESON_GATE(hiu_iface, HHI_GCLK_MPEG0, 19); +static MESON_GATE(assist_misc, HHI_GCLK_MPEG0, 23); +static MESON_GATE(spi, HHI_GCLK_MPEG0, 30); + +static MESON_GATE(i2s_spdif, HHI_GCLK_MPEG1, 2); +static MESON_GATE(eth, HHI_GCLK_MPEG1, 3); +static MESON_GATE(demux, HHI_GCLK_MPEG1, 4); +static MESON_GATE(aiu_glue, HHI_GCLK_MPEG1, 6); +static MESON_GATE(iec958, HHI_GCLK_MPEG1, 7); +static MESON_GATE(i2s_out, HHI_GCLK_MPEG1, 8); +static MESON_GATE(amclk, HHI_GCLK_MPEG1, 9); +static MESON_GATE(aififo2, HHI_GCLK_MPEG1, 10); +static MESON_GATE(mixer, HHI_GCLK_MPEG1, 11); +static MESON_GATE(mixer_iface, HHI_GCLK_MPEG1, 12); +static MESON_GATE(adc, HHI_GCLK_MPEG1, 13); +static MESON_GATE(blkmv, HHI_GCLK_MPEG1, 14); +static MESON_GATE(aiu, HHI_GCLK_MPEG1, 15); +static MESON_GATE(uart1, HHI_GCLK_MPEG1, 16); +static MESON_GATE(g2d, HHI_GCLK_MPEG1, 20); +static 
MESON_GATE(usb0, HHI_GCLK_MPEG1, 21); +static MESON_GATE(usb1, HHI_GCLK_MPEG1, 22); +static MESON_GATE(reset, HHI_GCLK_MPEG1, 23); +static MESON_GATE(nand, HHI_GCLK_MPEG1, 24); +static MESON_GATE(dos_parser, HHI_GCLK_MPEG1, 25); +static MESON_GATE(usb, HHI_GCLK_MPEG1, 26); +static MESON_GATE(vdin1, HHI_GCLK_MPEG1, 28); +static MESON_GATE(ahb_arb0, HHI_GCLK_MPEG1, 29); +static MESON_GATE(efuse, HHI_GCLK_MPEG1, 30); +static MESON_GATE(boot_rom, HHI_GCLK_MPEG1, 31); + +static MESON_GATE(ahb_data_bus, HHI_GCLK_MPEG2, 1); +static MESON_GATE(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2); +static MESON_GATE(hdmi_intr_sync, HHI_GCLK_MPEG2, 3); +static MESON_GATE(hdmi_pclk, HHI_GCLK_MPEG2, 4); +static MESON_GATE(usb1_ddr_bridge, HHI_GCLK_MPEG2, 8); +static MESON_GATE(usb0_ddr_bridge, HHI_GCLK_MPEG2, 9); +static MESON_GATE(mmc_pclk, HHI_GCLK_MPEG2, 11); +static MESON_GATE(dvin, HHI_GCLK_MPEG2, 12); +static MESON_GATE(uart2, HHI_GCLK_MPEG2, 15); +static MESON_GATE(sana, HHI_GCLK_MPEG2, 22); +static MESON_GATE(vpu_intr, HHI_GCLK_MPEG2, 25); +static MESON_GATE(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26); +static MESON_GATE(clk81_a53, HHI_GCLK_MPEG2, 29); + +static MESON_GATE(vclk2_venci0, HHI_GCLK_OTHER, 1); +static MESON_GATE(vclk2_venci1, HHI_GCLK_OTHER, 2); +static MESON_GATE(vclk2_vencp0, HHI_GCLK_OTHER, 3); +static MESON_GATE(vclk2_vencp1, HHI_GCLK_OTHER, 4); +static MESON_GATE(gclk_venci_int0, HHI_GCLK_OTHER, 8); +static MESON_GATE(gclk_vencp_int, HHI_GCLK_OTHER, 9); +static MESON_GATE(dac_clk, HHI_GCLK_OTHER, 10); +static MESON_GATE(aoclk_gate, HHI_GCLK_OTHER, 14); +static MESON_GATE(iec958_gate, HHI_GCLK_OTHER, 16); +static MESON_GATE(enc480p, HHI_GCLK_OTHER, 20); +static MESON_GATE(rng1, HHI_GCLK_OTHER, 21); +static MESON_GATE(gclk_venci_int1, HHI_GCLK_OTHER, 22); +static MESON_GATE(vclk2_venclmcc, HHI_GCLK_OTHER, 24); +static MESON_GATE(vclk2_vencl, HHI_GCLK_OTHER, 25); +static MESON_GATE(vclk_other, HHI_GCLK_OTHER, 26); +static MESON_GATE(edp, HHI_GCLK_OTHER, 31); + +/* Always On (AO) domain gates */ + +static MESON_GATE(ao_media_cpu, HHI_GCLK_AO, 0); +static MESON_GATE(ao_ahb_sram, HHI_GCLK_AO, 1); +static MESON_GATE(ao_ahb_bus, HHI_GCLK_AO, 2); +static MESON_GATE(ao_iface, HHI_GCLK_AO, 3); +static MESON_GATE(ao_i2c, HHI_GCLK_AO, 4); + +/* Array of all clocks provided by this provider */ + +static struct clk_hw_onecell_data gxbb_hw_onecell_data = { + .hws = { + [CLKID_SYS_PLL] = &gxbb_sys_pll.hw, + [CLKID_CPUCLK] = &gxbb_cpu_clk.hw, + [CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw, + [CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw, + [CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &gxbb_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &gxbb_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw, + [CLKID_GP0_PLL] = &gxbb_gp0_pll.hw, + [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw, + [CLKID_CLK81] = &gxbb_clk81.hw, + [CLKID_MPLL0] = &gxbb_mpll0.hw, + [CLKID_MPLL1] = &gxbb_mpll1.hw, + [CLKID_MPLL2] = &gxbb_mpll2.hw, + [CLKID_DDR] = &gxbb_ddr.hw, + [CLKID_DOS] = &gxbb_dos.hw, + [CLKID_ISA] = &gxbb_isa.hw, + [CLKID_PL301] = &gxbb_pl301.hw, + [CLKID_PERIPHS] = &gxbb_periphs.hw, + [CLKID_SPICC] = &gxbb_spicc.hw, + [CLKID_I2C] = &gxbb_i2c.hw, + [CLKID_SAR_ADC] = &gxbb_sar_adc.hw, + [CLKID_SMART_CARD] = &gxbb_smart_card.hw, + [CLKID_RNG0] = &gxbb_rng0.hw, + [CLKID_UART0] = &gxbb_uart0.hw, + [CLKID_SDHC] = &gxbb_sdhc.hw, + [CLKID_STREAM] = &gxbb_stream.hw, + [CLKID_ASYNC_FIFO] = &gxbb_async_fifo.hw, + [CLKID_SDIO] = &gxbb_sdio.hw, + [CLKID_ABUF] = 
&gxbb_abuf.hw, + [CLKID_HIU_IFACE] = &gxbb_hiu_iface.hw, + [CLKID_ASSIST_MISC] = &gxbb_assist_misc.hw, + [CLKID_SPI] = &gxbb_spi.hw, + [CLKID_I2S_SPDIF] = &gxbb_i2s_spdif.hw, + [CLKID_ETH] = &gxbb_eth.hw, + [CLKID_DEMUX] = &gxbb_demux.hw, + [CLKID_AIU_GLUE] = &gxbb_aiu_glue.hw, + [CLKID_IEC958] = &gxbb_iec958.hw, + [CLKID_I2S_OUT] = &gxbb_i2s_out.hw, + [CLKID_AMCLK] = &gxbb_amclk.hw, + [CLKID_AIFIFO2] = &gxbb_aififo2.hw, + [CLKID_MIXER] = &gxbb_mixer.hw, + [CLKID_MIXER_IFACE] = &gxbb_mixer_iface.hw, + [CLKID_ADC] = &gxbb_adc.hw, + [CLKID_BLKMV] = &gxbb_blkmv.hw, + [CLKID_AIU] = &gxbb_aiu.hw, + [CLKID_UART1] = &gxbb_uart1.hw, + [CLKID_G2D] = &gxbb_g2d.hw, + [CLKID_USB0] = &gxbb_usb0.hw, + [CLKID_USB1] = &gxbb_usb1.hw, + [CLKID_RESET] = &gxbb_reset.hw, + [CLKID_NAND] = &gxbb_nand.hw, + [CLKID_DOS_PARSER] = &gxbb_dos_parser.hw, + [CLKID_USB] = &gxbb_usb.hw, + [CLKID_VDIN1] = &gxbb_vdin1.hw, + [CLKID_AHB_ARB0] = &gxbb_ahb_arb0.hw, + [CLKID_EFUSE] = &gxbb_efuse.hw, + [CLKID_BOOT_ROM] = &gxbb_boot_rom.hw, + [CLKID_AHB_DATA_BUS] = &gxbb_ahb_data_bus.hw, + [CLKID_AHB_CTRL_BUS] = &gxbb_ahb_ctrl_bus.hw, + [CLKID_HDMI_INTR_SYNC] = &gxbb_hdmi_intr_sync.hw, + [CLKID_HDMI_PCLK] = &gxbb_hdmi_pclk.hw, + [CLKID_USB1_DDR_BRIDGE] = &gxbb_usb1_ddr_bridge.hw, + [CLKID_USB0_DDR_BRIDGE] = &gxbb_usb0_ddr_bridge.hw, + [CLKID_MMC_PCLK] = &gxbb_mmc_pclk.hw, + [CLKID_DVIN] = &gxbb_dvin.hw, + [CLKID_UART2] = &gxbb_uart2.hw, + [CLKID_SANA] = &gxbb_sana.hw, + [CLKID_VPU_INTR] = &gxbb_vpu_intr.hw, + [CLKID_SEC_AHB_AHB3_BRIDGE] = &gxbb_sec_ahb_ahb3_bridge.hw, + [CLKID_CLK81_A53] = &gxbb_clk81_a53.hw, + [CLKID_VCLK2_VENCI0] = &gxbb_vclk2_venci0.hw, + [CLKID_VCLK2_VENCI1] = &gxbb_vclk2_venci1.hw, + [CLKID_VCLK2_VENCP0] = &gxbb_vclk2_vencp0.hw, + [CLKID_VCLK2_VENCP1] = &gxbb_vclk2_vencp1.hw, + [CLKID_GCLK_VENCI_INT0] = &gxbb_gclk_venci_int0.hw, + [CLKID_GCLK_VENCI_INT] = &gxbb_gclk_vencp_int.hw, + [CLKID_DAC_CLK] = &gxbb_dac_clk.hw, + [CLKID_AOCLK_GATE] = &gxbb_aoclk_gate.hw, + [CLKID_IEC958_GATE] = &gxbb_iec958_gate.hw, + [CLKID_ENC480P] = &gxbb_enc480p.hw, + [CLKID_RNG1] = &gxbb_rng1.hw, + [CLKID_GCLK_VENCI_INT1] = &gxbb_gclk_venci_int1.hw, + [CLKID_VCLK2_VENCLMCC] = &gxbb_vclk2_venclmcc.hw, + [CLKID_VCLK2_VENCL] = &gxbb_vclk2_vencl.hw, + [CLKID_VCLK_OTHER] = &gxbb_vclk_other.hw, + [CLKID_EDP] = &gxbb_edp.hw, + [CLKID_AO_MEDIA_CPU] = &gxbb_ao_media_cpu.hw, + [CLKID_AO_AHB_SRAM] = &gxbb_ao_ahb_sram.hw, + [CLKID_AO_AHB_BUS] = &gxbb_ao_ahb_bus.hw, + [CLKID_AO_IFACE] = &gxbb_ao_iface.hw, + [CLKID_AO_I2C] = &gxbb_ao_i2c.hw, + }, + .num = NR_CLKS, +}; + +/* Convenience tables to populate base addresses in .probe */ + +static struct meson_clk_pll *const gxbb_clk_plls[] = { + &gxbb_fixed_pll, + &gxbb_hdmi_pll, + &gxbb_sys_pll, + &gxbb_gp0_pll, +}; + +static struct meson_clk_mpll *const gxbb_clk_mplls[] = { + &gxbb_mpll0, + &gxbb_mpll1, + &gxbb_mpll2, +}; + +static struct clk_gate *gxbb_clk_gates[] = { + &gxbb_clk81, + &gxbb_ddr, + &gxbb_dos, + &gxbb_isa, + &gxbb_pl301, + &gxbb_periphs, + &gxbb_spicc, + &gxbb_i2c, + &gxbb_sar_adc, + &gxbb_smart_card, + &gxbb_rng0, + &gxbb_uart0, + &gxbb_sdhc, + &gxbb_stream, + &gxbb_async_fifo, + &gxbb_sdio, + &gxbb_abuf, + &gxbb_hiu_iface, + &gxbb_assist_misc, + &gxbb_spi, + &gxbb_i2s_spdif, + &gxbb_eth, + &gxbb_demux, + &gxbb_aiu_glue, + &gxbb_iec958, + &gxbb_i2s_out, + &gxbb_amclk, + &gxbb_aififo2, + &gxbb_mixer, + &gxbb_mixer_iface, + &gxbb_adc, + &gxbb_blkmv, + &gxbb_aiu, + &gxbb_uart1, + &gxbb_g2d, + &gxbb_usb0, + &gxbb_usb1, + &gxbb_reset, + &gxbb_nand, + &gxbb_dos_parser, + &gxbb_usb, + 
&gxbb_vdin1, + &gxbb_ahb_arb0, + &gxbb_efuse, + &gxbb_boot_rom, + &gxbb_ahb_data_bus, + &gxbb_ahb_ctrl_bus, + &gxbb_hdmi_intr_sync, + &gxbb_hdmi_pclk, + &gxbb_usb1_ddr_bridge, + &gxbb_usb0_ddr_bridge, + &gxbb_mmc_pclk, + &gxbb_dvin, + &gxbb_uart2, + &gxbb_sana, + &gxbb_vpu_intr, + &gxbb_sec_ahb_ahb3_bridge, + &gxbb_clk81_a53, + &gxbb_vclk2_venci0, + &gxbb_vclk2_venci1, + &gxbb_vclk2_vencp0, + &gxbb_vclk2_vencp1, + &gxbb_gclk_venci_int0, + &gxbb_gclk_vencp_int, + &gxbb_dac_clk, + &gxbb_aoclk_gate, + &gxbb_iec958_gate, + &gxbb_enc480p, + &gxbb_rng1, + &gxbb_gclk_venci_int1, + &gxbb_vclk2_venclmcc, + &gxbb_vclk2_vencl, + &gxbb_vclk_other, + &gxbb_edp, + &gxbb_ao_media_cpu, + &gxbb_ao_ahb_sram, + &gxbb_ao_ahb_bus, + &gxbb_ao_iface, + &gxbb_ao_i2c, +}; + +static int gxbb_clkc_probe(struct platform_device *pdev) +{ + void __iomem *clk_base; + int ret, clkid, i; + struct clk_hw *parent_hw; + struct clk *parent_clk; + struct device *dev = &pdev->dev; + + /* Generic clocks and PLLs */ + clk_base = of_iomap(dev->of_node, 0); + if (!clk_base) { + pr_err("%s: Unable to map clk base\n", __func__); + return -ENXIO; + } + + /* Populate base address for PLLs */ + for (i = 0; i < ARRAY_SIZE(gxbb_clk_plls); i++) + gxbb_clk_plls[i]->base = clk_base; + + /* Populate base address for MPLLs */ + for (i = 0; i < ARRAY_SIZE(gxbb_clk_mplls); i++) + gxbb_clk_mplls[i]->base = clk_base; + + /* Populate the base address for CPU clk */ + gxbb_cpu_clk.base = clk_base; + + /* Populate the base address for the MPEG clks */ + gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg; + gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg; + + /* Populate base address for gates */ + for (i = 0; i < ARRAY_SIZE(gxbb_clk_gates); i++) + gxbb_clk_gates[i]->reg = clk_base + + (u64)gxbb_clk_gates[i]->reg; + + /* + * register all clks + */ + for (clkid = 0; clkid < NR_CLKS; clkid++) { + ret = devm_clk_hw_register(dev, gxbb_hw_onecell_data.hws[clkid]); + if (ret) + goto iounmap; + } + + /* + * Register CPU clk notifier + * + * FIXME this is wrong for a lot of reasons. First, the muxes should be + * struct clk_hw objects. Second, we shouldn't program the muxes in + * notifier handlers. The tricky programming sequence will be handled + * by the forthcoming coordinated clock rates mechanism once that + * feature is released. + * + * Furthermore, looking up the parent this way is terrible. At some + * point we will stop allocating a default struct clk when registering + * a new clk_hw, and this hack will no longer work. 
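To make the pointer trick in the probe above explicit: every gate, mux and divider was declared at build time with only a register offset cast into its .reg pointer, and only now, once of_iomap() has produced clk_base, does it become a real MMIO address. Spelled out for a single gate (same arithmetic as the loops above, shown for gxbb_eth, which MESON_GATE() initialised with HHI_GCLK_MPEG1 = 0x144):

	/* before: .reg smuggles the bare offset through the pointer */
	gxbb_eth.reg = (void __iomem *)0x144;		/* from MESON_GATE(eth, HHI_GCLK_MPEG1, 3) */
	/* after: recover the offset and rebase onto the mapped region */
	gxbb_eth.reg = clk_base + (u64)gxbb_eth.reg;	/* == clk_base + 0x144 */

The (u64) cast suits this arm64 target; the meson8b probe further below performs the same fixup with (u32) casts on 32-bit ARM.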
Releasing the ccr + * feature before that time solves the problem :-) + */ + parent_hw = clk_hw_get_parent(&gxbb_cpu_clk.hw); + parent_clk = parent_hw->clk; + ret = clk_notifier_register(parent_clk, &gxbb_cpu_clk.clk_nb); + if (ret) { + pr_err("%s: failed to register clock notifier for cpu_clk\n", + __func__); + goto iounmap; + } + + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + &gxbb_hw_onecell_data); + +iounmap: + iounmap(clk_base); + return ret; +} + +static const struct of_device_id gxbb_clkc_match_table[] = { + { .compatible = "amlogic,gxbb-clkc" }, + { } +}; + +static struct platform_driver gxbb_driver = { + .probe = gxbb_clkc_probe, + .driver = { + .name = "gxbb-clkc", + .of_match_table = gxbb_clkc_match_table, + }, +}; + +static int __init gxbb_clkc_init(void) +{ + return platform_driver_register(&gxbb_driver); +} +device_initcall(gxbb_clkc_init); diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h new file mode 100644 index 000000000000..a2adf3448b59 --- /dev/null +++ b/drivers/clk/meson/gxbb.h @@ -0,0 +1,271 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 AmLogic, Inc. + * Author: Michael Turquette <mturquette@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, Inc. + * Author: Michael Turquette <mturquette@baylibre.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __GXBB_H +#define __GXBB_H + +/* + * Clock controller register offsets + * + * Register offsets from the data sheet are listed in comment blocks below. + * Those offsets must be multiplied by 4 before adding them to the base address + * to get the right value + */ +#define SCR 0x2C /* 0x0b offset in data sheet */ +#define TIMEOUT_VALUE 0x3c /* 0x0f offset in data sheet */ + +#define HHI_GP0_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ +#define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ +#define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ +#define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */ + +#define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */ +#define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */ + +#define HHI_MEM_PD_REG0 0x100 /* 0x40 offset in data sheet */ +#define HHI_MEM_PD_REG1 0x104 /* 0x41 offset in data sheet */ +#define HHI_VPU_MEM_PD_REG1 0x108 /* 0x42 offset in data sheet */ +#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ +#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ + +#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ +#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */ +#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */ +#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */ +#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */ +#define HHI_SYS_OSCIN_CNTL 0x158 /* 0x56 offset in data sheet */ +#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */ +#define HHI_SYS_CPU_RESET_CNTL 0x160 /* 0x58 offset in data sheet */ +#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */ + +#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */ +#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */ +#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */ +#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */ +#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ +#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ +#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */ +#define HHI_AUD_CLK_CNTL3 0x1a4 /* 0x69 offset in data sheet */ +#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */ +#define HHI_VPU_CLK_CNTL 0x1bC /* 0x6f offset in data sheet */ + +#define HHI_HDMI_CLK_CNTL 0x1CC /* 0x73 offset in data sheet */ +#define HHI_VDEC_CLK_CNTL 0x1E0 /* 0x78 offset in data sheet */ +#define HHI_VDEC2_CLK_CNTL 0x1E4 /* 0x79 offset in data sheet */ +#define HHI_VDEC3_CLK_CNTL 0x1E8 /* 0x7a offset in data sheet */ +#define HHI_VDEC4_CLK_CNTL 0x1EC /* 0x7b offset in data sheet */ +#define HHI_HDCP22_CLK_CNTL 0x1F0 /* 0x7c offset in data sheet */ +#define HHI_VAPBCLK_CNTL 0x1F4 /* 0x7d offset in data sheet */ + +#define HHI_VPU_CLKB_CNTL 0x20C /* 0x83 offset in data sheet */ +#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */ +#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */ +#define HHI_GEN_CLK_CNTL 
0x228 /* 0x8a offset in data sheet */ + +#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */ +#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */ +#define HHI_SD_EMMC_CLK_CNTL 0x264 /* 0x99 offset in data sheet */ + +#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ +#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */ +#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */ +#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */ +#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */ +#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */ +#define HHI_MPLL_CNTL7 0x298 /* MP0, 0xa6 offset in data sheet */ +#define HHI_MPLL_CNTL8 0x29C /* MP1, 0xa7 offset in data sheet */ +#define HHI_MPLL_CNTL9 0x2A0 /* MP2, 0xa8 offset in data sheet */ +#define HHI_MPLL_CNTL10 0x2A4 /* MP2, 0xa9 offset in data sheet */ + +#define HHI_MPLL3_CNTL0 0x2E0 /* 0xb8 offset in data sheet */ +#define HHI_MPLL3_CNTL1 0x2E4 /* 0xb9 offset in data sheet */ +#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ + +#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */ +#define HHI_SYS_PLL_CNTL2 0x304 /* 0xc1 offset in data sheet */ +#define HHI_SYS_PLL_CNTL3 0x308 /* 0xc2 offset in data sheet */ +#define HHI_SYS_PLL_CNTL4 0x30c /* 0xc3 offset in data sheet */ +#define HHI_SYS_PLL_CNTL5 0x310 /* 0xc4 offset in data sheet */ +#define HHI_DPLL_TOP_I 0x318 /* 0xc6 offset in data sheet */ +#define HHI_DPLL_TOP2_I 0x31C /* 0xc7 offset in data sheet */ +#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */ +#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */ +#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */ +#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */ +#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */ +#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */ +#define HHI_HDMI_PLL_CNTL_I 0x338 /* 0xce offset in data sheet */ +#define HHI_HDMI_PLL_CNTL7 0x33C /* 0xcf offset in data sheet */ + +#define HHI_HDMI_PHY_CNTL0 0x3A0 /* 0xe8 offset in data sheet */ +#define HHI_HDMI_PHY_CNTL1 0x3A4 /* 0xe9 offset in data sheet */ +#define HHI_HDMI_PHY_CNTL2 0x3A8 /* 0xea offset in data sheet */ +#define HHI_HDMI_PHY_CNTL3 0x3AC /* 0xeb offset in data sheet */ + +#define HHI_VID_LOCK_CLK_CNTL 0x3C8 /* 0xf2 offset in data sheet */ +#define HHI_BT656_CLK_CNTL 0x3D4 /* 0xf5 offset in data sheet */ +#define HHI_SAR_CLK_CNTL 0x3D8 /* 0xf6 offset in data sheet */ + +/* + * CLKID index values + * + * These indices are entirely contrived and do not map onto the hardware. 
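Before the index values, a quick spot check of the offset convention used in the register list above: the data sheet numbers these registers as 32-bit word indices, so the byte offset is the data-sheet value scaled by four — e.g. 0x8a * 4 = 0x228 (HHI_GEN_CLK_CNTL), 0xa0 * 4 = 0x280 (HHI_MPLL_CNTL), 0xc0 * 4 = 0x300 (HHI_SYS_PLL_CNTL). A hypothetical helper (not in the driver) that captures the rule:

	#define HHI_DS_OFF(dword_idx)	((dword_idx) * 4)	/* HHI_DS_OFF(0xa0) == HHI_MPLL_CNTL */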
+ * Migrate them out of this header and into the DT header file when they need + * to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h + */ +#define CLKID_SYS_PLL 0 +/* CLKID_CPUCLK */ +#define CLKID_HDMI_PLL 2 +#define CLKID_FIXED_PLL 3 +#define CLKID_FCLK_DIV2 4 +#define CLKID_FCLK_DIV3 5 +#define CLKID_FCLK_DIV4 6 +#define CLKID_FCLK_DIV5 7 +#define CLKID_FCLK_DIV7 8 +#define CLKID_GP0_PLL 9 +#define CLKID_MPEG_SEL 10 +#define CLKID_MPEG_DIV 11 +/* CLKID_CLK81 */ +#define CLKID_MPLL0 13 +#define CLKID_MPLL1 14 +#define CLKID_MPLL2 15 +#define CLKID_DDR 16 +#define CLKID_DOS 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC 21 +#define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 +#define CLKID_SMART_CARD 24 +#define CLKID_RNG0 25 +#define CLKID_UART0 26 +#define CLKID_SDHC 27 +#define CLKID_STREAM 28 +#define CLKID_ASYNC_FIFO 29 +#define CLKID_SDIO 30 +#define CLKID_ABUF 31 +#define CLKID_HIU_IFACE 32 +#define CLKID_ASSIST_MISC 33 +#define CLKID_SPI 34 +#define CLKID_I2S_SPDIF 35 +#define CLKID_ETH 36 +#define CLKID_DEMUX 37 +#define CLKID_AIU_GLUE 38 +#define CLKID_IEC958 39 +#define CLKID_I2S_OUT 40 +#define CLKID_AMCLK 41 +#define CLKID_AIFIFO2 42 +#define CLKID_MIXER 43 +#define CLKID_MIXER_IFACE 44 +#define CLKID_ADC 45 +#define CLKID_BLKMV 46 +#define CLKID_AIU 47 +#define CLKID_UART1 48 +#define CLKID_G2D 49 +#define CLKID_USB0 50 +#define CLKID_USB1 51 +#define CLKID_RESET 52 +#define CLKID_NAND 53 +#define CLKID_DOS_PARSER 54 +#define CLKID_USB 55 +#define CLKID_VDIN1 56 +#define CLKID_AHB_ARB0 57 +#define CLKID_EFUSE 58 +#define CLKID_BOOT_ROM 59 +#define CLKID_AHB_DATA_BUS 60 +#define CLKID_AHB_CTRL_BUS 61 +#define CLKID_HDMI_INTR_SYNC 62 +#define CLKID_HDMI_PCLK 63 +#define CLKID_USB1_DDR_BRIDGE 64 +#define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_MMC_PCLK 66 +#define CLKID_DVIN 67 +#define CLKID_UART2 68 +#define CLKID_SANA 69 +#define CLKID_VPU_INTR 70 +#define CLKID_SEC_AHB_AHB3_BRIDGE 71 +#define CLKID_CLK81_A53 72 +#define CLKID_VCLK2_VENCI0 73 +#define CLKID_VCLK2_VENCI1 74 +#define CLKID_VCLK2_VENCP0 75 +#define CLKID_VCLK2_VENCP1 76 +#define CLKID_GCLK_VENCI_INT0 77 +#define CLKID_GCLK_VENCI_INT 78 +#define CLKID_DAC_CLK 79 +#define CLKID_AOCLK_GATE 80 +#define CLKID_IEC958_GATE 81 +#define CLKID_ENC480P 82 +#define CLKID_RNG1 83 +#define CLKID_GCLK_VENCI_INT1 84 +#define CLKID_VCLK2_VENCLMCC 85 +#define CLKID_VCLK2_VENCL 86 +#define CLKID_VCLK_OTHER 87 +#define CLKID_EDP 88 +#define CLKID_AO_MEDIA_CPU 89 +#define CLKID_AO_AHB_SRAM 90 +#define CLKID_AO_AHB_BUS 91 +#define CLKID_AO_IFACE 92 +#define CLKID_AO_I2C 93 + +#define NR_CLKS 94 + +/* include the CLKIDs that have been made part of the stable DT binding */ +#include <dt-bindings/clock/gxbb-clkc.h> + +#endif /* __GXBB_H */ diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b-clkc.c index 4d057b3e21b2..4c9413cdf373 100644 --- a/drivers/clk/meson/meson8b-clkc.c +++ b/drivers/clk/meson/meson8b-clkc.c @@ -1,7 +1,12 @@ /* + * AmLogic S805 / Meson8b Clock Controller Driver + * * Copyright (c) 2015 Endless Mobile, Inc. * Author: Carlo Caione <carlo@endlessm.com> * + * Copyright (c) 2016 BayLibre, Inc. + * Michael Turquette <mturquette@baylibre.com> + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -15,23 +20,33 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
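One note on the CLKID list just above: the gaps at 1 and 12 are deliberate — CLKID_CPUCLK and CLKID_CLK81 have already been promoted to the stable DT binding, so they live in the dt-bindings header included at the end of gxbb.h rather than here. Presumably that header reads something like this (a sketch of the values implied by the gaps, not quoted from the file):

	/* include/dt-bindings/clock/gxbb-clkc.h (stable binding, sketch) */
	#define CLKID_CPUCLK		1
	#define CLKID_CLK81		12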
*/ +#include <linux/clk.h> #include <linux/clk-provider.h> -#include <linux/kernel.h> -#include <linux/of.h> #include <linux/of_address.h> -#include <linux/slab.h> #include <dt-bindings/clock/meson8b-clkc.h> +#include <linux/platform_device.h> +#include <linux/init.h> #include "clkc.h" -#define MESON8B_REG_CTL0_ADDR 0x0000 -#define MESON8B_REG_SYS_CPU_CNTL1 0x015c -#define MESON8B_REG_HHI_MPEG 0x0174 -#define MESON8B_REG_MALI 0x01b0 +/* + * Clock controller register offsets + * + * Register offsets from the HardKernel[0] data sheet are listed in comment + * blocks below. Those offsets must be multiplied by 4 before adding them to + * the base address to get the right value + * + * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf + */ +#define MESON8B_REG_SYS_CPU_CNTL1 0x015c /* 0x57 offset in data sheet */ +#define MESON8B_REG_HHI_MPEG 0x0174 /* 0x5d offset in data sheet */ +#define MESON8B_REG_MALI 0x01b0 /* 0x6c offset in data sheet */ #define MESON8B_REG_PLL_FIXED 0x0280 #define MESON8B_REG_PLL_SYS 0x0300 #define MESON8B_REG_PLL_VID 0x0320 +static DEFINE_SPINLOCK(clk_lock); + static const struct pll_rate_table sys_pll_rate_table[] = { PLL_RATE(312000000, 52, 1, 2), PLL_RATE(336000000, 56, 1, 2), @@ -102,95 +117,331 @@ static const struct clk_div_table cpu_div_table[] = { { /* sentinel */ }, }; -PNAME(p_xtal) = { "xtal" }; -PNAME(p_fclk_div) = { "fixed_pll" }; -PNAME(p_cpu_clk) = { "sys_pll" }; -PNAME(p_clk81) = { "fclk_div3", "fclk_div4", "fclk_div5" }; -PNAME(p_mali) = { "fclk_div3", "fclk_div4", "fclk_div5", - "fclk_div7", "zero" }; +static struct clk_fixed_rate meson8b_xtal = { + .fixed_rate = 24000000, + .hw.init = &(struct clk_init_data){ + .name = "xtal", + .num_parents = 0, + .ops = &clk_fixed_rate_ops, + }, +}; + +static struct meson_clk_pll meson8b_fixed_pll = { + .m = { + .reg_off = MESON8B_REG_PLL_FIXED, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = MESON8B_REG_PLL_FIXED, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = MESON8B_REG_PLL_FIXED, + .shift = 16, + .width = 2, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "fixed_pll", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct meson_clk_pll meson8b_vid_pll = { + .m = { + .reg_off = MESON8B_REG_PLL_VID, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = MESON8B_REG_PLL_VID, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = MESON8B_REG_PLL_VID, + .shift = 16, + .width = 2, + }, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "vid_pll", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct meson_clk_pll meson8b_sys_pll = { + .m = { + .reg_off = MESON8B_REG_PLL_SYS, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = MESON8B_REG_PLL_SYS, + .shift = 9, + .width = 5, + }, + .od = { + .reg_off = MESON8B_REG_PLL_SYS, + .shift = 16, + .width = 2, + }, + .rate_table = sys_pll_rate_table, + .rate_count = ARRAY_SIZE(sys_pll_rate_table), + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll", + .ops = &meson_clk_pll_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_fixed_factor meson8b_fclk_div2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div2", + .ops = &clk_fixed_factor_ops, + 
.parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor meson8b_fclk_div3 = { + .mult = 1, + .div = 3, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div3", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor meson8b_fclk_div4 = { + .mult = 1, + .div = 4, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div4", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor meson8b_fclk_div5 = { + .mult = 1, + .div = 5, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div5", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor meson8b_fclk_div7 = { + .mult = 1, + .div = 7, + .hw.init = &(struct clk_init_data){ + .name = "fclk_div7", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "fixed_pll" }, + .num_parents = 1, + }, +}; + +/* + * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL + * post-dividers and should be modeled with their respective PLLs via the + * forthcoming coordinated clock rates feature + */ +static struct meson_clk_cpu meson8b_cpu_clk = { + .reg_off = MESON8B_REG_SYS_CPU_CNTL1, + .div_table = cpu_div_table, + .clk_nb.notifier_call = meson_clk_cpu_notifier_cb, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk", + .ops = &meson_clk_cpu_ops, + .parent_names = (const char *[]){ "sys_pll" }, + .num_parents = 1, + }, +}; static u32 mux_table_clk81[] = { 6, 5, 7 }; -static u32 mux_table_mali[] = { 6, 5, 7, 4, 0 }; - -static struct pll_conf pll_confs = { - .m = PARM(0x00, 0, 9), - .n = PARM(0x00, 9, 5), - .od = PARM(0x00, 16, 2), -}; - -static struct pll_conf sys_pll_conf = { - .m = PARM(0x00, 0, 9), - .n = PARM(0x00, 9, 5), - .od = PARM(0x00, 16, 2), - .rate_table = sys_pll_rate_table, -}; - -static const struct composite_conf clk81_conf __initconst = { - .mux_table = mux_table_clk81, - .mux_flags = CLK_MUX_READ_ONLY, - .mux_parm = PARM(0x00, 12, 3), - .div_parm = PARM(0x00, 0, 7), - .gate_parm = PARM(0x00, 7, 1), -}; - -static const struct composite_conf mali_conf __initconst = { - .mux_table = mux_table_mali, - .mux_parm = PARM(0x00, 9, 3), - .div_parm = PARM(0x00, 0, 7), - .gate_parm = PARM(0x00, 8, 1), -}; - -static const struct clk_conf meson8b_xtal_conf __initconst = - FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal", 0, - PARM(0x00, 4, 7)); - -static const struct clk_conf meson8b_clk_confs[] __initconst = { - FIXED_RATE(CLKID_ZERO, "zero", 0, 0), - PLL(MESON8B_REG_PLL_FIXED, CLKID_PLL_FIXED, "fixed_pll", - p_xtal, 0, &pll_confs), - PLL(MESON8B_REG_PLL_VID, CLKID_PLL_VID, "vid_pll", - p_xtal, 0, &pll_confs), - PLL(MESON8B_REG_PLL_SYS, CLKID_PLL_SYS, "sys_pll", - p_xtal, 0, &sys_pll_conf), - FIXED_FACTOR_DIV(CLKID_FCLK_DIV2, "fclk_div2", p_fclk_div, 0, 2), - FIXED_FACTOR_DIV(CLKID_FCLK_DIV3, "fclk_div3", p_fclk_div, 0, 3), - FIXED_FACTOR_DIV(CLKID_FCLK_DIV4, "fclk_div4", p_fclk_div, 0, 4), - FIXED_FACTOR_DIV(CLKID_FCLK_DIV5, "fclk_div5", p_fclk_div, 0, 5), - FIXED_FACTOR_DIV(CLKID_FCLK_DIV7, "fclk_div7", p_fclk_div, 0, 7), - CPU(MESON8B_REG_SYS_CPU_CNTL1, CLKID_CPUCLK, "a5_clk", p_cpu_clk, - cpu_div_table), - COMPOSITE(MESON8B_REG_HHI_MPEG, CLKID_CLK81, "clk81", p_clk81, - CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED, &clk81_conf), - COMPOSITE(MESON8B_REG_MALI, CLKID_MALI, "mali", 
p_mali, - CLK_IGNORE_UNUSED, &mali_conf), -}; - -static void __init meson8b_clkc_init(struct device_node *np) -{ - void __iomem *clk_base; - if (!meson_clk_init(np, CLK_NR_CLKS)) - return; +struct clk_mux meson8b_mpeg_clk_sel = { + .reg = (void *)MESON8B_REG_HHI_MPEG, + .mask = 0x7, + .shift = 12, + .flags = CLK_MUX_READ_ONLY, + .table = mux_table_clk81, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpeg_clk_sel", + .ops = &clk_mux_ro_ops, + /* + * FIXME bits 14:12 selects from 8 possible parents: + * xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2, + * fclk_div4, fclk_div3, fclk_div5 + */ + .parent_names = (const char *[]){ "fclk_div3", "fclk_div4", + "fclk_div5" }, + .num_parents = 3, + .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED), + }, +}; - /* XTAL */ - clk_base = of_iomap(np, 0); - if (!clk_base) { - pr_err("%s: Unable to map xtal base\n", __func__); - return; - } +struct clk_divider meson8b_mpeg_clk_div = { + .reg = (void *)MESON8B_REG_HHI_MPEG, + .shift = 0, + .width = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "mpeg_clk_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "mpeg_clk_sel" }, + .num_parents = 1, + .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), + }, +}; - meson_clk_register_clks(&meson8b_xtal_conf, 1, clk_base); - iounmap(clk_base); +struct clk_gate meson8b_clk81 = { + .reg = (void *)MESON8B_REG_HHI_MPEG, + .bit_idx = 7, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "clk81", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "mpeg_clk_div" }, + .num_parents = 1, + .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), + }, +}; + +static struct clk_hw_onecell_data meson8b_hw_onecell_data = { + .hws = { + [CLKID_XTAL] = &meson8b_xtal.hw, + [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw, + [CLKID_PLL_VID] = &meson8b_vid_pll.hw, + [CLKID_PLL_SYS] = &meson8b_sys_pll.hw, + [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw, + [CLKID_CPUCLK] = &meson8b_cpu_clk.hw, + [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw, + [CLKID_CLK81] = &meson8b_clk81.hw, + }, + .num = CLK_NR_CLKS, +}; + +static struct meson_clk_pll *const meson8b_clk_plls[] = { + &meson8b_fixed_pll, + &meson8b_vid_pll, + &meson8b_sys_pll, +}; + +static int meson8b_clkc_probe(struct platform_device *pdev) +{ + void __iomem *clk_base; + int ret, clkid, i; + struct clk_hw *parent_hw; + struct clk *parent_clk; + struct device *dev = &pdev->dev; /* Generic clocks and PLLs */ - clk_base = of_iomap(np, 1); + clk_base = of_iomap(dev->of_node, 1); if (!clk_base) { pr_err("%s: Unable to map clk base\n", __func__); - return; + return -ENXIO; + } + + /* Populate base address for PLLs */ + for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++) + meson8b_clk_plls[i]->base = clk_base; + + /* Populate the base address for CPU clk */ + meson8b_cpu_clk.base = clk_base; + + /* Populate the base address for the MPEG clks */ + meson8b_mpeg_clk_sel.reg = clk_base + (u32)meson8b_mpeg_clk_sel.reg; + meson8b_mpeg_clk_div.reg = clk_base + (u32)meson8b_mpeg_clk_div.reg; + meson8b_clk81.reg = clk_base + (u32)meson8b_clk81.reg; + + /* + * register all clks + * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1 + */ + for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) { + /* array might be sparse */ + if 
(!meson8b_hw_onecell_data.hws[clkid]) + continue; + + /* FIXME convert to devm_clk_register */ + ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]); + if (ret) + goto iounmap; } - meson_clk_register_clks(meson8b_clk_confs, - ARRAY_SIZE(meson8b_clk_confs), - clk_base); + /* + * Register CPU clk notifier + * + * FIXME this is wrong for a lot of reasons. First, the muxes should be + * struct clk_hw objects. Second, we shouldn't program the muxes in + * notifier handlers. The tricky programming sequence will be handled + * by the forthcoming coordinated clock rates mechanism once that + * feature is released. + * + * Furthermore, looking up the parent this way is terrible. At some + * point we will stop allocating a default struct clk when registering + * a new clk_hw, and this hack will no longer work. Releasing the ccr + * feature before that time solves the problem :-) + */ + parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw); + parent_clk = parent_hw->clk; + ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb); + if (ret) { + pr_err("%s: failed to register clock notifier for cpu_clk\n", + __func__); + goto iounmap; + } + + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + &meson8b_hw_onecell_data); + +iounmap: + iounmap(clk_base); + return ret; +} + +static const struct of_device_id meson8b_clkc_match_table[] = { + { .compatible = "amlogic,meson8b-clkc" }, + { } +}; + +static struct platform_driver meson8b_driver = { + .probe = meson8b_clkc_probe, + .driver = { + .name = "meson8b-clkc", + .of_match_table = meson8b_clkc_match_table, + }, +}; + +static int __init meson8b_clkc_init(void) +{ + return platform_driver_register(&meson8b_driver); } -CLK_OF_DECLARE(meson8b_clock, "amlogic,meson8b-clkc", meson8b_clkc_init); +device_initcall(meson8b_clkc_init); diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c index 481b2646b496..90d740a2fc0d 100644 --- a/drivers/clk/nxp/clk-lpc32xx.c +++ b/drivers/clk/nxp/clk-lpc32xx.c @@ -87,7 +87,7 @@ enum { enum { /* Start from the last defined clock in dt bindings */ - LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_HCLK_PLL + 1, + LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_PERIPH + 1, LPC32XX_CLK_ADC_RTC, LPC32XX_CLK_TEST1, LPC32XX_CLK_TEST2, @@ -99,7 +99,6 @@ enum { LPC32XX_CLK_HCLK_DIV_PERIPH, LPC32XX_CLK_HCLK_DIV, LPC32XX_CLK_HCLK, - LPC32XX_CLK_PERIPH, LPC32XX_CLK_ARM, LPC32XX_CLK_ARM_VFP, diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c index 6dc55864979c..c347a0d44bc8 100644 --- a/drivers/clk/qcom/gcc-msm8660.c +++ b/drivers/clk/qcom/gcc-msm8660.c @@ -2290,6 +2290,32 @@ static struct clk_branch sdc5_h_clk = { }, }; +static struct clk_branch ebi2_2x_clk = { + .halt_reg = 0x2fcc, + .halt_bit = 18, + .clkr = { + .enable_reg = 0x2660, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "ebi2_2x_clk", + .ops = &clk_branch_ops, + }, + }, +}; + +static struct clk_branch ebi2_clk = { + .halt_reg = 0x2fcc, + .halt_bit = 19, + .clkr = { + .enable_reg = 0x2664, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "ebi2_clk", + .ops = &clk_branch_ops, + }, + }, +}; + static struct clk_branch adm0_clk = { .halt_reg = 0x2fdc, .halt_check = BRANCH_HALT_VOTED, @@ -2533,6 +2559,8 @@ static struct clk_regmap *gcc_msm8660_clks[] = { [SDC3_H_CLK] = &sdc3_h_clk.clkr, [SDC4_H_CLK] = &sdc4_h_clk.clkr, [SDC5_H_CLK] = &sdc5_h_clk.clkr, + [EBI2_2X_CLK] = &ebi2_2x_clk.clkr, + [EBI2_CLK] = &ebi2_clk.clkr, [ADM0_CLK] = &adm0_clk.clkr, [ADM0_PBUS_CLK] = 
&adm0_pbus_clk.clkr, [ADM1_CLK] = &adm1_clk.clkr, diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index c9b96f318d9c..bbf732bbc3fd 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -2891,21 +2891,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = { }, }; -static struct clk_branch gcc_aggre1_pnoc_ahb_clk = { - .halt_reg = 0x82014, - .clkr = { - .enable_reg = 0x82014, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_aggre1_pnoc_ahb_clk", - .parent_names = (const char *[]){ "periph_noc_clk_src" }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_aggre2_ufs_axi_clk = { .halt_reg = 0x83014, .clkr = { @@ -3308,7 +3293,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr, [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr, [GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr, - [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr, [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr, [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr, [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr, diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 2115ce410cfb..41a12d376799 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -1,6 +1,7 @@ config CLK_RENESAS_CPG_MSSR bool default y if ARCH_R8A7795 + default y if ARCH_R8A7796 config CLK_RENESAS_CPG_MSTP bool @@ -11,6 +12,7 @@ config CLK_RENESAS_CPG_MSTP default y if ARCH_R8A7779 default y if ARCH_R8A7790 default y if ARCH_R8A7791 + default y if ARCH_R8A7792 default y if ARCH_R8A7793 default y if ARCH_R8A7794 default y if ARCH_SH73A0 diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index ead8bb843524..90dd0db7d9c6 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -6,9 +6,11 @@ obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o clk-div6.o +obj-$(CONFIG_ARCH_R8A7792) += clk-rcar-gen2.o clk-div6.o obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o clk-div6.o obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o clk-div6.o -obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o +obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o rcar-gen3-cpg.o +obj-$(CONFIG_ARCH_R8A7796) += r8a7796-cpg-mssr.o rcar-gen3-cpg.o obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o clk-div6.o obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o clk-div6.o diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index ca5519c583d4..d359c92e13a6 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -12,22 +12,14 @@ * the Free Software Foundation; version 2 of the License. 
*/ -#include <linux/bug.h> -#include <linux/clk.h> -#include <linux/clk-provider.h> #include <linux/device.h> -#include <linux/err.h> #include <linux/init.h> -#include <linux/io.h> #include <linux/kernel.h> -#include <linux/of.h> -#include <linux/slab.h> #include <dt-bindings/clock/r8a7795-cpg-mssr.h> #include "renesas-cpg-mssr.h" - -#define CPG_RCKCR 0x240 +#include "rcar-gen3-cpg.h" enum clk_ids { /* Core Clock Outputs exported to DT */ @@ -58,20 +50,6 @@ enum clk_ids { MOD_CLK_BASE }; -enum r8a7795_clk_types { - CLK_TYPE_GEN3_MAIN = CLK_TYPE_CUSTOM, - CLK_TYPE_GEN3_PLL0, - CLK_TYPE_GEN3_PLL1, - CLK_TYPE_GEN3_PLL2, - CLK_TYPE_GEN3_PLL3, - CLK_TYPE_GEN3_PLL4, - CLK_TYPE_GEN3_SD, - CLK_TYPE_GEN3_R, -}; - -#define DEF_GEN3_SD(_name, _id, _parent, _offset) \ - DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset) - static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), @@ -129,6 +107,9 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { }; static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { + DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), + DEF_MOD("fdp1-1", 118, R8A7795_CLK_S2D1), + DEF_MOD("fdp1-0", 119, R8A7795_CLK_S2D1), DEF_MOD("scif5", 202, R8A7795_CLK_S3D4), DEF_MOD("scif4", 203, R8A7795_CLK_S3D4), DEF_MOD("scif3", 204, R8A7795_CLK_S3D4), @@ -157,11 +138,20 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1), DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4), DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4), + DEF_MOD("drif7", 508, R8A7795_CLK_S3D2), + DEF_MOD("drif6", 509, R8A7795_CLK_S3D2), + DEF_MOD("drif5", 510, R8A7795_CLK_S3D2), + DEF_MOD("drif4", 511, R8A7795_CLK_S3D2), + DEF_MOD("drif3", 512, R8A7795_CLK_S3D2), + DEF_MOD("drif2", 513, R8A7795_CLK_S3D2), + DEF_MOD("drif1", 514, R8A7795_CLK_S3D2), + DEF_MOD("drif0", 515, R8A7795_CLK_S3D2), DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1), DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1), DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1), DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1), DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1), + DEF_MOD("thermal", 522, R8A7795_CLK_CP), DEF_MOD("pwm", 523, R8A7795_CLK_S3D4), DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1), @@ -199,7 +189,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = { DEF_MOD("du2", 722, R8A7795_CLK_S2D1), DEF_MOD("du1", 723, R8A7795_CLK_S2D1), DEF_MOD("du0", 724, R8A7795_CLK_S2D1), - DEF_MOD("lvds", 727, R8A7795_CLK_S2D1), + DEF_MOD("lvds", 727, R8A7795_CLK_S0D4), DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI), DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI), DEF_MOD("vin7", 804, R8A7795_CLK_S2D1), @@ -262,225 +252,6 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = { MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; -/* ----------------------------------------------------------------------------- - * SDn Clock - * - */ -#define CPG_SD_STP_HCK BIT(9) -#define CPG_SD_STP_CK BIT(8) - -#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK) -#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0) - -#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \ -{ \ - .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \ - ((stp_ck) ? 
CPG_SD_STP_CK : 0) | \ - ((sd_srcfc) << 2) | \ - ((sd_fc) << 0), \ - .div = (sd_div), \ -} - -struct sd_div_table { - u32 val; - unsigned int div; -}; - -struct sd_clock { - struct clk_hw hw; - void __iomem *reg; - const struct sd_div_table *div_table; - unsigned int div_num; - unsigned int div_min; - unsigned int div_max; -}; - -/* SDn divider - * sd_srcfc sd_fc div - * stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc - *------------------------------------------------------------------- - * 0 0 0 (1) 1 (4) 4 - * 0 0 1 (2) 1 (4) 8 - * 1 0 2 (4) 1 (4) 16 - * 1 0 3 (8) 1 (4) 32 - * 1 0 4 (16) 1 (4) 64 - * 0 0 0 (1) 0 (2) 2 - * 0 0 1 (2) 0 (2) 4 - * 1 0 2 (4) 0 (2) 8 - * 1 0 3 (8) 0 (2) 16 - * 1 0 4 (16) 0 (2) 32 - */ -static const struct sd_div_table cpg_sd_div_table[] = { -/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */ - CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4), - CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8), - CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16), - CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32), - CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64), - CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2), - CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4), - CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8), - CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16), - CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32), -}; - -#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw) - -static int cpg_sd_clock_enable(struct clk_hw *hw) -{ - struct sd_clock *clock = to_sd_clock(hw); - u32 val, sd_fc; - unsigned int i; - - val = clk_readl(clock->reg); - - sd_fc = val & CPG_SD_FC_MASK; - for (i = 0; i < clock->div_num; i++) - if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) - break; - - if (i >= clock->div_num) - return -EINVAL; - - val &= ~(CPG_SD_STP_MASK); - val |= clock->div_table[i].val & CPG_SD_STP_MASK; - - clk_writel(val, clock->reg); - - return 0; -} - -static void cpg_sd_clock_disable(struct clk_hw *hw) -{ - struct sd_clock *clock = to_sd_clock(hw); - - clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg); -} - -static int cpg_sd_clock_is_enabled(struct clk_hw *hw) -{ - struct sd_clock *clock = to_sd_clock(hw); - - return !(clk_readl(clock->reg) & CPG_SD_STP_MASK); -} - -static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct sd_clock *clock = to_sd_clock(hw); - unsigned long rate = parent_rate; - u32 val, sd_fc; - unsigned int i; - - val = clk_readl(clock->reg); - - sd_fc = val & CPG_SD_FC_MASK; - for (i = 0; i < clock->div_num; i++) - if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) - break; - - if (i >= clock->div_num) - return -EINVAL; - - return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div); -} - -static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock, - unsigned long rate, - unsigned long parent_rate) -{ - unsigned int div; - - if (!rate) - rate = 1; - - div = DIV_ROUND_CLOSEST(parent_rate, rate); - - return clamp_t(unsigned int, div, clock->div_min, clock->div_max); -} - -static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - struct sd_clock *clock = to_sd_clock(hw); - unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate); - - return DIV_ROUND_CLOSEST(*parent_rate, div); -} - -static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct sd_clock *clock = to_sd_clock(hw); - unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); - u32 val; - unsigned int i; - - for (i = 0; i < clock->div_num; i++) - if (div == clock->div_table[i].div) - 
break; - - if (i >= clock->div_num) - return -EINVAL; - - val = clk_readl(clock->reg); - val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); - val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); - clk_writel(val, clock->reg); - - return 0; -} - -static const struct clk_ops cpg_sd_clock_ops = { - .enable = cpg_sd_clock_enable, - .disable = cpg_sd_clock_disable, - .is_enabled = cpg_sd_clock_is_enabled, - .recalc_rate = cpg_sd_clock_recalc_rate, - .round_rate = cpg_sd_clock_round_rate, - .set_rate = cpg_sd_clock_set_rate, -}; - -static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, - void __iomem *base, - const char *parent_name) -{ - struct clk_init_data init; - struct sd_clock *clock; - struct clk *clk; - unsigned int i; - - clock = kzalloc(sizeof(*clock), GFP_KERNEL); - if (!clock) - return ERR_PTR(-ENOMEM); - - init.name = core->name; - init.ops = &cpg_sd_clock_ops; - init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; - init.parent_names = &parent_name; - init.num_parents = 1; - - clock->reg = base + core->offset; - clock->hw.init = &init; - clock->div_table = cpg_sd_div_table; - clock->div_num = ARRAY_SIZE(cpg_sd_div_table); - - clock->div_max = clock->div_table[0].div; - clock->div_min = clock->div_max; - for (i = 1; i < clock->div_num; i++) { - clock->div_max = max(clock->div_max, clock->div_table[i].div); - clock->div_min = min(clock->div_min, clock->div_table[i].div); - } - - clk = clk_register(NULL, &clock->hw); - if (IS_ERR(clk)) - kfree(clock); - - return clk; -} - -#define CPG_PLL0CR 0x00d8 -#define CPG_PLL2CR 0x002c -#define CPG_PLL4CR 0x01f4 /* * CPG Clock Data @@ -512,13 +283,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, (((md) & BIT(19)) >> 18) | \ (((md) & BIT(17)) >> 17)) -struct cpg_pll_config { - unsigned int extal_div; - unsigned int pll1_mult; - unsigned int pll3_mult; -}; - -static const struct cpg_pll_config cpg_pll_configs[16] __initconst = { +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { /* EXTAL div PLL1 mult PLL3 mult */ { 1, 192, 192, }, { 1, 192, 128, }, @@ -538,112 +303,9 @@ static const struct cpg_pll_config cpg_pll_configs[16] __initconst = { { 2, 192, 192, }, }; -static const struct cpg_pll_config *cpg_pll_config __initdata; - -static -struct clk * __init r8a7795_cpg_clk_register(struct device *dev, - const struct cpg_core_clk *core, - const struct cpg_mssr_info *info, - struct clk **clks, - void __iomem *base) -{ - const struct clk *parent; - unsigned int mult = 1; - unsigned int div = 1; - u32 value; - - parent = clks[core->parent]; - if (IS_ERR(parent)) - return ERR_CAST(parent); - - switch (core->type) { - case CLK_TYPE_GEN3_MAIN: - div = cpg_pll_config->extal_div; - break; - - case CLK_TYPE_GEN3_PLL0: - /* - * PLL0 is a configurable multiplier clock. Register it as a - * fixed factor clock for now as there's no generic multiplier - * clock implementation and we currently have no need to change - * the multiplier value. - */ - value = readl(base + CPG_PLL0CR); - mult = (((value >> 24) & 0x7f) + 1) * 2; - break; - - case CLK_TYPE_GEN3_PLL1: - mult = cpg_pll_config->pll1_mult; - break; - - case CLK_TYPE_GEN3_PLL2: - /* - * PLL2 is a configurable multiplier clock. Register it as a - * fixed factor clock for now as there's no generic multiplier - * clock implementation and we currently have no need to change - * the multiplier value. 
- */ - value = readl(base + CPG_PLL2CR); - mult = (((value >> 24) & 0x7f) + 1) * 2; - break; - - case CLK_TYPE_GEN3_PLL3: - mult = cpg_pll_config->pll3_mult; - break; - - case CLK_TYPE_GEN3_PLL4: - /* - * PLL4 is a configurable multiplier clock. Register it as a - * fixed factor clock for now as there's no generic multiplier - * clock implementation and we currently have no need to change - * the multiplier value. - */ - value = readl(base + CPG_PLL4CR); - mult = (((value >> 24) & 0x7f) + 1) * 2; - break; - - case CLK_TYPE_GEN3_SD: - return cpg_sd_clk_register(core, base, __clk_get_name(parent)); - - case CLK_TYPE_GEN3_R: - /* RINT is default. Only if EXTALR is populated, we switch to it */ - value = readl(base + CPG_RCKCR) & 0x3f; - - if (clk_get_rate(clks[CLK_EXTALR])) { - parent = clks[CLK_EXTALR]; - value |= BIT(15); - } - - writel(value, base + CPG_RCKCR); - break; - - default: - return ERR_PTR(-EINVAL); - } - - return clk_register_fixed_factor(NULL, core->name, - __clk_get_name(parent), 0, mult, div); -} - -/* - * Reset register definitions. - */ -#define MODEMR 0xe6160060 - -static u32 rcar_gen3_read_mode_pins(void) -{ - void __iomem *modemr = ioremap_nocache(MODEMR, 4); - u32 mode; - - BUG_ON(!modemr); - mode = ioread32(modemr); - iounmap(modemr); - - return mode; -} - static int __init r8a7795_cpg_mssr_init(struct device *dev) { + const struct rcar_gen3_cpg_pll_config *cpg_pll_config; u32 cpg_mode = rcar_gen3_read_mode_pins(); cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; @@ -652,7 +314,7 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev) return -EINVAL; } - return 0; + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR); } const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = { @@ -673,5 +335,5 @@ const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = { /* Callbacks */ .init = r8a7795_cpg_mssr_init, - .cpg_clk_register = r8a7795_cpg_clk_register, + .cpg_clk_register = rcar_gen3_cpg_clk_register, }; diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c new file mode 100644 index 000000000000..c84b549c14d2 --- /dev/null +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -0,0 +1,192 @@ +/* + * r8a7796 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2016 Glider bvba + * + * Based on r8a7795-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include <dt-bindings/clock/r8a7796-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A7796_CLK_OSC, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL2, + CLK_PLL3, + CLK_PLL4, + CLK_PLL1_DIV2, + CLK_PLL1_DIV4, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_SSPSRC, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), + + /* Core Clock Outputs */ + DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), + DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED("zx", R8A7796_CLK_ZX, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED("s0d1", R8A7796_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A7796_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A7796_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A7796_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A7796_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A7796_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A7796_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s1d1", R8A7796_CLK_S1D1, CLK_S1, 1, 1), + DEF_FIXED("s1d2", R8A7796_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A7796_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A7796_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A7796_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A7796_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A7796_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1), + + DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1), +}; + +static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { + DEF_MOD("scif2", 310, R8A7796_CLK_S3D4), + DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1), +}; + +static const unsigned int r8a7796_crit_mod_clks[] __initconst = { + MOD_CLK_ID(408), /* INTC-AP (GIC) */ +}; + + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 + * 14 13 19 17 (MHz) + *------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 + * 0 0 1 0 Prohibited setting + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 + * 0 1 1 0 Prohibited setting + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 + * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 + * 1 0 0 1 25 x 
1 x120 x128 x96 x84 x96 + * 1 0 1 0 Prohibited setting + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 + * 1 1 1 0 Prohibited setting + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ + (((md) & BIT(13)) >> 11) | \ + (((md) & BIT(19)) >> 18) | \ + (((md) & BIT(17)) >> 17)) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { + /* EXTAL div PLL1 mult PLL3 mult */ + { 1, 192, 192, }, + { 1, 192, 128, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 192, }, + { 1, 160, 160, }, + { 1, 160, 106, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 160, }, + { 1, 128, 128, }, + { 1, 128, 84, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 128, }, + { 2, 192, 192, }, + { 2, 192, 128, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 192, }, +}; + +static int __init r8a7796_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen3_cpg_pll_config *cpg_pll_config; + u32 cpg_mode = rcar_gen3_read_mode_pins(); + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + if (!cpg_pll_config->extal_div) { + dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode); + return -EINVAL; + } + + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR); +} + +const struct cpg_mssr_info r8a7796_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a7796_core_clks, + .num_core_clks = ARRAY_SIZE(r8a7796_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a7796_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a7796_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a7796_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r8a7796_crit_mod_clks), + + /* Callbacks */ + .init = r8a7796_cpg_mssr_init, + .cpg_clk_register = rcar_gen3_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c new file mode 100644 index 000000000000..bb4f2f9a8c2f --- /dev/null +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -0,0 +1,359 @@ +/* + * R-Car Gen3 Clock Pulse Generator + * + * Copyright (C) 2015-2016 Glider bvba + * + * Based on clk-rcar-gen3.c + * + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include <linux/bug.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/slab.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +#define CPG_PLL0CR 0x00d8 +#define CPG_PLL2CR 0x002c +#define CPG_PLL4CR 0x01f4 + + +/* + * SDn Clock + */ +#define CPG_SD_STP_HCK BIT(9) +#define CPG_SD_STP_CK BIT(8) + +#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK) +#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0) + +#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \ +{ \ + .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \ + ((stp_ck) ? 
CPG_SD_STP_CK : 0) | \ + ((sd_srcfc) << 2) | \ + ((sd_fc) << 0), \ + .div = (sd_div), \ +} + +struct sd_div_table { + u32 val; + unsigned int div; +}; + +struct sd_clock { + struct clk_hw hw; + void __iomem *reg; + const struct sd_div_table *div_table; + unsigned int div_num; + unsigned int div_min; + unsigned int div_max; +}; + +/* SDn divider + * sd_srcfc sd_fc div + * stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc + *------------------------------------------------------------------- + * 0 0 0 (1) 1 (4) 4 + * 0 0 1 (2) 1 (4) 8 + * 1 0 2 (4) 1 (4) 16 + * 1 0 3 (8) 1 (4) 32 + * 1 0 4 (16) 1 (4) 64 + * 0 0 0 (1) 0 (2) 2 + * 0 0 1 (2) 0 (2) 4 + * 1 0 2 (4) 0 (2) 8 + * 1 0 3 (8) 0 (2) 16 + * 1 0 4 (16) 0 (2) 32 + */ +static const struct sd_div_table cpg_sd_div_table[] = { +/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */ + CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4), + CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8), + CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16), + CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32), + CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64), + CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2), + CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4), + CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8), + CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16), + CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32), +}; + +#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw) + +static int cpg_sd_clock_enable(struct clk_hw *hw) +{ + struct sd_clock *clock = to_sd_clock(hw); + u32 val, sd_fc; + unsigned int i; + + val = clk_readl(clock->reg); + + sd_fc = val & CPG_SD_FC_MASK; + for (i = 0; i < clock->div_num; i++) + if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) + break; + + if (i >= clock->div_num) + return -EINVAL; + + val &= ~(CPG_SD_STP_MASK); + val |= clock->div_table[i].val & CPG_SD_STP_MASK; + + clk_writel(val, clock->reg); + + return 0; +} + +static void cpg_sd_clock_disable(struct clk_hw *hw) +{ + struct sd_clock *clock = to_sd_clock(hw); + + clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg); +} + +static int cpg_sd_clock_is_enabled(struct clk_hw *hw) +{ + struct sd_clock *clock = to_sd_clock(hw); + + return !(clk_readl(clock->reg) & CPG_SD_STP_MASK); +} + +static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sd_clock *clock = to_sd_clock(hw); + unsigned long rate = parent_rate; + u32 val, sd_fc; + unsigned int i; + + val = clk_readl(clock->reg); + + sd_fc = val & CPG_SD_FC_MASK; + for (i = 0; i < clock->div_num; i++) + if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) + break; + + if (i >= clock->div_num) + return -EINVAL; + + return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div); +} + +static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock, + unsigned long rate, + unsigned long parent_rate) +{ + unsigned int div; + + if (!rate) + rate = 1; + + div = DIV_ROUND_CLOSEST(parent_rate, rate); + + return clamp_t(unsigned int, div, clock->div_min, clock->div_max); +} + +static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct sd_clock *clock = to_sd_clock(hw); + unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate); + + return DIV_ROUND_CLOSEST(*parent_rate, div); +} + +static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct sd_clock *clock = to_sd_clock(hw); + unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); + u32 val; + unsigned int i; + + for (i = 0; i < clock->div_num; i++) + if (div == clock->div_table[i].div) + 
break; + + if (i >= clock->div_num) + return -EINVAL; + + val = clk_readl(clock->reg); + val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); + val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); + clk_writel(val, clock->reg); + + return 0; +} + +static const struct clk_ops cpg_sd_clock_ops = { + .enable = cpg_sd_clock_enable, + .disable = cpg_sd_clock_disable, + .is_enabled = cpg_sd_clock_is_enabled, + .recalc_rate = cpg_sd_clock_recalc_rate, + .round_rate = cpg_sd_clock_round_rate, + .set_rate = cpg_sd_clock_set_rate, +}; + +static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, + void __iomem *base, + const char *parent_name) +{ + struct clk_init_data init; + struct sd_clock *clock; + struct clk *clk; + unsigned int i; + + clock = kzalloc(sizeof(*clock), GFP_KERNEL); + if (!clock) + return ERR_PTR(-ENOMEM); + + init.name = core->name; + init.ops = &cpg_sd_clock_ops; + init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; + init.parent_names = &parent_name; + init.num_parents = 1; + + clock->reg = base + core->offset; + clock->hw.init = &init; + clock->div_table = cpg_sd_div_table; + clock->div_num = ARRAY_SIZE(cpg_sd_div_table); + + clock->div_max = clock->div_table[0].div; + clock->div_min = clock->div_max; + for (i = 1; i < clock->div_num; i++) { + clock->div_max = max(clock->div_max, clock->div_table[i].div); + clock->div_min = min(clock->div_min, clock->div_table[i].div); + } + + clk = clk_register(NULL, &clock->hw); + if (IS_ERR(clk)) + kfree(clock); + + return clk; +} + + +static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; +static unsigned int cpg_clk_extalr __initdata; + +struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, + const struct cpg_core_clk *core, const struct cpg_mssr_info *info, + struct clk **clks, void __iomem *base) +{ + const struct clk *parent; + unsigned int mult = 1; + unsigned int div = 1; + u32 value; + + parent = clks[core->parent]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + switch (core->type) { + case CLK_TYPE_GEN3_MAIN: + div = cpg_pll_config->extal_div; + break; + + case CLK_TYPE_GEN3_PLL0: + /* + * PLL0 is a configurable multiplier clock. Register it as a + * fixed factor clock for now as there's no generic multiplier + * clock implementation and we currently have no need to change + * the multiplier value. + */ + value = readl(base + CPG_PLL0CR); + mult = (((value >> 24) & 0x7f) + 1) * 2; + break; + + case CLK_TYPE_GEN3_PLL1: + mult = cpg_pll_config->pll1_mult; + break; + + case CLK_TYPE_GEN3_PLL2: + /* + * PLL2 is a configurable multiplier clock. Register it as a + * fixed factor clock for now as there's no generic multiplier + * clock implementation and we currently have no need to change + * the multiplier value. + */ + value = readl(base + CPG_PLL2CR); + mult = (((value >> 24) & 0x7f) + 1) * 2; + break; + + case CLK_TYPE_GEN3_PLL3: + mult = cpg_pll_config->pll3_mult; + break; + + case CLK_TYPE_GEN3_PLL4: + /* + * PLL4 is a configurable multiplier clock. Register it as a + * fixed factor clock for now as there's no generic multiplier + * clock implementation and we currently have no need to change + * the multiplier value. + */ + value = readl(base + CPG_PLL4CR); + mult = (((value >> 24) & 0x7f) + 1) * 2; + break; + + case CLK_TYPE_GEN3_SD: + return cpg_sd_clk_register(core, base, __clk_get_name(parent)); + + case CLK_TYPE_GEN3_R: + /* + * RINT is default. + * Only if EXTALR is populated, we switch to it. 
+ */ + value = readl(base + CPG_RCKCR) & 0x3f; + + if (clk_get_rate(clks[cpg_clk_extalr])) { + parent = clks[cpg_clk_extalr]; + value |= BIT(15); + } + + writel(value, base + CPG_RCKCR); + break; + + default: + return ERR_PTR(-EINVAL); + } + + return clk_register_fixed_factor(NULL, core->name, + __clk_get_name(parent), 0, mult, div); +} + +/* + * Reset register definitions. + */ +#define MODEMR 0xe6160060 + +u32 __init rcar_gen3_read_mode_pins(void) +{ + void __iomem *modemr = ioremap_nocache(MODEMR, 4); + u32 mode; + + BUG_ON(!modemr); + mode = ioread32(modemr); + iounmap(modemr); + + return mode; +} + +int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config, + unsigned int clk_extalr) +{ + cpg_pll_config = config; + cpg_clk_extalr = clk_extalr; + return 0; +} diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h new file mode 100644 index 000000000000..f699085147d1 --- /dev/null +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -0,0 +1,43 @@ +/* + * R-Car Gen3 Clock Pulse Generator + * + * Copyright (C) 2015-2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#ifndef __CLK_RENESAS_RCAR_GEN3_CPG_H__ +#define __CLK_RENESAS_RCAR_GEN3_CPG_H__ + +enum rcar_gen3_clk_types { + CLK_TYPE_GEN3_MAIN = CLK_TYPE_CUSTOM, + CLK_TYPE_GEN3_PLL0, + CLK_TYPE_GEN3_PLL1, + CLK_TYPE_GEN3_PLL2, + CLK_TYPE_GEN3_PLL3, + CLK_TYPE_GEN3_PLL4, + CLK_TYPE_GEN3_SD, + CLK_TYPE_GEN3_R, +}; + +#define DEF_GEN3_SD(_name, _id, _parent, _offset) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset) + +struct rcar_gen3_cpg_pll_config { + unsigned int extal_div; + unsigned int pll1_mult; + unsigned int pll3_mult; +}; + +#define CPG_RCKCR 0x240 + +u32 rcar_gen3_read_mode_pins(void); +struct clk *rcar_gen3_cpg_clk_register(struct device *dev, + const struct cpg_core_clk *core, const struct cpg_mssr_info *info, + struct clk **clks, void __iomem *base); +int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config, + unsigned int clk_extalr); + +#endif diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 210cd744a7a9..e1365e7491ae 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -509,6 +509,12 @@ static const struct of_device_id cpg_mssr_match[] = { .data = &r8a7795_cpg_mssr_info, }, #endif +#ifdef CONFIG_ARCH_R8A7796 + { + .compatible = "renesas,r8a7796-cpg-mssr", + .data = &r8a7796_cpg_mssr_info, + }, +#endif { /* sentinel */ } }; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 0d1e3e811e79..ee7edfaf1408 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -131,4 +131,5 @@ struct cpg_mssr_info { }; extern const struct cpg_mssr_info r8a7795_cpg_mssr_info; +extern const struct cpg_mssr_info r8a7796_cpg_mssr_info; #endif diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 016bdb0b793a..db6e5a9e6de6 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -151,8 +151,8 @@ PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; -PNAME(mux_sclk_macphy_50m_p) = { "ext_gmac", "phy_50m_out" }; 
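The CPG_PLL_CONFIG_INDEX() macro shared by the r8a7795 and r8a7796 hunks above packs four mode pins, in the order MD14, MD13, MD19, MD17, into a 4-bit index into the cpg_pll_configs[] table. A minimal stand-alone C sketch of the same bit shuffling, handy for sanity-checking a MODEMR value against the PLL tables; the sample mode value is made up for illustration:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Same decoding as the kernel macro: index = MD14:MD13:MD19:MD17 */
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
                                  (((md) & BIT(13)) >> 11) | \
                                  (((md) & BIT(19)) >> 18) | \
                                  (((md) & BIT(17)) >> 17))

int main(void)
{
        /* hypothetical MODEMR value with only MD14 and MD17 set */
        unsigned int md = BIT(14) | BIT(17);

        /* 1,0,0,1 -> index 9: 25 MHz EXTAL, PLL1 x128, PLL3 x84 */
        printf("index = %u\n", CPG_PLL_CONFIG_INDEX(md));
        return 0;
}

Running this prints index 9, which selects the { 1, 128, 84 } entry, matching the "1 0 0 1 25" row of the comment table in the r8a7796 hunk.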
-PNAME(mux_sclk_gmac_pre_p) = { "sclk_gmac_src", "sclk_macphy_50m" }; +PNAME(mux_sclk_mac_extclk_p) = { "ext_gmac", "phy_50m_out" }; +PNAME(mux_sclk_gmac_pre_p) = { "sclk_gmac_src", "sclk_mac_extclk" }; PNAME(mux_sclk_macphy_p) = { "sclk_gmac_src", "ext_gmac" }; static struct rockchip_pll_clock rk3228_pll_clks[] __initdata = { @@ -170,6 +170,34 @@ static struct rockchip_pll_clock rk3228_pll_clks[] __initdata = { #define DFLAGS CLK_DIVIDER_HIWORD_MASK #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) +static struct rockchip_clk_branch rk3228_i2s0_fracmux __initdata = + MUX(0, "i2s0_pre", mux_i2s0_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(9), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_i2s1_fracmux __initdata = + MUX(0, "i2s1_pre", mux_i2s1_pre_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(3), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_i2s2_fracmux __initdata = + MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(16), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_spdif_fracmux __initdata = + MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(6), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_uart0_fracmux __initdata = + MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(13), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_uart1_fracmux __initdata = + MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(14), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rk3228_uart2_fracmux __initdata = + MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(15), 8, 2, MFLAGS); + static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { /* * Clock-Architecture Diagram 1 @@ -335,7 +363,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKGATE_CON(2), 6, GFLAGS), GATE(0, "sclk_hsadc", "ext_hsadc", 0, - RK3288_CLKGATE_CON(10), 12, GFLAGS), + RK2928_CLKGATE_CON(10), 12, GFLAGS), COMPOSITE(0, "sclk_wifi", mux_pll_src_cpll_gpll_usb480m_p, 0, RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS, @@ -379,22 +407,21 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 3, GFLAGS), - COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT, - RK3288_CLKSEL_CON(8), 0, - RK3288_CLKGATE_CON(0), 4, GFLAGS), - COMPOSITE_NODIV(SCLK_I2S0, "sclk_i2s0", mux_i2s0_p, 0, - RK2928_CLKSEL_CON(9), 8, 2, MFLAGS, + COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(8), 0, + RK2928_CLKGATE_CON(0), 4, GFLAGS, + &rk3228_i2s0_fracmux), + GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT, RK2928_CLKGATE_CON(0), 5, GFLAGS), COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(3), 15, 1, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 10, GFLAGS), - COMPOSITE_FRAC(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, - RK3288_CLKSEL_CON(7), 0, - RK3288_CLKGATE_CON(0), 11, GFLAGS), - MUX(0, "i2s1_pre", mux_i2s1_pre_p, 0, - RK2928_CLKSEL_CON(3), 8, 2, MFLAGS), - GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", 0, + COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(7), 0, + RK2928_CLKGATE_CON(0), 11, GFLAGS, + &rk3228_i2s1_fracmux), + GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, RK2928_CLKGATE_CON(0), 14, GFLAGS), 
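These rk3228 hunks do two things: they convert the I2S/SPDIF/UART fractional dividers from COMPOSITE_FRAC plus a separate MUX to COMPOSITE_FRACMUX, so the fractional branch and its downstream mux are registered as one unit and rate requests can propagate through it, and they fix RK3288_* register accessors that had been pasted into rk3228 code (the rk3228 uses the RK2928 register layout). The rate of such a fractional branch is parent * m / n, with, in the usual Rockchip layout, the numerator in bits [31:16] and the denominator in bits [15:0] of the CLKSEL register. A stand-alone sketch of that math, with an illustrative register value:

#include <stdint.h>
#include <stdio.h>

/* 16.16 fractional divider: numerator in [31:16], denominator in [15:0] */
static uint64_t frac_rate(uint64_t parent, uint32_t reg)
{
        uint32_t m = reg >> 16;
        uint32_t n = reg & 0xffff;

        if (!m || !n)   /* treat an unprogrammed divider as a bypass */
                return parent;
        return parent * m / n;
}

int main(void)
{
        /* e.g. a 12.288 MHz audio clock from a 594 MHz source: m/n = 1024/49500 */
        uint32_t reg = (1024u << 16) | 49500u;

        printf("%llu Hz\n", (unsigned long long)frac_rate(594000000ULL, reg));
        return 0;
}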
COMPOSITE_NODIV(SCLK_I2S_OUT, "i2s_out", mux_i2s_out_p, 0, RK2928_CLKSEL_CON(3), 12, 1, MFLAGS, @@ -403,21 +430,20 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(16), 15, 1, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 7, GFLAGS), - COMPOSITE_FRAC(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, - RK3288_CLKSEL_CON(30), 0, - RK3288_CLKGATE_CON(0), 8, GFLAGS), - COMPOSITE_NODIV(SCLK_I2S2, "sclk_i2s2", mux_i2s2_p, 0, - RK2928_CLKSEL_CON(16), 8, 2, MFLAGS, + COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(30), 0, + RK2928_CLKGATE_CON(0), 8, GFLAGS, + &rk3228_i2s2_fracmux), + GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT, RK2928_CLKGATE_CON(0), 9, GFLAGS), COMPOSITE(0, "sclk_spdif_src", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(6), 15, 1, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(2), 10, GFLAGS), - COMPOSITE_FRAC(0, "spdif_frac", "sclk_spdif_src", CLK_SET_RATE_PARENT, - RK3288_CLKSEL_CON(20), 0, - RK3288_CLKGATE_CON(2), 12, GFLAGS), - MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0, - RK2928_CLKSEL_CON(6), 8, 2, MFLAGS), + COMPOSITE_FRACMUX(0, "spdif_frac", "sclk_spdif_src", CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(20), 0, + RK2928_CLKGATE_CON(2), 12, GFLAGS, + &rk3228_spdif_fracmux), GATE(0, "jtag", "ext_jtag", 0, RK2928_CLKGATE_CON(1), 3, GFLAGS), @@ -456,45 +482,42 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { COMPOSITE(0, "uart2_src", mux_pll_src_cpll_gpll_usb480m_p, 0, RK2928_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 12, GFLAGS), - COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, + COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(17), 0, - RK2928_CLKGATE_CON(1), 9, GFLAGS), - COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, + RK2928_CLKGATE_CON(1), 9, GFLAGS, + &rk3228_uart0_fracmux), + COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(18), 0, - RK2928_CLKGATE_CON(1), 11, GFLAGS), - COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, + RK2928_CLKGATE_CON(1), 11, GFLAGS, + &rk3228_uart1_fracmux), + COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(19), 0, - RK2928_CLKGATE_CON(1), 13, GFLAGS), - MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, - RK2928_CLKSEL_CON(13), 8, 2, MFLAGS), - MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, - RK2928_CLKSEL_CON(14), 8, 2, MFLAGS), - MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, - RK2928_CLKSEL_CON(15), 8, 2, MFLAGS), + RK2928_CLKGATE_CON(1), 13, GFLAGS, + &rk3228_uart2_fracmux), COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(2), 14, 1, MFLAGS, 8, 5, DFLAGS, RK2928_CLKGATE_CON(1), 0, GFLAGS), - COMPOSITE(0, "sclk_gmac_src", mux_pll_src_2plls_p, 0, + COMPOSITE(SCLK_MAC_SRC, "sclk_gmac_src", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(5), 7, 1, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(1), 7, GFLAGS), - MUX(0, "sclk_macphy_50m", mux_sclk_macphy_50m_p, 0, + MUX(SCLK_MAC_EXTCLK, "sclk_mac_extclk", mux_sclk_mac_extclk_p, 0, RK2928_CLKSEL_CON(29), 10, 1, MFLAGS), - MUX(0, "sclk_gmac_pre", mux_sclk_gmac_pre_p, 0, + MUX(SCLK_MAC, "sclk_gmac_pre", mux_sclk_gmac_pre_p, 0, RK2928_CLKSEL_CON(5), 5, 1, MFLAGS), - GATE(0, "sclk_mac_refout", "sclk_gmac_pre", 0, + GATE(SCLK_MAC_REFOUT, "sclk_mac_refout", "sclk_gmac_pre", 0, 
RK2928_CLKGATE_CON(5), 4, GFLAGS), - GATE(0, "sclk_mac_ref", "sclk_gmac_pre", 0, + GATE(SCLK_MAC_REF, "sclk_mac_ref", "sclk_gmac_pre", 0, RK2928_CLKGATE_CON(5), 3, GFLAGS), - GATE(0, "sclk_mac_rx", "sclk_gmac_pre", 0, + GATE(SCLK_MAC_RX, "sclk_mac_rx", "sclk_gmac_pre", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS), - GATE(0, "sclk_mac_tx", "sclk_gmac_pre", 0, + GATE(SCLK_MAC_TX, "sclk_mac_tx", "sclk_gmac_pre", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS), - COMPOSITE(0, "sclk_macphy", mux_sclk_macphy_p, 0, + COMPOSITE(SCLK_MAC_PHY, "sclk_macphy", mux_sclk_macphy_p, 0, RK2928_CLKSEL_CON(29), 12, 1, MFLAGS, 8, 2, DFLAGS, RK2928_CLKGATE_CON(5), 7, GFLAGS), - COMPOSITE(0, "sclk_gmac_out", mux_pll_src_2plls_p, 0, + COMPOSITE(SCLK_MAC_OUT, "sclk_gmac_out", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(5), 15, 1, MFLAGS, 8, 5, DFLAGS, RK2928_CLKGATE_CON(2), 2, GFLAGS), @@ -528,7 +551,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { /* PD_PERI */ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 0, GFLAGS), - GATE(0, "aclk_gmac", "aclk_peri", 0, RK2928_CLKGATE_CON(11), 4, GFLAGS), + GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK2928_CLKGATE_CON(11), 4, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 0, GFLAGS), GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 1, GFLAGS), @@ -544,7 +567,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { GATE(0, "hclk_host2_arb", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 14, GFLAGS), GATE(0, "hclk_peri_noc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 1, GFLAGS), - GATE(0, "pclk_gmac", "pclk_peri", 0, RK2928_CLKGATE_CON(11), 5, GFLAGS), + GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK2928_CLKGATE_CON(11), 5, GFLAGS), GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS), /* PD_GPU */ @@ -558,10 +581,10 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { GATE(0, "aclk_bus_noc", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 1, GFLAGS), GATE(0, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 3, GFLAGS), - GATE(0, "hclk_i2s0_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS), - GATE(0, "hclk_i2s1_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS), - GATE(0, "hclk_i2s2_2ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS), - GATE(0, "hclk_spdif_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS), + GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS), + GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS), + GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS), + GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS), GATE(0, "hclk_tsp", "hclk_cpu", 0, RK2928_CLKGATE_CON(10), 11, GFLAGS), GATE(0, "hclk_crypto_mst", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS), GATE(0, "hclk_crypto_slv", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 8059a8d3ea36..c109d80e7a8a 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -586,7 +586,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(8), 15, GFLAGS), COMPOSITE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", mux_pll_src_cpll_gpll_p, 0, - RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 0, 5, DFLAGS, + RK3399_CLKSEL_CON(32), 15, 1, 
MFLAGS, 8, 5, DFLAGS, RK3399_CLKGATE_CON(10), 6, GFLAGS), /* i2s */ COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0, @@ -1500,6 +1500,7 @@ static void __init rk3399_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; + struct clk *clk; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -1514,6 +1515,14 @@ static void __init rk3399_clk_init(struct device_node *np) return; } + /* Watchdog pclk is controlled by RK3399 SECURE_GRF_SOC_CON3[8]. */ + clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_alive", 0, 1, 1); + if (IS_ERR(clk)) + pr_warn("%s: could not register clock pclk_wdt: %ld\n", + __func__, PTR_ERR(clk)); + else + rockchip_clk_add_lookup(ctx, clk, PCLK_WDT); + rockchip_clk_register_plls(ctx, rk3399_pll_clks, ARRAY_SIZE(rk3399_pll_clks), -1); diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig index 20c5fe92ab4a..addc65270e43 100644 --- a/drivers/clk/samsung/Kconfig +++ b/drivers/clk/samsung/Kconfig @@ -9,6 +9,15 @@ config EXYNOS_ARM64_COMMON_CLK bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST depends on COMMON_CLK_SAMSUNG +config EXYNOS_AUDSS_CLK_CON + tristate "Samsung Exynos AUDSS clock controller support" + depends on COMMON_CLK_SAMSUNG + default y if ARCH_EXYNOS + help + Support for the Audio Subsystem CLKCON clock controller present + on some Exynos SoC variants. Choose M or Y here if you want to + use audio devices such as I2S, PCM, etc. + # For S3C24XX platforms, select following symbols: config S3C2410_COMMON_CLK bool "Samsung S3C2410 clock controller support" if COMPILE_TEST diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index fc367d4b2902..57f4dc6dc447 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o -obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o +obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 813003d6ce09..8bf7e805fd34 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -45,6 +45,13 @@ #define E4210_DIV_STAT_CPU0 0x400 #define E4210_DIV_STAT_CPU1 0x404 +#define E5433_MUX_SEL2 0x008 +#define E5433_MUX_STAT2 0x208 +#define E5433_DIV_CPU0 0x400 +#define E5433_DIV_CPU1 0x404 +#define E5433_DIV_STAT_CPU0 0x500 +#define E5433_DIV_STAT_CPU1 0x504 + #define E4210_DIV0_RATIO0_MASK 0x7 #define E4210_DIV1_HPM_MASK (0x7 << 4) #define E4210_DIV1_COPY_MASK (0x7 << 0) @@ -253,6 +260,102 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, } /* + * Helper function to set the 'safe' dividers for the CPU clock. The parameters + * div and mask contain the divider value and the register bit mask of the + * dividers to be programmed. 
+ */ +static void exynos5433_set_safe_div(void __iomem *base, unsigned long div, + unsigned long mask) +{ + unsigned long div0; + + div0 = readl(base + E5433_DIV_CPU0); + div0 = (div0 & ~mask) | (div & mask); + writel(div0, base + E5433_DIV_CPU0); + wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask); +} + +/* handler for pre-rate change notification from parent clock */ +static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, + struct exynos_cpuclk *cpuclk, void __iomem *base) +{ + const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; + unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_div = 0, alt_div_mask = DIV_MASK; + unsigned long div0, div1 = 0, mux_reg; + unsigned long flags; + + /* find out the divider values to use for clock data */ + while ((cfg_data->prate * 1000) != ndata->new_rate) { + if (cfg_data->prate == 0) + return -EINVAL; + cfg_data++; + } + + spin_lock_irqsave(cpuclk->lock, flags); + + /* + * For the selected PLL clock frequency, get the pre-defined divider + * values. + */ + div0 = cfg_data->div0; + div1 = cfg_data->div1; + + /* + * If the old parent clock speed is less than the clock speed of + * the alternate parent, then it should be ensured that at no point + * the armclk speed is more than the old_prate until the dividers are + * set. Also workaround the issue of the dividers being set to lower + * values before the parent clock speed is set to new lower speed + * (this can result in too high speed of armclk output clocks). + */ + if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) { + unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate); + + alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; + WARN_ON(alt_div >= MAX_DIV); + + exynos5433_set_safe_div(base, alt_div, alt_div_mask); + div0 |= alt_div; + } + + /* select the alternate parent */ + mux_reg = readl(base + E5433_MUX_SEL2); + writel(mux_reg | 1, base + E5433_MUX_SEL2); + wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2); + + /* alternate parent is active now. set the dividers */ + writel(div0, base + E5433_DIV_CPU0); + wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL); + + writel(div1, base + E5433_DIV_CPU1); + wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL); + + spin_unlock_irqrestore(cpuclk->lock, flags); + return 0; +} + +/* handler for post-rate change notification from parent clock */ +static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata, + struct exynos_cpuclk *cpuclk, void __iomem *base) +{ + unsigned long div = 0, div_mask = DIV_MASK; + unsigned long mux_reg; + unsigned long flags; + + spin_lock_irqsave(cpuclk->lock, flags); + + /* select apll as the alternate parent */ + mux_reg = readl(base + E5433_MUX_SEL2); + writel(mux_reg & ~1, base + E5433_MUX_SEL2); + wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1); + + exynos5433_set_safe_div(base, div, div_mask); + spin_unlock_irqrestore(cpuclk->lock, flags); + return 0; +} + +/* * This notifier function is called for the pre-rate and post-rate change * notifications of the parent clock of cpuclk. */ @@ -275,6 +378,29 @@ static int exynos_cpuclk_notifier_cb(struct notifier_block *nb, return notifier_from_errno(err); } +/* + * This notifier function is called for the pre-rate and post-rate change + * notifications of the parent clock of cpuclk. 
+ */ +static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct exynos_cpuclk *cpuclk; + void __iomem *base; + int err = 0; + + cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb); + base = cpuclk->ctrl_base; + + if (event == PRE_RATE_CHANGE) + err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base); + else if (event == POST_RATE_CHANGE) + err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base); + + return notifier_from_errno(err); +} + /* helper function to register a CPU clock */ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, unsigned int lookup_id, const char *name, const char *parent, @@ -301,7 +427,10 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, cpuclk->ctrl_base = ctx->reg_base + offset; cpuclk->lock = &ctx->lock; cpuclk->flags = flags; - cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; + if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT) + cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb; + else + cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; cpuclk->alt_parent = __clk_lookup(alt_parent); if (!cpuclk->alt_parent) { diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h index 37874d3c3165..d4b6b517fe1b 100644 --- a/drivers/clk/samsung/clk-cpu.h +++ b/drivers/clk/samsung/clk-cpu.h @@ -57,10 +57,12 @@ struct exynos_cpuclk { struct notifier_block clk_nb; unsigned long flags; -/* The CPU clock registers has DIV1 configuration register */ +/* The CPU clock registers have DIV1 configuration register */ #define CLK_CPU_HAS_DIV1 (1 << 0) /* When ALT parent is active, debug clocks need safe divider values */ #define CLK_CPU_NEEDS_DEBUG_ALT_DIV (1 << 1) +/* The CPU clock registers have Exynos5433-compatible layout */ +#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2) }; extern int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 4e9584d79089..bdf8b971f332 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -273,17 +273,7 @@ static struct platform_driver exynos_audss_clk_driver = { .remove = exynos_audss_clk_remove, }; -static int __init exynos_audss_clk_init(void) -{ - return platform_driver_register(&exynos_audss_clk_driver); -} -core_initcall(exynos_audss_clk_init); - -static void __exit exynos_audss_clk_exit(void) -{ - platform_driver_unregister(&exynos_audss_clk_driver); -} -module_exit(exynos_audss_clk_exit); +module_platform_driver(exynos_audss_clk_driver); MODULE_AUTHOR("Padmavathi Venna <padma.v@samsung.com>"); MODULE_DESCRIPTION("Exynos Audio Subsystem Clock Controller"); diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 7cd02ff37a1f..96fab6cfb202 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -151,6 +151,8 @@ static void __init exynos5_clkout_init(struct device_node *node) } CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu", exynos5_clkout_init); +CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu", + exynos5_clkout_init); CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu", exynos5_clkout_init); CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu", diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 16575ee874cb..1b81e283f605 100644 --- 
a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -103,7 +103,7 @@ #define PWR_CTRL1_USE_CORE1_WFI (1 << 1) #define PWR_CTRL1_USE_CORE0_WFI (1 << 0) -static unsigned long exynos3250_cmu_clk_regs[] __initdata = { +static const unsigned long exynos3250_cmu_clk_regs[] __initconst = { SRC_LEFTBUS, DIV_LEFTBUS, GATE_IP_LEFTBUS, @@ -226,7 +226,7 @@ PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti", PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" }; PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" }; -static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock fixed_factor_clks[] __initconst = { FFACTOR(0, "sclk_mpll_1600", "mout_mpll", 1, 1, 0), FFACTOR(0, "sclk_mpll_mif", "mout_mpll", 1, 2, 0), FFACTOR(0, "sclk_bpll", "fout_bpll", 1, 2, 0), @@ -237,7 +237,7 @@ static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = { FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0), }; -static struct samsung_mux_clock mux_clks[] __initdata = { +static const struct samsung_mux_clock mux_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -326,7 +326,7 @@ static struct samsung_mux_clock mux_clks[] __initdata = { CLK_SET_RATE_PARENT, 0), }; -static struct samsung_div_clock div_clks[] __initdata = { +static const struct samsung_div_clock div_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -429,7 +429,7 @@ static struct samsung_div_clock div_clks[] __initdata = { DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3), }; -static struct samsung_gate_clock gate_clks[] __initdata = { +static const struct samsung_gate_clock gate_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -669,7 +669,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = { }; /* APLL & MPLL & BPLL & UPLL */ -static struct samsung_pll_rate_table exynos3250_pll_rates[] = { +static const struct samsung_pll_rate_table exynos3250_pll_rates[] __initconst = { PLL_35XX_RATE(1200000000, 400, 4, 1), PLL_35XX_RATE(1100000000, 275, 3, 1), PLL_35XX_RATE(1066000000, 533, 6, 1), @@ -691,7 +691,7 @@ static struct samsung_pll_rate_table exynos3250_pll_rates[] = { }; /* EPLL */ -static struct samsung_pll_rate_table exynos3250_epll_rates[] = { +static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = { PLL_36XX_RATE(800000000, 200, 3, 1, 0), PLL_36XX_RATE(288000000, 96, 2, 2, 0), PLL_36XX_RATE(192000000, 128, 2, 3, 0), @@ -710,7 +710,7 @@ static struct samsung_pll_rate_table exynos3250_epll_rates[] = { }; /* VPLL */ -static struct samsung_pll_rate_table exynos3250_vpll_rates[] = { +static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = { PLL_36XX_RATE(600000000, 100, 2, 1, 0), PLL_36XX_RATE(533000000, 266, 3, 2, 32768), PLL_36XX_RATE(519230987, 173, 2, 2, 5046), @@ -740,7 +740,7 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = { { /* sentinel */ } }; -static struct samsung_pll_clock exynos3250_plls[] __initdata = { +static const struct samsung_pll_clock exynos3250_plls[] __initconst = { PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, exynos3250_pll_rates), PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", @@ 
-772,7 +772,7 @@ static void __init exynos3_core_down_clock(void __iomem *reg_base) __raw_writel(0x0, reg_base + PWR_CTRL2); } -static struct samsung_cmu_info cmu_info __initdata = { +static const struct samsung_cmu_info cmu_info __initconst = { .pll_clks = exynos3250_plls, .nr_pll_clks = ARRAY_SIZE(exynos3250_plls), .mux_clks = mux_clks, @@ -848,7 +848,7 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); #define EPLL_CON2 0x111c #define SRC_EPLL 0x1120 -static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = { +static const unsigned long exynos3250_cmu_dmc_clk_regs[] __initconst = { BPLL_LOCK, BPLL_CON0, BPLL_CON1, @@ -874,7 +874,7 @@ PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", }; PNAME(mout_dphy_p) = { "mout_mpll_mif", "mout_bpll", }; -static struct samsung_mux_clock dmc_mux_clks[] __initdata = { +static const struct samsung_mux_clock dmc_mux_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -893,7 +893,7 @@ static struct samsung_mux_clock dmc_mux_clks[] __initdata = { MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_EPLL, 4, 1), }; -static struct samsung_div_clock dmc_div_clks[] __initdata = { +static const struct samsung_div_clock dmc_div_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -910,14 +910,14 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = { DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3), }; -static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = { +static const struct samsung_pll_clock exynos3250_dmc_plls[] __initconst = { PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates), PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates), }; -static struct samsung_cmu_info dmc_cmu_info __initdata = { +static const struct samsung_cmu_info dmc_cmu_info __initconst = { .pll_clks = exynos3250_dmc_plls, .nr_pll_clks = ARRAY_SIZE(exynos3250_dmc_plls), .mux_clks = dmc_mux_clks, @@ -947,7 +947,7 @@ CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc", #define GATE_IP_ISP1 0x804 #define GATE_SCLK_ISP 0x900 -static struct samsung_div_clock isp_div_clks[] __initdata = { +static const struct samsung_div_clock isp_div_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -967,7 +967,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = { DIV(CLK_DIV_MPWM, "div_mpwm", "div_isp1", DIV_ISP1, 0, 3), }; -static struct samsung_gate_clock isp_gate_clks[] __initdata = { +static const struct samsung_gate_clock isp_gate_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -1063,7 +1063,7 @@ static struct samsung_gate_clock isp_gate_clks[] __initdata = { GATE_SCLK_ISP, 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info isp_cmu_info __initdata = { +static const struct samsung_cmu_info isp_cmu_info __initconst = { .div_clks = isp_div_clks, .nr_div_clks = ARRAY_SIZE(isp_div_clks), .gate_clks = isp_gate_clks, @@ -1079,14 +1079,15 @@ static int __init exynos3250_cmu_isp_probe(struct platform_device *pdev) return 0; } 
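
The conversions running through these hunks all follow one rule: data that is only read while __init code runs gets both a const qualifier and __initconst, so it lands in .init.rodata and is discarded with the rest of init memory after boot; plain __initdata is the writable variant, and the two annotations must not be mixed on the same object. A sketch with a hypothetical register list:

#include <linux/init.h>

/* const + __initconst -> .init.rodata, freed once boot-time init is done. */
static const unsigned long my_cmu_clk_regs[] __initconst = {
	0x0200,		/* hypothetical SRC offset */
	0x0500,		/* hypothetical DIV offset */
};

/* Writable init-only data keeps __initdata instead. */
static unsigned long my_saved_regs[2] __initdata;

static int __init my_cmu_init(void)
{
	my_saved_regs[0] = my_cmu_clk_regs[0];	/* only legal from __init code */
	return 0;
}

The exynos3250_cmu_isp_driver hunk just below additionally sets .suppress_bind_attrs = true: a driver whose probe path lives in freed init memory must never be rebindable from sysfs.
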
-static const struct of_device_id exynos3250_cmu_isp_of_match[] = { +static const struct of_device_id exynos3250_cmu_isp_of_match[] __initconst = { { .compatible = "samsung,exynos3250-cmu-isp", }, { /* sentinel */ } }; -static struct platform_driver exynos3250_cmu_isp_driver = { +static struct platform_driver exynos3250_cmu_isp_driver __initdata = { .driver = { .name = "exynos3250-cmu-isp", + .suppress_bind_attrs = true, .of_match_table = exynos3250_cmu_isp_of_match, }, }; diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 7b3d0f975987..faab9b31baf5 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -169,7 +169,7 @@ static struct samsung_clk_reg_dump *exynos4_save_pll; * list of controller registers to be saved and restored during a * suspend/resume cycle. */ -static unsigned long exynos4210_clk_save[] __initdata = { +static const unsigned long exynos4210_clk_save[] __initconst = { E4210_SRC_IMAGE, E4210_SRC_LCD1, E4210_SRC_MASK_LCD1, @@ -181,7 +181,7 @@ static unsigned long exynos4210_clk_save[] __initdata = { PWR_CTRL1, }; -static unsigned long exynos4x12_clk_save[] __initdata = { +static const unsigned long exynos4x12_clk_save[] __initconst = { E4X12_GATE_IP_IMAGE, E4X12_GATE_IP_PERIR, E4X12_SRC_CAM1, @@ -192,7 +192,7 @@ static unsigned long exynos4x12_clk_save[] __initdata = { E4X12_PWR_CTRL2, }; -static unsigned long exynos4_clk_pll_regs[] __initdata = { +static const unsigned long exynos4_clk_pll_regs[] __initconst = { EPLL_LOCK, VPLL_LOCK, EPLL_CON0, @@ -203,7 +203,7 @@ static unsigned long exynos4_clk_pll_regs[] __initdata = { VPLL_CON2, }; -static unsigned long exynos4_clk_regs[] __initdata = { +static const unsigned long exynos4_clk_regs[] __initconst = { SRC_LEFTBUS, DIV_LEFTBUS, GATE_IP_LEFTBUS, @@ -505,28 +505,28 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata }; /* fixed rate clocks generated inside the soc */ -static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initconst = { FRATE(0, "sclk_hdmi24m", NULL, 0, 24000000), FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000), FRATE(0, "sclk_usbphy0", NULL, 0, 48000000), }; -static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initconst = { FRATE(0, "sclk_usbphy1", NULL, 0, 48000000), }; -static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initconst = { FFACTOR(0, "sclk_apll_div_2", "sclk_apll", 1, 2, 0), FFACTOR(0, "fout_mpll_div_2", "fout_mpll", 1, 2, 0), FFACTOR(0, "fout_apll_div_2", "fout_apll", 1, 2, 0), FFACTOR(0, "arm_clk_div_2", "div_core2", 1, 2, 0), }; -static struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initconst = { FFACTOR(0, "sclk_mpll_div_2", "sclk_mpll", 1, 2, 0), }; -static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initconst = { FFACTOR(0, "sclk_mpll_user_l_div_2", "mout_mpll_user_l", 1, 2, 0), FFACTOR(0, "sclk_mpll_user_r_div_2", "mout_mpll_user_r", 1, 2, 0), FFACTOR(0, "sclk_mpll_user_t_div_2", "mout_mpll_user_t", 1, 2, 0), @@ -534,7 +534,7 @@ static 
struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initda }; /* list of mux clocks supported in all exynos4 soc's */ -static struct samsung_mux_clock exynos4_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = { MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0, "mout_apll"), @@ -555,11 +555,11 @@ static struct samsung_mux_clock exynos4_mux_clks[] __initdata = { }; /* list of mux clocks supported in exynos4210 soc */ -static struct samsung_mux_clock exynos4210_mux_early[] __initdata = { +static const struct samsung_mux_clock exynos4210_mux_early[] __initconst = { MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1), }; -static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos4210_mux_clks[] __initconst = { MUX(0, "mout_gdl", sclk_ampll_p4210, SRC_LEFTBUS, 0, 1), MUX(0, "mout_clkout_leftbus", clkout_left_p4210, CLKOUT_CMU_LEFTBUS, 0, 5), @@ -622,7 +622,7 @@ static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = { }; /* list of mux clocks supported in exynos4x12 soc */ -static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos4x12_mux_clks[] __initconst = { MUX(0, "mout_mpll_user_l", mout_mpll_p, SRC_LEFTBUS, 4, 1), MUX(0, "mout_gdl", mout_gdl_p4x12, SRC_LEFTBUS, 0, 1), MUX(0, "mout_clkout_leftbus", clkout_left_p4x12, @@ -705,7 +705,7 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = { }; /* list of divider clocks supported in all exynos4 soc's */ -static struct samsung_div_clock exynos4_div_clks[] __initdata = { +static const struct samsung_div_clock exynos4_div_clks[] __initconst = { DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3), DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3), DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus", @@ -795,7 +795,7 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = { }; /* list of divider clocks supported in exynos4210 soc */ -static struct samsung_div_clock exynos4210_div_clks[] __initdata = { +static const struct samsung_div_clock exynos4210_div_clks[] __initconst = { DIV(CLK_ACLK200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3), DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4), DIV(0, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4), @@ -806,7 +806,7 @@ static struct samsung_div_clock exynos4210_div_clks[] __initdata = { }; /* list of divider clocks supported in exynos4x12 soc */ -static struct samsung_div_clock exynos4x12_div_clks[] __initdata = { +static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = { DIV(0, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4), DIV(0, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4), DIV(0, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4), @@ -837,7 +837,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = { }; /* list of gate clocks supported in all exynos4 soc's */ -static struct samsung_gate_clock exynos4_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = { /* * After all Exynos4 based platforms are migrated to use device tree, * the device name and clock alias names specified below for some @@ -1043,7 +1043,7 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = { }; /* list of gate clocks supported in exynos4210 soc */ -static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = { +static const 
struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { GATE(CLK_TVENC, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0), GATE(CLK_G2D, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0), GATE(CLK_ROTATOR, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0), @@ -1090,7 +1090,7 @@ static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = { }; /* list of gate clocks supported in exynos4x12 soc */ -static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0), GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0), GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0), @@ -1190,17 +1190,17 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { 0), }; -static struct samsung_clock_alias exynos4_aliases[] __initdata = { +static const struct samsung_clock_alias exynos4_aliases[] __initconst = { ALIAS(CLK_MOUT_CORE, NULL, "moutcore"), ALIAS(CLK_ARM_CLK, NULL, "armclk"), ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"), }; -static struct samsung_clock_alias exynos4210_aliases[] __initdata = { +static const struct samsung_clock_alias exynos4210_aliases[] __initconst = { ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"), }; -static struct samsung_clock_alias exynos4x12_aliases[] __initdata = { +static const struct samsung_clock_alias exynos4x12_aliases[] __initconst = { ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"), }; @@ -1211,7 +1211,7 @@ static struct samsung_clock_alias exynos4x12_aliases[] __initdata = { * controller is first remapped and the value of XOM[0] bit is read to * determine the parent clock. */ -static unsigned long exynos4_get_xom(void) +static unsigned long __init exynos4_get_xom(void) { unsigned long xom = 0; void __iomem *chipid_base; @@ -1264,7 +1264,7 @@ static const struct of_device_id ext_clk_match[] __initconst = { }; /* PLLs PMS values */ -static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4210_apll_rates[] __initconst = { PLL_45XX_RATE(1200000000, 150, 3, 1, 28), PLL_45XX_RATE(1000000000, 250, 6, 1, 28), PLL_45XX_RATE( 800000000, 200, 6, 1, 28), @@ -1277,7 +1277,7 @@ static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = { { /* sentinel */ } }; -static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4210_epll_rates[] __initconst = { PLL_4600_RATE(192000000, 48, 3, 1, 0, 0), PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0), PLL_4600_RATE(180000000, 45, 3, 1, 0, 0), @@ -1288,7 +1288,7 @@ static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = { { /* sentinel */ } }; -static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4210_vpll_rates[] __initconst = { PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0), PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1), PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1), @@ -1297,7 +1297,7 @@ static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = { { /* sentinel */ } }; -static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4x12_apll_rates[] __initconst = { PLL_35XX_RATE(1500000000, 250, 4, 0), PLL_35XX_RATE(1400000000, 175, 3, 0), PLL_35XX_RATE(1300000000, 325, 6, 0), @@ -1315,7 +1315,7 @@ static struct 
samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = { { /* sentinel */ } }; -static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4x12_epll_rates[] __initconst = { PLL_36XX_RATE(192000000, 48, 3, 1, 0), PLL_36XX_RATE(180633605, 45, 3, 1, 10381), PLL_36XX_RATE(180000000, 45, 3, 1, 0), @@ -1326,7 +1326,7 @@ static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = { { /* sentinel */ } }; -static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = { +static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst = { PLL_36XX_RATE(533000000, 133, 3, 1, 16384), PLL_36XX_RATE(440000000, 110, 3, 1, 0), PLL_36XX_RATE(350000000, 175, 3, 2, 0), @@ -1375,12 +1375,12 @@ static void __init exynos4x12_core_down_clock(void) if (num_possible_cpus() == 4) tmp |= PWR_CTRL1_USE_CORE3_WFE | PWR_CTRL1_USE_CORE2_WFE | PWR_CTRL1_USE_CORE3_WFI | PWR_CTRL1_USE_CORE2_WFI; - __raw_writel(tmp, reg_base + PWR_CTRL1); + writel_relaxed(tmp, reg_base + PWR_CTRL1); /* * Disable the clock up feature in case it was enabled by bootloader. */ - __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); + writel_relaxed(0x0, reg_base + E4X12_PWR_CTRL2); } #define E4210_CPU_DIV0(apll, pclk_dbg, atb, periph, corem1, corem0) \ @@ -1450,8 +1450,6 @@ static void __init exynos4_clk_init(struct device_node *np, panic("%s: failed to map registers\n", __func__); ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks, ARRAY_SIZE(exynos4_fixed_rate_ext_clks), diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c index 86ee06b226bd..6c9063159717 100644 --- a/drivers/clk/samsung/clk-exynos4415.c +++ b/drivers/clk/samsung/clk-exynos4415.c @@ -111,7 +111,7 @@ #define DIV_CPU0 0x14500 #define DIV_CPU1 0x14504 -static unsigned long exynos4415_cmu_clk_regs[] __initdata = { +static const unsigned long exynos4415_cmu_clk_regs[] __initconst = { SRC_LEFTBUS, DIV_LEFTBUS, GATE_IP_LEFTBUS, @@ -268,16 +268,16 @@ PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" }; PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" }; PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" }; -static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initconst = { /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. 
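
The __raw_writel() -> writel_relaxed() change in exynos4x12_core_down_clock() nearby is more than cosmetic: both variants skip the barrier that writel() implies, but writel_relaxed() still performs a little-endian store, so the code stays correct on big-endian kernels where __raw_writel() would write in native byte order. A sketch, assuming a hypothetical reg_base obtained from of_iomap():

#include <linux/io.h>

static void __iomem *reg_base;	/* hypothetical, from of_iomap()/ioremap() */

static void my_core_down_clock(u32 val)
{
	/* Relaxed: LE store, no barrier -- fine for plain config registers. */
	writel_relaxed(val, reg_base + 0x1020);

	/*
	 * writel(val, ...) would add the ordering barrier; __raw_writel()
	 * would skip the byte swap on a big-endian kernel.
	 */
}
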
*/ FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0), }; -static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initconst = { FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000), }; -static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos4415_mux_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -427,7 +427,7 @@ static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = { group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1), }; -static struct samsung_div_clock exynos4415_div_clks[] __initdata = { +static const struct samsung_div_clock exynos4415_div_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -566,7 +566,7 @@ static struct samsung_div_clock exynos4415_div_clks[] __initdata = { DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3), }; -static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos4415_gate_clks[] __initconst = { /* * NOTE: Following table is sorted by register address in ascending * order and then bitfield shift in descending order, as it is done @@ -859,7 +859,7 @@ static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = { /* * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL */ -static struct samsung_pll_rate_table exynos4415_pll_rates[] = { +static const struct samsung_pll_rate_table exynos4415_pll_rates[] __initconst = { PLL_35XX_RATE(1600000000, 400, 3, 1), PLL_35XX_RATE(1500000000, 250, 2, 1), PLL_35XX_RATE(1400000000, 175, 3, 0), @@ -891,7 +891,7 @@ static struct samsung_pll_rate_table exynos4415_pll_rates[] = { }; /* EPLL */ -static struct samsung_pll_rate_table exynos4415_epll_rates[] = { +static const struct samsung_pll_rate_table exynos4415_epll_rates[] __initconst = { PLL_36XX_RATE(800000000, 200, 3, 1, 0), PLL_36XX_RATE(288000000, 96, 2, 2, 0), PLL_36XX_RATE(192000000, 128, 2, 3, 0), @@ -909,7 +909,7 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = { { /* sentinel */ } }; -static struct samsung_pll_clock exynos4415_plls[] __initdata = { +static const struct samsung_pll_clock exynos4415_plls[] __initconst = { PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, exynos4415_pll_rates), PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", @@ -922,7 +922,7 @@ static struct samsung_pll_clock exynos4415_plls[] __initdata = { "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates), }; -static struct samsung_cmu_info cmu_info __initdata = { +static const struct samsung_cmu_info cmu_info __initconst = { .pll_clks = exynos4415_plls, .nr_pll_clks = ARRAY_SIZE(exynos4415_plls), .mux_clks = exynos4415_mux_clks, @@ -961,7 +961,7 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init); #define SRC_DMC 0x300 #define DIV_DMC1 0x504 -static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = { +static const unsigned long exynos4415_cmu_dmc_clk_regs[] __initconst = { MPLL_LOCK, MPLL_CON0, MPLL_CON1, @@ -978,14 +978,14 @@ PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", }; -static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata 
= { +static const struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initconst = { MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1), MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1), MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1), MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1), }; -static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = { +static const struct samsung_div_clock exynos4415_dmc_div_clks[] __initconst = { DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3), DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3), DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus", @@ -995,14 +995,14 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = { DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2), }; -static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = { +static const struct samsung_pll_clock exynos4415_dmc_plls[] __initconst = { PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates), PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates), }; -static struct samsung_cmu_info cmu_dmc_info __initdata = { +static const struct samsung_cmu_info cmu_dmc_info __initconst = { .pll_clks = exynos4415_dmc_plls, .nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls), .mux_clks = exynos4415_dmc_mux_clks, diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 837197db4ffb..27a227d6620c 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -117,7 +117,7 @@ static struct samsung_clk_reg_dump *exynos5250_save; * list of controller registers to be saved and restored during a * suspend/resume cycle. 
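
The "saved and restored during a suspend/resume cycle" register lists, together with the exynos5250_clk_sleep_init() hunks just below, follow the standard pattern: snapshot the listed registers at suspend and write them back at resume through syscore_ops, which run late, on a single CPU, with interrupts disabled. A sketch with hypothetical save/restore helpers:

#include <linux/syscore_ops.h>

static int my_clk_suspend(void)
{
	/* Snapshot the register list before the CMU loses state. */
	return 0;
}

static void my_clk_resume(void)
{
	/* Write the snapshot back before clock consumers resume. */
}

static struct syscore_ops my_clk_syscore_ops = {
	.suspend = my_clk_suspend,
	.resume  = my_clk_resume,
};

/* register_syscore_ops(&my_clk_syscore_ops); -- done once from __init code. */
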
*/ -static unsigned long exynos5250_clk_regs[] __initdata = { +static const unsigned long exynos5250_clk_regs[] __initconst = { SRC_CPU, DIV_CPU0, PWR_CTRL1, @@ -190,7 +190,7 @@ static struct syscore_ops exynos5250_clk_syscore_ops = { .resume = exynos5250_clk_resume, }; -static void exynos5250_clk_sleep_init(void) +static void __init exynos5250_clk_sleep_init(void) { exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs)); @@ -203,7 +203,7 @@ static void exynos5250_clk_sleep_init(void) register_syscore_ops(&exynos5250_clk_syscore_ops); } #else -static void exynos5250_clk_sleep_init(void) {} +static void __init exynos5250_clk_sleep_init(void) {} #endif /* list of all parent clock list */ @@ -266,23 +266,23 @@ static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initda }; /* fixed rate clocks generated inside the soc */ -static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initconst = { FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000), FRATE(0, "sclk_hdmi27m", NULL, 0, 27000000), FRATE(0, "sclk_dptxphy", NULL, 0, 24000000), FRATE(0, "sclk_uhostphy", NULL, 0, 48000000), }; -static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initconst = { FFACTOR(0, "fout_mplldiv2", "fout_mpll", 1, 2, 0), FFACTOR(0, "fout_bplldiv2", "fout_bpll", 1, 2, 0), }; -static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initconst = { MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1), }; -static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = { /* * NOTE: Following table is sorted by (clock domain, register address, * bitfield shift) triplet in ascending order. When adding new entries, @@ -378,7 +378,7 @@ static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { MUX(0, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1), }; -static struct samsung_div_clock exynos5250_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5250_div_clks[] __initconst = { /* * NOTE: Following table is sorted by (clock domain, register address, * bitfield shift) triplet in ascending order. When adding new entries, @@ -470,7 +470,7 @@ static struct samsung_div_clock exynos5250_div_clks[] __initdata = { DIV(CLK_DIV_I2S2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6), }; -static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = { /* * NOTE: Following table is sorted by (clock domain, register address, * bitfield shift) triplet in ascending order. 
When adding new entries, @@ -698,7 +698,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE_IP_ISP1, 7, 0, 0), }; -static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ PLL_36XX_RATE(266000000, 266, 3, 3, 0), @@ -707,7 +707,7 @@ static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { { }, }; -static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ PLL_36XX_RATE(192000000, 64, 2, 2, 0), @@ -721,7 +721,7 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = { { }, }; -static struct samsung_pll_rate_table apll_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_35XX_RATE(rate, m, p, s) */ PLL_35XX_RATE(1700000000, 425, 6, 0), @@ -805,8 +805,7 @@ static void __init exynos5250_clk_init(struct device_node *np) } ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); + samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks, ARRAY_SIZE(exynos5250_fixed_rate_ext_clks), ext_clk_match); diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index 7a7ed075a573..a43642c36039 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -22,7 +22,7 @@ * Applicable for all 2550 Type PLLS for Exynos5260, listed below * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL. */ -static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initconst = { PLL_35XX_RATE(1700000000, 425, 6, 0), PLL_35XX_RATE(1600000000, 200, 3, 0), PLL_35XX_RATE(1500000000, 250, 4, 0), @@ -55,7 +55,7 @@ static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = { /* * Applicable for 2650 Type PLL for AUD_PLL. 
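
These rate tables can be sanity-checked by hand: for the pll_35xx/2550-type PLLs the Samsung PLL code computes fout = fin * MDIV / (PDIV << SDIV), and the tables here assume a 24 MHz fin. A worked check of the first two rows of pll2550_24mhz_tbl, plus the fractional pll_36xx/2650 form, as C comments:

/*
 * PLL_35XX_RATE(rate, m, p, s): fout = fin * m / (p << s), fin = 24 MHz.
 *   24000000 * 425 / (6 << 0) = 1700000000 -> PLL_35XX_RATE(1700000000, 425, 6, 0)
 *   24000000 * 200 / (3 << 0) = 1600000000 -> PLL_35XX_RATE(1600000000, 200, 3, 0)
 * PLL_36XX_RATE(rate, m, p, s, k) adds a fractional term:
 *   fout = fin * (m + k / 65536) / (p << s)
 */
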
*/ -static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = { PLL_36XX_RATE(1600000000, 200, 3, 0, 0), PLL_36XX_RATE(1200000000, 100, 2, 0, 0), PLL_36XX_RATE(1000000000, 250, 3, 1, 0), @@ -78,7 +78,7 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = { /* CMU_AUD */ -static unsigned long aud_clk_regs[] __initdata = { +static const unsigned long aud_clk_regs[] __initconst = { MUX_SEL_AUD, DIV_AUD0, DIV_AUD1, @@ -92,7 +92,7 @@ PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"}; PNAME(mout_sclk_aud_i2s_p) = {"mout_aud_pll_user", "ioclk_i2s_cdclk"}; PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"}; -static struct samsung_mux_clock aud_mux_clks[] __initdata = { +static const struct samsung_mux_clock aud_mux_clks[] __initconst = { MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 0, 1), MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p, @@ -101,7 +101,7 @@ static struct samsung_mux_clock aud_mux_clks[] __initdata = { MUX_SEL_AUD, 8, 1), }; -static struct samsung_div_clock aud_div_clks[] __initdata = { +static const struct samsung_div_clock aud_div_clks[] __initconst = { DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user", DIV_AUD0, 0, 4), @@ -113,7 +113,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = { DIV_AUD1, 12, 4), }; -static struct samsung_gate_clock aud_gate_clks[] __initdata = { +static const struct samsung_gate_clock aud_gate_clks[] __initconst = { GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s", EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0), GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm", @@ -154,7 +154,7 @@ CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud", /* CMU_DISP */ -static unsigned long disp_clk_regs[] __initdata = { +static const unsigned long disp_clk_regs[] __initconst = { MUX_SEL_DISP0, MUX_SEL_DISP1, MUX_SEL_DISP2, @@ -201,7 +201,7 @@ PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll", PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk", "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; -static struct samsung_mux_clock disp_mux_clks[] __initdata = { +static const struct samsung_mux_clock disp_mux_clks[] __initconst = { MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user", mout_aclk_disp_333_user_p, MUX_SEL_DISP0, 0, 1), @@ -270,7 +270,7 @@ static struct samsung_mux_clock disp_mux_clks[] __initdata = { MUX_SEL_DISP4, 4, 2), }; -static struct samsung_div_clock disp_div_clks[] __initdata = { +static const struct samsung_div_clock disp_div_clks[] __initconst = { DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111", "mout_aclk_disp_222_user", DIV_DISP, 8, 4), @@ -283,7 +283,7 @@ static struct samsung_div_clock disp_div_clks[] __initdata = { DIV_DISP, 16, 4), }; -static struct samsung_gate_clock disp_gate_clks[] __initdata = { +static const struct samsung_gate_clock disp_gate_clks[] __initconst = { GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel", "mout_phyclk_hdmi_phy_pixel_clko_user", EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0), @@ -344,7 +344,7 @@ CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp", /* CMU_EGL */ -static unsigned long egl_clk_regs[] __initdata = { +static const unsigned long egl_clk_regs[] __initconst = { EGL_PLL_LOCK, EGL_PLL_CON0, EGL_PLL_CON1, @@ -361,13 +361,13 @@ static unsigned long egl_clk_regs[] __initdata = { PNAME(mout_egl_b_p) = {"mout_egl_pll", 
"dout_bus_pll"}; PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"}; -static struct samsung_mux_clock egl_mux_clks[] __initdata = { +static const struct samsung_mux_clock egl_mux_clks[] __initconst = { MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p, MUX_SEL_EGL, 4, 1), MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1), }; -static struct samsung_div_clock egl_div_clks[] __initdata = { +static const struct samsung_div_clock egl_div_clks[] __initconst = { DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3), DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3), DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3), @@ -379,7 +379,7 @@ static struct samsung_div_clock egl_div_clks[] __initdata = { DIV(EGL_DOUT_EGL_PLL, "dout_egl_pll", "mout_egl_b", DIV_EGL, 24, 3), }; -static struct samsung_pll_clock egl_pll_clks[] __initdata = { +static const struct samsung_pll_clock egl_pll_clks[] __initconst = { PLL(pll_2550xx, EGL_FOUT_EGL_PLL, "fout_egl_pll", "fin_pll", EGL_PLL_LOCK, EGL_PLL_CON0, pll2550_24mhz_tbl), @@ -408,7 +408,7 @@ CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl", /* CMU_FSYS */ -static unsigned long fsys_clk_regs[] __initdata = { +static const unsigned long fsys_clk_regs[] __initconst = { MUX_SEL_FSYS0, MUX_SEL_FSYS1, EN_ACLK_FSYS, @@ -431,7 +431,7 @@ PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll", PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll", "phyclk_usbdrd30_udrd30_phyclock"}; -static struct samsung_mux_clock fsys_mux_clks[] __initdata = { +static const struct samsung_mux_clock fsys_mux_clks[] __initconst = { MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER, "mout_phyclk_usbdrd30_phyclock_user", mout_phyclk_usbdrd30_phyclock_user_p, @@ -454,7 +454,7 @@ static struct samsung_mux_clock fsys_mux_clks[] __initdata = { MUX_SEL_FSYS1, 16, 1), }; -static struct samsung_gate_clock fsys_gate_clks[] __initdata = { +static const struct samsung_gate_clock fsys_gate_clks[] __initconst = { GATE(FSYS_PHYCLK_USBHOST20, "phyclk_usbhost20_phyclock", "mout_phyclk_usbdrd30_phyclock_user", EN_SCLK_FSYS, 1, 0, 0), @@ -508,7 +508,7 @@ CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys", /* CMU_G2D */ -static unsigned long g2d_clk_regs[] __initdata = { +static const unsigned long g2d_clk_regs[] __initconst = { MUX_SEL_G2D, MUX_STAT_G2D, DIV_G2D, @@ -535,18 +535,18 @@ static unsigned long g2d_clk_regs[] __initdata = { PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"}; -static struct samsung_mux_clock g2d_mux_clks[] __initdata = { +static const struct samsung_mux_clock g2d_mux_clks[] __initconst = { MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user", mout_aclk_g2d_333_user_p, MUX_SEL_G2D, 0, 1), }; -static struct samsung_div_clock g2d_div_clks[] __initdata = { +static const struct samsung_div_clock g2d_div_clks[] __initconst = { DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user", DIV_G2D, 0, 3), }; -static struct samsung_gate_clock g2d_gate_clks[] __initdata = { +static const struct samsung_gate_clock g2d_gate_clks[] __initconst = { GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user", EN_IP_G2D, 4, 0, 0), GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user", @@ -599,7 +599,7 @@ CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d", /* CMU_G3D */ -static unsigned long g3d_clk_regs[] __initdata = { +static const unsigned long g3d_clk_regs[] __initconst = { G3D_PLL_LOCK, G3D_PLL_CON0, G3D_PLL_CON1, @@ -615,23 +615,23 @@ static unsigned long 
g3d_clk_regs[] __initdata = { PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"}; -static struct samsung_mux_clock g3d_mux_clks[] __initdata = { +static const struct samsung_mux_clock g3d_mux_clks[] __initconst = { MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, MUX_SEL_G3D, 0, 1), }; -static struct samsung_div_clock g3d_div_clks[] __initdata = { +static const struct samsung_div_clock g3d_div_clks[] __initconst = { DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3), DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3), }; -static struct samsung_gate_clock g3d_gate_clks[] __initdata = { +static const struct samsung_gate_clock g3d_gate_clks[] __initconst = { GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0), GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d", EN_IP_G3D, 3, 0, 0), }; -static struct samsung_pll_clock g3d_pll_clks[] __initdata = { +static const struct samsung_pll_clock g3d_pll_clks[] __initconst = { PLL(pll_2550, G3D_FOUT_G3D_PLL, "fout_g3d_pll", "fin_pll", G3D_PLL_LOCK, G3D_PLL_CON0, pll2550_24mhz_tbl), @@ -662,7 +662,7 @@ CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d", /* CMU_GSCL */ -static unsigned long gscl_clk_regs[] __initdata = { +static const unsigned long gscl_clk_regs[] __initconst = { MUX_SEL_GSCL, DIV_GSCL, EN_ACLK_GSCL, @@ -692,7 +692,7 @@ PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", "mout_aclk_gscl_fimc_user"}; -static struct samsung_mux_clock gscl_mux_clks[] __initdata = { +static const struct samsung_mux_clock gscl_mux_clks[] __initconst = { MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user", mout_aclk_gscl_333_user_p, MUX_SEL_GSCL, 0, 1), @@ -706,7 +706,7 @@ static struct samsung_mux_clock gscl_mux_clks[] __initdata = { MUX_SEL_GSCL, 24, 1), }; -static struct samsung_div_clock gscl_div_clks[] __initdata = { +static const struct samsung_div_clock gscl_div_clks[] __initconst = { DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100", "mout_aclk_m2m_400_user", DIV_GSCL, 0, 3), @@ -715,7 +715,7 @@ static struct samsung_div_clock gscl_div_clks[] __initdata = { DIV_GSCL, 4, 3), }; -static struct samsung_gate_clock gscl_gate_clks[] __initdata = { +static const struct samsung_gate_clock gscl_gate_clks[] __initconst = { GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200", EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0), GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200", @@ -795,7 +795,7 @@ CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl", /* CMU_ISP */ -static unsigned long isp_clk_regs[] __initdata = { +static const unsigned long isp_clk_regs[] __initconst = { MUX_SEL_ISP0, MUX_SEL_ISP1, DIV_ISP, @@ -811,14 +811,14 @@ static unsigned long isp_clk_regs[] __initdata = { PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"}; PNAME(mout_isp_266_user_p) = {"fin_pll", "dout_aclk_isp1_266"}; -static struct samsung_mux_clock isp_mux_clks[] __initdata = { +static const struct samsung_mux_clock isp_mux_clks[] __initconst = { MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p, MUX_SEL_ISP0, 0, 1), MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p, MUX_SEL_ISP0, 4, 1), }; -static struct samsung_div_clock isp_div_clks[] __initdata = { +static const struct samsung_div_clock isp_div_clks[] __initconst = { DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc", 
DIV_ISP, 0, 3), DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc", @@ -830,7 +830,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = { DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2), }; -static struct samsung_gate_clock isp_gate_clks[] __initdata = { +static const struct samsung_gate_clock isp_gate_clks[] __initconst = { GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266", EN_IP_ISP0, 15, 0, 0), @@ -914,7 +914,7 @@ CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp", /* CMU_KFC */ -static unsigned long kfc_clk_regs[] __initdata = { +static const unsigned long kfc_clk_regs[] __initconst = { KFC_PLL_LOCK, KFC_PLL_CON0, KFC_PLL_CON1, @@ -932,13 +932,13 @@ static unsigned long kfc_clk_regs[] __initdata = { PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"}; PNAME(mout_kfc_p) = {"mout_kfc_pll", "dout_media_pll"}; -static struct samsung_mux_clock kfc_mux_clks[] __initdata = { +static const struct samsung_mux_clock kfc_mux_clks[] __initconst = { MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p, MUX_SEL_KFC0, 0, 1), MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1), }; -static struct samsung_div_clock kfc_div_clks[] __initdata = { +static const struct samsung_div_clock kfc_div_clks[] __initconst = { DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3), DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3), DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3), @@ -949,7 +949,7 @@ static struct samsung_div_clock kfc_div_clks[] __initdata = { DIV(KFC_DOUT_KFC_PLL, "dout_kfc_pll", "mout_kfc", DIV_KFC, 24, 3), }; -static struct samsung_pll_clock kfc_pll_clks[] __initdata = { +static const struct samsung_pll_clock kfc_pll_clks[] __initconst = { PLL(pll_2550xx, KFC_FOUT_KFC_PLL, "fout_kfc_pll", "fin_pll", KFC_PLL_LOCK, KFC_PLL_CON0, pll2550_24mhz_tbl), @@ -978,7 +978,7 @@ CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc", /* CMU_MFC */ -static unsigned long mfc_clk_regs[] __initdata = { +static const unsigned long mfc_clk_regs[] __initconst = { MUX_SEL_MFC, DIV_MFC, EN_ACLK_MFC, @@ -991,18 +991,18 @@ static unsigned long mfc_clk_regs[] __initdata = { PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"}; -static struct samsung_mux_clock mfc_mux_clks[] __initdata = { +static const struct samsung_mux_clock mfc_mux_clks[] __initconst = { MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user", mout_aclk_mfc_333_user_p, MUX_SEL_MFC, 0, 1), }; -static struct samsung_div_clock mfc_div_clks[] __initdata = { +static const struct samsung_div_clock mfc_div_clks[] __initconst = { DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user", DIV_MFC, 0, 3), }; -static struct samsung_gate_clock mfc_gate_clks[] __initdata = { +static const struct samsung_gate_clock mfc_gate_clks[] __initconst = { GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user", EN_IP_MFC, 1, 0, 0), GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user", @@ -1034,7 +1034,7 @@ CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc", /* CMU_MIF */ -static unsigned long mif_clk_regs[] __initdata = { +static const unsigned long mif_clk_regs[] __initconst = { MEM_PLL_LOCK, BUS_PLL_LOCK, MEDIA_PLL_LOCK, @@ -1076,7 +1076,7 @@ PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"}; PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"}; PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"}; -static struct samsung_mux_clock mif_mux_clks[] __initdata = { +static 
const struct samsung_mux_clock mif_mux_clks[] __initconst = { MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p, MUX_SEL_MIF, 0, 1), MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p, @@ -1093,7 +1093,7 @@ static struct samsung_mux_clock mif_mux_clks[] __initdata = { MUX_SEL_MIF, 24, 1), }; -static struct samsung_div_clock mif_div_clks[] __initdata = { +static const struct samsung_div_clock mif_div_clks[] __initconst = { DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll", DIV_MIF, 0, 3), DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll", @@ -1112,7 +1112,7 @@ static struct samsung_div_clock mif_div_clks[] __initdata = { DIV_MIF, 28, 4), }; -static struct samsung_gate_clock mif_gate_clks[] __initdata = { +static const struct samsung_gate_clock mif_gate_clks[] __initconst = { GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy", EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0), GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy", @@ -1146,7 +1146,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = { CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; -static struct samsung_pll_clock mif_pll_clks[] __initdata = { +static const struct samsung_pll_clock mif_pll_clks[] __initconst = { PLL(pll_2550xx, MIF_FOUT_MEM_PLL, "fout_mem_pll", "fin_pll", MEM_PLL_LOCK, MEM_PLL_CON0, pll2550_24mhz_tbl), @@ -1183,7 +1183,7 @@ CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif", /* CMU_PERI */ -static unsigned long peri_clk_regs[] __initdata = { +static const unsigned long peri_clk_regs[] __initconst = { MUX_SEL_PERI, MUX_SEL_PERI1, DIV_PERI, @@ -1219,7 +1219,7 @@ PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud", PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll", "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; -static struct samsung_mux_clock peri_mux_clks[] __initdata = { +static const struct samsung_mux_clock peri_mux_clks[] __initconst = { MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p, MUX_SEL_PERI1, 4, 2), MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p, @@ -1228,12 +1228,12 @@ static struct samsung_mux_clock peri_mux_clks[] __initdata = { MUX_SEL_PERI1, 20, 2), }; -static struct samsung_div_clock peri_div_clks[] __initdata = { +static const struct samsung_div_clock peri_div_clks[] __initconst = { DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8), DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6), }; -static struct samsung_gate_clock peri_gate_clks[] __initdata = { +static const struct samsung_gate_clock peri_gate_clks[] __initconst = { GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0, CLK_SET_RATE_PARENT, 0), GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1, @@ -1389,7 +1389,7 @@ CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri", /* CMU_TOP */ -static unsigned long top_clk_regs[] __initdata = { +static const unsigned long top_clk_regs[] __initconst = { DISP_PLL_LOCK, AUD_PLL_LOCK, DISP_PLL_CON0, @@ -1430,7 +1430,7 @@ static unsigned long top_clk_regs[] __initdata = { }; /* fixed rate clocks generated inside the soc */ -static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock fixed_rate_clks[] __initconst = { FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL, 0, 270000000), FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL, @@ -1513,7 +1513,7 @@ PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = 
{"mout_sclk_fsys_mmc1_sdclkin_a", PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a", "mout_mediatop_pll_user"}; -static struct samsung_mux_clock top_mux_clks[] __initdata = { +static const struct samsung_mux_clock top_mux_clks[] __initconst = { MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user", mout_mediatop_pll_user_p, MUX_SEL_TOP_PLL0, 0, 1), @@ -1673,7 +1673,7 @@ static struct samsung_mux_clock top_mux_clks[] __initdata = { MUX_SEL_TOP_GSCL, 20, 1), }; -static struct samsung_div_clock top_div_clks[] __initdata = { +static const struct samsung_div_clock top_div_clks[] __initconst = { DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333", DIV_TOP_G2D_MFC, 0, 3), DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333", @@ -1794,7 +1794,7 @@ static struct samsung_div_clock top_div_clks[] __initdata = { }; -static struct samsung_gate_clock top_gate_clks[] __initdata = { +static const struct samsung_gate_clock top_gate_clks[] __initconst = { GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin", "dout_sclk_fsys_mmc0_sdclkin_b", EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0), @@ -1809,7 +1809,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = { CLK_SET_RATE_PARENT, 0), }; -static struct samsung_pll_clock top_pll_clks[] __initdata = { +static const struct samsung_pll_clock top_pll_clks[] __initconst = { PLL(pll_2550xx, TOP_FOUT_DISP_PLL, "fout_disp_pll", "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, pll2550_24mhz_tbl), diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c index d5d5dcabc4a9..54ec486a5e45 100644 --- a/drivers/clk/samsung/clk-exynos5410.c +++ b/drivers/clk/samsung/clk-exynos5410.c @@ -31,11 +31,14 @@ #define SRC_CPU 0x200 #define DIV_CPU0 0x500 #define SRC_CPERI1 0x4204 +#define GATE_IP_G2D 0x8800 #define DIV_TOP0 0x10510 #define DIV_TOP1 0x10514 +#define DIV_FSYS0 0x10548 #define DIV_FSYS1 0x1054c #define DIV_FSYS2 0x10550 #define DIV_PERIC0 0x10558 +#define DIV_PERIC3 0x10564 #define SRC_TOP0 0x10210 #define SRC_TOP1 0x10214 #define SRC_TOP2 0x10218 @@ -44,6 +47,8 @@ #define SRC_MASK_FSYS 0x10340 #define SRC_MASK_PERIC0 0x10350 #define GATE_BUS_FSYS0 0x10740 +#define GATE_TOP_SCLK_FSYS 0x10840 +#define GATE_TOP_SCLK_PERIC 0x10850 #define GATE_IP_FSYS 0x10944 #define GATE_IP_PERIC 0x10950 #define GATE_IP_PERIS 0x10960 @@ -71,12 +76,13 @@ PNAME(mout_kfc_p) = { "mout_kpll", "sclk_mpll", }; PNAME(mpll_user_p) = { "fin_pll", "sclk_mpll", }; PNAME(bpll_user_p) = { "fin_pll", "sclk_bpll", }; PNAME(mpll_bpll_p) = { "sclk_mpll_muxed", "sclk_bpll_muxed", }; +PNAME(sclk_mpll_bpll_p) = { "sclk_mpll_bpll", "fin_pll", }; PNAME(group2_p) = { "fin_pll", "fin_pll", "none", "none", "none", "none", "sclk_mpll_bpll", "none", "none", "sclk_cpll" }; -static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5410_mux_clks[] __initconst = { MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1), MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), @@ -96,16 +102,20 @@ static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = { MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 0, 4), MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 4, 4), MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 8, 4), + MUX(0, "mout_usbd300", sclk_mpll_bpll_p, SRC_FSYS, 28, 1), + MUX(0, "mout_usbd301", sclk_mpll_bpll_p, SRC_FSYS, 29, 1), MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 0, 4), MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 4, 4), MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 8, 4), + MUX(0, "mout_uart3", group2_p, 
SRC_PERIC0, 12, 4), + MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 4), MUX(0, "mout_aclk200", mpll_bpll_p, SRC_TOP0, 12, 1), MUX(0, "mout_aclk400", mpll_bpll_p, SRC_TOP0, 20, 1), }; -static struct samsung_div_clock exynos5410_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5410_div_clks[] __initconst = { DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3), DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3), @@ -121,6 +131,11 @@ static struct samsung_div_clock exynos5410_div_clks[] __initdata = { DIV(0, "aclk66_pre", "sclk_mpll_muxed", DIV_TOP1, 24, 3), DIV(0, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3), + DIV(0, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4), + DIV(0, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 20, 4), + DIV(0, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4), + DIV(0, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 28, 4), + DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4), DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4), DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4), @@ -137,12 +152,19 @@ static struct samsung_div_clock exynos5410_div_clks[] __initdata = { DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4), DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4), + DIV(0, "dout_pwm", "mout_pwm", DIV_PERIC3, 0, 4), + DIV(0, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3), + DIV(0, "aclk266", "mpll_user_p", DIV_TOP0, 16, 3), DIV(0, "aclk400", "mout_aclk400", DIV_TOP0, 24, 3), }; -static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = { + GATE(CLK_SSS, "sss", "aclk266", GATE_IP_G2D, 2, 0, 0), GATE(CLK_MCT, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0), + GATE(CLK_WDT, "wdt", "aclk66", GATE_IP_PERIS, 19, 0, 0), + GATE(CLK_RTC, "rtc", "aclk66", GATE_IP_PERIS, 20, 0, 0), + GATE(CLK_TMU, "tmu", "aclk66", GATE_IP_PERIS, 21, 0, 0), GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0), @@ -155,9 +177,31 @@ static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = { GATE(CLK_MMC1, "sdmmc1", "aclk200", GATE_BUS_FSYS0, 13, 0, 0), GATE(CLK_MMC2, "sdmmc2", "aclk200", GATE_BUS_FSYS0, 14, 0, 0), + GATE(CLK_SCLK_USBPHY301, "sclk_usbphy301", "dout_usbphy301", + GATE_TOP_SCLK_FSYS, 7, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_USBPHY300, "sclk_usbphy300", "dout_usbphy300", + GATE_TOP_SCLK_FSYS, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_USBD300, "sclk_usbd300", "dout_usbd300", + GATE_TOP_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301", + GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0), + + GATE(CLK_SCLK_PWM, "sclk_pwm", "dout_pwm", + GATE_TOP_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0), + GATE(CLK_UART0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0), GATE(CLK_UART1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0), GATE(CLK_UART2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0), + GATE(CLK_UART3, "uart3", "aclk66", GATE_IP_PERIC, 3, 0, 0), + GATE(CLK_I2C0, "i2c0", "aclk66", GATE_IP_PERIC, 6, 0, 0), + GATE(CLK_I2C1, "i2c1", "aclk66", GATE_IP_PERIC, 7, 0, 0), + GATE(CLK_I2C2, "i2c2", "aclk66", GATE_IP_PERIC, 8, 0, 0), + GATE(CLK_I2C3, "i2c3", "aclk66", GATE_IP_PERIC, 9, 0, 0), + GATE(CLK_USI0, "usi0", "aclk66", GATE_IP_PERIC, 10, 0, 0), + GATE(CLK_USI1, "usi1", "aclk66", GATE_IP_PERIC, 11, 0, 0), + GATE(CLK_USI2, "usi2", "aclk66", GATE_IP_PERIC, 12, 0, 0), + GATE(CLK_USI3, "usi3", "aclk66", GATE_IP_PERIC, 13, 0, 0), + GATE(CLK_PWM, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0), GATE(CLK_SCLK_UART0, "sclk_uart0", 
"div_uart0", SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0), @@ -165,9 +209,15 @@ static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = { SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2", SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3", + SRC_MASK_PERIC0, 12, CLK_SET_RATE_PARENT, 0), + + GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_IP_FSYS, 18, 0, 0), + GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_IP_FSYS, 19, 0, 0), + GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_IP_FSYS, 20, 0, 0), }; -static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = { +static const struct samsung_pll_clock exynos5410_plls[nr_plls] __initconst = { [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, NULL), [cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK, diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 92382cef9f90..bb196ca21a77 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -160,7 +160,7 @@ static struct samsung_clk_reg_dump *exynos5800_save; * list of controller registers to be saved and restored during a * suspend/resume cycle. */ -static unsigned long exynos5x_clk_regs[] __initdata = { +static const unsigned long exynos5x_clk_regs[] __initconst = { SRC_CPU, DIV_CPU0, DIV_CPU1, @@ -248,7 +248,7 @@ static unsigned long exynos5x_clk_regs[] __initdata = { DIV_KFC0, }; -static unsigned long exynos5800_clk_regs[] __initdata = { +static const unsigned long exynos5800_clk_regs[] __initconst = { SRC_TOP8, SRC_TOP9, SRC_CAM, @@ -306,7 +306,7 @@ static struct syscore_ops exynos5420_clk_syscore_ops = { .resume = exynos5420_clk_resume, }; -static void exynos5420_clk_sleep_init(void) +static void __init exynos5420_clk_sleep_init(void) { exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs)); @@ -333,7 +333,7 @@ err_soc: return; } #else -static void exynos5420_clk_sleep_init(void) {} +static void __init exynos5420_clk_sleep_init(void) {} #endif /* list of all parent clocks */ @@ -484,7 +484,7 @@ static struct samsung_fixed_rate_clock }; /* fixed rate clocks generated inside the soc */ -static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initconst = { FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000), FRATE(0, "sclk_pwi", NULL, 0, 24000000), FRATE(0, "sclk_usbh20", NULL, 0, 48000000), @@ -492,19 +492,19 @@ static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = { FRATE(0, "sclk_usbh20_scan_clk", NULL, 0, 480000000), }; -static struct samsung_fixed_factor_clock - exynos5x_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock + exynos5x_fixed_factor_clks[] __initconst = { FFACTOR(0, "ff_hsic_12m", "fin_pll", 1, 2, 0), FFACTOR(0, "ff_sw_aclk66", "mout_sw_aclk66", 1, 2, 0), }; -static struct samsung_fixed_factor_clock - exynos5800_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock + exynos5800_fixed_factor_clks[] __initconst = { FFACTOR(0, "ff_dout_epll2", "mout_sclk_epll", 1, 2, 0), FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0), }; -static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = { MUX(0, "mout_aclk400_isp", 
mout_group3_5800_p, SRC_TOP0, 0, 3), MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3), MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, SRC_TOP0, 16, 3), @@ -553,7 +553,7 @@ static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3), }; -static struct samsung_div_clock exynos5800_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5800_div_clks[] __initconst = { DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3), DIV(0, "dout_aclk550_cam", "mout_aclk550_cam", @@ -569,14 +569,14 @@ static struct samsung_div_clock exynos5800_div_clks[] __initdata = { DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6), }; -static struct samsung_gate_clock exynos5800_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, 0, 0), }; -static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1), MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p, TOP_SPARE2, 4, 1), @@ -606,12 +606,12 @@ static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1), }; -static struct samsung_div_clock exynos5420_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3), }; -static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p, SRC_TOP7, 4, 1), MUX(0, "mout_mspll_kfc", mout_mspll_cpu_p, SRC_TOP7, 8, 2), @@ -778,7 +778,7 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = { MUX(0, "mout_isp_sensor", mout_group2_p, SRC_ISP, 28, 3), }; -static struct samsung_div_clock exynos5x_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5x_div_clks[] __initconst = { DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3), DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3), DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3), @@ -911,7 +911,7 @@ static struct samsung_div_clock exynos5x_div_clks[] __initdata = { CLK_SET_RATE_PARENT, 0), }; -static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { /* G2D */ GATE(CLK_MDMA0, "mdma0", "aclk266_g2d", GATE_IP_G2D, 1, 0, 0), GATE(CLK_SSS, "sss", "aclk266_g2d", GATE_IP_G2D, 2, 0, 0), @@ -946,7 +946,7 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { GATE_BUS_TOP, 13, 0, 0), GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), - GATE(0, "aclk333", "mout_user_aclk333", + GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", GATE_BUS_TOP, 16, 0, 0), @@ -1219,7 +1219,7 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0), }; -static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] = { +static const struct samsung_pll_rate_table 
exynos5420_pll2550x_24mhz_tbl[] __initconst = { PLL_35XX_RATE(2000000000, 250, 3, 0), PLL_35XX_RATE(1900000000, 475, 6, 0), PLL_35XX_RATE(1800000000, 225, 3, 0), @@ -1356,8 +1356,6 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_soc = soc; ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks, ARRAY_SIZE(exynos5x_fixed_rate_ext_clks), diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 128527b8fbeb..ea1608682d7f 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -11,10 +11,12 @@ #include <linux/clk-provider.h> #include <linux/of.h> +#include <linux/of_address.h> #include <dt-bindings/clock/exynos5433.h> #include "clk.h" +#include "clk-cpu.h" #include "clk-pll.h" /* @@ -108,7 +110,7 @@ #define ENABLE_CMU_TOP 0x0c00 #define ENABLE_CMU_TOP_DIV_STAT 0x0c04 -static unsigned long top_clk_regs[] __initdata = { +static const unsigned long top_clk_regs[] __initconst = { ISP_PLL_LOCK, AUD_PLL_LOCK, ISP_PLL_CON0, @@ -218,11 +220,11 @@ PNAME(mout_sclk_audio0_p) = { "ioclk_audiocdclk0", "oscclk", PNAME(mout_sclk_hdmi_spdif_p) = { "sclk_audio1", "ioclk_spdif_extclk", }; -static struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = { FFACTOR(0, "oscclk_efuse_common", "oscclk", 1, 1, 0), }; -static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock top_fixed_clks[] __initconst = { /* Xi2s{0|1}CDCLK input clock for I2S/PCM */ FRATE(0, "ioclk_audiocdclk1", NULL, 0, 100000000), FRATE(0, "ioclk_audiocdclk0", NULL, 0, 100000000), @@ -238,7 +240,7 @@ static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = { FRATE(0, "ioclk_i2s1_bclk_in", NULL, 0, 12288000), }; -static struct samsung_mux_clock top_mux_clks[] __initdata = { +static const struct samsung_mux_clock top_mux_clks[] __initconst = { /* MUX_SEL_TOP0 */ MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p, MUX_SEL_TOP0, 4, 1), @@ -374,7 +376,7 @@ static struct samsung_mux_clock top_mux_clks[] __initdata = { mout_sclk_hdmi_spdif_p, MUX_SEL_TOP_DISP, 0, 1), }; -static struct samsung_div_clock top_div_clks[] __initdata = { +static const struct samsung_div_clock top_div_clks[] __initconst = { /* DIV_TOP0 */ DIV(CLK_DIV_ACLK_CAM1_333, "div_aclk_cam1_333", "mout_aclk_cam1_333", DIV_TOP0, 28, 3), @@ -538,7 +540,7 @@ static struct samsung_div_clock top_div_clks[] __initdata = { DIV_TOP_PERIC4, 0, 4), }; -static struct samsung_gate_clock top_gate_clks[] __initdata = { +static const struct samsung_gate_clock top_gate_clks[] __initconst = { /* ENABLE_ACLK_TOP */ GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400", ENABLE_ACLK_TOP, 30, 0, 0), @@ -639,7 +641,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = { /* ENABLE_SCLK_TOP_FSYS */ GATE(CLK_SCLK_PCIE_100_FSYS, "sclk_pcie_100_fsys", "div_sclk_pcie_100", - ENABLE_SCLK_TOP_FSYS, 7, 0, 0), + ENABLE_SCLK_TOP_FSYS, 7, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_MMC2_FSYS, "sclk_mmc2_fsys", "div_sclk_mmc2_b", ENABLE_SCLK_TOP_FSYS, 6, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_MMC1_FSYS, "sclk_mmc1_fsys", "div_sclk_mmc1_b", @@ -668,11 +670,14 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = { GATE(CLK_SCLK_PCM1_PERIC, "sclk_pcm1_peric", "div_sclk_pcm1", ENABLE_SCLK_TOP_PERIC, 7, 
CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_UART2_PERIC, "sclk_uart2_peric", "div_sclk_uart2", - ENABLE_SCLK_TOP_PERIC, 5, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_TOP_PERIC, 5, CLK_SET_RATE_PARENT | + CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_UART1_PERIC, "sclk_uart1_peric", "div_sclk_uart1", - ENABLE_SCLK_TOP_PERIC, 4, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_TOP_PERIC, 4, CLK_SET_RATE_PARENT | + CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_UART0_PERIC, "sclk_uart0_peric", "div_sclk_uart0", - ENABLE_SCLK_TOP_PERIC, 3, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_TOP_PERIC, 3, CLK_SET_RATE_PARENT | + CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_SPI2_PERIC, "sclk_spi2_peric", "div_sclk_spi2_b", ENABLE_SCLK_TOP_PERIC, 2, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1_PERIC, "sclk_spi1_peric", "div_sclk_spi1_b", @@ -693,7 +698,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = { * ATLAS_PLL & APOLLO_PLL & MEM0_PLL & MEM1_PLL & BUS_PLL & MFC_PLL * & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL */ -static struct samsung_pll_rate_table exynos5443_pll_rates[] = { +static const struct samsung_pll_rate_table exynos5443_pll_rates[] __initconst = { PLL_35XX_RATE(2500000000U, 625, 6, 0), PLL_35XX_RATE(2400000000U, 500, 5, 0), PLL_35XX_RATE(2300000000U, 575, 6, 0), @@ -744,7 +749,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = { }; /* AUD_PLL */ -static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = { +static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initconst = { PLL_36XX_RATE(400000000U, 200, 3, 2, 0), PLL_36XX_RATE(393216000U, 197, 3, 2, -25690), PLL_36XX_RATE(384000000U, 128, 2, 2, 0), @@ -757,14 +762,14 @@ static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = { { /* sentinel */ } }; -static struct samsung_pll_clock top_pll_clks[] __initdata = { +static const struct samsung_pll_clock top_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk", ISP_PLL_LOCK, ISP_PLL_CON0, exynos5443_pll_rates), PLL(pll_36xx, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk", AUD_PLL_LOCK, AUD_PLL_CON0, exynos5443_aud_pll_rates), }; -static struct samsung_cmu_info top_cmu_info __initdata = { +static const struct samsung_cmu_info top_cmu_info __initconst = { .pll_clks = top_pll_clks, .nr_pll_clks = ARRAY_SIZE(top_pll_clks), .mux_clks = top_mux_clks, @@ -800,7 +805,7 @@ CLK_OF_DECLARE(exynos5433_cmu_top, "samsung,exynos5433-cmu-top", #define DIV_CPIF 0x0600 #define ENABLE_SCLK_CPIF 0x0a00 -static unsigned long cpif_clk_regs[] __initdata = { +static const unsigned long cpif_clk_regs[] __initconst = { MPHY_PLL_LOCK, MPHY_PLL_CON0, MPHY_PLL_CON1, @@ -813,32 +818,32 @@ static unsigned long cpif_clk_regs[] __initdata = { /* list of all parent clock list */ PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", }; -static struct samsung_pll_clock cpif_pll_clks[] __initdata = { +static const struct samsung_pll_clock cpif_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_MPHY_PLL, "fout_mphy_pll", "oscclk", MPHY_PLL_LOCK, MPHY_PLL_CON0, exynos5443_pll_rates), }; -static struct samsung_mux_clock cpif_mux_clks[] __initdata = { +static const struct samsung_mux_clock cpif_mux_clks[] __initconst = { /* MUX_SEL_CPIF0 */ MUX(CLK_MOUT_MPHY_PLL, "mout_mphy_pll", mout_mphy_pll_p, MUX_SEL_CPIF0, 0, 1), }; -static struct samsung_div_clock cpif_div_clks[] __initdata = { +static const struct samsung_div_clock cpif_div_clks[] __initconst = { /* DIV_CPIF */ DIV(CLK_DIV_SCLK_MPHY, "div_sclk_mphy", "mout_mphy_pll", DIV_CPIF, 0, 6), }; -static struct samsung_gate_clock cpif_gate_clks[] 
__initdata = { +static const struct samsung_gate_clock cpif_gate_clks[] __initconst = { /* ENABLE_SCLK_CPIF */ GATE(CLK_SCLK_MPHY_PLL, "sclk_mphy_pll", "mout_mphy_pll", - ENABLE_SCLK_CPIF, 9, 0, 0), + ENABLE_SCLK_CPIF, 9, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_UFS_MPHY, "sclk_ufs_mphy", "div_sclk_mphy", ENABLE_SCLK_CPIF, 4, 0, 0), }; -static struct samsung_cmu_info cpif_cmu_info __initdata = { +static const struct samsung_cmu_info cpif_cmu_info __initconst = { .pll_clks = cpif_pll_clks, .nr_pll_clks = ARRAY_SIZE(cpif_pll_clks), .mux_clks = cpif_mux_clks, @@ -939,7 +944,7 @@ CLK_OF_DECLARE(exynos5433_cmu_cpif, "samsung,exynos5433-cmu-cpif", #define PAUSE 0x1008 #define DDRPHY_LOCK_CTRL 0x100c -static unsigned long mif_clk_regs[] __initdata = { +static const unsigned long mif_clk_regs[] __initconst = { MEM0_PLL_LOCK, MEM1_PLL_LOCK, BUS_PLL_LOCK, @@ -1004,7 +1009,7 @@ static unsigned long mif_clk_regs[] __initdata = { DDRPHY_LOCK_CTRL, }; -static struct samsung_pll_clock mif_pll_clks[] __initdata = { +static const struct samsung_pll_clock mif_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_MEM0_PLL, "fout_mem0_pll", "oscclk", MEM0_PLL_LOCK, MEM0_PLL_CON0, exynos5443_pll_rates), PLL(pll_35xx, CLK_FOUT_MEM1_PLL, "fout_mem1_pll", "oscclk", @@ -1065,7 +1070,7 @@ PNAME(mout_sclk_decon_tv_vclk_b_p) = { "mout_sclk_decon_tv_vclk_a", PNAME(mout_sclk_dsim1_c_p) = { "mout_sclk_dsim1_b", "sclk_mphy_pll", }; PNAME(mout_sclk_dsim1_b_p) = { "mout_sclk_dsim1_a", "mout_mfc_pll_div2",}; -static struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initconst = { /* dout_{mfc|bus|mem1|mem0}_pll is half fixed rate from parent mux */ FFACTOR(CLK_DOUT_MFC_PLL, "dout_mfc_pll", "mout_mfc_pll", 1, 1, 0), FFACTOR(CLK_DOUT_BUS_PLL, "dout_bus_pll", "mout_bus_pll", 1, 1, 0), @@ -1073,7 +1078,7 @@ static struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initdata = { FFACTOR(CLK_DOUT_MEM0_PLL, "dout_mem0_pll", "mout_mem0_pll", 1, 1, 0), }; -static struct samsung_mux_clock mif_mux_clks[] __initdata = { +static const struct samsung_mux_clock mif_mux_clks[] __initconst = { /* MUX_SEL_MIF0 */ MUX(CLK_MOUT_MFC_PLL_DIV2, "mout_mfc_pll_div2", mout_mfc_pll_div2_p, MUX_SEL_MIF0, 28, 1), @@ -1169,7 +1174,7 @@ static struct samsung_mux_clock mif_mux_clks[] __initdata = { MUX_SEL_MIF7, 0, 1), }; -static struct samsung_div_clock mif_div_clks[] __initdata = { +static const struct samsung_div_clock mif_div_clks[] __initconst = { /* DIV_MIF1 */ DIV(CLK_DIV_SCLK_HPM_MIF, "div_sclk_hpm_mif", "div_clk2x_phy", DIV_MIF1, 16, 2), @@ -1223,7 +1228,7 @@ static struct samsung_div_clock mif_div_clks[] __initdata = { 0, 3), }; -static struct samsung_gate_clock mif_gate_clks[] __initdata = { +static const struct samsung_gate_clock mif_gate_clks[] __initconst = { /* ENABLE_ACLK_MIF0 */ GATE(CLK_CLK2X_PHY1, "clk2k_phy1", "div_clk2x_phy", ENABLE_ACLK_MIF0, 19, CLK_IGNORE_UNUSED, 0), @@ -1440,11 +1445,13 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = { /* ENABLE_PCLK_MIF_SECURE_DREX0_TZ */ GATE(CLK_PCLK_DREX0_TZ, "pclk_drex0_tz", "div_aclk_mif_133", - ENABLE_PCLK_MIF_SECURE_DREX0_TZ, 0, 0, 0), + ENABLE_PCLK_MIF_SECURE_DREX0_TZ, 0, + CLK_IGNORE_UNUSED, 0), /* ENABLE_PCLK_MIF_SECURE_DREX1_TZ */ GATE(CLK_PCLK_DREX1_TZ, "pclk_drex1_tz", "div_aclk_mif_133", - ENABLE_PCLK_MIF_SECURE_DREX1_TZ, 0, 0, 0), + ENABLE_PCLK_MIF_SECURE_DREX1_TZ, 0, + CLK_IGNORE_UNUSED, 0), /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */ GATE(CLK_PCLK_MONOTONIC_CNT, 
"pclk_monotonic_cnt", "div_aclk_mif_133", @@ -1486,7 +1493,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = { ENABLE_SCLK_MIF, 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info mif_cmu_info __initdata = { +static const struct samsung_cmu_info mif_cmu_info __initconst = { .pll_clks = mif_pll_clks, .nr_pll_clks = ARRAY_SIZE(mif_pll_clks), .mux_clks = mif_mux_clks, @@ -1522,7 +1529,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mif, "samsung,exynos5433-cmu-mif", #define ENABLE_IP_PERIC1 0x0B04 #define ENABLE_IP_PERIC2 0x0B08 -static unsigned long peric_clk_regs[] __initdata = { +static const unsigned long peric_clk_regs[] __initconst = { DIV_PERIC, ENABLE_ACLK_PERIC, ENABLE_PCLK_PERIC0, @@ -1533,13 +1540,13 @@ static unsigned long peric_clk_regs[] __initdata = { ENABLE_IP_PERIC2, }; -static struct samsung_div_clock peric_div_clks[] __initdata = { +static const struct samsung_div_clock peric_div_clks[] __initconst = { /* DIV_PERIC */ DIV(CLK_DIV_SCLK_SCI, "div_sclk_sci", "oscclk", DIV_PERIC, 4, 4), DIV(CLK_DIV_SCLK_SC_IN, "div_sclk_sc_in", "oscclk", DIV_PERIC, 0, 4), }; -static struct samsung_gate_clock peric_gate_clks[] __initdata = { +static const struct samsung_gate_clock peric_gate_clks[] __initconst = { /* ENABLE_ACLK_PERIC */ GATE(CLK_ACLK_AHB2APB_PERIC2P, "aclk_ahb2apb_peric2p", "aclk_peric_66", ENABLE_ACLK_PERIC, 3, CLK_IGNORE_UNUSED, 0), @@ -1654,8 +1661,7 @@ static struct samsung_gate_clock peric_gate_clks[] __initdata = { GATE(CLK_SCLK_IOCLK_SPI2, "sclk_ioclk_spi2", "ioclk_spi2_clk_in", ENABLE_SCLK_PERIC, 13, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_IOCLK_SPI1, "sclk_ioclk_spi1", "ioclk_spi1_clk_in", - ENABLE_SCLK_PERIC, 12, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 12, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_IOCLK_SPI0, "sclk_ioclk_spi0", "ioclk_spi0_clk_in", ENABLE_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_IOCLK_I2S1_BCLK, "sclk_ioclk_i2s1_bclk", @@ -1670,18 +1676,21 @@ static struct samsung_gate_clock peric_gate_clks[] __initdata = { GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, 5, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, - 4, CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + 4, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI0, "sclk_spi0", "sclk_spi0_peric", ENABLE_SCLK_PERIC, 3, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_UART2, "sclk_uart2", "sclk_uart2_peric", - ENABLE_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 2, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_UART1, "sclk_uart1", "sclk_uart1_peric", - ENABLE_SCLK_PERIC, 1, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 1, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_UART0, "sclk_uart0", "sclk_uart0_peric", - ENABLE_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 0, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info peric_cmu_info __initdata = { +static const struct samsung_cmu_info peric_cmu_info __initconst = { .div_clks = peric_div_clks, .nr_div_clks = ARRAY_SIZE(peric_div_clks), .gate_clks = peric_gate_clks, @@ -1728,7 +1737,7 @@ CLK_OF_DECLARE(exynos5433_cmu_peric, "samsung,exynos5433-cmu-peric", #define ENABLE_IP_PERIS_SECURE_ANTIBRK_CNT 0x0b1c #define ENABLE_IP_PERIS_SECURE_OTP_CON 0x0b20 -static unsigned long peris_clk_regs[] __initdata = { +static const unsigned long peris_clk_regs[] __initconst = { ENABLE_ACLK_PERIS, ENABLE_PCLK_PERIS, ENABLE_PCLK_PERIS_SECURE_TZPC, @@ -1756,7 +1765,7 @@ static unsigned long 
peris_clk_regs[] __initdata = { ENABLE_IP_PERIS_SECURE_OTP_CON, }; -static struct samsung_gate_clock peris_gate_clks[] __initdata = { +static const struct samsung_gate_clock peris_gate_clks[] __initconst = { /* ENABLE_ACLK_PERIS */ GATE(CLK_ACLK_AHB2APB_PERIS1P, "aclk_ahb2apb_peris1p", "aclk_peris_66", ENABLE_ACLK_PERIS, 2, CLK_IGNORE_UNUSED, 0), @@ -1875,7 +1884,7 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = { ENABLE_SCLK_PERIS_SECURE_OTP_CON, 0, 0, 0), }; -static struct samsung_cmu_info peris_cmu_info __initdata = { +static const struct samsung_cmu_info peris_cmu_info __initconst = { .gate_clks = peris_gate_clks, .nr_gate_clks = ARRAY_SIZE(peris_gate_clks), .nr_clk_ids = PERIS_NR_CLK, @@ -1959,7 +1968,7 @@ PNAME(mout_sclk_mphy_p) = { "mout_sclk_ufs_mphy_user", "mout_phyclk_lli_mphy_to_ufs_user", }; -static unsigned long fsys_clk_regs[] __initdata = { +static const unsigned long fsys_clk_regs[] __initconst = { MUX_SEL_FSYS0, MUX_SEL_FSYS1, MUX_SEL_FSYS2, @@ -1980,7 +1989,7 @@ static unsigned long fsys_clk_regs[] __initdata = { ENABLE_IP_FSYS1, }; -static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = { /* PHY clocks from USBDRD30_PHY */ FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY, "phyclk_usbdrd30_udrd30_phyclock_phy", NULL, @@ -2020,7 +2029,7 @@ static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = { NULL, 0, 26000000), }; -static struct samsung_mux_clock fsys_mux_clks[] __initdata = { +static const struct samsung_mux_clock fsys_mux_clks[] __initconst = { /* MUX_SEL_FSYS0 */ MUX(CLK_MOUT_SCLK_UFS_MPHY_USER, "mout_sclk_ufs_mphy_user", mout_sclk_ufs_mphy_user_p, MUX_SEL_FSYS0, 4, 1), @@ -2104,7 +2113,7 @@ static struct samsung_mux_clock fsys_mux_clks[] __initdata = { MUX_SEL_FSYS4, 0, 1), }; -static struct samsung_gate_clock fsys_gate_clks[] __initdata = { +static const struct samsung_gate_clock fsys_gate_clks[] __initconst = { /* ENABLE_ACLK_FSYS0 */ GATE(CLK_ACLK_PCIE, "aclk_pcie", "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS0, 13, CLK_IGNORE_UNUSED, 0), @@ -2138,7 +2147,7 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = { GATE(CLK_ACLK_SMMU_PDMA1, "aclk_smmu_pdma1", "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1, 25, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_BTS_PCIE, "aclk_bts_pcie", "mout_aclk_fsys_200_user", - ENABLE_ACLK_FSYS1, 24, 0, 0), + ENABLE_ACLK_FSYS1, 24, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_AXIUS_PDMA1, "aclk_axius_pdma1", "mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1, 22, CLK_IGNORE_UNUSED, 0), @@ -2185,13 +2194,13 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = { /* ENABLE_PCLK_FSYS */ GATE(CLK_PCLK_PCIE_CTRL, "pclk_pcie_ctrl", "mout_aclk_fsys_200_user", - ENABLE_PCLK_FSYS, 17, 0, 0), + ENABLE_PCLK_FSYS, 17, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_SMMU_PDMA1, "pclk_smmu_pdma1", "mout_aclk_fsys_200_user", ENABLE_PCLK_FSYS, 16, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_PCIE_PHY, "pclk_pcie_phy", "mout_aclk_fsys_200_user", - ENABLE_PCLK_FSYS, 14, 0, 0), + ENABLE_PCLK_FSYS, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_BTS_PCIE, "pclk_bts_pcie", "mout_aclk_fsys_200_user", - ENABLE_PCLK_FSYS, 13, 0, 0), + ENABLE_PCLK_FSYS, 13, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_SMMU_PDMA0, "pclk_smmu_pdma0", "mout_aclk_fsys_200_user", ENABLE_PCLK_FSYS, 8, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_BTS_UFS, "pclk_bts_ufs", "mout_aclk_fsys_200_user", @@ -2270,11 +2279,12 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = { 
ENABLE_SCLK_FSYS, 0, 0, 0), /* ENABLE_IP_FSYS0 */ + GATE(CLK_PCIE, "pcie", "sclk_pcie_100", ENABLE_IP_FSYS0, 17, 0, 0), GATE(CLK_PDMA1, "pdma1", "aclk_pdma1", ENABLE_IP_FSYS0, 15, 0, 0), GATE(CLK_PDMA0, "pdma0", "aclk_pdma0", ENABLE_IP_FSYS0, 0, 0, 0), }; -static struct samsung_cmu_info fsys_cmu_info __initdata = { +static const struct samsung_cmu_info fsys_cmu_info __initconst = { .mux_clks = fsys_mux_clks, .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks), .gate_clks = fsys_gate_clks, @@ -2310,7 +2320,7 @@ CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys", #define DIV_ENABLE_IP_G2D1 0x0b04 #define DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D 0x0b08 -static unsigned long g2d_clk_regs[] __initdata = { +static const unsigned long g2d_clk_regs[] __initconst = { MUX_SEL_G2D0, MUX_SEL_ENABLE_G2D0, DIV_G2D, @@ -2327,7 +2337,7 @@ static unsigned long g2d_clk_regs[] __initdata = { PNAME(mout_aclk_g2d_266_user_p) = { "oscclk", "aclk_g2d_266", }; PNAME(mout_aclk_g2d_400_user_p) = { "oscclk", "aclk_g2d_400", }; -static struct samsung_mux_clock g2d_mux_clks[] __initdata = { +static const struct samsung_mux_clock g2d_mux_clks[] __initconst = { /* MUX_SEL_G2D0 */ MUX(CLK_MUX_ACLK_G2D_266_USER, "mout_aclk_g2d_266_user", mout_aclk_g2d_266_user_p, MUX_SEL_G2D0, 4, 1), @@ -2335,13 +2345,13 @@ static struct samsung_mux_clock g2d_mux_clks[] __initdata = { mout_aclk_g2d_400_user_p, MUX_SEL_G2D0, 0, 1), }; -static struct samsung_div_clock g2d_div_clks[] __initdata = { +static const struct samsung_div_clock g2d_div_clks[] __initconst = { /* DIV_G2D */ DIV(CLK_DIV_PCLK_G2D, "div_pclk_g2d", "mout_aclk_g2d_266_user", DIV_G2D, 0, 2), }; -static struct samsung_gate_clock g2d_gate_clks[] __initdata = { +static const struct samsung_gate_clock g2d_gate_clks[] __initconst = { /* DIV_ENABLE_ACLK_G2D */ GATE(CLK_ACLK_SMMU_MDMA1, "aclk_smmu_mdma1", "mout_aclk_g2d_266_user", DIV_ENABLE_ACLK_G2D, 12, 0, 0), @@ -2398,7 +2408,7 @@ static struct samsung_gate_clock g2d_gate_clks[] __initdata = { DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D, 0, 0, 0), }; -static struct samsung_cmu_info g2d_cmu_info __initdata = { +static const struct samsung_cmu_info g2d_cmu_info __initconst = { .mux_clks = g2d_mux_clks, .nr_mux_clks = ARRAY_SIZE(g2d_mux_clks), .div_clks = g2d_div_clks, @@ -2454,7 +2464,7 @@ CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d", #define CLKOUT_CMU_DISP 0x0c00 #define CLKOUT_CMU_DISP_DIV_STAT 0x0c04 -static unsigned long disp_clk_regs[] __initdata = { +static const unsigned long disp_clk_regs[] __initconst = { DISP_PLL_LOCK, DISP_PLL_CON0, DISP_PLL_CON1, @@ -2527,12 +2537,12 @@ PNAME(mout_sclk_decon_tv_vclk_c_disp_p) = { PNAME(mout_sclk_decon_tv_vclk_b_disp_p) = { "mout_sclk_decon_tv_vclk_a_disp", "mout_sclk_decon_tv_vclk_user", }; -static struct samsung_pll_clock disp_pll_clks[] __initdata = { +static const struct samsung_pll_clock disp_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", "oscclk", DISP_PLL_LOCK, DISP_PLL_CON0, exynos5443_pll_rates), }; -static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initconst = { /* * sclk_rgb_{vclk|tv_vclk} is half clock of sclk_decon_{vclk|tv_vclk}. 
* The divider has fixed value (2) between sclk_rgb_{vclk|tv_vclk} @@ -2544,7 +2554,7 @@ static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = { 1, 2, 0), }; -static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock disp_fixed_clks[] __initconst = { /* PHY clocks from MIPI_DPHY1 */ FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000), FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000), @@ -2558,7 +2568,7 @@ static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = { NULL, 0, 166000000), }; -static struct samsung_mux_clock disp_mux_clks[] __initdata = { +static const struct samsung_mux_clock disp_mux_clks[] __initconst = { /* MUX_SEL_DISP0 */ MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p, MUX_SEL_DISP0, 0, 1), @@ -2633,7 +2643,7 @@ static struct samsung_mux_clock disp_mux_clks[] __initdata = { mout_sclk_decon_vclk_p, MUX_SEL_DISP4, 0, 1), }; -static struct samsung_div_clock disp_div_clks[] __initdata = { +static const struct samsung_div_clock disp_div_clks[] __initconst = { /* DIV_DISP */ DIV(CLK_DIV_SCLK_DSIM1_DISP, "div_sclk_dsim1_disp", "mout_sclk_dsim1_b_disp", DIV_DISP, 24, 3), @@ -2651,7 +2661,7 @@ static struct samsung_div_clock disp_div_clks[] __initdata = { DIV_DISP, 0, 2), }; -static struct samsung_gate_clock disp_gate_clks[] __initdata = { +static const struct samsung_gate_clock disp_gate_clks[] __initconst = { /* ENABLE_ACLK_DISP0 */ GATE(CLK_ACLK_DECON_TV, "aclk_decon_tv", "mout_aclk_disp_333_user", ENABLE_ACLK_DISP0, 2, 0, 0), @@ -2811,7 +2821,7 @@ static struct samsung_gate_clock disp_gate_clks[] __initdata = { "div_sclk_decon_eclk_disp", ENABLE_SCLK_DISP, 2, 0, 0), }; -static struct samsung_cmu_info disp_cmu_info __initdata = { +static const struct samsung_cmu_info disp_cmu_info __initconst = { .pll_clks = disp_pll_clks, .nr_pll_clks = ARRAY_SIZE(disp_pll_clks), .mux_clks = disp_mux_clks, @@ -2856,7 +2866,7 @@ CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp", #define ENABLE_IP_AUD0 0x0b00 #define ENABLE_IP_AUD1 0x0b04 -static unsigned long aud_clk_regs[] __initdata = { +static const unsigned long aud_clk_regs[] __initconst = { MUX_SEL_AUD0, MUX_SEL_AUD1, MUX_ENABLE_AUD0, @@ -2875,13 +2885,13 @@ static unsigned long aud_clk_regs[] __initdata = { PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", }; PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",}; -static struct samsung_fixed_rate_clock aud_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock aud_fixed_clks[] __initconst = { FRATE(0, "ioclk_jtag_tclk", NULL, 0, 33000000), FRATE(0, "ioclk_slimbus_clk", NULL, 0, 25000000), FRATE(0, "ioclk_i2s_bclk", NULL, 0, 50000000), }; -static struct samsung_mux_clock aud_mux_clks[] __initdata = { +static const struct samsung_mux_clock aud_mux_clks[] __initconst = { /* MUX_SEL_AUD0 */ MUX(CLK_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_aud_p, MUX_SEL_AUD0, 0, 1), @@ -2893,7 +2903,7 @@ static struct samsung_mux_clock aud_mux_clks[] __initdata = { MUX_SEL_AUD1, 0, 1), }; -static struct samsung_div_clock aud_div_clks[] __initdata = { +static const struct samsung_div_clock aud_div_clks[] __initconst = { /* DIV_AUD0 */ DIV(CLK_DIV_ATCLK_AUD, "div_atclk_aud", "div_aud_ca5", DIV_AUD0, 12, 4), @@ -2915,7 +2925,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = { DIV_AUD1, 0, 4), }; -static struct samsung_gate_clock aud_gate_clks[] __initdata = { +static const struct 
samsung_gate_clock aud_gate_clks[] __initconst = { /* ENABLE_ACLK_AUD */ GATE(CLK_ACLK_INTR_CTRL, "aclk_intr_ctrl", "div_aclk_aud", ENABLE_ACLK_AUD, 12, 0, 0), @@ -2962,7 +2972,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = { /* ENABLE_SCLK_AUD0 */ GATE(CLK_ATCLK_AUD, "atclk_aud", "div_atclk_aud", ENABLE_SCLK_AUD0, - 2, 0, 0), + 2, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_DBG_AUD, "pclk_dbg_aud", "div_pclk_dbg_aud", ENABLE_SCLK_AUD0, 1, 0, 0), GATE(CLK_SCLK_AUD_CA5, "sclk_aud_ca5", "div_aud_ca5", ENABLE_SCLK_AUD0, @@ -2976,7 +2986,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = { GATE(CLK_SCLK_AUD_SLIMBUS, "sclk_aud_slimbus", "div_sclk_aud_slimbus", ENABLE_SCLK_AUD1, 4, 0, 0), GATE(CLK_SCLK_AUD_UART, "sclk_aud_uart", "div_sclk_aud_uart", - ENABLE_SCLK_AUD1, 3, 0, 0), + ENABLE_SCLK_AUD1, 3, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_AUD_PCM, "sclk_aud_pcm", "div_sclk_aud_pcm", ENABLE_SCLK_AUD1, 2, 0, 0), GATE(CLK_SCLK_I2S_BCLK, "sclk_i2s_bclk", "ioclk_i2s_bclk", @@ -2985,7 +2995,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = { ENABLE_SCLK_AUD1, 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info aud_cmu_info __initdata = { +static const struct samsung_cmu_info aud_cmu_info __initconst = { .mux_clks = aud_mux_clks, .nr_mux_clks = ARRAY_SIZE(aud_mux_clks), .div_clks = aud_div_clks, @@ -3031,24 +3041,24 @@ PNAME(mout_aclk_bus2_400_p) = { "oscclk", "aclk_bus2_400", }; ENABLE_IP_BUS0, \ ENABLE_IP_BUS1 -static unsigned long bus01_clk_regs[] __initdata = { +static const unsigned long bus01_clk_regs[] __initconst = { CMU_BUS_COMMON_CLK_REGS, }; -static unsigned long bus2_clk_regs[] __initdata = { +static const unsigned long bus2_clk_regs[] __initconst = { MUX_SEL_BUS2, MUX_ENABLE_BUS2, CMU_BUS_COMMON_CLK_REGS, }; -static struct samsung_div_clock bus0_div_clks[] __initdata = { +static const struct samsung_div_clock bus0_div_clks[] __initconst = { /* DIV_BUS0 */ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus0_133", "aclk_bus0_400", DIV_BUS, 0, 3), }; /* CMU_BUS0 clocks */ -static struct samsung_gate_clock bus0_gate_clks[] __initdata = { +static const struct samsung_gate_clock bus0_gate_clks[] __initconst = { /* ENABLE_ACLK_BUS0 */ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus0p", "div_pclk_bus0_133", ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0), @@ -3067,13 +3077,13 @@ static struct samsung_gate_clock bus0_gate_clks[] __initdata = { }; /* CMU_BUS1 clocks */ -static struct samsung_div_clock bus1_div_clks[] __initdata = { +static const struct samsung_div_clock bus1_div_clks[] __initconst = { /* DIV_BUS1 */ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus1_133", "aclk_bus1_400", DIV_BUS, 0, 3), }; -static struct samsung_gate_clock bus1_gate_clks[] __initdata = { +static const struct samsung_gate_clock bus1_gate_clks[] __initconst = { /* ENABLE_ACLK_BUS1 */ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus1p", "div_pclk_bus1_133", ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0), @@ -3092,19 +3102,19 @@ static struct samsung_gate_clock bus1_gate_clks[] __initdata = { }; /* CMU_BUS2 clocks */ -static struct samsung_mux_clock bus2_mux_clks[] __initdata = { +static const struct samsung_mux_clock bus2_mux_clks[] __initconst = { /* MUX_SEL_BUS2 */ MUX(CLK_MOUT_ACLK_BUS2_400_USER, "mout_aclk_bus2_400_user", mout_aclk_bus2_400_p, MUX_SEL_BUS2, 0, 1), }; -static struct samsung_div_clock bus2_div_clks[] __initdata = { +static const struct samsung_div_clock bus2_div_clks[] __initconst = { /* DIV_BUS2 */ DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus2_133", "mout_aclk_bus2_400_user", DIV_BUS, 
0, 3), }; -static struct samsung_gate_clock bus2_gate_clks[] __initdata = { +static const struct samsung_gate_clock bus2_gate_clks[] __initconst = { /* ENABLE_ACLK_BUS2 */ GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus2p", "div_pclk_bus2_133", ENABLE_ACLK_BUS, 3, CLK_IGNORE_UNUSED, 0), @@ -3133,19 +3143,19 @@ static struct samsung_gate_clock bus2_gate_clks[] __initdata = { .nr_gate_clks = ARRAY_SIZE(bus##id##_gate_clks), \ .nr_clk_ids = BUSx_NR_CLK -static struct samsung_cmu_info bus0_cmu_info __initdata = { +static const struct samsung_cmu_info bus0_cmu_info __initconst = { CMU_BUS_INFO_CLKS(0), .clk_regs = bus01_clk_regs, .nr_clk_regs = ARRAY_SIZE(bus01_clk_regs), }; -static struct samsung_cmu_info bus1_cmu_info __initdata = { +static const struct samsung_cmu_info bus1_cmu_info __initconst = { CMU_BUS_INFO_CLKS(1), .clk_regs = bus01_clk_regs, .nr_clk_regs = ARRAY_SIZE(bus01_clk_regs), }; -static struct samsung_cmu_info bus2_cmu_info __initdata = { +static const struct samsung_cmu_info bus2_cmu_info __initconst = { CMU_BUS_INFO_CLKS(2), .mux_clks = bus2_mux_clks, .nr_mux_clks = ARRAY_SIZE(bus2_mux_clks), @@ -3189,7 +3199,7 @@ exynos5433_cmu_bus_init(2); #define CLKOUT_CMU_G3D_DIV_STAT 0x0c04 #define CLK_STOPCTRL 0x1000 -static unsigned long g3d_clk_regs[] __initdata = { +static const unsigned long g3d_clk_regs[] __initconst = { G3D_PLL_LOCK, G3D_PLL_CON0, G3D_PLL_CON1, @@ -3212,12 +3222,12 @@ static unsigned long g3d_clk_regs[] __initdata = { PNAME(mout_aclk_g3d_400_p) = { "mout_g3d_pll", "aclk_g3d_400", }; PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", }; -static struct samsung_pll_clock g3d_pll_clks[] __initdata = { +static const struct samsung_pll_clock g3d_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk", G3D_PLL_LOCK, G3D_PLL_CON0, exynos5443_pll_rates), }; -static struct samsung_mux_clock g3d_mux_clks[] __initdata = { +static const struct samsung_mux_clock g3d_mux_clks[] __initconst = { /* MUX_SEL_G3D */ MUX_F(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p, MUX_SEL_G3D, 8, 1, CLK_SET_RATE_PARENT, 0), @@ -3225,7 +3235,7 @@ static struct samsung_mux_clock g3d_mux_clks[] __initdata = { MUX_SEL_G3D, 0, 1, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_div_clock g3d_div_clks[] __initdata = { +static const struct samsung_div_clock g3d_div_clks[] __initconst = { /* DIV_G3D */ DIV(CLK_DIV_SCLK_HPM_G3D, "div_sclk_hpm_g3d", "mout_g3d_pll", DIV_G3D, 8, 2), @@ -3235,7 +3245,7 @@ static struct samsung_div_clock g3d_div_clks[] __initdata = { 0, 3, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_gate_clock g3d_gate_clks[] __initdata = { +static const struct samsung_gate_clock g3d_gate_clks[] __initconst = { /* ENABLE_ACLK_G3D */ GATE(CLK_ACLK_BTS_G3D1, "aclk_bts_g3d1", "div_aclk_g3d", ENABLE_ACLK_G3D, 7, 0, 0), @@ -3269,7 +3279,7 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = { ENABLE_SCLK_G3D, 0, 0, 0), }; -static struct samsung_cmu_info g3d_cmu_info __initdata = { +static const struct samsung_cmu_info g3d_cmu_info __initconst = { .pll_clks = g3d_pll_clks, .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks), .mux_clks = g3d_mux_clks, @@ -3310,7 +3320,7 @@ CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d", #define ENABLE_IP_GSCL_SECURE_SMMU_GSCL1 0x0b0c #define ENABLE_IP_GSCL_SECURE_SMMU_GSCL2 0x0b10 -static unsigned long gscl_clk_regs[] __initdata = { +static const unsigned long gscl_clk_regs[] __initconst = { MUX_SEL_GSCL, MUX_ENABLE_GSCL, ENABLE_ACLK_GSCL, @@ -3332,7 +3342,7 @@ static unsigned long 
gscl_clk_regs[] __initdata = { PNAME(aclk_gscl_111_user_p) = { "oscclk", "aclk_gscl_111", }; PNAME(aclk_gscl_333_user_p) = { "oscclk", "aclk_gscl_333", }; -static struct samsung_mux_clock gscl_mux_clks[] __initdata = { +static const struct samsung_mux_clock gscl_mux_clks[] __initconst = { /* MUX_SEL_GSCL */ MUX(CLK_MOUT_ACLK_GSCL_111_USER, "mout_aclk_gscl_111_user", aclk_gscl_111_user_p, MUX_SEL_GSCL, 4, 1), @@ -3340,7 +3350,7 @@ static struct samsung_mux_clock gscl_mux_clks[] __initdata = { aclk_gscl_333_user_p, MUX_SEL_GSCL, 0, 1), }; -static struct samsung_gate_clock gscl_gate_clks[] __initdata = { +static const struct samsung_gate_clock gscl_gate_clks[] __initconst = { /* ENABLE_ACLK_GSCL */ GATE(CLK_ACLK_BTS_GSCL2, "aclk_bts_gscl2", "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 11, 0, 0), @@ -3356,9 +3366,11 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = { GATE(CLK_ACLK_GSCLNP_111, "aclk_gsclnp_111", "mout_aclk_gscl_111_user", ENABLE_ACLK_GSCL, 6, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_GSCLRTND_333, "aclk_gsclrtnd_333", - "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 5, 0, 0), + "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 5, + CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_GSCLBEND_333, "aclk_gsclbend_333", - "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 4, 0, 0), + "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 4, + CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_GSD, "aclk_gsd", "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 3, 0, 0), GATE(CLK_ACLK_GSCL2, "aclk_gscl2", "mout_aclk_gscl_333_user", @@ -3412,7 +3424,7 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = { ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0), }; -static struct samsung_cmu_info gscl_cmu_info __initdata = { +static const struct samsung_cmu_info gscl_cmu_info __initconst = { .mux_clks = gscl_mux_clks, .nr_mux_clks = ARRAY_SIZE(gscl_mux_clks), .gate_clks = gscl_gate_clks, @@ -3465,7 +3477,7 @@ CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl", #define APOLLO_INTR_SPREAD_USE_STANDBYWFI 0x1084 #define APOLLO_INTR_SPREAD_BLOCKING_DURATION 0x1088 -static unsigned long apollo_clk_regs[] __initdata = { +static const unsigned long apollo_clk_regs[] __initconst = { APOLLO_PLL_LOCK, APOLLO_PLL_CON0, APOLLO_PLL_CON1, @@ -3500,15 +3512,16 @@ PNAME(mout_bus_pll_apollo_user_p) = { "oscclk", "sclk_bus_pll_apollo", }; PNAME(mout_apollo_p) = { "mout_apollo_pll", "mout_bus_pll_apollo_user", }; -static struct samsung_pll_clock apollo_pll_clks[] __initdata = { +static const struct samsung_pll_clock apollo_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_APOLLO_PLL, "fout_apollo_pll", "oscclk", APOLLO_PLL_LOCK, APOLLO_PLL_CON0, exynos5443_pll_rates), }; -static struct samsung_mux_clock apollo_mux_clks[] __initdata = { +static const struct samsung_mux_clock apollo_mux_clks[] __initconst = { /* MUX_SEL_APOLLO0 */ MUX_F(CLK_MOUT_APOLLO_PLL, "mout_apollo_pll", mout_apollo_pll_p, - MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT, 0), + MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT | + CLK_RECALC_NEW_RATES, 0), /* MUX_SEL_APOLLO1 */ MUX(CLK_MOUT_BUS_PLL_APOLLO_USER, "mout_bus_pll_apollo_user", @@ -3519,7 +3532,7 @@ static struct samsung_mux_clock apollo_mux_clks[] __initdata = { 0, 1, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_div_clock apollo_div_clks[] __initdata = { +static const struct samsung_div_clock apollo_div_clks[] __initconst = { /* DIV_APOLLO0 */ DIV_F(CLK_DIV_CNTCLK_APOLLO, "div_cntclk_apollo", "div_apollo2", DIV_APOLLO0, 24, 3, CLK_GET_RATE_NOCACHE, @@ -3550,7 +3563,7 @@ static struct samsung_div_clock 
apollo_div_clks[] __initdata = { CLK_DIVIDER_READ_ONLY), }; -static struct samsung_gate_clock apollo_gate_clks[] __initdata = { +static const struct samsung_gate_clock apollo_gate_clks[] __initconst = { /* ENABLE_ACLK_APOLLO */ GATE(CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS, "aclk_asatbslv_apollo_3_cssys", "div_atclk_apollo", ENABLE_ACLK_APOLLO, @@ -3589,28 +3602,64 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = { ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo", ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0), - GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2", - ENABLE_SCLK_APOLLO, 0, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; -static struct samsung_cmu_info apollo_cmu_info __initdata = { - .pll_clks = apollo_pll_clks, - .nr_pll_clks = ARRAY_SIZE(apollo_pll_clks), - .mux_clks = apollo_mux_clks, - .nr_mux_clks = ARRAY_SIZE(apollo_mux_clks), - .div_clks = apollo_div_clks, - .nr_div_clks = ARRAY_SIZE(apollo_div_clks), - .gate_clks = apollo_gate_clks, - .nr_gate_clks = ARRAY_SIZE(apollo_gate_clks), - .nr_clk_ids = APOLLO_NR_CLK, - .clk_regs = apollo_clk_regs, - .nr_clk_regs = ARRAY_SIZE(apollo_clk_regs), +#define E5433_APOLLO_DIV0(cntclk, pclk_dbg, atclk, pclk, aclk) \ + (((cntclk) << 24) | ((pclk_dbg) << 20) | ((atclk) << 16) | \ + ((pclk) << 12) | ((aclk) << 8)) + +#define E5433_APOLLO_DIV1(hpm, copy) \ + (((hpm) << 4) | ((copy) << 0)) + +static const struct exynos_cpuclk_cfg_data exynos5433_apolloclk_d[] __initconst = { + { 1300000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 1200000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 1100000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 1000000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 900000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 800000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 700000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), }, + { 600000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), }, + { 500000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), }, + { 400000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), }, + { 0 }, }; static void __init exynos5433_cmu_apollo_init(struct device_node *np) { - samsung_cmu_register_one(np, &apollo_cmu_info); + void __iomem *reg_base; + struct samsung_clk_provider *ctx; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + panic("%s: failed to map registers\n", __func__); + return; + } + + ctx = samsung_clk_init(np, reg_base, APOLLO_NR_CLK); + if (!ctx) { + panic("%s: unable to allocate ctx\n", __func__); + return; + } + + samsung_clk_register_pll(ctx, apollo_pll_clks, + ARRAY_SIZE(apollo_pll_clks), reg_base); + samsung_clk_register_mux(ctx, apollo_mux_clks, + ARRAY_SIZE(apollo_mux_clks)); + samsung_clk_register_div(ctx, apollo_div_clks, + ARRAY_SIZE(apollo_div_clks)); + samsung_clk_register_gate(ctx, apollo_gate_clks, + ARRAY_SIZE(apollo_gate_clks)); + + exynos_register_cpu_clock(ctx, CLK_SCLK_APOLLO, "apolloclk", + mout_apollo_p[0], mout_apollo_p[1], 0x200, + exynos5433_apolloclk_d, ARRAY_SIZE(exynos5433_apolloclk_d), + CLK_CPU_HAS_E5433_REGS_LAYOUT); + + samsung_clk_sleep_init(reg_base, apollo_clk_regs, + ARRAY_SIZE(apollo_clk_regs)); + + samsung_clk_of_add_provider(np, ctx); } CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo", exynos5433_cmu_apollo_init); @@ -3651,7 +3700,7 @@ CLK_OF_DECLARE(exynos5433_cmu_apollo, 
"samsung,exynos5433-cmu-apollo", #define ATLAS_INTR_SPREAD_USE_STANDBYWFI 0x1084 #define ATLAS_INTR_SPREAD_BLOCKING_DURATION 0x1088 -static unsigned long atlas_clk_regs[] __initdata = { +static const unsigned long atlas_clk_regs[] __initconst = { ATLAS_PLL_LOCK, ATLAS_PLL_CON0, ATLAS_PLL_CON1, @@ -3686,15 +3735,16 @@ PNAME(mout_bus_pll_atlas_user_p) = { "oscclk", "sclk_bus_pll_atlas", }; PNAME(mout_atlas_p) = { "mout_atlas_pll", "mout_bus_pll_atlas_user", }; -static struct samsung_pll_clock atlas_pll_clks[] __initdata = { +static const struct samsung_pll_clock atlas_pll_clks[] __initconst = { PLL(pll_35xx, CLK_FOUT_ATLAS_PLL, "fout_atlas_pll", "oscclk", ATLAS_PLL_LOCK, ATLAS_PLL_CON0, exynos5443_pll_rates), }; -static struct samsung_mux_clock atlas_mux_clks[] __initdata = { +static const struct samsung_mux_clock atlas_mux_clks[] __initconst = { /* MUX_SEL_ATLAS0 */ MUX_F(CLK_MOUT_ATLAS_PLL, "mout_atlas_pll", mout_atlas_pll_p, - MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT, 0), + MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT | + CLK_RECALC_NEW_RATES, 0), /* MUX_SEL_ATLAS1 */ MUX(CLK_MOUT_BUS_PLL_ATLAS_USER, "mout_bus_pll_atlas_user", @@ -3705,7 +3755,7 @@ static struct samsung_mux_clock atlas_mux_clks[] __initdata = { 0, 1, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_div_clock atlas_div_clks[] __initdata = { +static const struct samsung_div_clock atlas_div_clks[] __initconst = { /* DIV_ATLAS0 */ DIV_F(CLK_DIV_CNTCLK_ATLAS, "div_cntclk_atlas", "div_atlas2", DIV_ATLAS0, 24, 3, CLK_GET_RATE_NOCACHE, @@ -3736,7 +3786,7 @@ static struct samsung_div_clock atlas_div_clks[] __initdata = { CLK_DIVIDER_READ_ONLY), }; -static struct samsung_gate_clock atlas_gate_clks[] __initdata = { +static const struct samsung_gate_clock atlas_gate_clks[] __initconst = { /* ENABLE_ACLK_ATLAS */ GATE(CLK_ACLK_ATB_AUD_CSSYS, "aclk_atb_aud_cssys", "div_atclk_atlas", ENABLE_ACLK_ATLAS, @@ -3801,28 +3851,69 @@ static struct samsung_gate_clock atlas_gate_clks[] __initdata = { ENABLE_SCLK_ATLAS, 2, CLK_IGNORE_UNUSED, 0), GATE(CLK_ATCLK, "atclk", "div_atclk_atlas", ENABLE_SCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0), - GATE(CLK_SCLK_ATLAS, "sclk_atlas", "div_atlas2", - ENABLE_SCLK_ATLAS, 0, - CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; -static struct samsung_cmu_info atlas_cmu_info __initdata = { - .pll_clks = atlas_pll_clks, - .nr_pll_clks = ARRAY_SIZE(atlas_pll_clks), - .mux_clks = atlas_mux_clks, - .nr_mux_clks = ARRAY_SIZE(atlas_mux_clks), - .div_clks = atlas_div_clks, - .nr_div_clks = ARRAY_SIZE(atlas_div_clks), - .gate_clks = atlas_gate_clks, - .nr_gate_clks = ARRAY_SIZE(atlas_gate_clks), - .nr_clk_ids = ATLAS_NR_CLK, - .clk_regs = atlas_clk_regs, - .nr_clk_regs = ARRAY_SIZE(atlas_clk_regs), +#define E5433_ATLAS_DIV0(cntclk, pclk_dbg, atclk, pclk, aclk) \ + (((cntclk) << 24) | ((pclk_dbg) << 20) | ((atclk) << 16) | \ + ((pclk) << 12) | ((aclk) << 8)) + +#define E5433_ATLAS_DIV1(hpm, copy) \ + (((hpm) << 4) | ((copy) << 0)) + +static const struct exynos_cpuclk_cfg_data exynos5433_atlasclk_d[] __initconst = { + { 1900000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), }, + { 1800000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), }, + { 1700000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), }, + { 1600000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), }, + { 1500000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 1400000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 1300000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 1200000, 
E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 1100000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 1000000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), }, + { 900000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), }, + { 800000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), }, + { 700000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), }, + { 600000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), }, + { 500000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), }, + { 0 }, }; static void __init exynos5433_cmu_atlas_init(struct device_node *np) { - samsung_cmu_register_one(np, &atlas_cmu_info); + void __iomem *reg_base; + struct samsung_clk_provider *ctx; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + panic("%s: failed to map registers\n", __func__); + return; + } + + ctx = samsung_clk_init(np, reg_base, ATLAS_NR_CLK); + if (!ctx) { + panic("%s: unable to allocate ctx\n", __func__); + return; + } + + samsung_clk_register_pll(ctx, atlas_pll_clks, + ARRAY_SIZE(atlas_pll_clks), reg_base); + samsung_clk_register_mux(ctx, atlas_mux_clks, + ARRAY_SIZE(atlas_mux_clks)); + samsung_clk_register_div(ctx, atlas_div_clks, + ARRAY_SIZE(atlas_div_clks)); + samsung_clk_register_gate(ctx, atlas_gate_clks, + ARRAY_SIZE(atlas_gate_clks)); + + exynos_register_cpu_clock(ctx, CLK_SCLK_ATLAS, "atlasclk", + mout_atlas_p[0], mout_atlas_p[1], 0x200, + exynos5433_atlasclk_d, ARRAY_SIZE(exynos5433_atlasclk_d), + CLK_CPU_HAS_E5433_REGS_LAYOUT); + + samsung_clk_sleep_init(reg_base, atlas_clk_regs, + ARRAY_SIZE(atlas_clk_regs)); + + samsung_clk_of_add_provider(np, ctx); } CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas", exynos5433_cmu_atlas_init); @@ -3853,7 +3944,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas", #define ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER1 0x0b0c #define ENABLE_IP_MSCL_SECURE_SMMU_JPEG 0x0b10 -static unsigned long mscl_clk_regs[] __initdata = { +static const unsigned long mscl_clk_regs[] __initconst = { MUX_SEL_MSCL0, MUX_SEL_MSCL1, MUX_ENABLE_MSCL0, @@ -3881,7 +3972,7 @@ PNAME(mout_aclk_mscl_400_user_p) = { "oscclk", "aclk_mscl_400", }; PNAME(mout_sclk_jpeg_p) = { "mout_sclk_jpeg_user", "mout_aclk_mscl_400_user", }; -static struct samsung_mux_clock mscl_mux_clks[] __initdata = { +static const struct samsung_mux_clock mscl_mux_clks[] __initconst = { /* MUX_SEL_MSCL0 */ MUX(CLK_MOUT_SCLK_JPEG_USER, "mout_sclk_jpeg_user", mout_sclk_jpeg_user_p, MUX_SEL_MSCL0, 4, 1), @@ -3893,13 +3984,13 @@ static struct samsung_mux_clock mscl_mux_clks[] __initdata = { MUX_SEL_MSCL1, 0, 1), }; -static struct samsung_div_clock mscl_div_clks[] __initdata = { +static const struct samsung_div_clock mscl_div_clks[] __initconst = { /* DIV_MSCL */ DIV(CLK_DIV_PCLK_MSCL, "div_pclk_mscl", "mout_aclk_mscl_400_user", DIV_MSCL, 0, 3), }; -static struct samsung_gate_clock mscl_gate_clks[] __initdata = { +static const struct samsung_gate_clock mscl_gate_clks[] __initconst = { /* ENABLE_ACLK_MSCL */ GATE(CLK_ACLK_BTS_JPEG, "aclk_bts_jpeg", "mout_aclk_mscl_400_user", ENABLE_ACLK_MSCL, 9, 0, 0), @@ -3977,7 +4068,7 @@ static struct samsung_gate_clock mscl_gate_clks[] __initdata = { CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; -static struct samsung_cmu_info mscl_cmu_info __initdata = { +static const struct samsung_cmu_info mscl_cmu_info __initconst = { .mux_clks = mscl_mux_clks, .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks), .div_clks = mscl_div_clks, @@ -4012,7 +4103,7 @@ 
CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl", #define ENABLE_IP_MFC1 0x0b04 #define ENABLE_IP_MFC_SECURE_SMMU_MFC 0x0b08 -static unsigned long mfc_clk_regs[] __initdata = { +static const unsigned long mfc_clk_regs[] __initconst = { MUX_SEL_MFC, MUX_ENABLE_MFC, DIV_MFC, @@ -4027,19 +4118,19 @@ static unsigned long mfc_clk_regs[] __initdata = { PNAME(mout_aclk_mfc_400_user_p) = { "oscclk", "aclk_mfc_400", }; -static struct samsung_mux_clock mfc_mux_clks[] __initdata = { +static const struct samsung_mux_clock mfc_mux_clks[] __initconst = { /* MUX_SEL_MFC */ MUX(CLK_MOUT_ACLK_MFC_400_USER, "mout_aclk_mfc_400_user", mout_aclk_mfc_400_user_p, MUX_SEL_MFC, 0, 0), }; -static struct samsung_div_clock mfc_div_clks[] __initdata = { +static const struct samsung_div_clock mfc_div_clks[] __initconst = { /* DIV_MFC */ DIV(CLK_DIV_PCLK_MFC, "div_pclk_mfc", "mout_aclk_mfc_400_user", DIV_MFC, 0, 2), }; -static struct samsung_gate_clock mfc_gate_clks[] __initdata = { +static const struct samsung_gate_clock mfc_gate_clks[] __initconst = { /* ENABLE_ACLK_MFC */ GATE(CLK_ACLK_BTS_MFC_1, "aclk_bts_mfc_1", "mout_aclk_mfc_400_user", ENABLE_ACLK_MFC, 6, 0, 0), @@ -4085,7 +4176,7 @@ static struct samsung_gate_clock mfc_gate_clks[] __initdata = { 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info mfc_cmu_info __initdata = { +static const struct samsung_cmu_info mfc_cmu_info __initconst = { .mux_clks = mfc_mux_clks, .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks), .div_clks = mfc_div_clks, @@ -4120,7 +4211,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc", #define ENABLE_IP_HEVC1 0x0b04 #define ENABLE_IP_HEVC_SECURE_SMMU_HEVC 0x0b08 -static unsigned long hevc_clk_regs[] __initdata = { +static const unsigned long hevc_clk_regs[] __initconst = { MUX_SEL_HEVC, MUX_ENABLE_HEVC, DIV_HEVC, @@ -4135,19 +4226,19 @@ static unsigned long hevc_clk_regs[] __initdata = { PNAME(mout_aclk_hevc_400_user_p) = { "oscclk", "aclk_hevc_400", }; -static struct samsung_mux_clock hevc_mux_clks[] __initdata = { +static const struct samsung_mux_clock hevc_mux_clks[] __initconst = { /* MUX_SEL_HEVC */ MUX(CLK_MOUT_ACLK_HEVC_400_USER, "mout_aclk_hevc_400_user", mout_aclk_hevc_400_user_p, MUX_SEL_HEVC, 0, 0), }; -static struct samsung_div_clock hevc_div_clks[] __initdata = { +static const struct samsung_div_clock hevc_div_clks[] __initconst = { /* DIV_HEVC */ DIV(CLK_DIV_PCLK_HEVC, "div_pclk_hevc", "mout_aclk_hevc_400_user", DIV_HEVC, 0, 2), }; -static struct samsung_gate_clock hevc_gate_clks[] __initdata = { +static const struct samsung_gate_clock hevc_gate_clks[] __initconst = { /* ENABLE_ACLK_HEVC */ GATE(CLK_ACLK_BTS_HEVC_1, "aclk_bts_hevc_1", "mout_aclk_hevc_400_user", ENABLE_ACLK_HEVC, 6, 0, 0), @@ -4195,7 +4286,7 @@ static struct samsung_gate_clock hevc_gate_clks[] __initdata = { 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info hevc_cmu_info __initdata = { +static const struct samsung_cmu_info hevc_cmu_info __initconst = { .mux_clks = hevc_mux_clks, .nr_mux_clks = ARRAY_SIZE(hevc_mux_clks), .div_clks = hevc_div_clks, @@ -4232,7 +4323,7 @@ CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc", #define ENABLE_IP_ISP2 0x0b08 #define ENABLE_IP_ISP3 0x0b0c -static unsigned long isp_clk_regs[] __initdata = { +static const unsigned long isp_clk_regs[] __initconst = { MUX_SEL_ISP, MUX_ENABLE_ISP, DIV_ISP, @@ -4250,7 +4341,7 @@ static unsigned long isp_clk_regs[] __initdata = { PNAME(mout_aclk_isp_dis_400_user_p) = { "oscclk", "aclk_isp_dis_400", }; PNAME(mout_aclk_isp_400_user_p) = 
{ "oscclk", "aclk_isp_400", }; -static struct samsung_mux_clock isp_mux_clks[] __initdata = { +static const struct samsung_mux_clock isp_mux_clks[] __initconst = { /* MUX_SEL_ISP */ MUX(CLK_MOUT_ACLK_ISP_DIS_400_USER, "mout_aclk_isp_dis_400_user", mout_aclk_isp_dis_400_user_p, MUX_SEL_ISP, 4, 0), @@ -4258,7 +4349,7 @@ static struct samsung_mux_clock isp_mux_clks[] __initdata = { mout_aclk_isp_400_user_p, MUX_SEL_ISP, 0, 0), }; -static struct samsung_div_clock isp_div_clks[] __initdata = { +static const struct samsung_div_clock isp_div_clks[] __initconst = { /* DIV_ISP */ DIV(CLK_DIV_PCLK_ISP_DIS, "div_pclk_isp_dis", "mout_aclk_isp_dis_400_user", DIV_ISP, 12, 3), @@ -4270,7 +4361,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = { "mout_aclk_isp_400_user", DIV_ISP, 0, 3), }; -static struct samsung_gate_clock isp_gate_clks[] __initdata = { +static const struct samsung_gate_clock isp_gate_clks[] __initconst = { /* ENABLE_ACLK_ISP0 */ GATE(CLK_ACLK_ISP_D_GLUE, "aclk_isp_d_glue", "mout_aclk_isp_400_user", ENABLE_ACLK_ISP0, 6, CLK_IGNORE_UNUSED, 0), @@ -4448,7 +4539,7 @@ static struct samsung_gate_clock isp_gate_clks[] __initdata = { 0, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info isp_cmu_info __initdata = { +static const struct samsung_cmu_info isp_cmu_info __initconst = { .mux_clks = isp_mux_clks, .nr_mux_clks = ARRAY_SIZE(isp_mux_clks), .div_clks = isp_div_clks, @@ -4504,7 +4595,7 @@ CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp", #define ENABLE_IP_CAM02 0X0b08 #define ENABLE_IP_CAM03 0X0b0C -static unsigned long cam0_clk_regs[] __initdata = { +static const unsigned long cam0_clk_regs[] __initconst = { MUX_SEL_CAM00, MUX_SEL_CAM01, MUX_SEL_CAM02, @@ -4588,14 +4679,14 @@ PNAME(mout_sclk_pixelasync_lite_c_init_a_p) = { "mout_aclk_cam0_552_user", "mout_aclk_cam0_400_user", }; -static struct samsung_fixed_rate_clock cam0_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock cam0_fixed_clks[] __initconst = { FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY, "phyclk_rxbyteclkhs0_s4_phy", NULL, 0, 100000000), FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY, "phyclk_rxbyteclkhs0_s2a_phy", NULL, 0, 100000000), }; -static struct samsung_mux_clock cam0_mux_clks[] __initdata = { +static const struct samsung_mux_clock cam0_mux_clks[] __initconst = { /* MUX_SEL_CAM00 */ MUX(CLK_MOUT_ACLK_CAM0_333_USER, "mout_aclk_cam0_333_user", mout_aclk_cam0_333_user_p, MUX_SEL_CAM00, 8, 1), @@ -4669,7 +4760,7 @@ static struct samsung_mux_clock cam0_mux_clks[] __initdata = { MUX_SEL_CAM04, 0, 1), }; -static struct samsung_div_clock cam0_div_clks[] __initdata = { +static const struct samsung_div_clock cam0_div_clks[] __initconst = { /* DIV_CAM00 */ DIV(CLK_DIV_PCLK_CAM0_50, "div_pclk_cam0_50", "div_aclk_cam0_200", DIV_CAM00, 8, 2), @@ -4716,7 +4807,7 @@ static struct samsung_div_clock cam0_div_clks[] __initdata = { "mout_sclk_pixelasync_lite_c_init_b", DIV_CAM03, 0, 3), }; -static struct samsung_gate_clock cam0_gate_clks[] __initdata = { +static const struct samsung_gate_clock cam0_gate_clks[] __initconst = { /* ENABLE_ACLK_CAM00 */ GATE(CLK_ACLK_CSIS1, "aclk_csis1", "div_aclk_csis1", ENABLE_ACLK_CAM00, 6, 0, 0), @@ -4923,7 +5014,7 @@ static struct samsung_gate_clock cam0_gate_clks[] __initdata = { ENABLE_SCLK_CAM0, 0, 0, 0), }; -static struct samsung_cmu_info cam0_cmu_info __initdata = { +static const struct samsung_cmu_info cam0_cmu_info __initconst = { .mux_clks = cam0_mux_clks, .nr_mux_clks = ARRAY_SIZE(cam0_mux_clks), .div_clks = cam0_div_clks, @@ -4970,7 +5061,7 @@ 
CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0", #define ENABLE_IP_CAM11 0X0b04 #define ENABLE_IP_CAM12 0X0b08 -static unsigned long cam1_clk_regs[] __initdata = { +static const unsigned long cam1_clk_regs[] __initconst = { MUX_SEL_CAM10, MUX_SEL_CAM11, MUX_SEL_CAM12, @@ -5016,12 +5107,12 @@ PNAME(mout_aclk_lite_c_b_p) = { "mout_aclk_lite_c_a", PNAME(mout_aclk_lite_c_a_p) = { "mout_aclk_cam1_552_user", "mout_aclk_cam1_400_user", }; -static struct samsung_fixed_rate_clock cam1_fixed_clks[] __initdata = { +static const struct samsung_fixed_rate_clock cam1_fixed_clks[] __initconst = { FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b_phy", NULL, 0, 100000000), }; -static struct samsung_mux_clock cam1_mux_clks[] __initdata = { +static const struct samsung_mux_clock cam1_mux_clks[] __initconst = { /* MUX_SEL_CAM10 */ MUX(CLK_MOUT_SCLK_ISP_UART_USER, "mout_sclk_isp_uart_user", mout_sclk_isp_uart_user_p, MUX_SEL_CAM10, 20, 1), @@ -5057,7 +5148,7 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = { MUX_SEL_CAM12, 0, 1), }; -static struct samsung_div_clock cam1_div_clks[] __initdata = { +static const struct samsung_div_clock cam1_div_clks[] __initconst = { /* DIV_CAM10 */ DIV(CLK_DIV_SCLK_ISP_MPWM, "div_sclk_isp_mpwm", "div_pclk_cam1_83", DIV_CAM10, 16, 2), @@ -5081,7 +5172,7 @@ static struct samsung_div_clock cam1_div_clks[] __initdata = { DIV_CAM11, 0, 3), }; -static struct samsung_gate_clock cam1_gate_clks[] __initdata = { +static const struct samsung_gate_clock cam1_gate_clks[] __initconst = { /* ENABLE_ACLK_CAM10 */ GATE(CLK_ACLK_ISP_GIC, "aclk_isp_gic", "mout_aclk_cam1_333_user", ENABLE_ACLK_CAM10, 4, 0, 0), @@ -5296,7 +5387,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = { ENABLE_SCLK_CAM1, 0, 0, 0), }; -static struct samsung_cmu_info cam1_cmu_info __initdata = { +static const struct samsung_cmu_info cam1_cmu_info __initconst = { .mux_clks = cam1_mux_clks, .nr_mux_clks = ARRAY_SIZE(cam1_mux_clks), .div_clks = cam1_div_clks, diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c index c57cff1e1798..a57d01b99b76 100644 --- a/drivers/clk/samsung/clk-exynos5440.c +++ b/drivers/clk/samsung/clk-exynos5440.c @@ -35,7 +35,7 @@ static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initda }; /* fixed rate clocks */ -static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initconst = { FRATE(0, "ppll", NULL, 0, 1000000000), FRATE(0, "usb_phy0", NULL, 0, 60000000), FRATE(0, "usb_phy1", NULL, 0, 60000000), @@ -44,26 +44,26 @@ static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = }; /* fixed factor clocks */ -static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initconst = { FFACTOR(0, "div250", "ppll", 1, 4, 0), FFACTOR(0, "div200", "ppll", 1, 5, 0), FFACTOR(0, "div125", "div250", 1, 2, 0), }; /* mux clocks */ -static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = { +static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = { MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1), MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p, CPU_CLK_STATUS, 0, 1, "armclk"), }; /* divider clocks */ -static struct samsung_div_clock exynos5440_div_clks[] __initdata = { +static const struct samsung_div_clock exynos5440_div_clks[] __initconst = { 
DIV(CLK_SPI_BAUD, "div_spi", "mout_spi", MISC_DOUT1, 3, 2), }; /* gate clocks */ -static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = { +static const struct samsung_gate_clock exynos5440_gate_clks[] __initconst = { GATE(CLK_PB0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0), GATE(CLK_PR0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0), GATE(CLK_PR1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0), @@ -125,8 +125,6 @@ static void __init exynos5440_clk_init(struct device_node *np) } ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks, ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match); diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index ad68d463b12c..5931a4140c3d 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -36,7 +36,7 @@ #define ENABLE_ACLK_TOPC1 0x0804 #define ENABLE_SCLK_TOPC1 0x0A04 -static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initconst = { FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_topc_bus0_pll", 1, 2, 0), FFACTOR(0, "ffac_topc_bus0_pll_div4", "ffac_topc_bus0_pll_div2", 1, 2, 0), @@ -69,7 +69,7 @@ PNAME(mout_topc_mfc_pll_half_p) = { "mout_topc_mfc_pll", PNAME(mout_topc_bus0_pll_out_p) = {"mout_topc_bus0_pll", "ffac_topc_bus0_pll_div2"}; -static unsigned long topc_clk_regs[] __initdata = { +static const unsigned long topc_clk_regs[] __initconst = { CC_PLL_LOCK, BUS0_PLL_LOCK, BUS1_DPLL_LOCK, @@ -89,7 +89,7 @@ static unsigned long topc_clk_regs[] __initdata = { DIV_TOPC3, }; -static struct samsung_mux_clock topc_mux_clks[] __initdata = { +static const struct samsung_mux_clock topc_mux_clks[] __initconst = { MUX(0, "mout_topc_bus0_pll", mout_topc_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1), MUX(0, "mout_topc_bus1_pll", mout_topc_bus1_pll_ctrl_p, @@ -118,7 +118,7 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = { MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2), }; -static struct samsung_div_clock topc_div_clks[] __initdata = { +static const struct samsung_div_clock topc_div_clks[] __initconst = { DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133", DIV_TOPC0, 4, 4), @@ -139,14 +139,14 @@ static struct samsung_div_clock topc_div_clks[] __initdata = { DIV_TOPC3, 28, 4), }; -static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = { +static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = { PLL_36XX_RATE(491520000, 20, 1, 0, 31457), {}, }; -static struct samsung_gate_clock topc_gate_clks[] __initdata = { +static const struct samsung_gate_clock topc_gate_clks[] __initconst = { GATE(ACLK_CCORE_133, "aclk_ccore_133", "dout_aclk_ccore_133", - ENABLE_ACLK_TOPC0, 4, 0, 0), + ENABLE_ACLK_TOPC0, 4, CLK_IS_CRITICAL, 0), GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532", ENABLE_ACLK_TOPC1, 20, 0, 0), @@ -174,7 +174,7 @@ static struct samsung_gate_clock topc_gate_clks[] __initdata = { ENABLE_SCLK_TOPC1, 0, 0, 0), }; -static struct samsung_pll_clock topc_pll_clks[] __initdata = { +static const struct samsung_pll_clock topc_pll_clks[] __initconst = { PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK, BUS0_PLL_CON0, NULL), PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK, @@ -187,7 +187,7 @@ static struct samsung_pll_clock topc_pll_clks[] 
__initdata = { AUD_PLL_CON0, pll1460x_24mhz_tbl), }; -static struct samsung_cmu_info topc_cmu_info __initdata = { +static const struct samsung_cmu_info topc_cmu_info __initconst = { .pll_clks = topc_pll_clks, .nr_pll_clks = ARRAY_SIZE(topc_pll_clks), .mux_clks = topc_mux_clks, @@ -256,7 +256,7 @@ PNAME(mout_top0_group3) = {"ioclk_audiocdclk0", PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll_user", "mout_top0_bus0_pll_half", "mout_top0_bus1_pll_half"}; -static unsigned long top0_clk_regs[] __initdata = { +static const unsigned long top0_clk_regs[] __initconst = { MUX_SEL_TOP00, MUX_SEL_TOP01, MUX_SEL_TOP03, @@ -275,7 +275,7 @@ static unsigned long top0_clk_regs[] __initdata = { ENABLE_SCLK_TOP0_PERIC3, }; -static struct samsung_mux_clock top0_mux_clks[] __initdata = { +static const struct samsung_mux_clock top0_mux_clks[] __initconst = { MUX(0, "mout_top0_aud_pll_user", mout_top0_aud_pll_user_p, MUX_SEL_TOP00, 0, 1), MUX(0, "mout_top0_mfc_pll_user", mout_top0_mfc_pll_user_p, @@ -315,7 +315,7 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = { MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2), }; -static struct samsung_div_clock top0_div_clks[] __initdata = { +static const struct samsung_div_clock top0_div_clks[] __initconst = { DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66", DIV_TOP03, 12, 6), DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66", @@ -338,7 +338,7 @@ static struct samsung_div_clock top0_div_clks[] __initdata = { DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12), }; -static struct samsung_gate_clock top0_gate_clks[] __initdata = { +static const struct samsung_gate_clock top0_gate_clks[] __initconst = { GATE(CLK_ACLK_PERIC0_66, "aclk_peric0_66", "dout_aclk_peric0_66", ENABLE_ACLK_TOP03, 20, CLK_SET_RATE_PARENT, 0), GATE(CLK_ACLK_PERIC1_66, "aclk_peric1_66", "dout_aclk_peric1_66", @@ -372,7 +372,7 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = { ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initconst = { FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll_user", 1, 2, 0), FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll_user", @@ -381,7 +381,7 @@ static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = { FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll_user", 1, 2, 0), }; -static struct samsung_cmu_info top0_cmu_info __initdata = { +static const struct samsung_cmu_info top0_cmu_info __initconst = { .mux_clks = top0_mux_clks, .nr_mux_clks = ARRAY_SIZE(top0_mux_clks), .div_clks = top0_div_clks, @@ -438,7 +438,7 @@ PNAME(mout_top1_group1) = {"mout_top1_bus0_pll_half", "mout_top1_bus1_pll_half", "mout_top1_cc_pll_half", "mout_top1_mfc_pll_half"}; -static unsigned long top1_clk_regs[] __initdata = { +static const unsigned long top1_clk_regs[] __initconst = { MUX_SEL_TOP10, MUX_SEL_TOP11, MUX_SEL_TOP13, @@ -455,7 +455,7 @@ static unsigned long top1_clk_regs[] __initdata = { ENABLE_SCLK_TOP1_FSYS11, }; -static struct samsung_mux_clock top1_mux_clks[] __initdata = { +static const struct samsung_mux_clock top1_mux_clks[] __initconst = { MUX(0, "mout_top1_mfc_pll_user", mout_top1_mfc_pll_user_p, MUX_SEL_TOP10, 4, 1), MUX(0, "mout_top1_cc_pll_user", mout_top1_cc_pll_user_p, @@ -494,7 +494,7 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = { 
MUX_SEL_TOP1_FSYS11, 24, 2), }; -static struct samsung_div_clock top1_div_clks[] __initdata = { +static const struct samsung_div_clock top1_div_clks[] __initconst = { DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200", DIV_TOP13, 24, 4), DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200", @@ -521,7 +521,7 @@ static struct samsung_div_clock top1_div_clks[] __initdata = { "mout_sclk_phy_fsys1_26m", DIV_TOP1_FSYS11, 24, 6), }; -static struct samsung_gate_clock top1_gate_clks[] __initdata = { +static const struct samsung_gate_clock top1_gate_clks[] __initconst = { GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2", ENABLE_SCLK_TOP1_FSYS0, 16, CLK_SET_RATE_PARENT, 0), GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300", @@ -539,7 +539,8 @@ static struct samsung_gate_clock top1_gate_clks[] __initdata = { ENABLE_SCLK_TOP1_FSYS11, 12, CLK_SET_RATE_PARENT, 0), GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200", - ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT, 0), + ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT | + CLK_IS_CRITICAL, 0), GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200", ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0), @@ -548,7 +549,7 @@ static struct samsung_gate_clock top1_gate_clks[] __initdata = { 24, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initconst = { FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll_user", 1, 2, 0), FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll_user", @@ -557,7 +558,7 @@ static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = { FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll_user", 1, 2, 0), }; -static struct samsung_cmu_info top1_cmu_info __initdata = { +static const struct samsung_cmu_info top1_cmu_info __initconst = { .mux_clks = top1_mux_clks, .nr_mux_clks = ARRAY_SIZE(top1_mux_clks), .div_clks = top1_div_clks, @@ -591,22 +592,22 @@ CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1", */ PNAME(mout_aclk_ccore_133_user_p) = { "fin_pll", "aclk_ccore_133" }; -static unsigned long ccore_clk_regs[] __initdata = { +static const unsigned long ccore_clk_regs[] __initconst = { MUX_SEL_CCORE, ENABLE_PCLK_CCORE, }; -static struct samsung_mux_clock ccore_mux_clks[] __initdata = { +static const struct samsung_mux_clock ccore_mux_clks[] __initconst = { MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_user_p, MUX_SEL_CCORE, 1, 1), }; -static struct samsung_gate_clock ccore_gate_clks[] __initdata = { +static const struct samsung_gate_clock ccore_gate_clks[] __initconst = { GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user", ENABLE_PCLK_CCORE, 8, 0, 0), }; -static struct samsung_cmu_info ccore_cmu_info __initdata = { +static const struct samsung_cmu_info ccore_cmu_info __initconst = { .mux_clks = ccore_mux_clks, .nr_mux_clks = ARRAY_SIZE(ccore_mux_clks), .gate_clks = ccore_gate_clks, @@ -633,20 +634,20 @@ CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore", PNAME(mout_aclk_peric0_66_user_p) = { "fin_pll", "aclk_peric0_66" }; PNAME(mout_sclk_uart0_user_p) = { "fin_pll", "sclk_uart0" }; -static unsigned long peric0_clk_regs[] __initdata = { +static const unsigned long peric0_clk_regs[] __initconst = { MUX_SEL_PERIC0, ENABLE_PCLK_PERIC0, ENABLE_SCLK_PERIC0, }; -static struct samsung_mux_clock peric0_mux_clks[] __initdata = { +static const struct samsung_mux_clock peric0_mux_clks[] __initconst = { 
MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_user_p, MUX_SEL_PERIC0, 0, 1), MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_user_p, MUX_SEL_PERIC0, 16, 1), }; -static struct samsung_gate_clock peric0_gate_clks[] __initdata = { +static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user", ENABLE_PCLK_PERIC0, 8, 0, 0), GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user", @@ -673,7 +674,7 @@ static struct samsung_gate_clock peric0_gate_clks[] __initdata = { GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0), }; -static struct samsung_cmu_info peric0_cmu_info __initdata = { +static const struct samsung_cmu_info peric0_cmu_info __initconst = { .mux_clks = peric0_mux_clks, .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks), .gate_clks = peric0_gate_clks, @@ -709,7 +710,7 @@ PNAME(mout_sclk_spi2_user_p) = { "fin_pll", "sclk_spi2" }; PNAME(mout_sclk_spi3_user_p) = { "fin_pll", "sclk_spi3" }; PNAME(mout_sclk_spi4_user_p) = { "fin_pll", "sclk_spi4" }; -static unsigned long peric1_clk_regs[] __initdata = { +static const unsigned long peric1_clk_regs[] __initconst = { MUX_SEL_PERIC10, MUX_SEL_PERIC11, MUX_SEL_PERIC12, @@ -717,7 +718,7 @@ static unsigned long peric1_clk_regs[] __initdata = { ENABLE_SCLK_PERIC10, }; -static struct samsung_mux_clock peric1_mux_clks[] __initdata = { +static const struct samsung_mux_clock peric1_mux_clks[] __initconst = { MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_user_p, MUX_SEL_PERIC10, 0, 1), @@ -739,7 +740,7 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = { MUX_SEL_PERIC11, 28, 1), }; -static struct samsung_gate_clock peric1_gate_clks[] __initdata = { +static const struct samsung_gate_clock peric1_gate_clks[] __initconst = { GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user", ENABLE_PCLK_PERIC1, 4, 0, 0), GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user", @@ -797,7 +798,7 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = { ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0), }; -static struct samsung_cmu_info peric1_cmu_info __initdata = { +static const struct samsung_cmu_info peric1_cmu_info __initconst = { .mux_clks = peric1_mux_clks, .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks), .gate_clks = peric1_gate_clks, @@ -825,7 +826,7 @@ CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1", /* List of parent clocks for Muxes in CMU_PERIS */ PNAME(mout_aclk_peris_66_user_p) = { "fin_pll", "aclk_peris_66" }; -static unsigned long peris_clk_regs[] __initdata = { +static const unsigned long peris_clk_regs[] __initconst = { MUX_SEL_PERIS, ENABLE_PCLK_PERIS, ENABLE_PCLK_PERIS_SECURE_CHIPID, @@ -833,12 +834,12 @@ static unsigned long peris_clk_regs[] __initdata = { ENABLE_SCLK_PERIS_SECURE_CHIPID, }; -static struct samsung_mux_clock peris_mux_clks[] __initdata = { +static const struct samsung_mux_clock peris_mux_clks[] __initconst = { MUX(0, "mout_aclk_peris_66_user", mout_aclk_peris_66_user_p, MUX_SEL_PERIS, 0, 1), }; -static struct samsung_gate_clock peris_gate_clks[] __initdata = { +static const struct samsung_gate_clock peris_gate_clks[] __initconst = { GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user", ENABLE_PCLK_PERIS, 6, 0, 0), GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user", @@ -852,7 +853,7 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = { GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0), }; -static struct samsung_cmu_info peris_cmu_info __initdata = 
{ +static const struct samsung_cmu_info peris_cmu_info __initconst = { .mux_clks = peris_mux_clks, .nr_mux_clks = ARRAY_SIZE(peris_mux_clks), .gate_clks = peris_gate_clks, @@ -893,12 +894,12 @@ PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p) = { "fin_pll", "phyclk_usbdrd300_udrd30_pipe_pclk" }; /* fixed rate clocks used in the FSYS0 block */ -static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = { +static const struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initconst = { FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL, 0, 60000000), FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL, 0, 125000000), }; -static unsigned long fsys0_clk_regs[] __initdata = { +static const unsigned long fsys0_clk_regs[] __initconst = { MUX_SEL_FSYS00, MUX_SEL_FSYS01, MUX_SEL_FSYS02, @@ -909,7 +910,7 @@ static unsigned long fsys0_clk_regs[] __initdata = { ENABLE_SCLK_FSYS04, }; -static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { +static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = { MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_user_p, MUX_SEL_FSYS00, 24, 1), @@ -926,7 +927,7 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { MUX_SEL_FSYS02, 28, 1), }; -static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { +static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = { GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user", ENABLE_ACLK_FSYS00, 3, 0, 0), GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user", @@ -960,7 +961,7 @@ static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { ENABLE_SCLK_FSYS04, 28, 0, 0), }; -static struct samsung_cmu_info fsys0_cmu_info __initdata = { +static const struct samsung_cmu_info fsys0_cmu_info __initconst = { .fixed_clks = fixed_rate_clks_fsys0, .nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys0), .mux_clks = fsys0_mux_clks, @@ -1005,7 +1006,7 @@ PNAME(mout_phyclk_ufs20_rx0_user_p) = { "fin_pll", "phyclk_ufs20_rx0_symbol" }; PNAME(mout_phyclk_ufs20_rx1_user_p) = { "fin_pll", "phyclk_ufs20_rx1_symbol" }; /* fixed rate clocks used in the FSYS1 block */ -static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = { +static const struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initconst = { FRATE(PHYCLK_UFS20_TX0_SYMBOL, "phyclk_ufs20_tx0_symbol", NULL, 0, 300000000), FRATE(PHYCLK_UFS20_RX0_SYMBOL, "phyclk_ufs20_rx0_symbol", NULL, @@ -1014,7 +1015,7 @@ static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = { 0, 300000000), }; -static unsigned long fsys1_clk_regs[] __initdata = { +static const unsigned long fsys1_clk_regs[] __initconst = { MUX_SEL_FSYS10, MUX_SEL_FSYS11, MUX_SEL_FSYS12, @@ -1026,7 +1027,7 @@ static unsigned long fsys1_clk_regs[] __initdata = { ENABLE_SCLK_FSYS13, }; -static struct samsung_mux_clock fsys1_mux_clks[] __initdata = { +static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = { MUX(MOUT_FSYS1_PHYCLK_SEL1, "mout_fsys1_phyclk_sel1", mout_fsys1_group_p, MUX_SEL_FSYS10, 16, 2), MUX(0, "mout_fsys1_phyclk_sel0", mout_fsys1_group_p, @@ -1049,12 +1050,12 @@ static struct samsung_mux_clock fsys1_mux_clks[] __initdata = { mout_phyclk_ufs20_tx0_user_p, MUX_SEL_FSYS12, 28, 1), }; -static struct samsung_div_clock fsys1_div_clks[] __initdata = { +static const struct samsung_div_clock fsys1_div_clks[] __initconst = { DIV(DOUT_PCLK_FSYS1, "dout_pclk_fsys1", "mout_aclk_fsys1_200_user", DIV_FSYS1, 0, 2), }; -static struct samsung_gate_clock fsys1_gate_clks[] __initdata = { +static 
const struct samsung_gate_clock fsys1_gate_clks[] __initconst = { GATE(SCLK_UFSUNIPRO20_USER, "sclk_ufsunipro20_user", "mout_sclk_ufsunipro20_user", ENABLE_SCLK_FSYS11, 20, 0, 0), @@ -1089,7 +1090,7 @@ static struct samsung_gate_clock fsys1_gate_clks[] __initdata = { ENABLE_SCLK_FSYS13, 24, CLK_IGNORE_UNUSED, 0), }; -static struct samsung_cmu_info fsys1_cmu_info __initdata = { +static const struct samsung_cmu_info fsys1_cmu_info __initconst = { .fixed_clks = fixed_rate_clks_fsys1, .nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys1), .mux_clks = fsys1_mux_clks, @@ -1119,22 +1120,22 @@ CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1", /* List of parent clocks for Muxes in CMU_MSCL */ PNAME(mout_aclk_mscl_532_user_p) = { "fin_pll", "aclk_mscl_532" }; -static unsigned long mscl_clk_regs[] __initdata = { +static const unsigned long mscl_clk_regs[] __initconst = { MUX_SEL_MSCL, DIV_MSCL, ENABLE_ACLK_MSCL, ENABLE_PCLK_MSCL, }; -static struct samsung_mux_clock mscl_mux_clks[] __initdata = { +static const struct samsung_mux_clock mscl_mux_clks[] __initconst = { MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532", mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1), }; -static struct samsung_div_clock mscl_div_clks[] __initdata = { +static const struct samsung_div_clock mscl_div_clks[] __initconst = { DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532", DIV_MSCL, 0, 3), }; -static struct samsung_gate_clock mscl_gate_clks[] __initdata = { +static const struct samsung_gate_clock mscl_gate_clks[] __initconst = { GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532", ENABLE_ACLK_MSCL, 31, 0, 0), @@ -1204,7 +1205,7 @@ static struct samsung_gate_clock mscl_gate_clks[] __initdata = { ENABLE_PCLK_MSCL, 20, 0, 0), }; -static struct samsung_cmu_info mscl_cmu_info __initdata = { +static const struct samsung_cmu_info mscl_cmu_info __initconst = { .mux_clks = mscl_mux_clks, .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks), .div_clks = mscl_div_clks, @@ -1238,7 +1239,7 @@ CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl", PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" }; PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" }; -static unsigned long aud_clk_regs[] __initdata = { +static const unsigned long aud_clk_regs[] __initconst = { MUX_SEL_AUD, DIV_AUD0, DIV_AUD1, @@ -1247,13 +1248,13 @@ static unsigned long aud_clk_regs[] __initdata = { ENABLE_SCLK_AUD, }; -static struct samsung_mux_clock aud_mux_clks[] __initdata = { +static const struct samsung_mux_clock aud_mux_clks[] __initconst = { MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1), MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1), MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1), }; -static struct samsung_div_clock aud_div_clks[] __initdata = { +static const struct samsung_div_clock aud_div_clks[] __initconst = { DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4), DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4), DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4), @@ -1265,7 +1266,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = { DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4), }; -static struct samsung_gate_clock aud_gate_clks[] __initdata = { +static const struct samsung_gate_clock aud_gate_clks[] __initconst = { GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm", ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0), GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s", @@ -1293,7 +1294,7 @@ static struct samsung_gate_clock 
aud_gate_clks[] __initdata = { GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0), }; -static struct samsung_cmu_info aud_cmu_info __initdata = { +static const struct samsung_cmu_info aud_cmu_info __initconst = { .mux_clks = aud_mux_clks, .nr_mux_clks = ARRAY_SIZE(aud_mux_clks), .div_clks = aud_div_clks, diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index b7dd396100d8..48139bd510f1 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -79,7 +79,7 @@ static unsigned long samsung_pll2126_recalc_rate(struct clk_hw *hw, u32 pll_con, mdiv, pdiv, sdiv; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLL2126_MDIV_SHIFT) & PLL2126_MDIV_MASK; pdiv = (pll_con >> PLL2126_PDIV_SHIFT) & PLL2126_PDIV_MASK; sdiv = (pll_con >> PLL2126_SDIV_SHIFT) & PLL2126_SDIV_MASK; @@ -112,7 +112,7 @@ static unsigned long samsung_pll3000_recalc_rate(struct clk_hw *hw, u32 pll_con, mdiv, pdiv, sdiv; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLL3000_MDIV_SHIFT) & PLL3000_MDIV_MASK; pdiv = (pll_con >> PLL3000_PDIV_SHIFT) & PLL3000_PDIV_MASK; sdiv = (pll_con >> PLL3000_SDIV_SHIFT) & PLL3000_SDIV_MASK; @@ -149,7 +149,7 @@ static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, pll_con; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK; pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK; sdiv = (pll_con >> PLL35XX_SDIV_SHIFT) & PLL35XX_SDIV_MASK; @@ -186,19 +186,19 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); if (!(samsung_pll35xx_mp_change(rate, tmp))) { /* If only s change, change just s value only*/ tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT); tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT; - __raw_writel(tmp, pll->con_reg); + writel_relaxed(tmp, pll->con_reg); return 0; } /* Set PLL lock time. 
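 *
 * (Aside on the accessor conversion in this patch: __raw_readl() and
 * __raw_writel() are native-endian and barrier-free, while
 * readl_relaxed()/writel_relaxed() still skip the barriers of
 * readl()/writel() but byte-swap on big-endian kernels, so the
 * little-endian PLL registers keep working regardless of kernel
 * endianness. A minimal sketch of the distinction, using the
 * standard <linux/io.h> accessors on a hypothetical register:
 *
 *	u32 v;
 *
 *	v = readl_relaxed(reg);			// LE access, no barrier
 *	writel_relaxed(v | BIT(0), reg);	// LE access, no barrier
 *	writel(v, reg);				// LE access, with barriers
 *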
*/ - __raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR, + writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ @@ -208,12 +208,12 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) | (rate->pdiv << PLL35XX_PDIV_SHIFT) | (rate->sdiv << PLL35XX_SDIV_SHIFT); - __raw_writel(tmp, pll->con_reg); + writel_relaxed(tmp, pll->con_reg); /* wait_lock_time */ do { cpu_relax(); - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); } while (!(tmp & (PLL35XX_LOCK_STAT_MASK << PLL35XX_LOCK_STAT_SHIFT))); return 0; @@ -253,8 +253,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw, s16 kdiv; u64 fvco = parent_rate; - pll_con0 = __raw_readl(pll->con_reg); - pll_con1 = __raw_readl(pll->con_reg + 4); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con1 = readl_relaxed(pll->con_reg + 4); mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK; pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK; sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK; @@ -294,20 +294,20 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - pll_con0 = __raw_readl(pll->con_reg); - pll_con1 = __raw_readl(pll->con_reg + 4); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con1 = readl_relaxed(pll->con_reg + 4); if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) { /* If only s change, change just s value only*/ pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT); pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT); - __raw_writel(pll_con0, pll->con_reg); + writel_relaxed(pll_con0, pll->con_reg); return 0; } /* Set PLL lock time. */ - __raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg); + writel_relaxed(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) | @@ -316,16 +316,16 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate, pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) | (rate->pdiv << PLL36XX_PDIV_SHIFT) | (rate->sdiv << PLL36XX_SDIV_SHIFT); - __raw_writel(pll_con0, pll->con_reg); + writel_relaxed(pll_con0, pll->con_reg); pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT); pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT; - __raw_writel(pll_con1, pll->con_reg + 4); + writel_relaxed(pll_con1, pll->con_reg + 4); /* wait_lock_time */ do { cpu_relax(); - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); } while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT))); return 0; @@ -366,7 +366,7 @@ static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, pll_con; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK; pdiv = (pll_con >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK; sdiv = (pll_con >> PLL45XX_SDIV_SHIFT) & PLL45XX_SDIV_MASK; @@ -409,14 +409,14 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - con0 = __raw_readl(pll->con_reg); - con1 = __raw_readl(pll->con_reg + 0x4); + con0 = readl_relaxed(pll->con_reg); + con1 = readl_relaxed(pll->con_reg + 0x4); if (!(samsung_pll45xx_mp_change(con0, con1, rate))) { /* If only s change, change just s value only*/ con0 &= ~(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT); con0 |= rate->sdiv << PLL45XX_SDIV_SHIFT; - __raw_writel(con0, pll->con_reg); + writel_relaxed(con0, 
pll->con_reg); return 0; } @@ -430,29 +430,29 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate, (rate->sdiv << PLL45XX_SDIV_SHIFT); /* Set PLL AFC value. */ - con1 = __raw_readl(pll->con_reg + 0x4); + con1 = readl_relaxed(pll->con_reg + 0x4); con1 &= ~(PLL45XX_AFC_MASK << PLL45XX_AFC_SHIFT); con1 |= (rate->afc << PLL45XX_AFC_SHIFT); /* Set PLL lock time. */ switch (pll->type) { case pll_4502: - __raw_writel(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg); + writel_relaxed(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg); break; case pll_4508: - __raw_writel(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg); + writel_relaxed(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg); break; default: break; } /* Set new configuration. */ - __raw_writel(con1, pll->con_reg + 0x4); - __raw_writel(con0, pll->con_reg); + writel_relaxed(con1, pll->con_reg + 0x4); + writel_relaxed(con0, pll->con_reg); /* Wait for locking. */ start = ktime_get(); - while (!(__raw_readl(pll->con_reg) & PLL45XX_LOCKED)) { + while (!(readl_relaxed(pll->con_reg) & PLL45XX_LOCKED)) { ktime_t delta = ktime_sub(ktime_get(), start); if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) { @@ -513,8 +513,8 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift; u64 fvco = parent_rate; - pll_con0 = __raw_readl(pll->con_reg); - pll_con1 = __raw_readl(pll->con_reg + 4); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con1 = readl_relaxed(pll->con_reg + 4); mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ? PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK); pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK; @@ -560,14 +560,14 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - con0 = __raw_readl(pll->con_reg); - con1 = __raw_readl(pll->con_reg + 0x4); + con0 = readl_relaxed(pll->con_reg); + con1 = readl_relaxed(pll->con_reg + 0x4); if (!(samsung_pll46xx_mpk_change(con0, con1, rate))) { /* If only s change, change just s value only*/ con0 &= ~(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT); con0 |= rate->sdiv << PLL46XX_SDIV_SHIFT; - __raw_writel(con0, pll->con_reg); + writel_relaxed(con0, pll->con_reg); return 0; } @@ -596,7 +596,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate, (rate->sdiv << PLL46XX_SDIV_SHIFT); /* Set PLL K, MFR and MRR values. */ - con1 = __raw_readl(pll->con_reg + 0x4); + con1 = readl_relaxed(pll->con_reg + 0x4); con1 &= ~((PLL46XX_KDIV_MASK << PLL46XX_KDIV_SHIFT) | (PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT) | (PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT)); @@ -605,13 +605,13 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate, (rate->mrr << PLL46XX_MRR_SHIFT); /* Write configuration to PLL */ - __raw_writel(lock, pll->lock_reg); - __raw_writel(con0, pll->con_reg); - __raw_writel(con1, pll->con_reg + 0x4); + writel_relaxed(lock, pll->lock_reg); + writel_relaxed(con0, pll->con_reg); + writel_relaxed(con1, pll->con_reg + 0x4); /* Wait for locking. 
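 *
 * The loop that follows bounds the poll with ktime instead of
 * spinning on the lock bit forever. A self-contained sketch of the
 * pattern under the same PLL_TIMEOUT_MS limit (the helper name and
 * -ETIMEDOUT return are illustrative; the driver itself logs an
 * error and bails inline):
 *
 *	static int wait_pll_locked(void __iomem *con, u32 locked)
 *	{
 *		ktime_t start = ktime_get();
 *
 *		while (!(readl_relaxed(con) & locked)) {
 *			ktime_t delta = ktime_sub(ktime_get(), start);
 *
 *			if (ktime_to_ms(delta) > PLL_TIMEOUT_MS)
 *				return -ETIMEDOUT;
 *			cpu_relax();
 *		}
 *		return 0;
 *	}
 *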
*/ start = ktime_get(); - while (!(__raw_readl(pll->con_reg) & PLL46XX_LOCKED)) { + while (!(readl_relaxed(pll->con_reg) & PLL46XX_LOCKED)) { ktime_t delta = ktime_sub(ktime_get(), start); if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) { @@ -656,7 +656,7 @@ static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, pll_con; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); if (pll->type == pll_6552_s3c2416) { mdiv = (pll_con >> PLL6552_MDIV_SHIFT_2416) & PLL6552_MDIV_MASK; pdiv = (pll_con >> PLL6552_PDIV_SHIFT_2416) & PLL6552_PDIV_MASK; @@ -696,8 +696,8 @@ static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1; u64 fvco = parent_rate; - pll_con0 = __raw_readl(pll->con_reg); - pll_con1 = __raw_readl(pll->con_reg + 0x4); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con1 = readl_relaxed(pll->con_reg + 0x4); mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK; pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK; sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK; @@ -734,7 +734,7 @@ static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw, u32 pll_con, mdiv, pdiv, sdiv; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK; pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK; sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK; @@ -752,7 +752,7 @@ static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw, u32 pll_con, mdiv, pdiv, sdiv; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK; pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK; sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK; @@ -778,7 +778,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); /* Change PLL PMS values */ tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) | @@ -787,7 +787,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate, tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) | (rate->pdiv << PLLS3C2410_PDIV_SHIFT) | (rate->sdiv << PLLS3C2410_SDIV_SHIFT); - __raw_writel(tmp, pll->con_reg); + writel_relaxed(tmp, pll->con_reg); /* Time to settle according to the manual */ udelay(300); @@ -798,7 +798,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate, static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable) { struct samsung_clk_pll *pll = to_clk_pll(hw); - u32 pll_en = __raw_readl(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); + u32 pll_en = readl_relaxed(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); u32 pll_en_orig = pll_en; if (enable) @@ -806,7 +806,7 @@ static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable) else pll_en |= BIT(bit); - __raw_writel(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); + writel_relaxed(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); /* if we started the UPLL, then allow to settle */ if (enable && (pll_en_orig & BIT(bit))) @@ -905,7 +905,7 @@ static unsigned long samsung_pll2550x_recalc_rate(struct clk_hw *hw, u32 r, p, m, s, pll_stat; u64 fvco = parent_rate; - pll_stat = 
__raw_readl(pll->reg_base + pll->offset * 3); + pll_stat = readl_relaxed(pll->reg_base + pll->offset * 3); r = (pll_stat >> PLL2550X_R_SHIFT) & PLL2550X_R_MASK; if (!r) return 0; @@ -983,7 +983,7 @@ static unsigned long samsung_pll2550xx_recalc_rate(struct clk_hw *hw, u32 mdiv, pdiv, sdiv, pll_con; u64 fvco = parent_rate; - pll_con = __raw_readl(pll->con_reg); + pll_con = readl_relaxed(pll->con_reg); mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK; pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK; sdiv = (pll_con >> PLL2550XX_S_SHIFT) & PLL2550XX_S_MASK; @@ -1019,19 +1019,19 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); if (!(samsung_pll2550xx_mp_change(rate->mdiv, rate->pdiv, tmp))) { /* If only s change, change just s value only*/ tmp &= ~(PLL2550XX_S_MASK << PLL2550XX_S_SHIFT); tmp |= rate->sdiv << PLL2550XX_S_SHIFT; - __raw_writel(tmp, pll->con_reg); + writel_relaxed(tmp, pll->con_reg); return 0; } /* Set PLL lock time. */ - __raw_writel(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg); + writel_relaxed(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ tmp &= ~((PLL2550XX_M_MASK << PLL2550XX_M_SHIFT) | @@ -1040,12 +1040,12 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate, tmp |= (rate->mdiv << PLL2550XX_M_SHIFT) | (rate->pdiv << PLL2550XX_P_SHIFT) | (rate->sdiv << PLL2550XX_S_SHIFT); - __raw_writel(tmp, pll->con_reg); + writel_relaxed(tmp, pll->con_reg); /* wait_lock_time */ do { cpu_relax(); - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); } while (!(tmp & (PLL2550XX_LOCK_STAT_MASK << PLL2550XX_LOCK_STAT_SHIFT))); @@ -1089,8 +1089,8 @@ static unsigned long samsung_pll2650xx_recalc_rate(struct clk_hw *hw, s16 kdiv; u64 fvco = parent_rate; - pll_con0 = __raw_readl(pll->con_reg); - pll_con2 = __raw_readl(pll->con_reg + 8); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con2 = readl_relaxed(pll->con_reg + 8); mdiv = (pll_con0 >> PLL2650XX_MDIV_SHIFT) & PLL2650XX_MDIV_MASK; pdiv = (pll_con0 >> PLL2650XX_PDIV_SHIFT) & PLL2650XX_PDIV_MASK; sdiv = (pll_con0 >> PLL2650XX_SDIV_SHIFT) & PLL2650XX_SDIV_MASK; @@ -1117,8 +1117,8 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate, return -EINVAL; } - pll_con0 = __raw_readl(pll->con_reg); - pll_con2 = __raw_readl(pll->con_reg + 8); + pll_con0 = readl_relaxed(pll->con_reg); + pll_con2 = readl_relaxed(pll->con_reg + 8); /* Change PLL PMS values */ pll_con0 &= ~(PLL2650XX_MDIV_MASK << PLL2650XX_MDIV_SHIFT | @@ -1135,13 +1135,13 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate, << PLL2650XX_KDIV_SHIFT; /* Set PLL lock time. 
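 *
 * For these fractional PLLs the 16-bit K field refines M, so the
 * output follows (assuming the usual Samsung 36xx/2650xx relation
 * implemented by the recalc callbacks above):
 *
 *	FOUT = FIN * (MDIV + KDIV / 65536) / (PDIV << SDIV)
 *
 * This is also why several of the set_rate paths above special-case
 * a pure S change and return without waiting for lock: S only scales
 * the post-divider, while any M/P/K change retunes the VCO and must
 * reprogram the lock counter below and poll for lock again.
 *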
*/ - __raw_writel(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg); + writel_relaxed(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg); - __raw_writel(pll_con0, pll->con_reg); - __raw_writel(pll_con2, pll->con_reg + 8); + writel_relaxed(pll_con0, pll->con_reg); + writel_relaxed(pll_con2, pll->con_reg + 8); do { - tmp = __raw_readl(pll->con_reg); + tmp = readl_relaxed(pll->con_reg); } while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT))); return 0; diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c index ec6fb14d951c..ae9a595c72d0 100644 --- a/drivers/clk/samsung/clk-s3c2410-dclk.c +++ b/drivers/clk/samsung/clk-s3c2410-dclk.c @@ -428,8 +428,9 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids); static struct platform_driver s3c24xx_dclk_driver = { .driver = { - .name = "s3c24xx-dclk", - .pm = &s3c24xx_dclk_pm_ops, + .name = "s3c24xx-dclk", + .pm = &s3c24xx_dclk_pm_ops, + .suppress_bind_attrs = true, }, .probe = s3c24xx_dclk_probe, .remove = s3c24xx_dclk_remove, diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index d7b011c1fcf8..d7a1e772d95a 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -374,8 +374,6 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, } ctx = samsung_clk_init(np, reg_base, NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); /* Register external clocks only in non-dt cases */ if (!np) diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c index effe3736ec6b..ec873ee15d37 100644 --- a/drivers/clk/samsung/clk-s3c2412.c +++ b/drivers/clk/samsung/clk-s3c2412.c @@ -265,8 +265,6 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f, } ctx = samsung_clk_init(np, reg_base, NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); /* Register external clocks only in non-dt cases */ if (!np) diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index 37562783b25e..5e24a17e10e6 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ b/drivers/clk/samsung/clk-s3c2443.c @@ -400,8 +400,6 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f, } ctx = samsung_clk_init(np, reg_base, NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); /* Register external clocks only in non-dt cases */ if (!np) diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 60aa775bd374..a48bd5f17330 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -471,8 +471,6 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, } ctx = samsung_clk_init(np, reg_base, NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); /* Register external clocks. 
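 *
 * (Note on the "if (!ctx) panic(...)" checks deleted in this and the
 * other samsung init paths in this patch: they were dead code,
 * because samsung_clk_init() already panics internally when its
 * allocation fails and therefore never returns NULL. Roughly, in
 * drivers/clk/samsung/clk.c:
 *
 *	ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL);
 *	if (!ctx)
 *		panic("could not allocate clock provider context.\n");
 *	...
 *	return ctx;	// never NULL
 *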
*/ if (!np) diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c index eefb84b22566..c66ed2d1450e 100644 --- a/drivers/clk/samsung/clk-s5pv210-audss.c +++ b/drivers/clk/samsung/clk-s5pv210-audss.c @@ -18,7 +18,7 @@ #include <linux/clk-provider.h> #include <linux/of_address.h> #include <linux/syscore_ops.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <dt-bindings/clock/s5pv210-audss.h> @@ -194,20 +194,6 @@ unregister: return ret; } -static int s5pv210_audss_clk_remove(struct platform_device *pdev) -{ - int i; - - of_clk_del_provider(pdev->dev.of_node); - - for (i = 0; i < clk_data.clk_num; i++) { - if (!IS_ERR(clk_table[i])) - clk_unregister(clk_table[i]); - } - - return 0; -} - static const struct of_device_id s5pv210_audss_clk_of_match[] = { { .compatible = "samsung,s5pv210-audss-clock", }, {}, @@ -216,10 +202,10 @@ static const struct of_device_id s5pv210_audss_clk_of_match[] = { static struct platform_driver s5pv210_audss_clk_driver = { .driver = { .name = "s5pv210-audss-clk", + .suppress_bind_attrs = true, .of_match_table = s5pv210_audss_clk_of_match, }, .probe = s5pv210_audss_clk_probe, - .remove = s5pv210_audss_clk_remove, }; static int __init s5pv210_audss_clk_init(void) @@ -227,14 +213,3 @@ static int __init s5pv210_audss_clk_init(void) return platform_driver_register(&s5pv210_audss_clk_driver); } core_initcall(s5pv210_audss_clk_init); - -static void __exit s5pv210_audss_clk_exit(void) -{ - platform_driver_unregister(&s5pv210_audss_clk_driver); -} -module_exit(s5pv210_audss_clk_exit); - -MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>"); -MODULE_DESCRIPTION("S5PV210 Audio Subsystem Clock Controller"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:s5pv210-audss-clk"); diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c index 52302262045d..fd2725710a6f 100644 --- a/drivers/clk/samsung/clk-s5pv210.c +++ b/drivers/clk/samsung/clk-s5pv210.c @@ -784,8 +784,6 @@ static void __init __s5pv210_clk_init(struct device_node *np, struct samsung_clk_provider *ctx; ctx = samsung_clk_init(np, reg_base, NR_CLKS); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); samsung_clk_register_mux(ctx, early_mux_clks, ARRAY_SIZE(early_mux_clks)); diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index f38a6c49f744..b7d87d6db9dc 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -346,9 +346,9 @@ static struct syscore_ops samsung_clk_syscore_ops = { .resume = samsung_clk_resume, }; -static void samsung_clk_sleep_init(void __iomem *reg_base, - const unsigned long *rdump, - unsigned long nr_rdump) +void samsung_clk_sleep_init(void __iomem *reg_base, + const unsigned long *rdump, + unsigned long nr_rdump) { struct samsung_clock_reg_cache *reg_cache; @@ -370,9 +370,9 @@ static void samsung_clk_sleep_init(void __iomem *reg_base, } #else -static void samsung_clk_sleep_init(void __iomem *reg_base, - const unsigned long *rdump, - unsigned long nr_rdump) {} +void samsung_clk_sleep_init(void __iomem *reg_base, + const unsigned long *rdump, + unsigned long nr_rdump) {} #endif /* @@ -381,7 +381,7 @@ static void samsung_clk_sleep_init(void __iomem *reg_base, */ struct samsung_clk_provider * __init samsung_cmu_register_one( struct device_node *np, - struct samsung_cmu_info *cmu) + const struct samsung_cmu_info *cmu) { void __iomem *reg_base; struct samsung_clk_provider *ctx; diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h 
index aa872d2c5105..da3bdebabf1e 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -261,7 +261,7 @@ struct samsung_gate_clock { #define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \ __GATE(_id, dname, cname, pname, o, b, f, gf, a) -#define PNAME(x) static const char *x[] __initdata +#define PNAME(x) static const char * const x[] __initconst /** * struct samsung_clk_reg_dump: register dump of clock controller registers. @@ -330,28 +330,28 @@ struct samsung_clock_reg_cache { struct samsung_cmu_info { /* list of pll clocks and respective count */ - struct samsung_pll_clock *pll_clks; + const struct samsung_pll_clock *pll_clks; unsigned int nr_pll_clks; /* list of mux clocks and respective count */ - struct samsung_mux_clock *mux_clks; + const struct samsung_mux_clock *mux_clks; unsigned int nr_mux_clks; /* list of div clocks and respective count */ - struct samsung_div_clock *div_clks; + const struct samsung_div_clock *div_clks; unsigned int nr_div_clks; /* list of gate clocks and respective count */ - struct samsung_gate_clock *gate_clks; + const struct samsung_gate_clock *gate_clks; unsigned int nr_gate_clks; /* list of fixed clocks and respective count */ - struct samsung_fixed_rate_clock *fixed_clks; + const struct samsung_fixed_rate_clock *fixed_clks; unsigned int nr_fixed_clks; /* list of fixed factor clocks and respective count */ - struct samsung_fixed_factor_clock *fixed_factor_clks; + const struct samsung_fixed_factor_clock *fixed_factor_clks; unsigned int nr_fixed_factor_clks; /* total number of clocks with IDs assigned*/ unsigned int nr_clk_ids; /* list and number of clocks registers */ - unsigned long *clk_regs; + const unsigned long *clk_regs; unsigned int nr_clk_regs; }; @@ -395,10 +395,14 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, extern struct samsung_clk_provider __init *samsung_cmu_register_one( struct device_node *, - struct samsung_cmu_info *); + const struct samsung_cmu_info *); extern unsigned long _get_rate(const char *clk_name); +extern void samsung_clk_sleep_init(void __iomem *reg_base, + const unsigned long *rdump, + unsigned long nr_rdump); + extern void samsung_clk_save(void __iomem *base, struct samsung_clk_reg_dump *rd, unsigned int num_regs); diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 627267c7ec5c..546bd79c8e3a 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -267,7 +267,6 @@ static void __init st_of_flexgen_setup(struct device_node *np) const char **parents; int num_parents, i; spinlock_t *rlock = NULL; - unsigned long flex_flags = 0; int ret; pnode = of_get_parent(np); @@ -308,12 +307,15 @@ static void __init st_of_flexgen_setup(struct device_node *np) for (i = 0; i < clk_data->clk_num; i++) { struct clk *clk; const char *clk_name; + unsigned long flex_flags = 0; if (of_property_read_string_index(np, "clock-output-names", i, &clk_name)) { break; } + of_clk_detect_critical(np, i, &flex_flags); + /* * If we read an empty clock name then the output is unused */ diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index dec4eaaecc00..09afeb85109c 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -1027,7 +1027,7 @@ static const struct clk_ops st_quadfs_ops = { static struct clk * __init st_clk_register_quadfs_fsynth( const char *name, const char *parent_name, struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan, - spinlock_t *lock) + unsigned long flags, spinlock_t *lock) 
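/*
 * The new "flags" argument threads device-tree-discovered flags into
 * clk_init_data.flags below. The callers added by this patch populate
 * it via of_clk_detect_critical(), which sets CLK_IS_CRITICAL for any
 * output index listed in the node's "clock-critical" property.
 * Caller-side sketch (mirrors st_of_create_quadfs_fsynths() further
 * down):
 *
 *	unsigned long flags = 0;
 *
 *	of_clk_detect_critical(np, fschan, &flags);
 *	clk = st_clk_register_quadfs_fsynth(clk_name, pll_name, quadfs,
 *					    reg, fschan, flags, lock);
 */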
{ struct st_clk_quadfs_fsynth *fs; struct clk *clk; @@ -1045,7 +1045,7 @@ static struct clk * __init st_clk_register_quadfs_fsynth( init.name = name; init.ops = &st_quadfs_ops; - init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC; + init.flags = flags | CLK_GET_RATE_NOCACHE | CLK_IS_BASIC; init.parent_names = &parent_name; init.num_parents = 1; @@ -1115,6 +1115,7 @@ static void __init st_of_create_quadfs_fsynths( for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) { struct clk *clk; const char *clk_name; + unsigned long flags = 0; if (of_property_read_string_index(np, "clock-output-names", fschan, &clk_name)) { @@ -1127,8 +1128,11 @@ static void __init st_of_create_quadfs_fsynths( if (*clk_name == '\0') continue; + of_clk_detect_critical(np, fschan, &flags); + clk = st_clk_register_quadfs_fsynth(clk_name, pll_name, - quadfs, reg, fschan, lock); + quadfs, reg, fschan, + flags, lock); /* * If there was an error registering this clock output, clean diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 38f6f3a9098e..0b5990e82e0d 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -840,7 +840,7 @@ static const struct clk_ops stm_pll4600c28_ops = { static struct clk * __init clkgen_pll_register(const char *parent_name, struct clkgen_pll_data *pll_data, - void __iomem *reg, + void __iomem *reg, unsigned long pll_flags, const char *clk_name, spinlock_t *lock) { struct clkgen_pll *pll; @@ -854,7 +854,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name, init.name = clk_name; init.ops = pll_data->ops; - init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE; + init.flags = pll_flags | CLK_IS_BASIC | CLK_GET_RATE_NOCACHE; init.parent_names = &parent_name; init.num_parents = 1; @@ -948,7 +948,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np) */ clk_data->clks[0] = clkgen_pll_register(parent_name, (struct clkgen_pll_data *) &st_pll1600c65_ax, - reg + CLKGENAx_PLL0_OFFSET, clk_name, NULL); + reg + CLKGENAx_PLL0_OFFSET, 0, clk_name, NULL); if (IS_ERR(clk_data->clks[0])) goto err; @@ -977,7 +977,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np) */ clk_data->clks[2] = clkgen_pll_register(parent_name, (struct clkgen_pll_data *) &st_pll800c65_ax, - reg + CLKGENAx_PLL1_OFFSET, clk_name, NULL); + reg + CLKGENAx_PLL1_OFFSET, 0, clk_name, NULL); if (IS_ERR(clk_data->clks[2])) goto err; @@ -995,7 +995,7 @@ CLK_OF_DECLARE(clkgena_c65_plls, static struct clk * __init clkgen_odf_register(const char *parent_name, void __iomem *reg, struct clkgen_pll_data *pll_data, - int odf, + unsigned long pll_flags, int odf, spinlock_t *odf_lock, const char *odf_name) { @@ -1004,7 +1004,7 @@ static struct clk * __init clkgen_odf_register(const char *parent_name, struct clk_gate *gate; struct clk_divider *div; - flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT; + flags = pll_flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT; gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) @@ -1099,6 +1099,7 @@ static void __init clkgen_c32_pll_setup(struct device_node *np) int num_odfs, odf; struct clk_onecell_data *clk_data; struct clkgen_pll_data *data; + unsigned long pll_flags = 0; match = of_match_node(c32_pll_of_match, np); if (!match) { @@ -1116,8 +1117,10 @@ static void __init clkgen_c32_pll_setup(struct device_node *np) if (!pll_base) return; - clk = clkgen_pll_register(parent_name, data, pll_base, np->name, - data->lock); + of_clk_detect_critical(np, 0, &pll_flags); + + clk = clkgen_pll_register(parent_name, data, pll_base, 
pll_flags, + np->name, data->lock); if (IS_ERR(clk)) return; @@ -1139,12 +1142,15 @@ static void __init clkgen_c32_pll_setup(struct device_node *np) for (odf = 0; odf < num_odfs; odf++) { struct clk *clk; const char *clk_name; + unsigned long odf_flags = 0; if (of_property_read_string_index(np, "clock-output-names", odf, &clk_name)) return; - clk = clkgen_odf_register(pll_name, pll_base, data, + of_clk_detect_critical(np, odf, &odf_flags); + + clk = clkgen_odf_register(pll_name, pll_base, data, odf_flags, odf, &clkgena_c32_odf_lock, clk_name); if (IS_ERR(clk)) goto err; @@ -1206,7 +1212,8 @@ static void __init clkgengpu_c32_pll_setup(struct device_node *np) /* * PLL 1200MHz output */ - clk = clkgen_pll_register(parent_name, data, reg, clk_name, data->lock); + clk = clkgen_pll_register(parent_name, data, reg, + 0, clk_name, data->lock); if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig new file mode 100644 index 000000000000..2afcbd39e41e --- /dev/null +++ b/drivers/clk/sunxi-ng/Kconfig @@ -0,0 +1,65 @@ +config SUNXI_CCU + bool "Clock support for Allwinner SoCs" + default ARCH_SUNXI + +if SUNXI_CCU + +# Base clock types + +config SUNXI_CCU_DIV + bool + select SUNXI_CCU_MUX + +config SUNXI_CCU_FRAC + bool + +config SUNXI_CCU_GATE + bool + +config SUNXI_CCU_MUX + bool + +config SUNXI_CCU_PHASE + bool + +# Multi-factor clocks + +config SUNXI_CCU_NK + bool + select SUNXI_CCU_GATE + +config SUNXI_CCU_NKM + bool + select RATIONAL + select SUNXI_CCU_GATE + +config SUNXI_CCU_NKMP + bool + select RATIONAL + select SUNXI_CCU_GATE + +config SUNXI_CCU_NM + bool + select RATIONAL + select SUNXI_CCU_FRAC + select SUNXI_CCU_GATE + +config SUNXI_CCU_MP + bool + select SUNXI_CCU_GATE + select SUNXI_CCU_MUX + +# SoC Drivers + +config SUN8I_H3_CCU + bool "Support for the Allwinner H3 CCU" + select SUNXI_CCU_DIV + select SUNXI_CCU_NK + select SUNXI_CCU_NKM + select SUNXI_CCU_NKMP + select SUNXI_CCU_NM + select SUNXI_CCU_MP + select SUNXI_CCU_PHASE + default MACH_SUN8I + +endif diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile new file mode 100644 index 000000000000..633ce642ffae --- /dev/null +++ b/drivers/clk/sunxi-ng/Makefile @@ -0,0 +1,20 @@ +# Common objects +obj-$(CONFIG_SUNXI_CCU) += ccu_common.o +obj-$(CONFIG_SUNXI_CCU) += ccu_reset.o + +# Base clock types +obj-$(CONFIG_SUNXI_CCU_DIV) += ccu_div.o +obj-$(CONFIG_SUNXI_CCU_FRAC) += ccu_frac.o +obj-$(CONFIG_SUNXI_CCU_GATE) += ccu_gate.o +obj-$(CONFIG_SUNXI_CCU_MUX) += ccu_mux.o +obj-$(CONFIG_SUNXI_CCU_PHASE) += ccu_phase.o + +# Multi-factor clocks +obj-$(CONFIG_SUNXI_CCU_NK) += ccu_nk.o +obj-$(CONFIG_SUNXI_CCU_NKM) += ccu_nkm.o +obj-$(CONFIG_SUNXI_CCU_NKMP) += ccu_nkmp.o +obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o +obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o + +# SoC support +obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c new file mode 100644 index 000000000000..9af359544110 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -0,0 +1,826 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/of_address.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_mult.h" +#include "ccu_nk.h" +#include "ccu_nkm.h" +#include "ccu_nkmp.h" +#include "ccu_nm.h" +#include "ccu_phase.h" + +#include "ccu-sun8i-h3.h" + +static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpux_clk, "pll-cpux", + "osc24M", 0x000, + 8, 5, /* N */ + 4, 2, /* K */ + 0, 2, /* M */ + 16, 2, /* P */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +/* + * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from + * the base (2x, 4x and 8x), and one variable divider (the one true + * pll audio). + * + * We don't have any need for the variable divider for now, so we just + * hardcode it to match with the clock names + */ +#define SUN8I_H3_PLL_AUDIO_REG 0x008 + +static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", + "osc24M", 0x008, + 8, 7, /* N */ + 0, 5, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video", + "osc24M", 0x0010, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", + "osc24M", 0x0018, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr", + "osc24M", 0x020, + 8, 5, /* N */ + 4, 2, /* K */ + 0, 2, /* M */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0", + "osc24M", 0x028, + 8, 5, /* N */ + 4, 2, /* K */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", + "osc24M", 0x0038, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1", + "osc24M", 0x044, + 8, 5, /* N */ + 4, 2, /* K */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de", + "osc24M", 0x0048, + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + 0); + +static const char * const cpux_parents[] = { "osc32k", "osc24M", + "pll-cpux" , "pll-cpux" }; +static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents, + 0x050, 16, 2, CLK_IS_CRITICAL); + +static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0); + +static const char * const ahb1_parents[] = { "osc32k", "osc24M", + "axi" , "pll-periph0" }; +static struct ccu_div ahb1_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + + .mux = { + .shift = 12, + .width = 2, + + .variable_prediv = { + 
.index = 3, + .shift = 6, + .width = 2, + }, + }, + + .common = { + .reg = 0x054, + .features = CCU_FEATURE_VARIABLE_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ahb1", + ahb1_parents, + &ccu_div_ops, + 0), + }, +}; + +static struct clk_div_table apb1_div_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1", + 0x054, 8, 2, apb1_div_table, 0); + +static const char * const apb2_parents[] = { "osc32k", "osc24M", + "pll-periph0" , "pll-periph0" }; +static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058, + 0, 5, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + 0); + +static const char * const ahb2_parents[] = { "ahb1" , "pll-periph0" }; +static struct ccu_mux ahb2_clk = { + .mux = { + .shift = 0, + .width = 1, + + .fixed_prediv = { + .index = 1, + .div = 2, + }, + }, + + .common = { + .reg = 0x05c, + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ahb2", + ahb2_parents, + &ccu_mux_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1", + 0x060, BIT(5), 0); +static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1", + 0x060, BIT(6), 0); +static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1", + 0x060, BIT(8), 0); +static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1", + 0x060, BIT(9), 0); +static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1", + 0x060, BIT(10), 0); +static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1", + 0x060, BIT(13), 0); +static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1", + 0x060, BIT(14), 0); +static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2", + 0x060, BIT(17), 0); +static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1", + 0x060, BIT(18), 0); +static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1", + 0x060, BIT(19), 0); +static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1", + 0x060, BIT(20), 0); +static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1", + 0x060, BIT(21), 0); +static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1", + 0x060, BIT(23), 0); +static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1", + 0x060, BIT(24), 0); +static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2", + 0x060, BIT(25), 0); +static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb2", + 0x060, BIT(26), 0); +static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb2", + 0x060, BIT(27), 0); +static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1", + 0x060, BIT(28), 0); +static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2", + 0x060, BIT(29), 0); +static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb2", + 0x060, BIT(30), 0); +static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb2", + 0x060, BIT(31), 0); + +static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1", + 0x064, BIT(0), 0); +static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1", + 0x064, BIT(3), 0); +static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1", + 0x064, BIT(4), 0); +static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1", + 0x064, BIT(5), 0); +static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1", + 0x064, BIT(8), 0); +static SUNXI_CCU_GATE(bus_tve_clk, "bus-tve", "ahb1", + 0x064, BIT(9), 0); +static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1", + 0x064, BIT(11), 0); +static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1", + 0x064, BIT(12), 0); +static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1", + 0x064, BIT(20), 0); +static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1", + 0x064, BIT(21), 0); +static 
SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1", + 0x064, BIT(22), 0); + +static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1", + 0x068, BIT(0), 0); +static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", + 0x068, BIT(1), 0); +static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1", + 0x068, BIT(5), 0); +static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", + 0x068, BIT(8), 0); +static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", + 0x068, BIT(12), 0); +static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", + 0x068, BIT(13), 0); +static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", + 0x068, BIT(14), 0); + +static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", + 0x06c, BIT(0), 0); +static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", + 0x06c, BIT(1), 0); +static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", + 0x06c, BIT(2), 0); +static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", + 0x06c, BIT(16), 0); +static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", + 0x06c, BIT(17), 0); +static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", + 0x06c, BIT(18), 0); +static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", + 0x06c, BIT(19), 0); +static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2", + 0x06c, BIT(20), 0); + +static SUNXI_CCU_GATE(bus_ephy_clk, "bus-ephy", "ahb1", + 0x070, BIT(0), 0); +static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1", + 0x070, BIT(7), 0); + +static struct clk_div_table ths_div_table[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, + { /* Sentinel */ }, +}; +static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M", + 0x074, 0, 2, ths_div_table, BIT(31), 0); + +static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0", + "pll-periph1" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0", + 0x088, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0", + 0x088, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1", + 0x08c, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1", + 0x08c, 8, 3, 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2", + 0x090, 20, 3, 0); +static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2", + 0x090, 8, 3, 0); + +static const char * const ts_parents[] = { "osc24M", "pll-periph0", }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mod0_default_parents, 0x09c, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1",
mod0_default_parents, 0x0a4, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", + "pll-audio-2x", "pll-audio" }; +static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, + 0x0b0, 16, 2, BIT(31), 0); + +static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, + 0x0b4, 16, 2, BIT(31), 0); + +static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents, + 0x0b8, 16, 2, BIT(31), 0); + +static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio", + 0x0c0, 0, 4, BIT(31), 0); + +static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", + 0x0cc, BIT(8), 0); +static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", + 0x0cc, BIT(9), 0); +static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M", + 0x0cc, BIT(10), 0); +static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc24M", + 0x0cc, BIT(11), 0); +static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M", + 0x0cc, BIT(16), 0); +static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc24M", + 0x0cc, BIT(17), 0); +static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M", + 0x0cc, BIT(18), 0); +static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc24M", + 0x0cc, BIT(19), 0); + +static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" }; +static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents, + 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL); + +static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram", + 0x100, BIT(0), 0); +static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram", + 0x100, BIT(1), 0); +static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram", + 0x100, BIT(2), 0); +static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram", + 0x100, BIT(3), 0); + +static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" }; +static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, + 0x104, 0, 4, 24, 3, BIT(31), 0); + +static const char * const tcon_parents[] = { "pll-video" }; +static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents, + 0x118, 0, 4, 24, 3, BIT(31), 0); + +static const char * const tve_parents[] = { "pll-de", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(tve_clk, "tve", tve_parents, + 0x120, 0, 4, 24, 3, BIT(31), 0); + +static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents, + 0x124, 0, 4, 24, 3, BIT(31), 0); + +static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", + 0x130, BIT(31), 0); + +static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents, + 0x134, 16, 4, 24, 3, BIT(31), 0); + +static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph0" }; +static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, + 0x134, 0, 5, 8, 3, BIT(15), 0); + +static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", + 0x13c, 16, 3, BIT(31), 0); + +static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", + 0x140, BIT(31), 0); +static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", + 0x144, BIT(31), 0); + +static const char * const hdmi_parents[] = { "pll-video" }; +static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, + 0x150, 0, 4, 24, 2, BIT(31), 0); + +static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", + 0x154, BIT(31), 0); + +static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x", "pll-ddr" }; +static 
SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, + 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL); + +static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", + 0x1a0, 0, 3, BIT(31), 0); + +static struct ccu_common *sun8i_h3_ccu_clks[] = { + &pll_cpux_clk.common, + &pll_audio_base_clk.common, + &pll_video_clk.common, + &pll_ve_clk.common, + &pll_ddr_clk.common, + &pll_periph0_clk.common, + &pll_gpu_clk.common, + &pll_periph1_clk.common, + &pll_de_clk.common, + &cpux_clk.common, + &axi_clk.common, + &ahb1_clk.common, + &apb1_clk.common, + &apb2_clk.common, + &ahb2_clk.common, + &bus_ce_clk.common, + &bus_dma_clk.common, + &bus_mmc0_clk.common, + &bus_mmc1_clk.common, + &bus_mmc2_clk.common, + &bus_nand_clk.common, + &bus_dram_clk.common, + &bus_emac_clk.common, + &bus_ts_clk.common, + &bus_hstimer_clk.common, + &bus_spi0_clk.common, + &bus_spi1_clk.common, + &bus_otg_clk.common, + &bus_ehci0_clk.common, + &bus_ehci1_clk.common, + &bus_ehci2_clk.common, + &bus_ehci3_clk.common, + &bus_ohci0_clk.common, + &bus_ohci1_clk.common, + &bus_ohci2_clk.common, + &bus_ohci3_clk.common, + &bus_ve_clk.common, + &bus_tcon0_clk.common, + &bus_tcon1_clk.common, + &bus_deinterlace_clk.common, + &bus_csi_clk.common, + &bus_tve_clk.common, + &bus_hdmi_clk.common, + &bus_de_clk.common, + &bus_gpu_clk.common, + &bus_msgbox_clk.common, + &bus_spinlock_clk.common, + &bus_codec_clk.common, + &bus_spdif_clk.common, + &bus_pio_clk.common, + &bus_ths_clk.common, + &bus_i2s0_clk.common, + &bus_i2s1_clk.common, + &bus_i2s2_clk.common, + &bus_i2c0_clk.common, + &bus_i2c1_clk.common, + &bus_i2c2_clk.common, + &bus_uart0_clk.common, + &bus_uart1_clk.common, + &bus_uart2_clk.common, + &bus_uart3_clk.common, + &bus_scr_clk.common, + &bus_ephy_clk.common, + &bus_dbg_clk.common, + &ths_clk.common, + &nand_clk.common, + &mmc0_clk.common, + &mmc0_sample_clk.common, + &mmc0_output_clk.common, + &mmc1_clk.common, + &mmc1_sample_clk.common, + &mmc1_output_clk.common, + &mmc2_clk.common, + &mmc2_sample_clk.common, + &mmc2_output_clk.common, + &ts_clk.common, + &ce_clk.common, + &spi0_clk.common, + &spi1_clk.common, + &i2s0_clk.common, + &i2s1_clk.common, + &i2s2_clk.common, + &spdif_clk.common, + &usb_phy0_clk.common, + &usb_phy1_clk.common, + &usb_phy2_clk.common, + &usb_phy3_clk.common, + &usb_ohci0_clk.common, + &usb_ohci1_clk.common, + &usb_ohci2_clk.common, + &usb_ohci3_clk.common, + &dram_clk.common, + &dram_ve_clk.common, + &dram_csi_clk.common, + &dram_deinterlace_clk.common, + &dram_ts_clk.common, + &de_clk.common, + &tcon_clk.common, + &tve_clk.common, + &deinterlace_clk.common, + &csi_misc_clk.common, + &csi_sclk_clk.common, + &csi_mclk_clk.common, + &ve_clk.common, + &ac_dig_clk.common, + &avs_clk.common, + &hdmi_clk.common, + &hdmi_ddc_clk.common, + &mbus_clk.common, + &gpu_clk.common, +}; + +/* We hardcode the divider to 4 for now */ +static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", + "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", + "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", + "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", + "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", + "pll-periph0", 1, 2, 0); + +static struct clk_hw_onecell_data sun8i_h3_hw_clks = { + .hws = { + [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw, + [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw, + 
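+ /* The pll-audio fixed factors below only hold because sun8i_h3_ccu_setup() forces the audio PLL divider to 4 (see SUN8I_H3_PLL_AUDIO_REG). */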
[CLK_PLL_AUDIO] = &pll_audio_clk.hw, + [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw, + [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw, + [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw, + [CLK_PLL_VIDEO] = &pll_video_clk.common.hw, + [CLK_PLL_VE] = &pll_ve_clk.common.hw, + [CLK_PLL_DDR] = &pll_ddr_clk.common.hw, + [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, + [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, + [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, + [CLK_PLL_DE] = &pll_de_clk.common.hw, + [CLK_CPUX] = &cpux_clk.common.hw, + [CLK_AXI] = &axi_clk.common.hw, + [CLK_AHB1] = &ahb1_clk.common.hw, + [CLK_APB1] = &apb1_clk.common.hw, + [CLK_APB2] = &apb2_clk.common.hw, + [CLK_AHB2] = &ahb2_clk.common.hw, + [CLK_BUS_CE] = &bus_ce_clk.common.hw, + [CLK_BUS_DMA] = &bus_dma_clk.common.hw, + [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw, + [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw, + [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw, + [CLK_BUS_NAND] = &bus_nand_clk.common.hw, + [CLK_BUS_DRAM] = &bus_dram_clk.common.hw, + [CLK_BUS_EMAC] = &bus_emac_clk.common.hw, + [CLK_BUS_TS] = &bus_ts_clk.common.hw, + [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw, + [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw, + [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw, + [CLK_BUS_OTG] = &bus_otg_clk.common.hw, + [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw, + [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw, + [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw, + [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw, + [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw, + [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw, + [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw, + [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw, + [CLK_BUS_VE] = &bus_ve_clk.common.hw, + [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw, + [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw, + [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw, + [CLK_BUS_CSI] = &bus_csi_clk.common.hw, + [CLK_BUS_TVE] = &bus_tve_clk.common.hw, + [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw, + [CLK_BUS_DE] = &bus_de_clk.common.hw, + [CLK_BUS_GPU] = &bus_gpu_clk.common.hw, + [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw, + [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw, + [CLK_BUS_CODEC] = &bus_codec_clk.common.hw, + [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw, + [CLK_BUS_PIO] = &bus_pio_clk.common.hw, + [CLK_BUS_THS] = &bus_ths_clk.common.hw, + [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw, + [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw, + [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw, + [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw, + [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw, + [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw, + [CLK_BUS_UART0] = &bus_uart0_clk.common.hw, + [CLK_BUS_UART1] = &bus_uart1_clk.common.hw, + [CLK_BUS_UART2] = &bus_uart2_clk.common.hw, + [CLK_BUS_UART3] = &bus_uart3_clk.common.hw, + [CLK_BUS_SCR] = &bus_scr_clk.common.hw, + [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw, + [CLK_BUS_DBG] = &bus_dbg_clk.common.hw, + [CLK_THS] = &ths_clk.common.hw, + [CLK_NAND] = &nand_clk.common.hw, + [CLK_MMC0] = &mmc0_clk.common.hw, + [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw, + [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw, + [CLK_MMC1] = &mmc1_clk.common.hw, + [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, + [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, + [CLK_TS] = &ts_clk.common.hw, + [CLK_CE] = &ce_clk.common.hw, + [CLK_SPI0] = &spi0_clk.common.hw, + [CLK_SPI1] = &spi1_clk.common.hw, + 
[CLK_I2S0] = &i2s0_clk.common.hw, + [CLK_I2S1] = &i2s1_clk.common.hw, + [CLK_I2S2] = &i2s2_clk.common.hw, + [CLK_SPDIF] = &spdif_clk.common.hw, + [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, + [CLK_USB_PHY1] = &usb_phy1_clk.common.hw, + [CLK_USB_PHY2] = &usb_phy2_clk.common.hw, + [CLK_USB_PHY3] = &usb_phy3_clk.common.hw, + [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw, + [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw, + [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw, + [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw, + [CLK_DRAM] = &dram_clk.common.hw, + [CLK_DRAM_VE] = &dram_ve_clk.common.hw, + [CLK_DRAM_CSI] = &dram_csi_clk.common.hw, + [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw, + [CLK_DRAM_TS] = &dram_ts_clk.common.hw, + [CLK_DE] = &de_clk.common.hw, + [CLK_TCON0] = &tcon_clk.common.hw, + [CLK_TVE] = &tve_clk.common.hw, + [CLK_DEINTERLACE] = &deinterlace_clk.common.hw, + [CLK_CSI_MISC] = &csi_misc_clk.common.hw, + [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw, + [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw, + [CLK_VE] = &ve_clk.common.hw, + [CLK_AC_DIG] = &ac_dig_clk.common.hw, + [CLK_AVS] = &avs_clk.common.hw, + [CLK_HDMI] = &hdmi_clk.common.hw, + [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw, + [CLK_MBUS] = &mbus_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun8i_h3_ccu_resets[] = { + [RST_USB_PHY0] = { 0x0cc, BIT(0) }, + [RST_USB_PHY1] = { 0x0cc, BIT(1) }, + [RST_USB_PHY2] = { 0x0cc, BIT(2) }, + [RST_USB_PHY3] = { 0x0cc, BIT(3) }, + + [RST_MBUS] = { 0x0fc, BIT(31) }, + + [RST_BUS_CE] = { 0x2c0, BIT(5) }, + [RST_BUS_DMA] = { 0x2c0, BIT(6) }, + [RST_BUS_MMC0] = { 0x2c0, BIT(8) }, + [RST_BUS_MMC1] = { 0x2c0, BIT(9) }, + [RST_BUS_MMC2] = { 0x2c0, BIT(10) }, + [RST_BUS_NAND] = { 0x2c0, BIT(13) }, + [RST_BUS_DRAM] = { 0x2c0, BIT(14) }, + [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, + [RST_BUS_TS] = { 0x2c0, BIT(18) }, + [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, + [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, + [RST_BUS_SPI1] = { 0x2c0, BIT(21) }, + [RST_BUS_OTG] = { 0x2c0, BIT(23) }, + [RST_BUS_EHCI0] = { 0x2c0, BIT(24) }, + [RST_BUS_EHCI1] = { 0x2c0, BIT(25) }, + [RST_BUS_EHCI2] = { 0x2c0, BIT(26) }, + [RST_BUS_EHCI3] = { 0x2c0, BIT(27) }, + [RST_BUS_OHCI0] = { 0x2c0, BIT(28) }, + [RST_BUS_OHCI1] = { 0x2c0, BIT(29) }, + [RST_BUS_OHCI2] = { 0x2c0, BIT(30) }, + [RST_BUS_OHCI3] = { 0x2c0, BIT(31) }, + + [RST_BUS_VE] = { 0x2c4, BIT(0) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON1] = { 0x2c4, BIT(4) }, + [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) }, + [RST_BUS_CSI] = { 0x2c4, BIT(8) }, + [RST_BUS_TVE] = { 0x2c4, BIT(9) }, + [RST_BUS_HDMI0] = { 0x2c4, BIT(10) }, + [RST_BUS_HDMI1] = { 0x2c4, BIT(11) }, + [RST_BUS_DE] = { 0x2c4, BIT(12) }, + [RST_BUS_GPU] = { 0x2c4, BIT(20) }, + [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) }, + [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) }, + [RST_BUS_DBG] = { 0x2c4, BIT(31) }, + + [RST_BUS_EPHY] = { 0x2c8, BIT(2) }, + + [RST_BUS_CODEC] = { 0x2d0, BIT(0) }, + [RST_BUS_SPDIF] = { 0x2d0, BIT(1) }, + [RST_BUS_THS] = { 0x2d0, BIT(8) }, + [RST_BUS_I2S0] = { 0x2d0, BIT(12) }, + [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, + [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, + + [RST_BUS_I2C0] = { 0x2d4, BIT(0) }, + [RST_BUS_I2C1] = { 0x2d4, BIT(1) }, + [RST_BUS_I2C2] = { 0x2d4, BIT(2) }, + [RST_BUS_UART0] = { 0x2d4, BIT(16) }, + [RST_BUS_UART1] = { 0x2d4, BIT(17) }, + [RST_BUS_UART2] = { 0x2d4, BIT(18) }, + [RST_BUS_UART3] = { 0x2d4, BIT(19) }, + [RST_BUS_SCR] = { 0x2d4, BIT(20) }, +}; + +static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { + .ccu_clks = 
sun8i_h3_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_h3_ccu_clks), + + .hw_clks = &sun8i_h3_hw_clks, + + .resets = sun8i_h3_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_h3_ccu_resets), +}; + +static void __init sun8i_h3_ccu_setup(struct device_node *node) +{ + void __iomem *reg; + u32 val; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + /* Force the PLL-Audio-1x divider to 4 */ + val = readl(reg + SUN8I_H3_PLL_AUDIO_REG); + val &= ~GENMASK(19, 16); + writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG); + + sunxi_ccu_probe(node, reg, &sun8i_h3_ccu_desc); +} +CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu", + sun8i_h3_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h new file mode 100644 index 000000000000..78be712c7487 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h @@ -0,0 +1,62 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN8I_H3_H_ +#define _CCU_SUN8I_H3_H_ + +#include <dt-bindings/clock/sun8i-h3-ccu.h> +#include <dt-bindings/reset/sun8i-h3-ccu.h> + +#define CLK_PLL_CPUX 0 +#define CLK_PLL_AUDIO_BASE 1 +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_AUDIO_2X 3 +#define CLK_PLL_AUDIO_4X 4 +#define CLK_PLL_AUDIO_8X 5 +#define CLK_PLL_VIDEO 6 +#define CLK_PLL_VE 7 +#define CLK_PLL_DDR 8 +#define CLK_PLL_PERIPH0 9 +#define CLK_PLL_PERIPH0_2X 10 +#define CLK_PLL_GPU 11 +#define CLK_PLL_PERIPH1 12 +#define CLK_PLL_DE 13 + +/* The CPUX clock is exported */ + +#define CLK_AXI 15 +#define CLK_AHB1 16 +#define CLK_APB1 17 +#define CLK_APB2 18 +#define CLK_AHB2 19 + +/* All the bus gates are exported */ + +/* The first bunch of module clocks are exported */ + +#define CLK_DRAM 96 + +/* All the DRAM gates are exported */ + +/* Some more module clocks are exported */ + +#define CLK_MBUS 113 + +/* And the GPU module clock is exported */ + +#define CLK_NUMBER (CLK_GPU + 1) + +#endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c new file mode 100644 index 000000000000..fc17b5295e16 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -0,0 +1,90 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/iopoll.h> +#include <linux/slab.h> + +#include "ccu_common.h" +#include "ccu_reset.h" + +static DEFINE_SPINLOCK(ccu_lock); + +void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock) +{ + u32 reg; + + if (!lock) + return; + + WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg, + !(reg & lock), 100, 70000)); +} + +int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, + const struct sunxi_ccu_desc *desc) +{ + struct ccu_reset *reset; + int i, ret; + + for (i = 0; i < desc->num_ccu_clks; i++) { + struct ccu_common *cclk = desc->ccu_clks[i]; + + if (!cclk) + continue; + + cclk->base = reg; + cclk->lock = &ccu_lock; + } + + for (i = 0; i < desc->hw_clks->num; i++) { + struct clk_hw *hw = desc->hw_clks->hws[i]; + + if (!hw) + continue; + + ret = clk_hw_register(NULL, hw); + if (ret) { + pr_err("Couldn't register clock %s\n", + clk_hw_get_name(hw)); + goto err_clk_unreg; + } + } + + ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + desc->hw_clks); + if (ret) + goto err_clk_unreg; + + reset = kzalloc(sizeof(*reset), GFP_KERNEL); + if (!reset) { + ret = -ENOMEM; + goto err_of_clk_unreg; + } + + reset->rcdev.of_node = node; + reset->rcdev.ops = &ccu_reset_ops; + reset->rcdev.owner = THIS_MODULE; + reset->rcdev.nr_resets = desc->num_resets; + reset->base = reg; + reset->lock = &ccu_lock; + reset->reset_map = desc->resets; + + ret = reset_controller_register(&reset->rcdev); + if (ret) + goto err_of_clk_unreg; + + return 0; + +err_of_clk_unreg: +err_clk_unreg: + return ret; +} diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h new file mode 100644 index 000000000000..b3d9abfbd721 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_common.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef _COMMON_H_ +#define _COMMON_H_ + +#include <linux/compiler.h> +#include <linux/clk-provider.h> + +#define CCU_FEATURE_FRACTIONAL BIT(0) +#define CCU_FEATURE_VARIABLE_PREDIV BIT(1) +#define CCU_FEATURE_FIXED_PREDIV BIT(2) +#define CCU_FEATURE_FIXED_POSTDIV BIT(3) + +struct device_node; + +#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ + &(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = (const char *[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + } + +#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ + &(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + } + +#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +struct ccu_common { + void __iomem *base; + u16 reg; + + unsigned long features; + spinlock_t *lock; + struct clk_hw hw; +}; + +static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw) +{ + return container_of(hw, struct ccu_common, hw); +} + +struct sunxi_ccu_desc { + struct ccu_common **ccu_clks; + unsigned long num_ccu_clks; + + struct clk_hw_onecell_data *hw_clks; + + struct ccu_reset_map *resets; + unsigned long num_resets; +}; + +void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock); + +int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, + const struct sunxi_ccu_desc *desc); + +#endif /* _COMMON_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c new file mode 100644 index 000000000000..8659b4cb6c20 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_div.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include <linux/clk-provider.h> + +#include "ccu_gate.h" +#include "ccu_div.h" + +static unsigned long ccu_div_round_rate(struct ccu_mux_internal *mux, + unsigned long parent_rate, + unsigned long rate, + void *data) +{ + struct ccu_div *cd = data; + unsigned long val; + + /* + * We can't use divider_round_rate that assumes that there's + * several parents, while we might be called to evaluate + * several different parents. 
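+ * (divider_round_rate() assumes it is rounding against the clock's single + * registered parent, while ccu_mux_helper_determine_rate() calls this + * helper once for every candidate parent, so the divider value is picked + * directly with divider_get_val() instead.)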
+ */ + val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width, + cd->div.flags); + + return divider_recalc_rate(&cd->common.hw, parent_rate, val, + cd->div.table, cd->div.flags); +} + +static void ccu_div_disable(struct clk_hw *hw) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_gate_helper_disable(&cd->common, cd->enable); +} + +static int ccu_div_enable(struct clk_hw *hw) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_gate_helper_enable(&cd->common, cd->enable); +} + +static int ccu_div_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_gate_helper_is_enabled(&cd->common, cd->enable); +} + +static unsigned long ccu_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + unsigned long val; + u32 reg; + + reg = readl(cd->common.base + cd->common.reg); + val = reg >> cd->div.shift; + val &= (1 << cd->div.width) - 1; + + ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1, + &parent_rate); + + return divider_recalc_rate(hw, parent_rate, val, cd->div.table, + cd->div.flags); +} + +static int ccu_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_mux_helper_determine_rate(&cd->common, &cd->mux, + req, ccu_div_round_rate, cd); +} + +static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + unsigned long flags; + unsigned long val; + u32 reg; + + ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1, + &parent_rate); + + val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width, + cd->div.flags); + + spin_lock_irqsave(cd->common.lock, flags); + + reg = readl(cd->common.base + cd->common.reg); + reg &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift); + + writel(reg | (val << cd->div.shift), + cd->common.base + cd->common.reg); + + spin_unlock_irqrestore(cd->common.lock, flags); + + return 0; +} + +static u8 ccu_div_get_parent(struct clk_hw *hw) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_mux_helper_get_parent(&cd->common, &cd->mux); +} + +static int ccu_div_set_parent(struct clk_hw *hw, u8 index) +{ + struct ccu_div *cd = hw_to_ccu_div(hw); + + return ccu_mux_helper_set_parent(&cd->common, &cd->mux, index); +} + +const struct clk_ops ccu_div_ops = { + .disable = ccu_div_disable, + .enable = ccu_div_enable, + .is_enabled = ccu_div_is_enabled, + + .get_parent = ccu_div_get_parent, + .set_parent = ccu_div_set_parent, + + .determine_rate = ccu_div_determine_rate, + .recalc_rate = ccu_div_recalc_rate, + .set_rate = ccu_div_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h new file mode 100644 index 000000000000..653ade5769b3 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_div.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CCU_DIV_H_ +#define _CCU_DIV_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_mux.h" + +struct _ccu_div { + u8 shift; + u8 width; + + u32 flags; + + struct clk_div_table *table; +}; + +#define _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, _flags) \ + { \ + .shift = _shift, \ + .width = _width, \ + .flags = _flags, \ + .table = _table, \ + } + +#define _SUNXI_CCU_DIV_FLAGS(_shift, _width, _flags) \ + _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, _flags) + +#define _SUNXI_CCU_DIV_TABLE(_shift, _width, _table) \ + _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, 0) + +#define _SUNXI_CCU_DIV(_shift, _width) \ + _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, 0) + +struct ccu_div { + u32 enable; + + struct _ccu_div div; + struct ccu_mux_internal mux; + struct ccu_common common; +}; + +#define SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \ + _shift, _width, \ + _table, _gate, _flags) \ + struct ccu_div _struct = { \ + .div = _SUNXI_CCU_DIV_TABLE(_shift, _width, \ + _table), \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_div_ops, \ + _flags), \ + } \ + } + + +#define SUNXI_CCU_DIV_TABLE(_struct, _name, _parent, _reg, \ + _shift, _width, \ + _table, _flags) \ + SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \ + _shift, _width, _table, 0, \ + _flags) + +#define SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, _muxshift, _muxwidth, \ + _gate, _flags) \ + struct ccu_div _struct = { \ + .enable = _gate, \ + .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_PARENTS(_name, \ + _parents, \ + &ccu_div_ops, \ + _flags), \ + }, \ + } + +#define SUNXI_CCU_M_WITH_MUX(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, _muxshift, _muxwidth, \ + _flags) \ + SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, _muxshift, _muxwidth, \ + 0, _flags) + + +#define SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \ + _mshift, _mwidth, _gate, \ + _flags) \ + struct ccu_div _struct = { \ + .enable = _gate, \ + .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_div_ops, \ + _flags), \ + }, \ + } + +#define SUNXI_CCU_M(_struct, _name, _parent, _reg, _mshift, _mwidth, \ + _flags) \ + SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \ + _mshift, _mwidth, 0, _flags) + +static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_div, common); +} + +extern const struct clk_ops ccu_div_ops; + +#endif /* _CCU_DIV_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c new file mode 100644 index 000000000000..5c4b10cd15b5 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_frac.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include "ccu_frac.h" + +bool ccu_frac_helper_is_enabled(struct ccu_common *common, + struct _ccu_frac *cf) +{ + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return false; + + return !(readl(common->base + common->reg) & cf->enable); +} + +void ccu_frac_helper_enable(struct ccu_common *common, + struct _ccu_frac *cf) +{ + unsigned long flags; + u32 reg; + + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return; + + spin_lock_irqsave(common->lock, flags); + reg = readl(common->base + common->reg); + writel(reg & ~cf->enable, common->base + common->reg); + spin_unlock_irqrestore(common->lock, flags); +} + +void ccu_frac_helper_disable(struct ccu_common *common, + struct _ccu_frac *cf) +{ + unsigned long flags; + u32 reg; + + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return; + + spin_lock_irqsave(common->lock, flags); + reg = readl(common->base + common->reg); + writel(reg | cf->enable, common->base + common->reg); + spin_unlock_irqrestore(common->lock, flags); +} + +bool ccu_frac_helper_has_rate(struct ccu_common *common, + struct _ccu_frac *cf, + unsigned long rate) +{ + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return false; + + return (cf->rates[0] == rate) || (cf->rates[1] == rate); +} + +unsigned long ccu_frac_helper_read_rate(struct ccu_common *common, + struct _ccu_frac *cf) +{ + u32 reg; + + pr_debug("%s: Read fractional\n", clk_hw_get_name(&common->hw)); + + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return 0; + + pr_debug("%s: clock is fractional (rates %lu and %lu)\n", + clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]); + + reg = readl(common->base + common->reg); + + pr_debug("%s: clock reg is 0x%x (select is 0x%x)\n", + clk_hw_get_name(&common->hw), reg, cf->select); + + return (reg & cf->select) ? cf->rates[1] : cf->rates[0]; +} + +int ccu_frac_helper_set_rate(struct ccu_common *common, + struct _ccu_frac *cf, + unsigned long rate) +{ + unsigned long flags; + u32 reg, sel; + + if (!(common->features & CCU_FEATURE_FRACTIONAL)) + return -EINVAL; + + if (cf->rates[0] == rate) + sel = 0; + else if (cf->rates[1] == rate) + sel = cf->select; + else + return -EINVAL; + + spin_lock_irqsave(common->lock, flags); + reg = readl(common->base + common->reg); + reg &= ~cf->select; + writel(reg | sel, common->base + common->reg); + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h new file mode 100644 index 000000000000..e4c670b1cdfe --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_frac.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef _CCU_FRAC_H_ +#define _CCU_FRAC_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +struct _ccu_frac { + u32 enable; + u32 select; + + unsigned long rates[2]; +}; + +#define _SUNXI_CCU_FRAC(_enable, _select, _rate1, _rate2) \ + { \ + .enable = _enable, \ + .select = _select, \ + .rates = { _rate1, _rate2 }, \ + } + +bool ccu_frac_helper_is_enabled(struct ccu_common *common, + struct _ccu_frac *cf); +void ccu_frac_helper_enable(struct ccu_common *common, + struct _ccu_frac *cf); +void ccu_frac_helper_disable(struct ccu_common *common, + struct _ccu_frac *cf); + +bool ccu_frac_helper_has_rate(struct ccu_common *common, + struct _ccu_frac *cf, + unsigned long rate); + +unsigned long ccu_frac_helper_read_rate(struct ccu_common *common, + struct _ccu_frac *cf); + +int ccu_frac_helper_set_rate(struct ccu_common *common, + struct _ccu_frac *cf, + unsigned long rate); + +#endif /* _CCU_FRAC_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c new file mode 100644 index 000000000000..8a81f9d4a89f --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_gate.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include <linux/clk-provider.h> + +#include "ccu_gate.h" + +void ccu_gate_helper_disable(struct ccu_common *common, u32 gate) +{ + unsigned long flags; + u32 reg; + + if (!gate) + return; + + spin_lock_irqsave(common->lock, flags); + + reg = readl(common->base + common->reg); + writel(reg & ~gate, common->base + common->reg); + + spin_unlock_irqrestore(common->lock, flags); +} + +static void ccu_gate_disable(struct clk_hw *hw) +{ + struct ccu_gate *cg = hw_to_ccu_gate(hw); + + return ccu_gate_helper_disable(&cg->common, cg->enable); +} + +int ccu_gate_helper_enable(struct ccu_common *common, u32 gate) +{ + unsigned long flags; + u32 reg; + + if (!gate) + return 0; + + spin_lock_irqsave(common->lock, flags); + + reg = readl(common->base + common->reg); + writel(reg | gate, common->base + common->reg); + + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} + +static int ccu_gate_enable(struct clk_hw *hw) +{ + struct ccu_gate *cg = hw_to_ccu_gate(hw); + + return ccu_gate_helper_enable(&cg->common, cg->enable); +} + +int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate) +{ + if (!gate) + return 1; + + return readl(common->base + common->reg) & gate; +} + +static int ccu_gate_is_enabled(struct clk_hw *hw) +{ + struct ccu_gate *cg = hw_to_ccu_gate(hw); + + return ccu_gate_helper_is_enabled(&cg->common, cg->enable); +} + +const struct clk_ops ccu_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, +}; diff --git a/drivers/clk/sunxi-ng/ccu_gate.h b/drivers/clk/sunxi-ng/ccu_gate.h new file mode 100644 index 000000000000..4466169bd2d7 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_gate.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_GATE_H_ +#define _CCU_GATE_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +struct ccu_gate { + u32 enable; + + struct ccu_common common; +}; + +#define SUNXI_CCU_GATE(_struct, _name, _parent, _reg, _gate, _flags) \ + struct ccu_gate _struct = { \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_gate_ops, \ + _flags), \ + } \ + } + +static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_gate, common); +} + +void ccu_gate_helper_disable(struct ccu_common *common, u32 gate); +int ccu_gate_helper_enable(struct ccu_common *common, u32 gate); +int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate); + +extern const struct clk_ops ccu_gate_ops; + +#endif /* _CCU_GATE_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c new file mode 100644 index 000000000000..cbf33ef5faa9 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include <linux/clk-provider.h> + +#include "ccu_gate.h" +#include "ccu_mp.h" + +static void ccu_mp_find_best(unsigned long parent, unsigned long rate, + unsigned int max_m, unsigned int max_p, + unsigned int *m, unsigned int *p) +{ + unsigned long best_rate = 0; + unsigned int best_m = 0, best_p = 0; + unsigned int _m, _p; + + for (_p = 0; _p <= max_p; _p++) { + for (_m = 1; _m <= max_m; _m++) { + unsigned long tmp_rate = (parent >> _p) / _m; + + if (tmp_rate > rate) + continue; + + if ((rate - tmp_rate) < (rate - best_rate)) { + best_rate = tmp_rate; + best_m = _m; + best_p = _p; + } + } + } + + *m = best_m; + *p = best_p; +} + +static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux, + unsigned long parent_rate, + unsigned long rate, + void *data) +{ + struct ccu_mp *cmp = data; + unsigned int m, p; + + ccu_mp_find_best(parent_rate, rate, + 1 << cmp->m.width, (1 << cmp->p.width) - 1, + &m, &p); + + return (parent_rate >> p) / m; +} + +static void ccu_mp_disable(struct clk_hw *hw) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_gate_helper_disable(&cmp->common, cmp->enable); +} + +static int ccu_mp_enable(struct clk_hw *hw) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_gate_helper_enable(&cmp->common, cmp->enable); +} + +static int ccu_mp_is_enabled(struct clk_hw *hw) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable); +} + +static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + unsigned int m, p; + u32 reg; + + reg = readl(cmp->common.base + cmp->common.reg); + + m = reg >> cmp->m.shift; + m &= (1 << cmp->m.width) - 1; + + p = reg >> cmp->p.shift; + p &= (1 << cmp->p.width) - 1; + + return (parent_rate >> p) / (m + 1); +} + +static int 
ccu_mp_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux, + req, ccu_mp_round_rate, cmp); +} + +static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + unsigned long flags; + unsigned int m, p; + u32 reg; + + ccu_mp_find_best(parent_rate, rate, + 1 << cmp->m.width, (1 << cmp->p.width) - 1, + &m, &p); + + + spin_lock_irqsave(cmp->common.lock, flags); + + reg = readl(cmp->common.base + cmp->common.reg); + reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift); + reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift); + + writel(reg | (p << cmp->p.shift) | ((m - 1) << cmp->m.shift), + cmp->common.base + cmp->common.reg); + + spin_unlock_irqrestore(cmp->common.lock, flags); + + return 0; +} + +static u8 ccu_mp_get_parent(struct clk_hw *hw) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux); +} + +static int ccu_mp_set_parent(struct clk_hw *hw, u8 index) +{ + struct ccu_mp *cmp = hw_to_ccu_mp(hw); + + return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index); +} + +const struct clk_ops ccu_mp_ops = { + .disable = ccu_mp_disable, + .enable = ccu_mp_enable, + .is_enabled = ccu_mp_is_enabled, + + .get_parent = ccu_mp_get_parent, + .set_parent = ccu_mp_set_parent, + + .determine_rate = ccu_mp_determine_rate, + .recalc_rate = ccu_mp_recalc_rate, + .set_rate = ccu_mp_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h new file mode 100644 index 000000000000..3cf12bf95962 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CCU_MP_H_ +#define _CCU_MP_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_mult.h" +#include "ccu_mux.h" + +/* + * struct ccu_mp - Definition of an M-P clock + * + * Clocks based on the formula parent >> P / M + */ +struct ccu_mp { + u32 enable; + + struct _ccu_div m; + struct _ccu_div p; + struct ccu_mux_internal mux; + struct ccu_common common; +}; + +#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _flags) \ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_PARENTS(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + +#define SUNXI_CCU_MP_WITH_MUX(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _flags) \ + SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + 0, _flags) + +static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_mp, common); +} + +extern const struct clk_ops ccu_mp_ops; + +#endif /* _CCU_MP_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h new file mode 100644 index 000000000000..609db6610880 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mult.h @@ -0,0 +1,15 @@ +#ifndef _CCU_MULT_H_ +#define _CCU_MULT_H_ + +struct _ccu_mult { + u8 shift; + u8 width; +}; + +#define _SUNXI_CCU_MULT(_shift, _width) \ + { \ + .shift = _shift, \ + .width = _width, \ + } + +#endif /* _CCU_MULT_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c new file mode 100644 index 000000000000..58fc36e7dcce --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mux.c @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/clk-provider.h> + +#include "ccu_gate.h" +#include "ccu_mux.h" + +void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common, + struct ccu_mux_internal *cm, + int parent_index, + unsigned long *parent_rate) +{ + u8 prediv = 1; + u32 reg; + + if (!((common->features & CCU_FEATURE_FIXED_PREDIV) || + (common->features & CCU_FEATURE_VARIABLE_PREDIV))) + return; + + reg = readl(common->base + common->reg); + if (parent_index < 0) { + parent_index = reg >> cm->shift; + parent_index &= (1 << cm->width) - 1; + } + + if (common->features & CCU_FEATURE_FIXED_PREDIV) + if (parent_index == cm->fixed_prediv.index) + prediv = cm->fixed_prediv.div; + + if (common->features & CCU_FEATURE_VARIABLE_PREDIV) + if (parent_index == cm->variable_prediv.index) { + u8 div; + + div = reg >> cm->variable_prediv.shift; + div &= (1 << cm->variable_prediv.width) - 1; + prediv = div + 1; + } + + *parent_rate = *parent_rate / prediv; +} + +int ccu_mux_helper_determine_rate(struct ccu_common *common, + struct ccu_mux_internal *cm, + struct clk_rate_request *req, + unsigned long (*round)(struct ccu_mux_internal *, + unsigned long, + unsigned long, + void *), + void *data) +{ + unsigned long best_parent_rate = 0, best_rate = 0; + struct clk_hw *best_parent = NULL, *hw = &common->hw; + unsigned int i; + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + unsigned long tmp_rate, parent_rate; + struct clk_hw *parent; + + parent = clk_hw_get_parent_by_index(hw, i); + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + ccu_mux_helper_adjust_parent_for_prediv(common, cm, i, + &parent_rate); + + tmp_rate = round(cm, parent_rate, req->rate, data); + if (tmp_rate == req->rate) { + best_parent = parent; + best_parent_rate = parent_rate; + best_rate = tmp_rate; + goto out; + } + + if ((req->rate - tmp_rate) < (req->rate - best_rate)) { + best_rate = tmp_rate; + best_parent_rate = parent_rate; + best_parent = parent; + } + } + + if (best_rate == 0) + return -EINVAL; + +out: + req->best_parent_hw = best_parent; + req->best_parent_rate = best_parent_rate; + req->rate = best_rate; + return 0; +} + +u8 ccu_mux_helper_get_parent(struct ccu_common *common, + struct ccu_mux_internal *cm) +{ + u32 reg; + u8 parent; + + reg = readl(common->base + common->reg); + parent = reg >> cm->shift; + parent &= (1 << cm->width) - 1; + + return parent; +} + +int ccu_mux_helper_set_parent(struct ccu_common *common, + struct ccu_mux_internal *cm, + u8 index) +{ + unsigned long flags; + u32 reg; + + spin_lock_irqsave(common->lock, flags); + + reg = readl(common->base + common->reg); + reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift); + writel(reg | (index << cm->shift), common->base + common->reg); + + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} + +static void ccu_mux_disable(struct clk_hw *hw) +{ + struct ccu_mux *cm = hw_to_ccu_mux(hw); + + return ccu_gate_helper_disable(&cm->common, cm->enable); +} + +static int ccu_mux_enable(struct clk_hw *hw) +{ + struct ccu_mux *cm = hw_to_ccu_mux(hw); + + return ccu_gate_helper_enable(&cm->common, cm->enable); +} + +static int ccu_mux_is_enabled(struct clk_hw *hw) +{ + struct ccu_mux *cm = hw_to_ccu_mux(hw); + + return ccu_gate_helper_is_enabled(&cm->common, cm->enable); +} + +static u8 ccu_mux_get_parent(struct clk_hw *hw) +{ + struct ccu_mux *cm = hw_to_ccu_mux(hw); + + return ccu_mux_helper_get_parent(&cm->common, &cm->mux); +} + +static int ccu_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct ccu_mux *cm = 
hw_to_ccu_mux(hw); + + return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index); +} + +static unsigned long ccu_mux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_mux *cm = hw_to_ccu_mux(hw); + + ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1, + &parent_rate); + + return parent_rate; +} + +const struct clk_ops ccu_mux_ops = { + .disable = ccu_mux_disable, + .enable = ccu_mux_enable, + .is_enabled = ccu_mux_is_enabled, + + .get_parent = ccu_mux_get_parent, + .set_parent = ccu_mux_set_parent, + + .determine_rate = __clk_mux_determine_rate, + .recalc_rate = ccu_mux_recalc_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h new file mode 100644 index 000000000000..945082631e7d --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_mux.h @@ -0,0 +1,91 @@ +#ifndef _CCU_MUX_H_ +#define _CCU_MUX_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +struct ccu_mux_internal { + u8 shift; + u8 width; + + struct { + u8 index; + u8 div; + } fixed_prediv; + + struct { + u8 index; + u8 shift; + u8 width; + } variable_prediv; +}; + +#define SUNXI_CLK_MUX(_shift, _width) \ + { \ + .shift = _shift, \ + .width = _width, \ + } + +struct ccu_mux { + u16 reg; + u32 enable; + + struct ccu_mux_internal mux; + struct ccu_common common; +}; + +#define SUNXI_CCU_MUX(_struct, _name, _parents, _reg, _shift, _width, _flags) \ + struct ccu_mux _struct = { \ + .mux = SUNXI_CLK_MUX(_shift, _width), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_PARENTS(_name, \ + _parents, \ + &ccu_mux_ops, \ + _flags), \ + } \ + } + +#define SUNXI_CCU_MUX_WITH_GATE(_struct, _name, _parents, _reg, \ + _shift, _width, _gate, _flags) \ + struct ccu_mux _struct = { \ + .enable = _gate, \ + .mux = SUNXI_CLK_MUX(_shift, _width), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_PARENTS(_name, \ + _parents, \ + &ccu_mux_ops, \ + _flags), \ + } \ + } + +static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_mux, common); +} + +extern const struct clk_ops ccu_mux_ops; + +void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common, + struct ccu_mux_internal *cm, + int parent_index, + unsigned long *parent_rate); +int ccu_mux_helper_determine_rate(struct ccu_common *common, + struct ccu_mux_internal *cm, + struct clk_rate_request *req, + unsigned long (*round)(struct ccu_mux_internal *, + unsigned long, + unsigned long, + void *), + void *data); +u8 ccu_mux_helper_get_parent(struct ccu_common *common, + struct ccu_mux_internal *cm); +int ccu_mux_helper_set_parent(struct ccu_common *common, + struct ccu_mux_internal *cm, + u8 index); + +#endif /* _CCU_MUX_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c new file mode 100644 index 000000000000..4470ffc8cf0d --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nk.c @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/rational.h> + +#include "ccu_gate.h" +#include "ccu_nk.h" + +static void ccu_nk_find_best(unsigned long parent, unsigned long rate, + unsigned int max_n, unsigned int max_k, + unsigned int *n, unsigned int *k) +{ + unsigned long best_rate = 0; + unsigned int best_k = 0, best_n = 0; + unsigned int _k, _n; + + for (_k = 1; _k <= max_k; _k++) { + for (_n = 1; _n <= max_n; _n++) { + unsigned long tmp_rate = parent * _n * _k; + + if (tmp_rate > rate) + continue; + + if ((rate - tmp_rate) < (rate - best_rate)) { + best_rate = tmp_rate; + best_k = _k; + best_n = _n; + } + } + } + + *k = best_k; + *n = best_n; +} + +static void ccu_nk_disable(struct clk_hw *hw) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + + return ccu_gate_helper_disable(&nk->common, nk->enable); +} + +static int ccu_nk_enable(struct clk_hw *hw) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + + return ccu_gate_helper_enable(&nk->common, nk->enable); +} + +static int ccu_nk_is_enabled(struct clk_hw *hw) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + + return ccu_gate_helper_is_enabled(&nk->common, nk->enable); +} + +static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + unsigned long rate, n, k; + u32 reg; + + reg = readl(nk->common.base + nk->common.reg); + + n = reg >> nk->n.shift; + n &= (1 << nk->n.width) - 1; + + k = reg >> nk->k.shift; + k &= (1 << nk->k.width) - 1; + + rate = parent_rate * (n + 1) * (k + 1); + + if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= nk->fixed_post_div; + + return rate; +} + +static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + unsigned int n, k; + + if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate *= nk->fixed_post_div; + + ccu_nk_find_best(*parent_rate, rate, + 1 << nk->n.width, 1 << nk->k.width, + &n, &k); + + rate = *parent_rate * n * k; + if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate = rate / nk->fixed_post_div; + + return rate; +} + +static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_nk *nk = hw_to_ccu_nk(hw); + unsigned long flags; + unsigned int n, k; + u32 reg; + + if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate = rate * nk->fixed_post_div; + + ccu_nk_find_best(parent_rate, rate, + 1 << nk->n.width, 1 << nk->k.width, + &n, &k); + + spin_lock_irqsave(nk->common.lock, flags); + + reg = readl(nk->common.base + nk->common.reg); + reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift); + reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift); + + writel(reg | ((k - 1) << nk->k.shift) | ((n - 1) << nk->n.shift), + nk->common.base + nk->common.reg); + + spin_unlock_irqrestore(nk->common.lock, flags); + + ccu_helper_wait_for_lock(&nk->common, nk->lock); + + return 0; +} + +const struct clk_ops ccu_nk_ops = { + .disable = ccu_nk_disable, + .enable = ccu_nk_enable, + .is_enabled = ccu_nk_is_enabled, + + .recalc_rate = ccu_nk_recalc_rate, + .round_rate = ccu_nk_round_rate, + .set_rate = ccu_nk_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_nk.h b/drivers/clk/sunxi-ng/ccu_nk.h new file mode 100644 index 000000000000..4b52da0c29fe --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nk.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_NK_H_ +#define _CCU_NK_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_mult.h" + +/* + * struct ccu_nk - Definition of an N-K clock + * + * Clocks based on the formula parent * N * K + */ +struct ccu_nk { + u16 reg; + u32 enable; + u32 lock; + + struct _ccu_mult n; + struct _ccu_mult k; + + unsigned int fixed_post_div; + + struct ccu_common common; +}; + +#define SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(_struct, _name, _parent, _reg, \ + _nshift, _nwidth, \ + _kshift, _kwidth, \ + _gate, _lock, _postdiv, \ + _flags) \ + struct ccu_nk _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .fixed_post_div = _postdiv, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nk_ops, \ + _flags), \ + }, \ + } + +static inline struct ccu_nk *hw_to_ccu_nk(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_nk, common); +} + +extern const struct clk_ops ccu_nk_ops; + +#endif /* _CCU_NK_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c new file mode 100644 index 000000000000..2071822b1e9c --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nkm.c @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
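[Annotation: ccu_nk_set_rate() above updates the factor fields with the usual read-modify-write pattern: clear each field with GENMASK(), then OR in factor - 1, since the hardware encodes N-1 and K-1. A userspace sketch of that bit manipulation; the kernel's 64-bit-safe GENMASK is replaced by a local 32-bit variant, and the shifts and widths are made up.]

#include <stdio.h>
#include <stdint.h>

#define GENMASK32(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

/* Clear one register field, then program it with factor - 1. */
static uint32_t set_field(uint32_t reg, unsigned int shift,
			  unsigned int width, unsigned int factor)
{
	reg &= ~GENMASK32(shift + width - 1, shift);
	return reg | ((factor - 1) << shift);
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	reg = set_field(reg, 8, 5, 25);	/* N = 25 in bits [12:8] */
	reg = set_field(reg, 4, 2, 2);	/* K = 2  in bits [5:4]  */
	printf("0x%08x\n", reg);
	return 0;
}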
+ */ + +#include <linux/clk-provider.h> +#include <linux/rational.h> + +#include "ccu_gate.h" +#include "ccu_nkm.h" + +struct _ccu_nkm { + unsigned long n, max_n; + unsigned long k, max_k; + unsigned long m, max_m; +}; + +static void ccu_nkm_find_best(unsigned long parent, unsigned long rate, + struct _ccu_nkm *nkm) +{ + unsigned long best_rate = 0; + unsigned long best_n = 0, best_k = 0, best_m = 0; + unsigned long _n, _k, _m; + + for (_k = 1; _k <= nkm->max_k; _k++) { + unsigned long tmp_rate; + + rational_best_approximation(rate / _k, parent, + nkm->max_n, nkm->max_m, &_n, &_m); + + tmp_rate = parent * _n * _k / _m; + + if (tmp_rate > rate) + continue; + + if ((rate - tmp_rate) < (rate - best_rate)) { + best_rate = tmp_rate; + best_n = _n; + best_k = _k; + best_m = _m; + } + } + + nkm->n = best_n; + nkm->k = best_k; + nkm->m = best_m; +} + +static void ccu_nkm_disable(struct clk_hw *hw) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + + return ccu_gate_helper_disable(&nkm->common, nkm->enable); +} + +static int ccu_nkm_enable(struct clk_hw *hw) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + + return ccu_gate_helper_enable(&nkm->common, nkm->enable); +} + +static int ccu_nkm_is_enabled(struct clk_hw *hw) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + + return ccu_gate_helper_is_enabled(&nkm->common, nkm->enable); +} + +static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + unsigned long n, m, k; + u32 reg; + + reg = readl(nkm->common.base + nkm->common.reg); + + n = reg >> nkm->n.shift; + n &= (1 << nkm->n.width) - 1; + + k = reg >> nkm->k.shift; + k &= (1 << nkm->k.width) - 1; + + m = reg >> nkm->m.shift; + m &= (1 << nkm->m.width) - 1; + + return parent_rate * (n + 1) * (k + 1) / (m + 1); +} + +static long ccu_nkm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + struct _ccu_nkm _nkm; + + _nkm.max_n = 1 << nkm->n.width; + _nkm.max_k = 1 << nkm->k.width; + _nkm.max_m = 1 << nkm->m.width; + + ccu_nkm_find_best(*parent_rate, rate, &_nkm); + + return *parent_rate * _nkm.n * _nkm.k / _nkm.m; +} + +static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_nkm *nkm = hw_to_ccu_nkm(hw); + struct _ccu_nkm _nkm; + unsigned long flags; + u32 reg; + + _nkm.max_n = 1 << nkm->n.width; + _nkm.max_k = 1 << nkm->k.width; + _nkm.max_m = 1 << nkm->m.width; + + ccu_nkm_find_best(parent_rate, rate, &_nkm); + + spin_lock_irqsave(nkm->common.lock, flags); + + reg = readl(nkm->common.base + nkm->common.reg); + reg &= ~GENMASK(nkm->n.width + nkm->n.shift - 1, nkm->n.shift); + reg &= ~GENMASK(nkm->k.width + nkm->k.shift - 1, nkm->k.shift); + reg &= ~GENMASK(nkm->m.width + nkm->m.shift - 1, nkm->m.shift); + + reg |= (_nkm.n - 1) << nkm->n.shift; + reg |= (_nkm.k - 1) << nkm->k.shift; + reg |= (_nkm.m - 1) << nkm->m.shift; + + writel(reg, nkm->common.base + nkm->common.reg); + + spin_unlock_irqrestore(nkm->common.lock, flags); + + ccu_helper_wait_for_lock(&nkm->common, nkm->lock); + + return 0; +} + +const struct clk_ops ccu_nkm_ops = { + .disable = ccu_nkm_disable, + .enable = ccu_nkm_enable, + .is_enabled = ccu_nkm_is_enabled, + + .recalc_rate = ccu_nkm_recalc_rate, + .round_rate = ccu_nkm_round_rate, + .set_rate = ccu_nkm_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h new file mode 100644 index 000000000000..1936ac1c6b37 --- /dev/null +++ 
b/drivers/clk/sunxi-ng/ccu_nkm.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_NKM_H_ +#define _CCU_NKM_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_mult.h" + +/* + * struct ccu_nkm - Definition of an N-K-M clock + * + * Clocks based on the formula parent * N * K / M + */ +struct ccu_nkm { + u32 enable; + u32 lock; + + struct _ccu_mult n; + struct _ccu_mult k; + struct _ccu_div m; + + struct ccu_common common; +}; + +#define SUNXI_CCU_NKM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \ + _nshift, _nwidth, \ + _kshift, _kwidth, \ + _mshift, _mwidth, \ + _gate, _lock, _flags) \ + struct ccu_nkm _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nkm_ops, \ + _flags), \ + }, \ + } + +static inline struct ccu_nkm *hw_to_ccu_nkm(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_nkm, common); +} + +extern const struct clk_ops ccu_nkm_ops; + +#endif /* _CCU_NKM_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c new file mode 100644 index 000000000000..9f2b98e19dc9 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
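[Annotation: ccu_nkm_find_best() above fixes K and lets the kernel's rational_best_approximation() pick the N/M pair. For the small field widths involved, a brute-force scan finds the same best rate and keeps the illustration dependency-free; the parent, target, and limits below are arbitrary examples.]

#include <stdio.h>

/* Exhaustive equivalent of the driver's K-loop plus rational
 * approximation: return the best achievable rate <= target. */
static unsigned long nkm_find_best(unsigned long parent, unsigned long rate,
				   unsigned long max_n, unsigned long max_k,
				   unsigned long max_m)
{
	unsigned long best = 0, _n, _k, _m;

	for (_k = 1; _k <= max_k; _k++)
		for (_n = 1; _n <= max_n; _n++)
			for (_m = 1; _m <= max_m; _m++) {
				unsigned long tmp = parent * _n * _k / _m;

				if (tmp <= rate && tmp > best)
					best = tmp;
			}
	return best;
}

int main(void)
{
	/* 24 MHz parent, 297 MHz target, 5-bit N, 2-bit K, 2-bit M */
	printf("%lu Hz\n", nkm_find_best(24000000UL, 297000000UL, 32, 4, 4));
	return 0;
}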
+ */ + +#include <linux/clk-provider.h> +#include <linux/rational.h> + +#include "ccu_gate.h" +#include "ccu_nkmp.h" + +struct _ccu_nkmp { + unsigned long n, max_n; + unsigned long k, max_k; + unsigned long m, max_m; + unsigned long p, max_p; +}; + +static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate, + struct _ccu_nkmp *nkmp) +{ + unsigned long best_rate = 0; + unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0; + unsigned long _n, _k, _m, _p; + + for (_k = 1; _k <= nkmp->max_k; _k++) { + for (_p = 0; _p <= nkmp->max_p; _p++) { + unsigned long tmp_rate; + + rational_best_approximation(rate / _k, parent >> _p, + nkmp->max_n, nkmp->max_m, + &_n, &_m); + + tmp_rate = (parent * _n * _k >> _p) / _m; + + if (tmp_rate > rate) + continue; + + if ((rate - tmp_rate) < (rate - best_rate)) { + best_rate = tmp_rate; + best_n = _n; + best_k = _k; + best_m = _m; + best_p = _p; + } + } + } + + nkmp->n = best_n; + nkmp->k = best_k; + nkmp->m = best_m; + nkmp->p = best_p; +} + +static void ccu_nkmp_disable(struct clk_hw *hw) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + + return ccu_gate_helper_disable(&nkmp->common, nkmp->enable); +} + +static int ccu_nkmp_enable(struct clk_hw *hw) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + + return ccu_gate_helper_enable(&nkmp->common, nkmp->enable); +} + +static int ccu_nkmp_is_enabled(struct clk_hw *hw) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + + return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable); +} + +static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + unsigned long n, m, k, p; + u32 reg; + + reg = readl(nkmp->common.base + nkmp->common.reg); + + n = reg >> nkmp->n.shift; + n &= (1 << nkmp->n.width) - 1; + + k = reg >> nkmp->k.shift; + k &= (1 << nkmp->k.width) - 1; + + m = reg >> nkmp->m.shift; + m &= (1 << nkmp->m.width) - 1; + + p = reg >> nkmp->p.shift; + p &= (1 << nkmp->p.width) - 1; + + return (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1); +} + +static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + struct _ccu_nkmp _nkmp; + + _nkmp.max_n = 1 << nkmp->n.width; + _nkmp.max_k = 1 << nkmp->k.width; + _nkmp.max_m = 1 << nkmp->m.width; + _nkmp.max_p = (1 << nkmp->p.width) - 1; + + ccu_nkmp_find_best(*parent_rate, rate, + &_nkmp); + + return (*parent_rate * _nkmp.n * _nkmp.k >> _nkmp.p) / _nkmp.m; +} + +static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); + struct _ccu_nkmp _nkmp; + unsigned long flags; + u32 reg; + + _nkmp.max_n = 1 << nkmp->n.width; + _nkmp.max_k = 1 << nkmp->k.width; + _nkmp.max_m = 1 << nkmp->m.width; + _nkmp.max_p = (1 << nkmp->p.width) - 1; + + ccu_nkmp_find_best(parent_rate, rate, &_nkmp); + + spin_lock_irqsave(nkmp->common.lock, flags); + + reg = readl(nkmp->common.base + nkmp->common.reg); + reg &= ~GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift); + reg &= ~GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift); + reg &= ~GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); + reg &= ~GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); + + reg |= (_nkmp.n - 1) << nkmp->n.shift; + reg |= (_nkmp.k - 1) << nkmp->k.shift; + reg |= (_nkmp.m - 1) << nkmp->m.shift; + reg |= _nkmp.p << nkmp->p.shift; + + writel(reg, nkmp->common.base + nkmp->common.reg); + + 
spin_unlock_irqrestore(nkmp->common.lock, flags); + + ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock); + + return 0; +} + +const struct clk_ops ccu_nkmp_ops = { + .disable = ccu_nkmp_disable, + .enable = ccu_nkmp_enable, + .is_enabled = ccu_nkmp_is_enabled, + + .recalc_rate = ccu_nkmp_recalc_rate, + .round_rate = ccu_nkmp_round_rate, + .set_rate = ccu_nkmp_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h new file mode 100644 index 000000000000..5adb0c92a614 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nkmp.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_NKMP_H_ +#define _CCU_NKMP_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_mult.h" + +/* + * struct ccu_nkmp - Definition of an N-K-M-P clock + * + * Clocks based on the formula parent * N * K >> P / M + */ +struct ccu_nkmp { + u32 enable; + u32 lock; + + struct _ccu_mult n; + struct _ccu_mult k; + struct _ccu_div m; + struct _ccu_div p; + + struct ccu_common common; +}; + +#define SUNXI_CCU_NKMP_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \ + _nshift, _nwidth, \ + _kshift, _kwidth, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _gate, _lock, _flags) \ + struct ccu_nkmp _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nkmp_ops, \ + _flags), \ + }, \ + } + +static inline struct ccu_nkmp *hw_to_ccu_nkmp(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_nkmp, common); +} + +extern const struct clk_ops ccu_nkmp_ops; + +#endif /* _CCU_NKMP_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c new file mode 100644 index 000000000000..e35ddd8eec8b --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
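[Annotation: decoding an NKMP rate follows directly from the formula in the header above: every field stores value - 1 except P, a power-of-two post-divider applied as a right shift, exactly as ccu_nkmp_recalc_rate() does. A worked example with invented shifts and widths.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* N-1 in bits [12:8], K-1 in [5:4], M-1 in [1:0], P in [17:16] */
	uint32_t reg = (4 << 8) | (1 << 4) | (1 << 0) | (1 << 16);
	unsigned long parent = 24000000UL;
	unsigned long n = ((reg >> 8) & 0x1f) + 1;	/* 5 */
	unsigned long k = ((reg >> 4) & 0x3) + 1;	/* 2 */
	unsigned long m = ((reg >> 0) & 0x3) + 1;	/* 2 */
	unsigned long p = (reg >> 16) & 0x3;		/* right shift by 1 */

	/* rate = parent * N * K >> P / M = 60 MHz here */
	printf("%lu Hz\n", (parent * n * k >> p) / m);
	return 0;
}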
+ */ + +#include <linux/clk-provider.h> +#include <linux/rational.h> + +#include "ccu_frac.h" +#include "ccu_gate.h" +#include "ccu_nm.h" + +static void ccu_nm_disable(struct clk_hw *hw) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + + return ccu_gate_helper_disable(&nm->common, nm->enable); +} + +static int ccu_nm_enable(struct clk_hw *hw) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + + return ccu_gate_helper_enable(&nm->common, nm->enable); +} + +static int ccu_nm_is_enabled(struct clk_hw *hw) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + + return ccu_gate_helper_is_enabled(&nm->common, nm->enable); +} + +static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + unsigned long n, m; + u32 reg; + + if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) + return ccu_frac_helper_read_rate(&nm->common, &nm->frac); + + reg = readl(nm->common.base + nm->common.reg); + + n = reg >> nm->n.shift; + n &= (1 << nm->n.width) - 1; + + m = reg >> nm->m.shift; + m &= (1 << nm->m.width) - 1; + + return parent_rate * (n + 1) / (m + 1); +} + +static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + unsigned long n, m; + + rational_best_approximation(rate, *parent_rate, + 1 << nm->n.width, 1 << nm->m.width, + &n, &m); + + return *parent_rate * n / m; +} + +static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_nm *nm = hw_to_ccu_nm(hw); + unsigned long flags; + unsigned long n, m; + u32 reg; + + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) + return ccu_frac_helper_set_rate(&nm->common, &nm->frac, rate); + else + ccu_frac_helper_disable(&nm->common, &nm->frac); + + rational_best_approximation(rate, parent_rate, + 1 << nm->n.width, 1 << nm->m.width, + &n, &m); + + spin_lock_irqsave(nm->common.lock, flags); + + reg = readl(nm->common.base + nm->common.reg); + reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift); + reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift); + + writel(reg | ((m - 1) << nm->m.shift) | ((n - 1) << nm->n.shift), + nm->common.base + nm->common.reg); + + spin_unlock_irqrestore(nm->common.lock, flags); + + ccu_helper_wait_for_lock(&nm->common, nm->lock); + + return 0; +} + +const struct clk_ops ccu_nm_ops = { + .disable = ccu_nm_disable, + .enable = ccu_nm_enable, + .is_enabled = ccu_nm_is_enabled, + + .recalc_rate = ccu_nm_recalc_rate, + .round_rate = ccu_nm_round_rate, + .set_rate = ccu_nm_set_rate, +}; diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h new file mode 100644 index 000000000000..0b7bcd33a2df --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_nm.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
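[Annotation: ccu_nm_round_rate() above delegates the N/M choice to rational_best_approximation() from lib/rational.c. For small field widths, an exhaustive scan reproduces the result; the 22.5792 MHz audio-rate target is just an example.]

#include <stdio.h>

/* Pick the N/M pair whose output is closest to the target rate. */
static void nm_best(unsigned long rate, unsigned long parent,
		    unsigned long max_n, unsigned long max_m,
		    unsigned long *n, unsigned long *m)
{
	unsigned long best_err = ~0UL, _n, _m;

	for (_n = 1; _n <= max_n; _n++)
		for (_m = 1; _m <= max_m; _m++) {
			unsigned long tmp = parent * _n / _m;
			unsigned long err = tmp > rate ? tmp - rate
						       : rate - tmp;

			if (err < best_err) {
				best_err = err;
				*n = _n;
				*m = _m;
			}
		}
}

int main(void)
{
	unsigned long n = 1, m = 1;

	nm_best(22579200UL, 24000000UL, 128, 16, &n, &m);
	printf("n=%lu m=%lu -> %lu Hz\n", n, m, 24000000UL * n / m);
	return 0;
}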
+ */ + +#ifndef _CCU_NM_H_ +#define _CCU_NM_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" +#include "ccu_div.h" +#include "ccu_frac.h" +#include "ccu_mult.h" + +/* + * struct ccu_nm - Definition of an N-M clock + * + * Clocks based on the formula parent * N / M + */ +struct ccu_nm { + u32 enable; + u32 lock; + + struct _ccu_mult n; + struct _ccu_div m; + struct _ccu_frac frac; + + struct ccu_common common; +}; + +#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(_struct, _name, _parent, _reg, \ + _nshift, _nwidth, \ + _mshift, _mwidth, \ + _frac_en, _frac_sel, \ + _frac_rate_0, _frac_rate_1, \ + _gate, _lock, _flags) \ + struct ccu_nm _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \ + _frac_rate_0, \ + _frac_rate_1), \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FRACTIONAL, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nm_ops, \ + _flags), \ + }, \ + } + +#define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \ + _nshift, _nwidth, \ + _mshift, _mwidth, \ + _gate, _lock, _flags) \ + struct ccu_nm _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nm_ops, \ + _flags), \ + }, \ + } + +static inline struct ccu_nm *hw_to_ccu_nm(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_nm, common); +} + +extern const struct clk_ops ccu_nm_ops; + +#endif /* _CCU_NM_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c new file mode 100644 index 000000000000..400c58ad72fd --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_phase.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include "ccu_phase.h" + +static int ccu_phase_get_phase(struct clk_hw *hw) +{ + struct ccu_phase *phase = hw_to_ccu_phase(hw); + struct clk_hw *parent, *grandparent; + unsigned int parent_rate, grandparent_rate; + u16 step, parent_div; + u32 reg; + u8 delay; + + reg = readl(phase->common.base + phase->common.reg); + delay = (reg >> phase->shift); + delay &= (1 << phase->width) - 1; + + if (!delay) + return 180; + + /* Get our parent clock, it's the one that can adjust its rate */ + parent = clk_hw_get_parent(hw); + if (!parent) + return -EINVAL; + + /* And its rate */ + parent_rate = clk_hw_get_rate(parent); + if (!parent_rate) + return -EINVAL; + + /* Now, get our parent's parent (most likely some PLL) */ + grandparent = clk_hw_get_parent(parent); + if (!grandparent) + return -EINVAL; + + /* And its rate */ + grandparent_rate = clk_hw_get_rate(grandparent); + if (!grandparent_rate) + return -EINVAL; + + /* Get our parent clock divider */ + parent_div = grandparent_rate / parent_rate; + + step = DIV_ROUND_CLOSEST(360, parent_div); + return delay * step; +} + +static int ccu_phase_set_phase(struct clk_hw *hw, int degrees) +{ + struct ccu_phase *phase = hw_to_ccu_phase(hw); + struct clk_hw *parent, *grandparent; + unsigned int parent_rate, grandparent_rate; + unsigned long flags; + u32 reg; + u8 delay; + + /* Get our parent clock, it's the one that can adjust its rate */ + parent = clk_hw_get_parent(hw); + if (!parent) + return -EINVAL; + + /* And its rate */ + parent_rate = clk_hw_get_rate(parent); + if (!parent_rate) + return -EINVAL; + + /* Now, get our parent's parent (most likely some PLL) */ + grandparent = clk_hw_get_parent(parent); + if (!grandparent) + return -EINVAL; + + /* And its rate */ + grandparent_rate = clk_hw_get_rate(grandparent); + if (!grandparent_rate) + return -EINVAL; + + if (degrees != 180) { + u16 step, parent_div; + + /* Get our parent divider */ + parent_div = grandparent_rate / parent_rate; + + /* + * We can only outphase the clocks by multiple of the + * PLL's period. + * + * Since our parent clock is only a divider, and the + * formula to get the outphasing in degrees is deg = + * 360 * delta / period + * + * If we simplify this formula, we can see that the + * only thing that we're concerned about is the number + * of period we want to outphase our clock from, and + * the divider set by our parent clock. + */ + step = DIV_ROUND_CLOSEST(360, parent_div); + delay = DIV_ROUND_CLOSEST(degrees, step); + } else { + delay = 0; + } + + spin_lock_irqsave(phase->common.lock, flags); + reg = readl(phase->common.base + phase->common.reg); + reg &= ~GENMASK(phase->width + phase->shift - 1, phase->shift); + writel(reg | (delay << phase->shift), + phase->common.base + phase->common.reg); + spin_unlock_irqrestore(phase->common.lock, flags); + + return 0; +} + +const struct clk_ops ccu_phase_ops = { + .get_phase = ccu_phase_get_phase, + .set_phase = ccu_phase_set_phase, +}; diff --git a/drivers/clk/sunxi-ng/ccu_phase.h b/drivers/clk/sunxi-ng/ccu_phase.h new file mode 100644 index 000000000000..75a091a4c565 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_phase.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
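[Annotation: the degree arithmetic in ccu_phase_set_phase() above reduces to two rounded divisions: one step is 360 degrees over the divider between the grandparent PLL and the parent, and the delay field is the requested phase over that step. The rates below are illustrative.]

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long grandparent = 600000000UL;	/* the PLL */
	unsigned long parent = 50000000UL;		/* divided mux output */
	unsigned int parent_div = grandparent / parent;		/* 12 */
	unsigned int step = DIV_ROUND_CLOSEST(360, parent_div);	/* 30 deg */
	unsigned int delay = DIV_ROUND_CLOSEST(90, step);	/* 90 deg -> 3 */

	printf("step=%u deg, delay field=%u\n", step, delay);
	return 0;
}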
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_PHASE_H_ +#define _CCU_PHASE_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +struct ccu_phase { + u8 shift; + u8 width; + + struct ccu_common common; +}; + +#define SUNXI_CCU_PHASE(_struct, _name, _parent, _reg, _shift, _width, _flags) \ + struct ccu_phase _struct = { \ + .shift = _shift, \ + .width = _width, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_phase_ops, \ + _flags), \ + } \ + } + +static inline struct ccu_phase *hw_to_ccu_phase(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_phase, common); +} + +extern const struct clk_ops ccu_phase_ops; + +#endif /* _CCU_PHASE_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c new file mode 100644 index 000000000000..6c31d48783a7 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_reset.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include <linux/io.h> +#include <linux/reset-controller.h> + +#include "ccu_reset.h" + +static int ccu_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev); + const struct ccu_reset_map *map = &ccu->reset_map[id]; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(ccu->lock, flags); + + reg = readl(ccu->base + map->reg); + writel(reg & ~map->bit, ccu->base + map->reg); + + spin_unlock_irqrestore(ccu->lock, flags); + + return 0; +} + +static int ccu_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev); + const struct ccu_reset_map *map = &ccu->reset_map[id]; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(ccu->lock, flags); + + reg = readl(ccu->base + map->reg); + writel(reg | map->bit, ccu->base + map->reg); + + spin_unlock_irqrestore(ccu->lock, flags); + + return 0; +} + +const struct reset_control_ops ccu_reset_ops = { + .assert = ccu_reset_assert, + .deassert = ccu_reset_deassert, +}; diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h new file mode 100644 index 000000000000..36a4679210bd --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu_reset.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2016 Maxime Ripard. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
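[Annotation: wiring the reset block above into the reset framework is not shown in this file. A hypothetical registration helper might look like the following sketch; it assumes kernel context plus ccu_reset.h, and the map contents, offsets, and names are invented for illustration. The rcdev_to_ccu_reset() container_of lookup is what lets the shared assert/deassert ops find the right base address and spinlock.]

static struct ccu_reset_map example_resets[] = {
	{ .reg = 0x2c0, .bit = BIT(0) },	/* e.g. MMC0 reset */
	{ .reg = 0x2c0, .bit = BIT(1) },	/* e.g. MMC1 reset */
};

static int example_register_resets(struct device_node *node,
				   void __iomem *base, spinlock_t *lock)
{
	static struct ccu_reset rst;

	rst.base = base;
	rst.reset_map = example_resets;
	rst.lock = lock;
	rst.rcdev.of_node = node;
	rst.rcdev.ops = &ccu_reset_ops;
	rst.rcdev.owner = THIS_MODULE;
	rst.rcdev.nr_resets = ARRAY_SIZE(example_resets);

	return reset_controller_register(&rst.rcdev);
}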
+ */ + +#ifndef _CCU_RESET_H_ +#define _CCU_RESET_H_ + +#include <linux/reset-controller.h> + +struct ccu_reset_map { + u16 reg; + u32 bit; +}; + + +struct ccu_reset { + void __iomem *base; + struct ccu_reset_map *reset_map; + spinlock_t *lock; + + struct reset_controller_dev rcdev; +}; + +static inline struct ccu_reset *rcdev_to_ccu_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct ccu_reset, rcdev); +} + +extern const struct reset_control_ops ccu_reset_ops; + +#endif /* _CCU_RESET_H_ */ diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c index ddefe9668863..dfe5e3e32d28 100644 --- a/drivers/clk/sunxi/clk-factors.c +++ b/drivers/clk/sunxi/clk-factors.c @@ -12,7 +12,6 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/string.h> diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c index 68021fa5ecd9..09cdb9874636 100644 --- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c +++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c @@ -9,7 +9,7 @@ */ #include <linux/clk-provider.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> @@ -33,7 +33,6 @@ static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = { { .compatible = "allwinner,sun8i-a23-apb0-gates-clk", .data = &sun8i_a23_apb0_gates }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_gates_clk_dt_ids); static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev) { @@ -102,8 +101,4 @@ static struct platform_driver sun6i_a31_apb0_gates_clk_driver = { }, .probe = sun6i_a31_apb0_gates_clk_probe, }; -module_platform_driver(sun6i_a31_apb0_gates_clk_driver); - -MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>"); -MODULE_DESCRIPTION("Allwinner A31 APB0 gate clocks driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(sun6i_a31_apb0_gates_clk_driver); diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c index e703e1895b76..b9c8d359288c 100644 --- a/drivers/clk/sunxi/clk-sun6i-apb0.c +++ b/drivers/clk/sunxi/clk-sun6i-apb0.c @@ -9,7 +9,7 @@ */ #include <linux/clk-provider.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -61,7 +61,6 @@ static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-apb0-clk" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_clk_dt_ids); static struct platform_driver sun6i_a31_apb0_clk_driver = { .driver = { @@ -70,8 +69,4 @@ static struct platform_driver sun6i_a31_apb0_clk_driver = { }, .probe = sun6i_a31_apb0_clk_probe, }; -module_platform_driver(sun6i_a31_apb0_clk_driver); - -MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>"); -MODULE_DESCRIPTION("Allwinner A31 APB0 clock Driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(sun6i_a31_apb0_clk_driver); diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c index 84a187e55360..64ca3e9e38e6 100644 --- a/drivers/clk/sunxi/clk-sun6i-ar100.c +++ b/drivers/clk/sunxi/clk-sun6i-ar100.c @@ -10,7 +10,7 @@ #include <linux/bitops.h> #include <linux/clk-provider.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -91,32 +91,17 @@ 
static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev) return 0; } -static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct clk *clk = platform_get_drvdata(pdev); - - sunxi_factors_unregister(np, clk); - - return 0; -} - static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-ar100-clk" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sun6i_a31_ar100_clk_dt_ids); static struct platform_driver sun6i_a31_ar100_clk_driver = { .driver = { .name = "sun6i-a31-ar100-clk", .of_match_table = sun6i_a31_ar100_clk_dt_ids, + .suppress_bind_attrs = true, }, .probe = sun6i_a31_ar100_clk_probe, - .remove = sun6i_a31_ar100_clk_remove, }; -module_platform_driver(sun6i_a31_ar100_clk_driver); - -MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>"); -MODULE_DESCRIPTION("Allwinner A31 AR100 clock Driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(sun6i_a31_ar100_clk_driver); diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c index 2ea61debffc1..a5666e1d0ce7 100644 --- a/drivers/clk/sunxi/clk-sun8i-apb0.c +++ b/drivers/clk/sunxi/clk-sun8i-apb0.c @@ -15,7 +15,7 @@ */ #include <linux/clk-provider.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> @@ -108,7 +108,6 @@ static const struct of_device_id sun8i_a23_apb0_clk_dt_ids[] = { { .compatible = "allwinner,sun8i-a23-apb0-clk" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sun8i_a23_apb0_clk_dt_ids); static struct platform_driver sun8i_a23_apb0_clk_driver = { .driver = { @@ -117,8 +116,4 @@ static struct platform_driver sun8i_a23_apb0_clk_driver = { }, .probe = sun8i_a23_apb0_clk_probe, }; -module_platform_driver(sun8i_a23_apb0_clk_driver); - -MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); -MODULE_DESCRIPTION("Allwinner A23 APB0 clock Driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(sun8i_a23_apb0_clk_driver); diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 716737388b7d..6041bdba2e97 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -16,7 +16,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/reset.h> @@ -183,39 +183,17 @@ err_clk_register: return ret; } -static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev); - struct clk_onecell_data *clk_data = &data->clk_data; - int i; - - reset_controller_unregister(&data->rcdev); - of_clk_del_provider(np); - for (i = 0; i < clk_data->clk_num; i++) - clk_unregister(clk_data->clks[i]); - - reset_control_assert(data->reset); - - return 0; -} - static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = { { .compatible = "allwinner,sun9i-a80-mmc-config-clk" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, sun9i_a80_mmc_config_clk_dt_ids); static struct platform_driver sun9i_a80_mmc_config_clk_driver = { .driver = { .name = "sun9i-a80-mmc-config-clk", + .suppress_bind_attrs = true, .of_match_table = sun9i_a80_mmc_config_clk_dt_ids, }, .probe = sun9i_a80_mmc_config_clk_probe, - .remove = sun9i_a80_mmc_config_clk_remove, }; -module_platform_driver(sun9i_a80_mmc_config_clk_driver); - 
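[Annotation: the sunxi conversions in this series all follow one shape: module support is dropped, so the remove() callback, MODULE_DEVICE_TABLE(), and the MODULE_* tags become dead code, and suppress_bind_attrs is set because a device that can no longer be removed must not be unbound through sysfs either. A schematic version, with hypothetical example_clk_* names standing in for the real driver symbols.]

static struct platform_driver example_clk_driver = {
	.driver = {
		.name = "example-clk",
		.of_match_table = example_clk_dt_ids,
		/* no remove path, so forbid manual unbind */
		.suppress_bind_attrs = true,
	},
	.probe = example_clk_probe,
};
builtin_platform_driver(example_clk_driver);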
-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); -MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(sun9i_a80_mmc_config_clk_driver); diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 36c974916d4f..5738635c5274 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -238,7 +238,6 @@ enum clk_id { tegra_clk_sor0, tegra_clk_sor0_lvds, tegra_clk_sor1, - tegra_clk_sor1_brick, tegra_clk_sor1_src, tegra_clk_spdif, tegra_clk_spdif_2x, diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 4e194ecc8d5e..b3855360d6bc 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -191,6 +191,53 @@ #define PLLSS_REF_SRC_SEL_SHIFT 25 #define PLLSS_REF_SRC_SEL_MASK (3 << PLLSS_REF_SRC_SEL_SHIFT) +#define UTMIP_PLL_CFG1 0x484 +#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) +#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) +#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) +#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) +#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15) +#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) +#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17) + +#define UTMIP_PLL_CFG2 0x488 +#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6) +#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP BIT(1) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP BIT(3) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP BIT(5) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN BIT(24) +#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP BIT(25) +#define UTMIP_PLL_CFG2_PHY_XTAL_CLOCKEN BIT(30) + +#define UTMIPLL_HW_PWRDN_CFG0 0x52c +#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0) +#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1) +#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2) +#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4) +#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5) +#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6) +#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24) +#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25) + +#define PLLU_HW_PWRDN_CFG0 0x530 +#define PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL BIT(0) +#define PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2) +#define PLLU_HW_PWRDN_CFG0_USE_LOCKDET BIT(6) +#define PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT BIT(7) +#define PLLU_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24) +#define PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE BIT(28) + +#define XUSB_PLL_CFG0 0x534 +#define XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY 0x3ff +#define XUSB_PLL_CFG0_PLLU_LOCK_DLY (0x3ff << 14) + +#define PLLU_BASE_CLKENABLE_USB BIT(21) +#define PLLU_BASE_OVERRIDE BIT(24) + #define pll_readl(offset, p) readl_relaxed(p->clk_base + offset) #define pll_readl_base(p) pll_readl(p->params->base_reg, p) #define pll_readl_misc(p) pll_readl(p->params->misc_reg, p) @@ -973,6 +1020,133 @@ const struct clk_ops tegra_clk_plle_ops = { .enable = clk_plle_enable, }; +/* + * Structure defining the fields for USB UTMI clocks Parameters. 
+ */ +struct utmi_clk_param { + /* Oscillator Frequency in Hz */ + u32 osc_frequency; + /* UTMIP PLL Enable Delay Count */ + u8 enable_delay_count; + /* UTMIP PLL Stable count */ + u8 stable_count; + /* UTMIP PLL Active delay count */ + u8 active_delay_count; + /* UTMIP PLL Xtal frequency count */ + u8 xtal_freq_count; +}; + +static const struct utmi_clk_param utmi_parameters[] = { + { + .osc_frequency = 13000000, .enable_delay_count = 0x02, + .stable_count = 0x33, .active_delay_count = 0x05, + .xtal_freq_count = 0x7f + }, { + .osc_frequency = 19200000, .enable_delay_count = 0x03, + .stable_count = 0x4b, .active_delay_count = 0x06, + .xtal_freq_count = 0xbb + }, { + .osc_frequency = 12000000, .enable_delay_count = 0x02, + .stable_count = 0x2f, .active_delay_count = 0x04, + .xtal_freq_count = 0x76 + }, { + .osc_frequency = 26000000, .enable_delay_count = 0x04, + .stable_count = 0x66, .active_delay_count = 0x09, + .xtal_freq_count = 0xfe + }, { + .osc_frequency = 16800000, .enable_delay_count = 0x03, + .stable_count = 0x41, .active_delay_count = 0x0a, + .xtal_freq_count = 0xa4 + }, { + .osc_frequency = 38400000, .enable_delay_count = 0x0, + .stable_count = 0x0, .active_delay_count = 0x6, + .xtal_freq_count = 0x80 + }, +}; + +static int clk_pllu_enable(struct clk_hw *hw) +{ + struct tegra_clk_pll *pll = to_clk_pll(hw); + struct clk_hw *pll_ref = clk_hw_get_parent(hw); + struct clk_hw *osc = clk_hw_get_parent(pll_ref); + const struct utmi_clk_param *params = NULL; + unsigned long flags = 0, input_rate; + unsigned int i; + int ret = 0; + u32 value; + + if (!osc) { + pr_err("%s: failed to get OSC clock\n", __func__); + return -EINVAL; + } + + input_rate = clk_hw_get_rate(osc); + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + _clk_pll_enable(hw); + + ret = clk_pll_wait_for_lock(pll); + if (ret < 0) + goto out; + + for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { + if (input_rate == utmi_parameters[i].osc_frequency) { + params = &utmi_parameters[i]; + break; + } + } + + if (!params) { + pr_err("%s: unexpected input rate %lu Hz\n", __func__, + input_rate); + ret = -EINVAL; + goto out; + } + + value = pll_readl_base(pll); + value &= ~PLLU_BASE_OVERRIDE; + pll_writel_base(value, pll); + + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); + /* Program UTMIP PLL stable and active counts */ + value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); + value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count); + value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count); + /* Remove power downs from UTMIP PLL control bits */ + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); + + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + /* Program UTMIP PLL delay and oscillator frequency counts */ + value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count); + value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); + value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count); + /* Remove power downs from UTMIP PLL control bits */ + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; + value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); + +out: + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + return 
ret; +} + +static const struct clk_ops tegra_clk_pllu_ops = { + .is_enabled = clk_pll_is_enabled, + .enable = clk_pllu_enable, + .disable = clk_pll_disable, + .recalc_rate = clk_pll_recalc_rate, +}; + static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, unsigned long parent_rate) { @@ -1505,6 +1679,112 @@ static void clk_plle_tegra114_disable(struct clk_hw *hw) if (pll->lock) spin_unlock_irqrestore(pll->lock, flags); } + +static int clk_pllu_tegra114_enable(struct clk_hw *hw) +{ + struct tegra_clk_pll *pll = to_clk_pll(hw); + const struct utmi_clk_param *params = NULL; + struct clk *osc = __clk_lookup("osc"); + unsigned long flags = 0, input_rate; + unsigned int i; + int ret = 0; + u32 value; + + if (!osc) { + pr_err("%s: failed to get OSC clock\n", __func__); + return -EINVAL; + } + + input_rate = clk_hw_get_rate(__clk_get_hw(osc)); + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + _clk_pll_enable(hw); + + ret = clk_pll_wait_for_lock(pll); + if (ret < 0) + goto out; + + for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { + if (input_rate == utmi_parameters[i].osc_frequency) { + params = &utmi_parameters[i]; + break; + } + } + + if (!params) { + pr_err("%s: unexpected input rate %lu Hz\n", __func__, + input_rate); + ret = -EINVAL; + goto out; + } + + value = pll_readl_base(pll); + value &= ~PLLU_BASE_OVERRIDE; + pll_writel_base(value, pll); + + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); + /* Program UTMIP PLL stable and active counts */ + value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); + value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count); + value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count); + /* Remove power downs from UTMIP PLL control bits */ + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); + + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + /* Program UTMIP PLL delay and oscillator frequency counts */ + value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count); + value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); + value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count); + /* Remove power downs from UTMIP PLL control bits */ + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; + value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP; + value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); + + /* Setup HW control of UTMIPLL */ + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + value |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; + value &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; + value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); + + udelay(1); + + /* + * Setup SW override of UTMIPLL assuming USB2.0 ports are assigned + * to USB2 + */ + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + value |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL; + value &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + + 
udelay(1); + + /* Enable HW control of UTMIPLL */ + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + +out: + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + return ret; +} #endif static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base, @@ -1614,6 +1894,27 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, return clk; } +struct clk *tegra_clk_register_pllu(const char *name, const char *parent_name, + void __iomem *clk_base, unsigned long flags, + struct tegra_clk_pll_params *pll_params, spinlock_t *lock) +{ + struct tegra_clk_pll *pll; + struct clk *clk; + + pll_params->flags |= TEGRA_PLLU; + + pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); + if (IS_ERR(pll)) + return ERR_CAST(pll); + + clk = _tegra_clk_register_pll(pll, name, parent_name, flags, + &tegra_clk_pllu_ops); + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} + #if defined(CONFIG_ARCH_TEGRA_114_SOC) || \ defined(CONFIG_ARCH_TEGRA_124_SOC) || \ defined(CONFIG_ARCH_TEGRA_132_SOC) || \ @@ -1652,6 +1953,12 @@ static const struct clk_ops tegra_clk_plle_tegra114_ops = { .recalc_rate = clk_pll_recalc_rate, }; +static const struct clk_ops tegra_clk_pllu_tegra114_ops = { + .is_enabled = clk_pll_is_enabled, + .enable = clk_pllu_tegra114_enable, + .disable = clk_pll_disable, + .recalc_rate = clk_pll_recalc_rate, +}; struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name, void __iomem *clk_base, void __iomem *pmc, @@ -1919,6 +2226,29 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name, return clk; } + +struct clk * +tegra_clk_register_pllu_tegra114(const char *name, const char *parent_name, + void __iomem *clk_base, unsigned long flags, + struct tegra_clk_pll_params *pll_params, + spinlock_t *lock) +{ + struct tegra_clk_pll *pll; + struct clk *clk; + + pll_params->flags |= TEGRA_PLLU; + + pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); + if (IS_ERR(pll)) + return ERR_CAST(pll); + + clk = _tegra_clk_register_pll(pll, name, parent_name, flags, + &tegra_clk_pllu_tegra114_ops); + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} #endif #if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC) @@ -2187,6 +2517,152 @@ static int clk_plle_tegra210_is_enabled(struct clk_hw *hw) return val & PLLE_BASE_ENABLE ? 
1 : 0; } +static int clk_pllu_tegra210_enable(struct clk_hw *hw) +{ + struct tegra_clk_pll *pll = to_clk_pll(hw); + struct clk_hw *pll_ref = clk_hw_get_parent(hw); + struct clk_hw *osc = clk_hw_get_parent(pll_ref); + const struct utmi_clk_param *params = NULL; + unsigned long flags = 0, input_rate; + unsigned int i; + int ret = 0; + u32 value; + + if (!osc) { + pr_err("%s: failed to get OSC clock\n", __func__); + return -EINVAL; + } + + input_rate = clk_hw_get_rate(osc); + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + _clk_pll_enable(hw); + + ret = clk_pll_wait_for_lock(pll); + if (ret < 0) + goto out; + + for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { + if (input_rate == utmi_parameters[i].osc_frequency) { + params = &utmi_parameters[i]; + break; + } + } + + if (!params) { + pr_err("%s: unexpected input rate %lu Hz\n", __func__, + input_rate); + ret = -EINVAL; + goto out; + } + + value = pll_readl_base(pll); + value &= ~PLLU_BASE_OVERRIDE; + pll_writel_base(value, pll); + + /* Put PLLU under HW control */ + value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0); + value |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE | + PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT | + PLLU_HW_PWRDN_CFG0_USE_LOCKDET; + value &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL | + PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL); + writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0); + + value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0); + value &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY; + writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0); + + udelay(1); + + value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0); + value |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE; + writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0); + + udelay(1); + + /* Disable PLLU clock branch to UTMIPLL since it uses OSC */ + value = pll_readl_base(pll); + value &= ~PLLU_BASE_CLKENABLE_USB; + pll_writel_base(value, pll); + + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + if (value & UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE) { + pr_debug("UTMIPLL already enabled\n"); + goto out; + } + + value &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + + /* Program UTMIP PLL stable and active counts */ + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); + value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); + value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count); + value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count); + value |= UTMIP_PLL_CFG2_PHY_XTAL_CLOCKEN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); + + /* Program UTMIP PLL delay and oscillator frequency counts */ + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); + value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count); + value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); + value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count); + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); + + /* Remove power downs from UTMIP PLL control bits */ + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + value |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; + writel(value, pll->clk_base + UTMIP_PLL_CFG1); + + udelay(1); + + /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */ + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2); + value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP; + value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP; + value |= 
UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; + value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2); + + /* Setup HW control of UTMIPLL */ + value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1); + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; + value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; + writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1); + + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + value |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; + value &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + + udelay(1); + + value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0); + value &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY; + writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0); + + udelay(1); + + /* Enable HW control of UTMIPLL */ + value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; + writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0); + +out: + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + return ret; +} + static const struct clk_ops tegra_clk_plle_tegra210_ops = { .is_enabled = clk_plle_tegra210_is_enabled, .enable = clk_plle_tegra210_enable, @@ -2194,6 +2670,13 @@ static const struct clk_ops tegra_clk_plle_tegra210_ops = { .recalc_rate = clk_pll_recalc_rate, }; +static const struct clk_ops tegra_clk_pllu_tegra210_ops = { + .is_enabled = clk_pll_is_enabled, + .enable = clk_pllu_tegra210_enable, + .disable = clk_pll_disable, + .recalc_rate = clk_pllre_recalc_rate, +}; + struct clk *tegra_clk_register_plle_tegra210(const char *name, const char *parent_name, void __iomem *clk_base, unsigned long flags, @@ -2434,4 +2917,26 @@ struct clk *tegra_clk_register_pllmb(const char *name, const char *parent_name, return clk; } + +struct clk *tegra_clk_register_pllu_tegra210(const char *name, + const char *parent_name, void __iomem *clk_base, + unsigned long flags, struct tegra_clk_pll_params *pll_params, + spinlock_t *lock) +{ + struct tegra_clk_pll *pll; + struct clk *clk; + + pll_params->flags |= TEGRA_PLLU; + + pll = _tegra_init_pll(clk_base, NULL, pll_params, lock); + if (IS_ERR(pll)) + return ERR_CAST(pll); + + clk = _tegra_clk_register_pll(pll, name, parent_name, flags, + &tegra_clk_pllu_tegra210_ops); + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} #endif diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 29d04c663abf..4ce4e7fb1124 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -594,15 +594,17 @@ static u32 mux_pllp_plld_plld2_clkm_idx[] = { [0] = 0, [1] = 2, [2] = 5, [3] = 6 }; -static const char *mux_plldp_sor1_src[] = { - "pll_dp", "clk_sor1_src" -}; -#define mux_plldp_sor1_src_idx NULL - -static const char *mux_clkm_sor1_brick_sor1_src[] = { - "clk_m", "sor1_brick", "sor1_src", "sor1_brick" -}; -#define mux_clkm_sor1_brick_sor1_src_idx NULL +static const char *mux_sor_safe_sor1_brick_sor1_src[] = { + /* + * Bit 0 of the mux selects sor1_brick, irrespective of bit 1, so the + * sor1_brick parent appears twice in the list below. This is merely + * to support clk_get_parent() if firmware happened to set these bits + * to 0b11. While not an invalid setting, code should always set the + * bits to 0b01 to select sor1_brick. 
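[Annotation: all three clk_pllu_*_enable() variants above begin by matching the oscillator rate against utmi_parameters[] and bail out with -EINVAL on a miss. The lookup, extracted into a standalone program with the table values copied from the driver; the 38.4 MHz input is a Tegra210-style oscillator chosen as an example.]

#include <stdio.h>

struct utmi_clk_param {
	unsigned long osc_frequency;	/* in Hz */
	unsigned char enable_delay_count, stable_count,
		      active_delay_count, xtal_freq_count;
};

static const struct utmi_clk_param utmi_parameters[] = {
	{ 13000000, 0x02, 0x33, 0x05, 0x7f },
	{ 19200000, 0x03, 0x4b, 0x06, 0xbb },
	{ 12000000, 0x02, 0x2f, 0x04, 0x76 },
	{ 26000000, 0x04, 0x66, 0x09, 0xfe },
	{ 16800000, 0x03, 0x41, 0x0a, 0xa4 },
	{ 38400000, 0x00, 0x00, 0x06, 0x80 },
};

int main(void)
{
	unsigned long input_rate = 38400000;
	const struct utmi_clk_param *params = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(utmi_parameters) / sizeof(*utmi_parameters); i++)
		if (input_rate == utmi_parameters[i].osc_frequency) {
			params = &utmi_parameters[i];
			break;
		}

	if (!params) {
		printf("unexpected input rate %lu Hz\n", input_rate);
		return 1;
	}
	printf("xtal_freq_count=0x%02x\n", params->xtal_freq_count);
	return 0;
}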
+ */ + "sor_safe", "sor1_brick", "sor1_src", "sor1_brick" +}; +#define mux_sor_safe_sor1_brick_sor1_src_idx NULL static const char *mux_pllp_pllre_clkm[] = { "pll_p", "pll_re_out1", "clk_m" @@ -778,8 +780,7 @@ static struct tegra_periph_init_data periph_clks[] = { MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), MUX8_NOGATE_LOCK("sor1_src", mux_pllp_plld_plld2_clkm, CLK_SOURCE_SOR1, tegra_clk_sor1_src, &sor1_lock), - NODIV("sor1_brick", mux_plldp_sor1_src, CLK_SOURCE_SOR1, 14, MASK(1), 183, 0, tegra_clk_sor1_brick, &sor1_lock), - NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), + NODIV("sor1", mux_sor_safe_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 14, MASK(2), 183, 0, tegra_clk_sor1, &sor1_lock), MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c), @@ -791,7 +792,7 @@ static struct tegra_periph_init_data periph_clks[] = { static struct tegra_periph_init_data gate_clks[] = { GATE("rtc", "clk_32k", 4, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_rtc, 0), - GATE("timer", "clk_m", 5, 0, tegra_clk_timer, 0), + GATE("timer", "clk_m", 5, 0, tegra_clk_timer, CLK_IS_CRITICAL), GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0), GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0), GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0), diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index b78054fac0a8..64da7b79a6e4 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -113,32 +113,6 @@ #define CCLKG_BURST_POLICY 0x368 -#define UTMIP_PLL_CFG2 0x488 -#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6) -#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) - -#define UTMIP_PLL_CFG1 0x484 -#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6) -#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) -#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) - -#define UTMIPLL_HW_PWRDN_CFG0 0x52c -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24) -#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4) -#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2) -#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1) -#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0) - #define CLK_SOURCE_CSITE 0x1d4 #define CLK_SOURCE_EMC 0x19c @@ -649,43 +623,6 @@ static unsigned long tegra114_input_freq[] = { #define MASK(x) (BIT(x) - 1) -struct utmi_clk_param { - /* Oscillator Frequency in KHz */ - u32 osc_frequency; - /* UTMIP PLL Enable Delay Count */ - u8 
enable_delay_count; - /* UTMIP PLL Stable count */ - u8 stable_count; - /* UTMIP PLL Active delay count */ - u8 active_delay_count; - /* UTMIP PLL Xtal frequency count */ - u8 xtal_freq_count; -}; - -static const struct utmi_clk_param utmi_parameters[] = { - { - .osc_frequency = 13000000, .enable_delay_count = 0x02, - .stable_count = 0x33, .active_delay_count = 0x05, - .xtal_freq_count = 0x7f - }, { - .osc_frequency = 19200000, .enable_delay_count = 0x03, - .stable_count = 0x4b, .active_delay_count = 0x06, - .xtal_freq_count = 0xbb - }, { - .osc_frequency = 12000000, .enable_delay_count = 0x02, - .stable_count = 0x2f, .active_delay_count = 0x04, - .xtal_freq_count = 0x76 - }, { - .osc_frequency = 26000000, .enable_delay_count = 0x04, - .stable_count = 0x66, .active_delay_count = 0x09, - .xtal_freq_count = 0xfe - }, { - .osc_frequency = 16800000, .enable_delay_count = 0x03, - .stable_count = 0x41, .active_delay_count = 0x0a, - .xtal_freq_count = 0xa4 - }, -}; - /* peripheral mux definitions */ static const char *mux_plld_out0_plld2_out0[] = { @@ -986,92 +923,9 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base) } -static __init void tegra114_utmi_param_configure(void __iomem *clk_base) -{ - unsigned int i; - u32 reg; - - for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { - if (osc_freq == utmi_parameters[i].osc_frequency) - break; - } - - if (i >= ARRAY_SIZE(utmi_parameters)) { - pr_err("%s: Unexpected oscillator freq %lu\n", __func__, - osc_freq); - return; - } - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL stable and active counts */ - /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */ - reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); - reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count); - - reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i]. - active_delay_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; - - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL delay and oscillator frequency counts */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i]. - enable_delay_count); - - reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); - reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i]. 
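Every per-SoC copy of this table being deleted here (Tegra30, 114, 124, and the Tegra210 variant later in the patch) paired it with the same linear lookup, and in all of them the frequencies are stored in Hz despite the "Oscillator Frequency in KHz" comment. A minimal sketch of the shared idiom, using a hypothetical helper name:

static const struct utmi_clk_param *utmi_params_lookup(unsigned long osc_freq)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++)
                if (osc_freq == utmi_parameters[i].osc_frequency)
                        return &utmi_parameters[i];

        return NULL;    /* unexpected oscillator frequency */
}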
- xtal_freq_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - /* Setup HW control of UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; - reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; - reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - udelay(1); - - /* Setup SW override of UTMIPLL assuming USB2.0 - ports are assigned to USB2 */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL; - reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - udelay(1); - - /* Enable HW control UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); -} - static void __init tegra114_pll_init(void __iomem *clk_base, void __iomem *pmc) { - u32 val; struct clk *clk; /* PLLC */ @@ -1118,16 +972,10 @@ static void __init tegra114_pll_init(void __iomem *clk_base, CLK_SET_RATE_PARENT, 1, 1); /* PLLU */ - val = readl(clk_base + pll_u_params.base_reg); - val &= ~BIT(24); /* disable PLLU_OVERRIDE */ - writel(val, clk_base + pll_u_params.base_reg); - - clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0, - &pll_u_params, &pll_u_lock); + clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base, 0, + &pll_u_params, &pll_u_lock); clks[TEGRA114_CLK_PLL_U] = clk; - tegra114_utmi_param_configure(clk_base); - /* PLLU_480M */ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index f4fbbf16a056..a112d3d2bff1 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -99,32 +99,6 @@ #define CCLKG_BURST_POLICY 0x368 -#define UTMIP_PLL_CFG2 0x488 -#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6) -#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) - -#define UTMIP_PLL_CFG1 0x484 -#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6) -#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) -#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) - -#define UTMIPLL_HW_PWRDN_CFG0 0x52c -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24) -#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5) -#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4) -#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2) -#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE 
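The tegra114 PLLU hunk above (repeated for Tegra124 below) collapses three manual steps into one registration call; paraphrasing the before/after shape of the hunk:

        /* before: clear the override bit, register a generic PLL, then
         * run a one-shot UTMIPLL configuration */
        val = readl(clk_base + pll_u_params.base_reg);
        val &= ~BIT(24);                /* disable PLLU_OVERRIDE */
        writel(val, clk_base + pll_u_params.base_reg);
        clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
                                     &pll_u_params, &pll_u_lock);
        tegra114_utmi_param_configure(clk_base);

        /* after: the pllu-specific helper owns that sequencing */
        clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base,
                                               0, &pll_u_params, &pll_u_lock);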
BIT(1) -#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0) - /* Tegra CPU clock and reset control regs */ #define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470 @@ -764,43 +738,6 @@ static struct tegra_clk_pll_params pll_u_params = { TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, }; -struct utmi_clk_param { - /* Oscillator Frequency in KHz */ - u32 osc_frequency; - /* UTMIP PLL Enable Delay Count */ - u8 enable_delay_count; - /* UTMIP PLL Stable count */ - u8 stable_count; - /* UTMIP PLL Active delay count */ - u8 active_delay_count; - /* UTMIP PLL Xtal frequency count */ - u8 xtal_freq_count; -}; - -static const struct utmi_clk_param utmi_parameters[] = { - { - .osc_frequency = 13000000, .enable_delay_count = 0x02, - .stable_count = 0x33, .active_delay_count = 0x05, - .xtal_freq_count = 0x7f - }, { - .osc_frequency = 19200000, .enable_delay_count = 0x03, - .stable_count = 0x4b, .active_delay_count = 0x06, - .xtal_freq_count = 0xbb - }, { - .osc_frequency = 12000000, .enable_delay_count = 0x02, - .stable_count = 0x2f, .active_delay_count = 0x04, - .xtal_freq_count = 0x76 - }, { - .osc_frequency = 26000000, .enable_delay_count = 0x04, - .stable_count = 0x66, .active_delay_count = 0x09, - .xtal_freq_count = 0xfe - }, { - .osc_frequency = 16800000, .enable_delay_count = 0x03, - .stable_count = 0x41, .active_delay_count = 0x0a, - .xtal_freq_count = 0xa4 - }, -}; - static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_ispb] = { .dt_id = TEGRA124_CLK_ISPB, .present = true }, [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, @@ -1063,88 +1000,6 @@ static struct tegra_devclk devclks[] __initdata = { static struct clk **clks; -static void tegra124_utmi_param_configure(void __iomem *clk_base) -{ - unsigned int i; - u32 reg; - - for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { - if (osc_freq == utmi_parameters[i].osc_frequency) - break; - } - - if (i >= ARRAY_SIZE(utmi_parameters)) { - pr_err("%s: Unexpected oscillator freq %lu\n", __func__, - osc_freq); - return; - } - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL stable and active counts */ - /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */ - reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); - reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count); - - reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i]. - active_delay_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; - - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL delay and oscillator frequency counts */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i]. - enable_delay_count); - - reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); - reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i]. 
- xtal_freq_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - /* Setup HW control of UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; - reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; - reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - udelay(1); - - /* Setup SW override of UTMIPLL assuming USB2.0 - ports are assigned to USB2 */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL; - reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - udelay(1); - - /* Enable HW control UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); -} - static __init void tegra124_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { @@ -1195,7 +1050,6 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base, static void __init tegra124_pll_init(void __iomem *clk_base, void __iomem *pmc) { - u32 val; struct clk *clk; /* PLLC */ @@ -1256,17 +1110,11 @@ static void __init tegra124_pll_init(void __iomem *clk_base, clks[TEGRA124_CLK_PLL_M_UD] = clk; /* PLLU */ - val = readl(clk_base + pll_u_params.base_reg); - val &= ~BIT(24); /* disable PLLU_OVERRIDE */ - writel(val, clk_base + pll_u_params.base_reg); - - clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0, - &pll_u_params, &pll_u_lock); + clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base, 0, + &pll_u_params, &pll_u_lock); clk_register_clkdev(clk, "pll_u", NULL); clks[TEGRA124_CLK_PLL_U] = clk; - tegra124_utmi_param_configure(clk_base); - /* PLLU_480M */ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 456cf586d2c2..2896d2e783ce 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -155,27 +155,6 @@ #define PMC_PLLM_WB0_OVERRIDE 0x1dc #define PMC_PLLM_WB0_OVERRIDE_2 0x2b0 -#define UTMIP_PLL_CFG2 0x488 -#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6) -#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP BIT(1) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP BIT(3) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP BIT(5) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN BIT(24) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP BIT(25) - -#define UTMIP_PLL_CFG1 0x484 -#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) -#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) -#define 
UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) -#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) - #define SATA_PLL_CFG0 0x490 #define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0) #define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2) @@ -1366,9 +1345,9 @@ static u32 pll_expo_p_to_pdiv(u32 p, u32 *pdiv) static struct tegra_clk_pll_freq_table pll_x_freq_table[] = { /* 1 GHz */ - { 12000000, 1000000000, 166, 1, 1, 0 }, /* actual: 996.0 MHz */ - { 13000000, 1000000000, 153, 1, 1, 0 }, /* actual: 994.0 MHz */ - { 38400000, 1000000000, 156, 3, 1, 0 }, /* actual: 998.4 MHz */ + { 12000000, 1000000000, 166, 1, 2, 0 }, /* actual: 996.0 MHz */ + { 13000000, 1000000000, 153, 1, 2, 0 }, /* actual: 994.0 MHz */ + { 38400000, 1000000000, 156, 3, 2, 0 }, /* actual: 998.4 MHz */ { 0, 0, 0, 0, 0, 0 }, }; @@ -1417,9 +1396,9 @@ static struct div_nmp pllc_nmp = { }; static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = { - { 12000000, 510000000, 85, 1, 1, 0 }, - { 13000000, 510000000, 78, 1, 1, 0 }, /* actual: 507.0 MHz */ - { 38400000, 510000000, 79, 3, 1, 0 }, /* actual: 505.6 MHz */ + { 12000000, 510000000, 85, 1, 2, 0 }, + { 13000000, 510000000, 78, 1, 2, 0 }, /* actual: 507.0 MHz */ + { 38400000, 510000000, 79, 3, 2, 0 }, /* actual: 505.6 MHz */ { 0, 0, 0, 0, 0, 0 }, }; @@ -1532,9 +1511,9 @@ static struct div_nmp pllss_nmp = { }; static struct tegra_clk_pll_freq_table pll_c4_vco_freq_table[] = { - { 12000000, 600000000, 50, 1, 0, 0 }, - { 13000000, 600000000, 46, 1, 0, 0 }, /* actual: 598.0 MHz */ - { 38400000, 600000000, 62, 4, 0, 0 }, /* actual: 595.2 MHz */ + { 12000000, 600000000, 50, 1, 1, 0 }, + { 13000000, 600000000, 46, 1, 1, 0 }, /* actual: 598.0 MHz */ + { 38400000, 600000000, 62, 4, 1, 0 }, /* actual: 595.2 MHz */ { 0, 0, 0, 0, 0, 0 }, }; @@ -1583,19 +1562,19 @@ static struct tegra_clk_pll_params pll_c4_vco_params = { }; static struct tegra_clk_pll_freq_table pll_m_freq_table[] = { - { 12000000, 800000000, 66, 1, 0, 0 }, /* actual: 792.0 MHz */ - { 13000000, 800000000, 61, 1, 0, 0 }, /* actual: 793.0 MHz */ - { 38400000, 297600000, 93, 4, 2, 0 }, - { 38400000, 400000000, 125, 4, 2, 0 }, - { 38400000, 532800000, 111, 4, 1, 0 }, - { 38400000, 665600000, 104, 3, 1, 0 }, - { 38400000, 800000000, 125, 3, 1, 0 }, - { 38400000, 931200000, 97, 4, 0, 0 }, - { 38400000, 1065600000, 111, 4, 0, 0 }, - { 38400000, 1200000000, 125, 4, 0, 0 }, - { 38400000, 1331200000, 104, 3, 0, 0 }, - { 38400000, 1459200000, 76, 2, 0, 0 }, - { 38400000, 1600000000, 125, 3, 0, 0 }, + { 12000000, 800000000, 66, 1, 1, 0 }, /* actual: 792.0 MHz */ + { 13000000, 800000000, 61, 1, 1, 0 }, /* actual: 793.0 MHz */ + { 38400000, 297600000, 93, 4, 3, 0 }, + { 38400000, 400000000, 125, 4, 3, 0 }, + { 38400000, 532800000, 111, 4, 2, 0 }, + { 38400000, 665600000, 104, 3, 2, 0 }, + { 38400000, 800000000, 125, 3, 2, 0 }, + { 38400000, 931200000, 97, 4, 1, 0 }, + { 38400000, 1065600000, 111, 4, 1, 0 }, + { 38400000, 1200000000, 125, 4, 1, 0 }, + { 38400000, 1331200000, 104, 3, 1, 0 }, + { 38400000, 1459200000, 76, 2, 1, 0 }, + { 38400000, 1600000000, 125, 3, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -1705,9 +1684,9 @@ static struct tegra_clk_pll_params pll_e_params = { }; static struct tegra_clk_pll_freq_table pll_re_vco_freq_table[] = { - { 12000000, 672000000, 56, 1, 0, 0 }, - { 13000000, 672000000, 51, 1, 0, 0 }, /* actual: 663.0 MHz */ - { 38400000, 672000000, 70, 4, 0, 0 }, + { 12000000, 672000000, 56, 1, 1, 0 }, + { 13000000, 672000000, 51, 1, 1, 0 }, /* actual: 663.0 MHz */ 
+ { 38400000, 672000000, 70, 4, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -1754,8 +1733,8 @@ static struct div_nmp pllp_nmp = { }; static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { - { 12000000, 408000000, 34, 1, 0, 0 }, - { 38400000, 408000000, 85, 8, 0, 0 }, /* cf = 4.8MHz, allowed exception */ + { 12000000, 408000000, 34, 1, 1, 0 }, + { 38400000, 408000000, 85, 8, 1, 0 }, /* cf = 4.8MHz, allowed exception */ { 0, 0, 0, 0, 0, 0 }, }; @@ -1820,14 +1799,14 @@ static struct div_nmp plla_nmp = { }; static struct tegra_clk_pll_freq_table pll_a_freq_table[] = { - { 12000000, 282240000, 47, 1, 1, 1, 0xf148 }, /* actual: 282240234 */ - { 12000000, 368640000, 61, 1, 1, 1, 0xfe15 }, /* actual: 368640381 */ - { 12000000, 240000000, 60, 1, 2, 1, 0 }, - { 13000000, 282240000, 43, 1, 1, 1, 0xfd7d }, /* actual: 282239807 */ - { 13000000, 368640000, 56, 1, 1, 1, 0x06d8 }, /* actual: 368640137 */ - { 13000000, 240000000, 55, 1, 2, 1, 0 }, /* actual: 238.3 MHz */ - { 38400000, 282240000, 44, 3, 1, 1, 0xf333 }, /* actual: 282239844 */ - { 38400000, 368640000, 57, 3, 1, 1, 0x0333 }, /* actual: 368639844 */ + { 12000000, 282240000, 47, 1, 2, 1, 0xf148 }, /* actual: 282240234 */ + { 12000000, 368640000, 61, 1, 2, 1, 0xfe15 }, /* actual: 368640381 */ + { 12000000, 240000000, 60, 1, 3, 1, 0 }, + { 13000000, 282240000, 43, 1, 2, 1, 0xfd7d }, /* actual: 282239807 */ + { 13000000, 368640000, 56, 1, 2, 1, 0x06d8 }, /* actual: 368640137 */ + { 13000000, 240000000, 55, 1, 3, 1, 0 }, /* actual: 238.3 MHz */ + { 38400000, 282240000, 44, 3, 2, 1, 0xf333 }, /* actual: 282239844 */ + { 38400000, 368640000, 57, 3, 2, 1, 0x0333 }, /* actual: 368639844 */ { 38400000, 240000000, 75, 3, 3, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0 }, }; @@ -1873,9 +1852,9 @@ static struct div_nmp plld_nmp = { }; static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { - { 12000000, 594000000, 99, 1, 1, 0, 0 }, - { 13000000, 594000000, 91, 1, 1, 0, 0xfc4f }, /* actual: 594000183 */ - { 38400000, 594000000, 30, 1, 1, 0, 0x0e00 }, + { 12000000, 594000000, 99, 1, 2, 0, 0 }, + { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */ + { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 }, { 0, 0, 0, 0, 0, 0, 0 }, }; @@ -1911,9 +1890,9 @@ static struct tegra_clk_pll_params pll_d_params = { }; static struct tegra_clk_pll_freq_table tegra210_pll_d2_freq_table[] = { - { 12000000, 594000000, 99, 1, 1, 0, 0xf000 }, - { 13000000, 594000000, 91, 1, 1, 0, 0xfc4f }, /* actual: 594000183 */ - { 38400000, 594000000, 30, 1, 1, 0, 0x0e00 }, + { 12000000, 594000000, 99, 1, 2, 0, 0xf000 }, + { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */ + { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 }, { 0, 0, 0, 0, 0, 0, 0 }, }; @@ -1935,8 +1914,9 @@ static struct tegra_clk_pll_params pll_d2_params = { .sdm_din_mask = PLLA_SDM_DIN_MASK, .sdm_ctrl_reg = PLLD2_MISC1, .sdm_ctrl_en_mask = PLLD2_SDM_EN_MASK, - .ssc_ctrl_reg = PLLD2_MISC1, - .ssc_ctrl_en_mask = PLLD2_SSC_EN_MASK, + /* disable spread-spectrum for pll_d2 */ + .ssc_ctrl_reg = 0, + .ssc_ctrl_en_mask = 0, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllss_nmp, @@ -1955,9 +1935,9 @@ static struct tegra_clk_pll_params pll_d2_params = { }; static struct tegra_clk_pll_freq_table pll_dp_freq_table[] = { - { 12000000, 270000000, 90, 1, 3, 0, 0xf000 }, - { 13000000, 270000000, 83, 1, 3, 0, 0xf000 }, /* actual: 269.8 MHz */ - { 38400000, 270000000, 28, 1, 3, 0, 0xf400 }, + { 12000000, 270000000, 90, 1, 4, 0, 0xf000 }, + { 13000000, 270000000, 83, 1, 4, 0, 0xf000 }, /* 
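A recurring change in these Tegra210 frequency tables (pll_x, pll_cx, pll_c4, pll_m, pll_re, pll_p, pll_a, pll_d, pll_d2, pll_dp here, pll_u just below): the p column switches from the raw register encoding to the literal post-divider, with the driver translating back through the params' pdiv mapping hooks when programming the hardware. The invariant each row now satisfies, as a hedged sketch (field names per struct tegra_clk_pll_freq_table):

static unsigned long pll_row_rate(const struct tegra_clk_pll_freq_table *e)
{
        /* with p as the natural divider, e.g. { 12000000, 1000000000,
         * 166, 1, 2, 0 } gives 12 MHz / 1 * 166 / 2 = 996 MHz,
         * matching the "actual:" comments */
        return e->input_rate / e->m * e->n / e->p;
}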
actual: 269.8 MHz */ + { 38400000, 270000000, 28, 1, 4, 0, 0xf400 }, { 0, 0, 0, 0, 0, 0, 0 }, }; @@ -2007,9 +1987,9 @@ static struct div_nmp pllu_nmp = { }; static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { - { 12000000, 480000000, 40, 1, 0, 0 }, - { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */ - { 38400000, 480000000, 25, 2, 0, 0 }, + { 12000000, 480000000, 40, 1, 1, 0 }, + { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */ + { 38400000, 480000000, 25, 2, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -2037,47 +2017,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = { .calc_rate = tegra210_pll_fixed_mdiv_cfg, }; -struct utmi_clk_param { - /* Oscillator Frequency in KHz */ - u32 osc_frequency; - /* UTMIP PLL Enable Delay Count */ - u8 enable_delay_count; - /* UTMIP PLL Stable count */ - u16 stable_count; - /* UTMIP PLL Active delay count */ - u8 active_delay_count; - /* UTMIP PLL Xtal frequency count */ - u16 xtal_freq_count; -}; - -static const struct utmi_clk_param utmi_parameters[] = { - { - .osc_frequency = 38400000, .enable_delay_count = 0x0, - .stable_count = 0x0, .active_delay_count = 0x6, - .xtal_freq_count = 0x80 - }, { - .osc_frequency = 13000000, .enable_delay_count = 0x02, - .stable_count = 0x33, .active_delay_count = 0x05, - .xtal_freq_count = 0x7f - }, { - .osc_frequency = 19200000, .enable_delay_count = 0x03, - .stable_count = 0x4b, .active_delay_count = 0x06, - .xtal_freq_count = 0xbb - }, { - .osc_frequency = 12000000, .enable_delay_count = 0x02, - .stable_count = 0x2f, .active_delay_count = 0x08, - .xtal_freq_count = 0x76 - }, { - .osc_frequency = 26000000, .enable_delay_count = 0x04, - .stable_count = 0x66, .active_delay_count = 0x09, - .xtal_freq_count = 0xfe - }, { - .osc_frequency = 16800000, .enable_delay_count = 0x03, - .stable_count = 0x41, .active_delay_count = 0x0a, - .xtal_freq_count = 0xa4 - }, -}; - static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_ispb] = { .dt_id = TEGRA210_CLK_ISPB, .present = true }, [tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true }, @@ -2154,6 +2093,8 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true }, [tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true }, [tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true }, + [tegra_clk_sor1] = { .dt_id = TEGRA210_CLK_SOR1, .present = true }, + [tegra_clk_sor1_src] = { .dt_id = TEGRA210_CLK_SOR1_SRC, .present = true }, [tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true }, [tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, }, [tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true }, @@ -2345,114 +2286,6 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = { static struct clk **clks; -static void tegra210_utmi_param_configure(void __iomem *clk_base) -{ - u32 reg; - int i; - - for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { - if (osc_freq == utmi_parameters[i].osc_frequency) - break; - } - - if (i >= ARRAY_SIZE(utmi_parameters)) { - pr_err("%s: Unexpected oscillator freq %lu\n", __func__, - osc_freq); - return; - } - - reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0); - reg |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE | - PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT | - PLLU_HW_PWRDN_CFG0_USE_LOCKDET; - reg &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL | - PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL); - writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0); - - reg = 
readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0); - reg |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0); - udelay(1); - - reg = readl_relaxed(clk_base + PLLU_BASE); - reg &= ~PLLU_BASE_CLKENABLE_USB; - writel_relaxed(reg, clk_base + PLLU_BASE); - - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - udelay(10); - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL stable and active counts */ - /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */ - reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); - reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count); - - reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i]. - active_delay_count); - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL delay and oscillator frequency counts */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i]. - enable_delay_count); - - reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); - reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i]. - xtal_freq_count); - - reg |= UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - /* Remove power downs from UTMIP PLL control bits */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - udelay(1); - - /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); - reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP; - reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP; - reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); - - /* Setup HW control of UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); - - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; - reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); - - udelay(1); - - reg = readl_relaxed(clk_base + XUSB_PLL_CFG0); - reg &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY; - writel_relaxed(reg, clk_base + XUSB_PLL_CFG0); - - udelay(1); - - /* Enable HW control UTMIPLL */ - reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); - reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; - writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); -} - static __init void tegra210_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { @@ -2463,18 +2296,18 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, 1, 2); clks[TEGRA210_CLK_XUSB_SS_DIV2] = clk; - clk = tegra_clk_register_periph_fixed("dpaux", "pll_p", 0, clk_base, + clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base, + 1, 17, 222); + clks[TEGRA210_CLK_SOR_SAFE] = clk; + + clk = tegra_clk_register_periph_fixed("dpaux", "sor_safe", 0, clk_base, 1, 17, 181); clks[TEGRA210_CLK_DPAUX] = clk; - clk = 
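In the surrounding periph-clk init hunk, sor_safe moves ahead of dpaux/dpaux1 at the same time as both are reparented from pll_p onto it; registering the parent first lets the children resolve "sor_safe" immediately rather than passing through the clock framework's orphan handling. The resulting order, paraphrasing the hunk:

        clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0,
                                              clk_base, 1, 17, 222);
        clk = tegra_clk_register_periph_fixed("dpaux", "sor_safe", 0,
                                              clk_base, 1, 17, 181);
        clk = tegra_clk_register_periph_fixed("dpaux1", "sor_safe", 0,
                                              clk_base, 1, 17, 207);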
tegra_clk_register_periph_fixed("dpaux1", "pll_p", 0, clk_base, + clk = tegra_clk_register_periph_fixed("dpaux1", "sor_safe", 0, clk_base, 1, 17, 207); clks[TEGRA210_CLK_DPAUX1] = clk; - clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base, - 1, 17, 222); - clks[TEGRA210_CLK_SOR_SAFE] = clk; - /* pll_d_dsi_out */ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0, clk_base + PLLD_MISC0, 21, 0, &pll_d_lock); @@ -2520,7 +2353,6 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, static void __init tegra210_pll_init(void __iomem *clk_base, void __iomem *pmc) { - u32 val; struct clk *clk; /* PLLC */ @@ -2580,12 +2412,9 @@ static void __init tegra210_pll_init(void __iomem *clk_base, clks[TEGRA210_CLK_PLL_M_UD] = clk; /* PLLU_VCO */ - val = readl(clk_base + pll_u_vco_params.base_reg); - val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */ - writel(val, clk_base + pll_u_vco_params.base_reg); - - clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, - 0, &pll_u_vco_params, &pll_u_lock, pll_ref_freq); + clk = tegra_clk_register_pllu_tegra210("pll_u_vco", "pll_ref", + clk_base, 0, &pll_u_vco_params, + &pll_u_lock); clk_register_clkdev(clk, "pll_u_vco", NULL); clks[TEGRA210_CLK_PLL_U] = clk; @@ -2618,8 +2447,6 @@ static void __init tegra210_pll_init(void __iomem *clk_base, clk_register_clkdev(clk, "pll_u_out2", NULL); clks[TEGRA210_CLK_PLL_U_OUT2] = clk; - tegra210_utmi_param_configure(clk_base); - /* PLLU_480M */ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u_vco", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 9396f4930da7..8e2db5ead8da 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -118,20 +118,6 @@ #define AUDIO_SYNC_DOUBLER 0x49c -#define UTMIP_PLL_CFG2 0x488 -#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6) -#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2) -#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4) - -#define UTMIP_PLL_CFG1 0x484 -#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6) -#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) -#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14) -#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12) -#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16) - /* Tegra CPU clock and reset control regs */ #define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c #define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340 @@ -207,46 +193,6 @@ static DEFINE_SPINLOCK(emc_lock); static struct clk **clks; -/* - * Structure defining the fields for USB UTMI clocks Parameters. 
- */ -struct utmi_clk_param { - /* Oscillator Frequency in KHz */ - u32 osc_frequency; - /* UTMIP PLL Enable Delay Count */ - u8 enable_delay_count; - /* UTMIP PLL Stable count */ - u8 stable_count; - /* UTMIP PLL Active delay count */ - u8 active_delay_count; - /* UTMIP PLL Xtal frequency count */ - u8 xtal_freq_count; -}; - -static const struct utmi_clk_param utmi_parameters[] = { - { - .osc_frequency = 13000000, .enable_delay_count = 0x02, - .stable_count = 0x33, .active_delay_count = 0x05, - .xtal_freq_count = 0x7f - }, { - .osc_frequency = 19200000, .enable_delay_count = 0x03, - .stable_count = 0x4b, .active_delay_count = 0x06, - .xtal_freq_count = 0xbb - }, { - .osc_frequency = 12000000, .enable_delay_count = 0x02, - .stable_count = 0x2f, .active_delay_count = 0x04, - .xtal_freq_count = 0x76 - }, { - .osc_frequency = 26000000, .enable_delay_count = 0x04, - .stable_count = 0x66, .active_delay_count = 0x09, - .xtal_freq_count = 0xfe - }, { - .osc_frequency = 16800000, .enable_delay_count = 0x03, - .stable_count = 0x41, .active_delay_count = 0x0a, - .xtal_freq_count = 0xa4 - }, -}; - static struct tegra_clk_pll_freq_table pll_c_freq_table[] = { { 12000000, 1040000000, 520, 6, 1, 8 }, { 13000000, 1040000000, 480, 6, 1, 8 }, @@ -873,59 +819,6 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true }, }; -static void tegra30_utmi_param_configure(void) -{ - unsigned int i; - u32 reg; - - for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) { - if (input_freq == utmi_parameters[i].osc_frequency) - break; - } - - if (i >= ARRAY_SIZE(utmi_parameters)) { - pr_err("%s: Unexpected input rate %lu\n", __func__, input_freq); - return; - } - - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL stable and active counts */ - reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); - reg |= UTMIP_PLL_CFG2_STABLE_COUNT( - utmi_parameters[i].stable_count); - - reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT( - utmi_parameters[i].active_delay_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; - reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; - - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2); - - /* Program UTMIP PLL delay and oscillator frequency counts */ - reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1); - reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); - - reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT( - utmi_parameters[i].enable_delay_count); - - reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); - reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT( - utmi_parameters[i].xtal_freq_count); - - /* Remove power downs from UTMIP PLL control bits */ - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; - reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; - - writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1); -} - static const char *pll_e_parents[] = { "pll_ref", "pll_p" }; static void __init tegra30_pll_init(void) @@ -972,12 +865,10 @@ static void __init tegra30_pll_init(void) clks[TEGRA30_CLK_PLL_X_OUT0] = clk; /* PLLU */ - clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0, - &pll_u_params, NULL); + clk = tegra_clk_register_pllu("pll_u", "pll_ref", clk_base, 0, + &pll_u_params, NULL); clks[TEGRA30_CLK_PLL_U] = clk; - tegra30_utmi_param_configure(); - /* PLLD */ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, 
pmc_base, 0, &pll_d_params, &pll_d_lock); diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 9421f0310999..6ba82ecffd4d 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -427,6 +427,23 @@ struct clk *tegra_clk_register_pllmb(const char *name, const char *parent_name, struct tegra_clk_pll_params *pll_params, spinlock_t *lock); +struct clk *tegra_clk_register_pllu(const char *name, const char *parent_name, + void __iomem *clk_base, unsigned long flags, + struct tegra_clk_pll_params *pll_params, + spinlock_t *lock); + +struct clk *tegra_clk_register_pllu_tegra114(const char *name, + const char *parent_name, + void __iomem *clk_base, unsigned long flags, + struct tegra_clk_pll_params *pll_params, + spinlock_t *lock); + +struct clk *tegra_clk_register_pllu_tegra210(const char *name, + const char *parent_name, + void __iomem *clk_base, unsigned long flags, + struct tegra_clk_pll_params *pll_params, + spinlock_t *lock); + /** * struct tegra_clk_pll_out - PLL divider down clock * diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index ef2ec64fe547..0e47d95faf49 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -108,6 +108,9 @@ static struct ti_dt_clk am33xx_clks[] = { DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"), DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"), DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"), + DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"), + DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"), + DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 097fc90bf19a..e816a7500e43 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -58,6 +58,7 @@ static struct ti_dt_clk am43xx_clks[] = { DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"), DT_CLK(NULL, "sha0_fck", "sha0_fck"), DT_CLK(NULL, "aes0_fck", "aes0_fck"), + DT_CLK(NULL, "rng_fck", "rng_fck"), DT_CLK(NULL, "timer1_fck", "timer1_fck"), DT_CLK(NULL, "timer2_fck", "timer2_fck"), DT_CLK(NULL, "timer3_fck", "timer3_fck"), @@ -115,6 +116,12 @@ static struct ti_dt_clk am43xx_clks[] = { DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"), DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"), DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"), + DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"), + DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"), + DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"), + DT_CLK("48306200.pwm", "tbclk", "ehrpwm3_tbclk"), + DT_CLK("48308200.pwm", "tbclk", "ehrpwm4_tbclk"), + DT_CLK("4830a200.pwm", "tbclk", "ehrpwm5_tbclk"), { .node_name = NULL }, }; diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index 9a736d939806..e960d686d9db 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -11,7 +11,6 @@ #include <linux/of_address.h> #include <linux/clk-provider.h> #include <linux/mfd/dbx500-prcmu.h> -#include <linux/platform_data/clk-ux500.h> #include "clk.h" #define PRCC_NUM_PERIPH_CLUSTERS 6 @@ -48,11 +47,6 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, return PRCC_SHOW(clk_data, base, bit); } -static const struct of_device_id u8500_clk_of_match[] = { - { .compatible = "stericsson,u8500-clks", }, - { }, -}; - /* CLKRST4 is missing making it hard to index things */ enum clkrst_index { CLKRST1_INDEX = 0, @@ -63,22 +57,15 @@ enum clkrst_index { CLKRST_MAX, }; -void u8500_clk_init(void) +static void u8500_clk_init(struct 
device_node *np) { struct prcmu_fw_version *fw_version; - struct device_node *np = NULL; struct device_node *child = NULL; const char *sgaclk_parent = NULL; struct clk *clk, *rtc_clk, *twd_clk; u32 bases[CLKRST_MAX]; int i; - if (of_have_populated_dt()) - np = of_find_matching_node(NULL, u8500_clk_of_match); - if (!np) { - pr_err("Either DT or U8500 Clock node not found\n"); - return; - } for (i = 0; i < ARRAY_SIZE(bases); i++) { struct resource r; @@ -573,3 +560,4 @@ void u8500_clk_init(void) of_clk_add_provider(child, of_clk_src_simple_get, twd_clk); } } +CLK_OF_DECLARE(u8500_clks, "stericsson,u8500-clks", u8500_clk_init); diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c index 86549e59fb42..133859f0e2bf 100644 --- a/drivers/clk/ux500/u8540_clk.c +++ b/drivers/clk/ux500/u8540_clk.c @@ -12,14 +12,8 @@ #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/mfd/dbx500-prcmu.h> -#include <linux/platform_data/clk-ux500.h> #include "clk.h" -static const struct of_device_id u8540_clk_of_match[] = { - { .compatible = "stericsson,u8540-clks", }, - { } -}; - /* CLKRST4 is missing making it hard to index things */ enum clkrst_index { CLKRST1_INDEX = 0, @@ -30,19 +24,12 @@ enum clkrst_index { CLKRST_MAX, }; -void u8540_clk_init(void) +static void u8540_clk_init(struct device_node *np) { struct clk *clk; - struct device_node *np = NULL; u32 bases[CLKRST_MAX]; int i; - if (of_have_populated_dt()) - np = of_find_matching_node(NULL, u8540_clk_of_match); - if (!np) { - pr_err("Either DT or U8540 Clock node not found\n"); - return; - } for (i = 0; i < ARRAY_SIZE(bases); i++) { struct resource r; @@ -607,3 +594,4 @@ void u8540_clk_init(void) bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "rng"); } +CLK_OF_DECLARE(u8540_clks, "stericsson,u8540-clks", u8540_clk_init); diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c index 2138a4c8cbca..7b6bca49ce42 100644 --- a/drivers/clk/ux500/u9540_clk.c +++ b/drivers/clk/ux500/u9540_clk.c @@ -9,10 +9,10 @@ #include <linux/clk-provider.h> #include <linux/mfd/dbx500-prcmu.h> -#include <linux/platform_data/clk-ux500.h> #include "clk.h" -void u9540_clk_init(void) +static void u9540_clk_init(struct device_node *np) { /* register clocks here */ } +CLK_OF_DECLARE(u9540_clks, "stericsson,u9540-clks", u9540_clk_init); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 5effd3027319..28bce3f4f81d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void) arch_timer_ppi[PHYS_NONSECURE_PPI]); } -static int arch_timer_setup(struct clock_event_device *clk) +static int arch_timer_starting_cpu(unsigned int cpu) { + struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); + __arch_timer_setup(ARCH_CP15_TIMER, clk); enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); @@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk) clk->set_state_shutdown(clk); } -static int arch_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int arch_timer_dying_cpu(unsigned int cpu) { - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - arch_timer_setup(this_cpu_ptr(arch_timer_evt)); - break; - case CPU_DYING: - arch_timer_stop(this_cpu_ptr(arch_timer_evt)); - break; - } + struct 
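All three ux500 conversions here are the same change: the init entry points stop hunting for their own node with of_find_matching_node() and become OF-declared clock providers, invoked by the clock core during of_clk_init() with the matched node already in hand. The pattern, with placeholder names:

static void __init example_clk_init(struct device_node *np)
{
        /* np is the matched "vendor,example-clks" node; register this
         * provider's clocks here */
}
CLK_OF_DECLARE(example_clks, "vendor,example-clks", example_clk_init);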
clock_event_device *clk = this_cpu_ptr(arch_timer_evt); - return NOTIFY_OK; + arch_timer_stop(clk); + return 0; } -static struct notifier_block arch_timer_cpu_nb = { - .notifier_call = arch_timer_cpu_notify, -}; - #ifdef CONFIG_CPU_PM static unsigned int saved_cntkctl; static int arch_timer_cpu_pm_notify(struct notifier_block *self, @@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void) { return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); } + +static void __init arch_timer_cpu_pm_deinit(void) +{ + WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); +} + #else static int __init arch_timer_cpu_pm_init(void) { return 0; } + +static void __init arch_timer_cpu_pm_deinit(void) +{ +} #endif static int __init arch_timer_register(void) @@ -621,22 +618,23 @@ static int __init arch_timer_register(void) goto out_free; } - err = register_cpu_notifier(&arch_timer_cpu_nb); - if (err) - goto out_free_irq; - err = arch_timer_cpu_pm_init(); if (err) goto out_unreg_notify; - /* Immediately configure the timer on the boot CPU */ - arch_timer_setup(this_cpu_ptr(arch_timer_evt)); + /* Register and immediately configure the timer on the boot CPU */ + err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, + "AP_ARM_ARCH_TIMER_STARTING", + arch_timer_starting_cpu, arch_timer_dying_cpu); + if (err) + goto out_unreg_cpupm; return 0; +out_unreg_cpupm: + arch_timer_cpu_pm_deinit(); + out_unreg_notify: - unregister_cpu_notifier(&arch_timer_cpu_nb); -out_free_irq: free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); if (arch_timer_has_nonsecure_ppi()) free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 2a9ceb6e93f9..8da03298f844 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int gt_clockevents_init(struct clock_event_device *clk) +static int gt_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *clk = this_cpu_ptr(gt_evt); clk->name = "arm_global_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | @@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk) return 0; } -static void gt_clockevents_stop(struct clock_event_device *clk) +static int gt_dying_cpu(unsigned int cpu) { + struct clock_event_device *clk = this_cpu_ptr(gt_evt); + gt_clockevent_shutdown(clk); disable_percpu_irq(clk->irq); + return 0; } static cycle_t gt_clocksource_read(struct clocksource *cs) @@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void) return clocksource_register_hz(&gt_clocksource, gt_clk_rate); } -static int gt_cpu_notify(struct notifier_block *self, unsigned long action, - void *hcpu) -{ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - gt_clockevents_init(this_cpu_ptr(gt_evt)); - break; - case CPU_DYING: - gt_clockevents_stop(this_cpu_ptr(gt_evt)); - break; - } - - return NOTIFY_OK; -} -static struct notifier_block gt_cpu_nb = { - .notifier_call = gt_cpu_notify, -}; - static int __init global_timer_of_register(struct device_node *np) { struct clk *gt_clk; @@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np) goto out_free; } - err = register_cpu_notifier(&gt_cpu_nb); - if (err) { - pr_warn("global-timer: unable to register cpu notifier.\n"); - goto out_irq; - } - - /* 
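The arch-timer hunk above sets the template for the remaining clocksource changes: each CPU_STARTING/CPU_DYING notifier pair becomes a pair of hotplug-state callbacks, and cpuhp_setup_state() both installs them and runs the startup callback on every CPU already online, which is why the explicit "configure the timer on the boot CPU" calls disappear throughout. A generic sketch of the converted shape (all names are placeholders):

static int example_timer_starting_cpu(unsigned int cpu)
{
        struct clock_event_device *evt = per_cpu_ptr(example_evt, cpu);

        /* per-device field setup omitted for brevity */
        clockevents_config_and_register(evt, example_rate, 0xf, 0x7fffffff);
        return 0;
}

static int example_timer_dying_cpu(unsigned int cpu)
{
        struct clock_event_device *evt = per_cpu_ptr(example_evt, cpu);

        evt->set_state_shutdown(evt);
        return 0;
}

        /* in the init path, replacing register_cpu_notifier() plus the
         * manual boot-CPU setup */
        err = cpuhp_setup_state(CPUHP_AP_EXAMPLE_TIMER_STARTING,
                                "AP_EXAMPLE_TIMER_STARTING",
                                example_timer_starting_cpu,
                                example_timer_dying_cpu);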
Immediately configure the timer on the boot CPU */ + /* Register and immediately configure the timer on the boot CPU */ err = gt_clocksource_init(); if (err) goto out_irq; - err = gt_clockevents_init(this_cpu_ptr(gt_evt)); + err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, + "AP_ARM_GLOBAL_TIMER_STARTING", + gt_starting_cpu, gt_dying_cpu); if (err) goto out_irq; diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index 84aed78261e4..24db6d605549 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -119,5 +119,5 @@ static int __init clps711x_timer_init(struct device_node *np) return -EINVAL; } } -CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init); +CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); #endif diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 776b6c86dcd5..89f1c2edbe02 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -16,10 +16,9 @@ static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); -static void dummy_timer_setup(void) +static int dummy_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); - struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt); + struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu); evt->name = "dummy_timer"; evt->features = CLOCK_EVT_FEAT_PERIODIC | @@ -29,36 +28,13 @@ static void dummy_timer_setup(void) evt->cpumask = cpumask_of(cpu); clockevents_register_device(evt); + return 0; } -static int dummy_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) - dummy_timer_setup(); - - return NOTIFY_OK; -} - -static struct notifier_block dummy_timer_cpu_nb = { - .notifier_call = dummy_timer_cpu_notify, -}; - static int __init dummy_timer_register(void) { - int err = 0; - - cpu_notifier_register_begin(); - err = __register_cpu_notifier(&dummy_timer_cpu_nb); - if (err) - goto out; - - /* We won't get a call on the boot CPU, so register immediately */ - if (num_possible_cpus() > 1) - dummy_timer_setup(); - -out: - cpu_notifier_register_done(); - return err; + return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING, + "AP_DUMMY_TIMER_STARTING", + dummy_timer_starting_cpu, NULL); } early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 0d18dd4b3bd2..41840d02c331 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) +static int exynos4_mct_starting_cpu(unsigned int cpu) { + struct mct_clock_event_device *mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); struct clock_event_device *evt = &mevt->evt; - unsigned int cpu = smp_processor_id(); mevt->base = EXYNOS4_MCT_L_BASE(cpu); snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); @@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) return 0; } -static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) +static int exynos4_mct_dying_cpu(unsigned int cpu) { + struct mct_clock_event_device *mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); struct clock_event_device *evt = &mevt->evt; evt->set_state_shutdown(evt); @@ -491,39 +494,12 @@ 
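dummy_timer above is the most compact instance of the conversion: since cpuhp_setup_state() invokes the startup callback on all online CPUs, including the boot CPU, the old num_possible_cpus() > 1 special case goes away, and a driver with nothing to undo may pass a NULL teardown callback:

        return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
                                 "AP_DUMMY_TIMER_STARTING",
                                 dummy_timer_starting_cpu,
                                 NULL);         /* no dying callback */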
static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } + return 0; } -static int exynos4_mct_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - struct mct_clock_event_device *mevt; - - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_setup(mevt); - break; - case CPU_DYING: - mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_stop(mevt); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block exynos4_mct_cpu_nb = { - .notifier_call = exynos4_mct_cpu_notify, -}; - static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) { int err, cpu; - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); struct clk *mct_clk, *tick_clk; tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : @@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * } } - err = register_cpu_notifier(&exynos4_mct_cpu_nb); + /* Install hotplug callbacks which configure the timer on this CPU */ + err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, + "AP_EXYNOS4_MCT_TIMER_STARTING", + exynos4_mct_starting_cpu, + exynos4_mct_dying_cpu); if (err) goto out_irq; - /* Immediately configure the timer on the boot CPU */ - exynos4_local_timer_setup(mevt); return 0; out_irq: diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index bcd5c0d602a0..a80ab3e446b7 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -90,7 +90,7 @@ unsigned long long sched_clock(void) return ticks << HARDWARE_TO_NS_SHIFT; } -static void arch_timer_setup(unsigned int cpu) +static int arch_timer_starting_cpu(unsigned int cpu) { unsigned int txdivtime; struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); @@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu) val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0); __core_reg_set(TXTIMER, val); } + return 0; } -static int arch_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (long)hcpu; - - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - arch_timer_setup(cpu); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block arch_timer_cpu_nb = { - .notifier_call = arch_timer_cpu_notify, -}; - int __init metag_generic_timer_init(void) { /* @@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void) setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); - /* Configure timer on boot CPU */ - arch_timer_setup(smp_processor_id()); - - /* Hook cpu boot to configure other CPU's timers */ - register_cpu_notifier(&arch_timer_cpu_nb); - - return 0; + /* Hook cpu boot to configure the CPU's timers */ + return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING, + "AP_METAG_TIMER_STARTING", + arch_timer_starting_cpu, NULL); } diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 1572c7a778ab..d91e8725917c 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = { .name = "timer", }; -static void gic_clockevent_cpu_init(struct clock_event_device *cd) +static void gic_clockevent_cpu_init(unsigned int cpu, + struct clock_event_device *cd) { - 
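One detail these conversions highlight: the callbacks now receive the CPU number, so per-cpu state can be taken with per_cpu_ptr(..., cpu) (as exynos_mct and metag do) rather than this_cpu_ptr() (as the MIPS GIC code below keeps doing). Both work in STARTING/DYING callbacks, which run on the target CPU, but the explicit form is valid from any context, which is why the removed notifiers needed their "avoid spurious preemptible warnings" comments:

        /* implicit: only correct when already on the target CPU */
        mevt = this_cpu_ptr(&percpu_mct_tick);
        /* explicit: correct from any context */
        mevt = per_cpu_ptr(&percpu_mct_tick, cpu);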
unsigned int cpu = smp_processor_id(); - cd->name = "MIPS GIC"; cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; @@ -79,19 +78,10 @@ static void gic_update_frequency(void *data) clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate); } -static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, - void *data) +static int gic_starting_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); - break; - case CPU_DYING: - gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); - break; - } - - return NOTIFY_OK; + gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); + return 0; } static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, @@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } - -static struct notifier_block gic_cpu_nb = { - .notifier_call = gic_cpu_notifier, -}; +static int gic_dying_cpu(unsigned int cpu) +{ + gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); + return 0; +} static struct notifier_block gic_clk_nb = { .notifier_call = gic_clk_notifier, @@ -125,12 +116,9 @@ static int gic_clockevent_init(void) if (ret < 0) return ret; - ret = register_cpu_notifier(&gic_cpu_nb); - if (ret < 0) - pr_warn("GIC: Unable to register CPU notifier\n"); - - gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); - + cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING, + "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu, + gic_dying_cpu); return 0; } diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 662576339049..3283cfa2aa52 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = { static int msm_timer_irq; static int msm_timer_has_ppi; -static int msm_local_timer_setup(struct clock_event_device *evt) +static int msm_local_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); int err; evt->irq = msm_timer_irq; @@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt) return 0; } -static void msm_local_timer_stop(struct clock_event_device *evt) +static int msm_local_timer_dying_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); + return 0; } -static int msm_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - msm_local_timer_setup(this_cpu_ptr(msm_evt)); - break; - case CPU_DYING: - msm_local_timer_stop(this_cpu_ptr(msm_evt)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block msm_timer_cpu_nb = { - .notifier_call = msm_timer_cpu_notify, -}; - static u64 notrace msm_sched_clock_read(void) { return msm_clocksource.read(&msm_clocksource); @@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, if (res) { pr_err("request_percpu_irq failed\n"); } else { - res = register_cpu_notifier(&msm_timer_cpu_nb); + /* Install and invoke hotplug callbacks */ + res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, + "AP_QCOM_TIMER_STARTING", + msm_local_timer_starting_cpu, + 
msm_local_timer_dying_cpu); if (res) { free_percpu_irq(irq, msm_evt); goto err; } - - /* Immediately configure the timer on the boot CPU */ - msm_local_timer_setup(raw_cpu_ptr(msm_evt)); } err: diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 20ec066481fe..719b478d136e 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) /* * Setup the local clock events for a CPU. */ -static int armada_370_xp_timer_setup(struct clock_event_device *evt) +static int armada_370_xp_timer_starting_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); u32 clr = 0, set = 0; - int cpu = smp_processor_id(); if (timer25Mhz) set = TIMER0_25MHZ; @@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) return 0; } -static void armada_370_xp_timer_stop(struct clock_event_device *evt) +static int armada_370_xp_timer_dying_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); + return 0; } -static int armada_370_xp_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); - break; - case CPU_DYING: - armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block armada_370_xp_timer_cpu_nb = { - .notifier_call = armada_370_xp_timer_cpu_notify, -}; - static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; static int armada_370_xp_timer_suspend(void) @@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) return res; } - register_cpu_notifier(&armada_370_xp_timer_cpu_nb); - armada_370_xp_evt = alloc_percpu(struct clock_event_device); if (!armada_370_xp_evt) return -ENOMEM; @@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) return res; } - res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); + res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, + "AP_ARMADA_TIMER_STARTING", + armada_370_xp_timer_starting_cpu, + armada_370_xp_timer_dying_cpu); if (res) { - pr_err("Failed to setup timer"); + pr_err("Failed to setup hotplug state and timer"); return res; } diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 90f8fbc154a4..4334e0330ada 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c @@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = { .handler = sirfsoc_timer_interrupt, }; -static int sirfsoc_local_timer_setup(struct clock_event_device *ce) +static int sirfsoc_local_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu); struct irqaction *action; if (cpu == 0) @@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) return 0; } -static void sirfsoc_local_timer_stop(struct clock_event_device *ce) +static int sirfsoc_local_timer_dying_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); - sirfsoc_timer_count_disable(1); if (cpu == 0) 
remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); else remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); + return 0; } -static int sirfsoc_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); - break; - case CPU_DYING: - sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block sirfsoc_cpu_nb = { - .notifier_call = sirfsoc_cpu_notify, -}; - static int __init sirfsoc_clockevent_init(void) { sirfsoc_clockevent = alloc_percpu(struct clock_event_device); BUG_ON(!sirfsoc_clockevent); - BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); - - /* Immediately configure the timer on the boot CPU */ - return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); + /* Install and invoke hotplug callbacks */ + return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING, + "AP_MARCO_TIMER_STARTING", + sirfsoc_local_timer_starting_cpu, + sirfsoc_local_timer_dying_cpu); } /* initialize the kernel jiffy timer source */ diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 9e07588ea9f5..f82074eea779 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -220,7 +220,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) tmp1 /= tmp; - __raw_writel(tmp1, reg); + writel_relaxed(tmp1, reg); } static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) @@ -296,29 +296,29 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) * 1. Temporary Change divider for MFC and G3D * SCLKA2M(200/1=200)->(200/4=50)Mhz */ - reg = __raw_readl(S5P_CLK_DIV2); + reg = readl_relaxed(S5P_CLK_DIV2); reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK); reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) | (3 << S5P_CLKDIV2_MFC_SHIFT); - __raw_writel(reg, S5P_CLK_DIV2); + writel_relaxed(reg, S5P_CLK_DIV2); /* For MFC, G3D dividing */ do { - reg = __raw_readl(S5P_CLKDIV_STAT0); + reg = readl_relaxed(S5P_CLKDIV_STAT0); } while (reg & ((1 << 16) | (1 << 17))); /* * 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX * (200/4=50)->(667/4=166)Mhz */ - reg = __raw_readl(S5P_CLK_SRC2); + reg = readl_relaxed(S5P_CLK_SRC2); reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK); reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) | (1 << S5P_CLKSRC2_MFC_SHIFT); - __raw_writel(reg, S5P_CLK_SRC2); + writel_relaxed(reg, S5P_CLK_SRC2); do { - reg = __raw_readl(S5P_CLKMUX_STAT1); + reg = readl_relaxed(S5P_CLKMUX_STAT1); } while (reg & ((1 << 7) | (1 << 3))); /* @@ -330,19 +330,19 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) s5pv210_set_refresh(DMC1, 133000); /* 4. 
SCLKAPLL -> SCLKMPLL */ - reg = __raw_readl(S5P_CLK_SRC0); + reg = readl_relaxed(S5P_CLK_SRC0); reg &= ~(S5P_CLKSRC0_MUX200_MASK); reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT); - __raw_writel(reg, S5P_CLK_SRC0); + writel_relaxed(reg, S5P_CLK_SRC0); do { - reg = __raw_readl(S5P_CLKMUX_STAT0); + reg = readl_relaxed(S5P_CLKMUX_STAT0); } while (reg & (0x1 << 18)); } /* Change divider */ - reg = __raw_readl(S5P_CLK_DIV0); + reg = readl_relaxed(S5P_CLK_DIV0); reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK | S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK | @@ -358,25 +358,25 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) | (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT)); - __raw_writel(reg, S5P_CLK_DIV0); + writel_relaxed(reg, S5P_CLK_DIV0); do { - reg = __raw_readl(S5P_CLKDIV_STAT0); + reg = readl_relaxed(S5P_CLKDIV_STAT0); } while (reg & 0xff); /* ARM MCS value changed */ - reg = __raw_readl(S5P_ARM_MCS_CON); + reg = readl_relaxed(S5P_ARM_MCS_CON); reg &= ~0x3; if (index >= L3) reg |= 0x3; else reg |= 0x1; - __raw_writel(reg, S5P_ARM_MCS_CON); + writel_relaxed(reg, S5P_ARM_MCS_CON); if (pll_changing) { /* 5. Set Lock time = 30us*24Mhz = 0x2cf */ - __raw_writel(0x2cf, S5P_APLL_LOCK); + writel_relaxed(0x2cf, S5P_APLL_LOCK); /* * 6. Turn on APLL @@ -384,12 +384,12 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) * 6-2. Wait untile the PLL is locked */ if (index == L0) - __raw_writel(APLL_VAL_1000, S5P_APLL_CON); + writel_relaxed(APLL_VAL_1000, S5P_APLL_CON); else - __raw_writel(APLL_VAL_800, S5P_APLL_CON); + writel_relaxed(APLL_VAL_800, S5P_APLL_CON); do { - reg = __raw_readl(S5P_APLL_CON); + reg = readl_relaxed(S5P_APLL_CON); } while (!(reg & (0x1 << 29))); /* @@ -397,39 +397,39 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX * (667/4=166)->(200/4=50)Mhz */ - reg = __raw_readl(S5P_CLK_SRC2); + reg = readl_relaxed(S5P_CLK_SRC2); reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK); reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) | (0 << S5P_CLKSRC2_MFC_SHIFT); - __raw_writel(reg, S5P_CLK_SRC2); + writel_relaxed(reg, S5P_CLK_SRC2); do { - reg = __raw_readl(S5P_CLKMUX_STAT1); + reg = readl_relaxed(S5P_CLKMUX_STAT1); } while (reg & ((1 << 7) | (1 << 3))); /* * 8. Change divider for MFC and G3D * (200/4=50)->(200/1=200)Mhz */ - reg = __raw_readl(S5P_CLK_DIV2); + reg = readl_relaxed(S5P_CLK_DIV2); reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK); reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) | (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT); - __raw_writel(reg, S5P_CLK_DIV2); + writel_relaxed(reg, S5P_CLK_DIV2); /* For MFC, G3D dividing */ do { - reg = __raw_readl(S5P_CLKDIV_STAT0); + reg = readl_relaxed(S5P_CLKDIV_STAT0); } while (reg & ((1 << 16) | (1 << 17))); /* 9. 
Change MPLL to APLL in MSYS_MUX */ - reg = __raw_readl(S5P_CLK_SRC0); + reg = readl_relaxed(S5P_CLK_SRC0); reg &= ~(S5P_CLKSRC0_MUX200_MASK); reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT); - __raw_writel(reg, S5P_CLK_SRC0); + writel_relaxed(reg, S5P_CLK_SRC0); do { - reg = __raw_readl(S5P_CLKMUX_STAT0); + reg = readl_relaxed(S5P_CLKMUX_STAT0); } while (reg & (0x1 << 18)); /* @@ -446,13 +446,13 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) * and memory refresh parameter should be changed */ if (bus_speed_changing) { - reg = __raw_readl(S5P_CLK_DIV6); + reg = readl_relaxed(S5P_CLK_DIV6); reg &= ~S5P_CLKDIV6_ONEDRAM_MASK; reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT); - __raw_writel(reg, S5P_CLK_DIV6); + writel_relaxed(reg, S5P_CLK_DIV6); do { - reg = __raw_readl(S5P_CLKDIV_STAT1); + reg = readl_relaxed(S5P_CLKDIV_STAT1); } while (reg & (1 << 15)); /* Reconfigure DRAM refresh counter value */ @@ -492,7 +492,7 @@ static int check_mem_type(void __iomem *dmc_reg) { unsigned long val; - val = __raw_readl(dmc_reg + 0x4); + val = readl_relaxed(dmc_reg + 0x4); val = (val & (0xf << 8)); return val >> 8; @@ -537,10 +537,10 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy) } /* Find current refresh counter and frequency each DMC */ - s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000); + s5pv210_dram_conf[0].refresh = (readl_relaxed(dmc_base[0] + 0x30) * 1000); s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk); - s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000); + s5pv210_dram_conf[1].refresh = (readl_relaxed(dmc_base[1] + 0x30) * 1000); s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); policy->suspend_freq = SLEEP_FREQ; diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index e12dc30d8864..f7ca891b5b59 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -20,7 +20,7 @@ #include <asm/opal.h> #include <asm/runlatch.h> -#define MAX_POWERNV_IDLE_STATES 8 +#define POWERNV_THRESHOLD_LATENCY_NS 200000 struct cpuidle_driver powernv_idle_driver = { .name = "powernv_idle", @@ -29,6 +29,9 @@ struct cpuidle_driver powernv_idle_driver = { static int max_idle_state; static struct cpuidle_state *cpuidle_state_table; + +static u64 stop_psscr_table[CPUIDLE_STATE_MAX]; + static u64 snooze_timeout; static bool snooze_timeout_en; @@ -93,16 +96,27 @@ static int fastsleep_loop(struct cpuidle_device *dev, return index; } #endif + +static int stop_loop(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + ppc64_runlatch_off(); + power9_idle_stop(stop_psscr_table[index]); + ppc64_runlatch_on(); + return index; +} + /* * States for dedicated partition case. 
*/ -static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = { +static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = { { /* Snooze */ .name = "snooze", .desc = "snooze", .exit_latency = 0, .target_residency = 0, - .enter = &snooze_loop }, + .enter = snooze_loop }, }; static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n, @@ -168,7 +182,11 @@ static int powernv_add_idle_states(void) struct device_node *power_mgt; int nr_idle_states = 1; /* Snooze */ int dt_idle_states; - u32 *latency_ns, *residency_ns, *flags; + u32 latency_ns[CPUIDLE_STATE_MAX]; + u32 residency_ns[CPUIDLE_STATE_MAX]; + u32 flags[CPUIDLE_STATE_MAX]; + u64 psscr_val[CPUIDLE_STATE_MAX]; + const char *names[CPUIDLE_STATE_MAX]; int i, rc; /* Currently we have snooze statically defined */ @@ -186,26 +204,55 @@ static int powernv_add_idle_states(void) goto out; } - flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL); + /* + * Since snooze is used as first idle state, max idle states allowed is + * CPUIDLE_STATE_MAX - 1 + */ + if (dt_idle_states > CPUIDLE_STATE_MAX - 1) { + pr_warn("cpuidle-powernv: discovered more idle states than allowed\n"); + dt_idle_states = CPUIDLE_STATE_MAX - 1; + } + if (of_property_read_u32_array(power_mgt, "ibm,cpu-idle-state-flags", flags, dt_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); - goto out_free_flags; + goto out; } - latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL); - rc = of_property_read_u32_array(power_mgt, - "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states); - if (rc) { + if (of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-latencies-ns", latency_ns, + dt_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); - goto out_free_latency; + goto out; + } + if (of_property_read_string_array(power_mgt, + "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n"); + goto out; } - residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL); + /* + * If the idle states use stop instruction, probe for psscr values + * which are necessary to specify required stop level. + */ + if (flags[0] & (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP)) + if (of_property_read_u64_array(power_mgt, + "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n"); + goto out; + } + rc = of_property_read_u32_array(power_mgt, "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states); for (i = 0; i < dt_idle_states; i++) { + /* + * If an idle state has exit latency beyond + * POWERNV_THRESHOLD_LATENCY_NS then don't use it + * in cpu-idle. + */ + if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS) + continue; /* * Cpuidle accepts exit_latency and target_residency in us. 
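To make the threshold filter above concrete: the device-tree latencies arrive in nanoseconds, the cutoff POWERNV_THRESHOLD_LATENCY_NS is 200000 (200 us), and cpuidle itself consumes microseconds. A minimal illustration with invented values (only the driver's symbol is real):

	/* Invented DT values, in ns, for illustration only. */
	u32 latency_ns[] = { 50000, 400000 };
	unsigned int i, exit_latency;

	for (i = 0; i < 2; i++) {
		if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
			continue;	/* the 400000 ns (400 us) state is dropped */
		exit_latency = latency_ns[i] / 1000;	/* 50000 ns -> 50 us */
	}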
@@ -217,7 +264,17 @@ static int powernv_add_idle_states(void) strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = 0; powernv_states[nr_idle_states].target_residency = 100; - powernv_states[nr_idle_states].enter = &nap_loop; + powernv_states[nr_idle_states].enter = nap_loop; + } else if ((flags[i] & OPAL_PM_STOP_INST_FAST) && + !(flags[i] & OPAL_PM_TIMEBASE_STOP)) { + strncpy(powernv_states[nr_idle_states].name, + names[i], CPUIDLE_NAME_LEN); + strncpy(powernv_states[nr_idle_states].desc, + names[i], CPUIDLE_NAME_LEN); + powernv_states[nr_idle_states].flags = 0; + + powernv_states[nr_idle_states].enter = stop_loop; + stop_psscr_table[nr_idle_states] = psscr_val[i]; } /* @@ -232,7 +289,17 @@ static int powernv_add_idle_states(void) strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP; powernv_states[nr_idle_states].target_residency = 300000; - powernv_states[nr_idle_states].enter = &fastsleep_loop; + powernv_states[nr_idle_states].enter = fastsleep_loop; + } else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) && + (flags[i] & OPAL_PM_TIMEBASE_STOP)) { + strncpy(powernv_states[nr_idle_states].name, + names[i], CPUIDLE_NAME_LEN); + strncpy(powernv_states[nr_idle_states].desc, + names[i], CPUIDLE_NAME_LEN); + + powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP; + powernv_states[nr_idle_states].enter = stop_loop; + stop_psscr_table[nr_idle_states] = psscr_val[i]; } #endif powernv_states[nr_idle_states].exit_latency = @@ -245,12 +312,6 @@ static int powernv_add_idle_states(void) nr_idle_states++; } - - kfree(residency_ns); -out_free_latency: - kfree(latency_ns); -out_free_flags: - kfree(flags); out: return nr_idle_states; } diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index e373cc6557c6..d64af8625d7e 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -180,10 +180,11 @@ int mv_cesa_queue_req(struct crypto_async_request *req, struct mv_cesa_engine *engine = creq->engine; spin_lock_bh(&engine->lock); - if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ) - mv_cesa_tdma_chain(engine, creq); - ret = crypto_enqueue_request(&engine->queue, req); + if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && + (ret == -EINPROGRESS || + (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + mv_cesa_tdma_chain(engine, creq); spin_unlock_bh(&engine->lock); if (ret != -EINPROGRESS) diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 48df03a06066..d19dc9614e6e 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -139,20 +139,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); struct mv_cesa_req *basereq = &creq->base; - unsigned int ivsize; - int ret; if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ) return mv_cesa_ablkcipher_std_process(ablkreq, status); - ret = mv_cesa_dma_process(basereq, status); - if (ret) - return ret; - - ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)); - memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize); - - return 0; + return mv_cesa_dma_process(basereq, status); } static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) @@ -320,7 +311,6 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, GFP_KERNEL : GFP_ATOMIC; struct 
mv_cesa_req *basereq = &creq->base; struct mv_cesa_ablkcipher_dma_iter iter; - struct mv_cesa_tdma_chain chain; bool skip_ctx = false; int ret; unsigned int ivsize; @@ -347,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, return -ENOMEM; } - mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_tdma_desc_iter_init(&basereq->chain); mv_cesa_ablkcipher_req_iter_init(&iter, req); do { struct mv_cesa_op_ctx *op; - op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags); + op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags); if (IS_ERR(op)) { ret = PTR_ERR(op); goto err_free_tdma; @@ -363,18 +353,18 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, mv_cesa_set_crypt_op_len(op, iter.base.op_len); /* Add input transfers */ - ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base, &iter.src, flags); if (ret) goto err_free_tdma; /* Add dummy desc to launch the crypto operation */ - ret = mv_cesa_dma_add_dummy_launch(&chain, flags); + ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags); if (ret) goto err_free_tdma; /* Add output transfers */ - ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base, &iter.dst, flags); if (ret) goto err_free_tdma; @@ -383,13 +373,12 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, /* Add output data for IV */ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); - ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET, + ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET, ivsize, CESA_TDMA_SRC_IN_SRAM, flags); if (ret) goto err_free_tdma; - basereq->chain = chain; basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ; return 0; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index c35912b4fffb..82e0f4e6eb1c 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -315,12 +315,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req) for (i = 0; i < digsize / 4; i++) creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i)); - if (creq->cache_ptr) - sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, - creq->cache, - creq->cache_ptr, - ahashreq->nbytes - creq->cache_ptr); - if (creq->last_req) { /* * Hardware's MD5 digest is in little endian format, but @@ -365,6 +359,12 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) mv_cesa_ahash_last_cleanup(ahashreq); mv_cesa_ahash_cleanup(ahashreq); + + if (creq->cache_ptr) + sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, + creq->cache, + creq->cache_ptr, + ahashreq->nbytes - creq->cache_ptr); } static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index 89d8208d9851..a83ead109d5f 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -1,7 +1,7 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX - default y + default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. This module supports acceleration for AES and GHASH in hardware. 
If you diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index f688c32fbcc7..31a98dc6f849 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -23,6 +23,7 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/err.h> +#include <linux/cpufeature.h> #include <linux/crypto.h> #include <asm/cputable.h> #include <crypto/internal/hash.h> @@ -45,9 +46,6 @@ int __init p8_init(void) int ret = 0; struct crypto_alg **alg_it; - if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) - return -ENODEV; - for (alg_it = algs; *alg_it; alg_it++) { ret = crypto_register_alg(*alg_it); printk(KERN_INFO "crypto_register_alg '%s' = %d\n", @@ -80,7 +78,7 @@ void __exit p8_exit(void) crypto_unregister_shash(&p8_ghash_alg); } -module_init(p8_init); +module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init); module_exit(p8_exit); MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>"); diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index b891a129b275..803f3953b341 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -211,11 +211,9 @@ int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res, } dax_dev->dev = dev; - rc = devm_add_action(dax_region->dev, unregister_dax_dev, dev); - if (rc) { - unregister_dax_dev(dev); + rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev); + if (rc) return rc; - } return 0; diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 55d510e36cd1..dfb168568af1 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -102,21 +102,19 @@ static int dax_pmem_probe(struct device *dev) if (rc) return rc; - rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); - if (rc) { - dax_pmem_percpu_exit(&dax_pmem->ref); + rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit, + &dax_pmem->ref); + if (rc) return rc; - } addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap); if (IS_ERR(addr)) return PTR_ERR(addr); - rc = devm_add_action(dev, dax_pmem_percpu_kill, &dax_pmem->ref); - if (rc) { - dax_pmem_percpu_kill(&dax_pmem->ref); + rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, + &dax_pmem->ref); + if (rc) return rc; - } nd_region = to_nd_region(dev->parent); dax_region = alloc_dax_region(dev, nd_region->id, &res, diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 9824bc4addf8..25bcfa0b474f 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -1,11 +1,20 @@ menu "DMABUF options" config SYNC_FILE - bool "sync_file support for fences" + bool "Explicit Synchronization Framework" default n select ANON_INODES select DMA_SHARED_BUFFER ---help--- - This option enables the fence framework synchronization to export - sync_files to userspace that can represent one or more fences. + The Sync File Framework adds explicit synchronization via + userspace. It enables sending and receiving 'struct fence' objects + to/from userspace via Sync File fds for synchronization between + drivers via userspace components. It has been ported from Android. + + The first and main user of this is graphics, where a fence is + associated with a buffer. When a job is submitted to the GPU a fence + is attached to the buffer and is transferred via userspace, using Sync + File fds, to the DRM driver for example. More details at + Documentation/sync_file.txt. 
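As a hedged sketch of what the framework enables on the kernel side: a driver wraps one of its fences in a sync_file and hands the fd to userspace. sync_file_create() is from this series; the helper name, the assumption that struct sync_file keeps its backing file in sync_file->file, and the error handling are illustrative only.

static int export_fence_as_fd(struct fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	/* Reserve an fd first so a failure cannot leave a half-installed file. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);	/* NULL on allocation failure */
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/* Userspace can now pass this fd to another driver, or poll() it. */
	fd_install(fd, sync_file->file);
	return fd;
}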
+ endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 4a424eca75ed..f353db213a81 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,2 +1,2 @@ -obj-y := dma-buf.o fence.o reservation.o seqno-fence.o +obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o obj-$(CONFIG_SYNC_FILE) += sync_file.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 6355ab38d630..ddaee60ae52a 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) struct reservation_object *resv = exp_info->resv; struct file *file; size_t alloc_size = sizeof(struct dma_buf); + int ret; if (!exp_info->resv) alloc_size += sizeof(struct reservation_object); @@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf = kzalloc(alloc_size, GFP_KERNEL); if (!dmabuf) { - module_put(exp_info->owner); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_module; } dmabuf->priv = exp_info->priv; @@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, exp_info->flags); if (IS_ERR(file)) { - kfree(dmabuf); - return ERR_CAST(file); + ret = PTR_ERR(file); + goto err_dmabuf; } file->f_mode |= FMODE_LSEEK; @@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) mutex_unlock(&db_list.lock); return dmabuf; + +err_dmabuf: + kfree(dmabuf); +err_module: + module_put(exp_info->owner); + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(dma_buf_export); @@ -824,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) EXPORT_SYMBOL_GPL(dma_buf_vunmap); #ifdef CONFIG_DEBUG_FS -static int dma_buf_describe(struct seq_file *s) +static int dma_buf_debug_show(struct seq_file *s, void *unused) { int ret; struct dma_buf *buf_obj; @@ -879,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s) return 0; } -static int dma_buf_show(struct seq_file *s, void *unused) -{ - void (*func)(struct seq_file *) = s->private; - - func(s); - return 0; -} - static int dma_buf_debug_open(struct inode *inode, struct file *file) { - return single_open(file, dma_buf_show, inode->i_private); + return single_open(file, dma_buf_debug_show, NULL); } static const struct file_operations dma_buf_debug_fops = { @@ -903,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir; static int dma_buf_init_debugfs(void) { + struct dentry *d; int err = 0; - dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); + d = debugfs_create_dir("dma_buf", NULL); + if (IS_ERR(d)) + return PTR_ERR(d); - if (IS_ERR(dma_buf_debugfs_dir)) { - err = PTR_ERR(dma_buf_debugfs_dir); - dma_buf_debugfs_dir = NULL; - return err; - } - - err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); + dma_buf_debugfs_dir = d; - if (err) + d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, + NULL, &dma_buf_debug_fops); + if (IS_ERR(d)) { pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); + debugfs_remove_recursive(dma_buf_debugfs_dir); + dma_buf_debugfs_dir = NULL; + err = PTR_ERR(d); + } return err; } @@ -926,17 +928,6 @@ static void dma_buf_uninit_debugfs(void) if (dma_buf_debugfs_dir) debugfs_remove_recursive(dma_buf_debugfs_dir); } - -int dma_buf_debugfs_create_file(const char *name, - int (*write)(struct seq_file *)) -{ - struct dentry *d; - - d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, - 
write, &dma_buf_debug_fops); - - return PTR_ERR_OR_ZERO(d); -} #else static inline int dma_buf_init_debugfs(void) { diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c new file mode 100644 index 000000000000..a8731c853da6 --- /dev/null +++ b/drivers/dma-buf/fence-array.c @@ -0,0 +1,144 @@ +/* + * fence-array: aggregate fences to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/fence-array.h> + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb); + +static const char *fence_array_get_driver_name(struct fence *fence) +{ + return "fence_array"; +} + +static const char *fence_array_get_timeline_name(struct fence *fence) +{ + return "unbound"; +} + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) +{ + struct fence_array_cb *array_cb = + container_of(cb, struct fence_array_cb, cb); + struct fence_array *array = array_cb->array; + + if (atomic_dec_and_test(&array->num_pending)) + fence_signal(&array->base); + fence_put(&array->base); +} + +static bool fence_array_enable_signaling(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + struct fence_array_cb *cb = (void *)(&array[1]); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) { + cb[i].array = array; + /* + * As we may report that the fence is signaled before all + * callbacks are complete, we need to take an additional + * reference count on the array so that we do not free it too + * early. The core fence handling will only hold the reference + * until we signal the array as complete (but that is now + * insufficient). 
+ */ + fence_get(&array->base); + if (fence_add_callback(array->fences[i], &cb[i].cb, + fence_array_cb_func)) { + fence_put(&array->base); + if (atomic_dec_and_test(&array->num_pending)) + return false; + } + } + + return true; +} + +static bool fence_array_signaled(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + + return atomic_read(&array->num_pending) <= 0; +} + +static void fence_array_release(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + fence_put(array->fences[i]); + + kfree(array->fences); + fence_free(fence); +} + +const struct fence_ops fence_array_ops = { + .get_driver_name = fence_array_get_driver_name, + .get_timeline_name = fence_array_get_timeline_name, + .enable_signaling = fence_array_enable_signaling, + .signaled = fence_array_signaled, + .wait = fence_default_wait, + .release = fence_array_release, +}; + +/** + * fence_array_create - Create a custom fence array + * @num_fences: [in] number of fences to add in the array + * @fences: [in] array containing the fences + * @context: [in] fence context to use + * @seqno: [in] sequence number to use + * @signal_on_any: [in] signal on any fence in the array + * + * Allocate a fence_array object and initialize the base fence with fence_init(). + * In case of error it returns NULL. + * + * The caller should allocate the fences array with num_fences size + * and fill it with the fences it wants to add to the object. Ownership of this + * array is taken and fence_put() is used on each fence on release. + * + * If @signal_on_any is true the fence array signals if any fence in the array + * signals, otherwise it signals when all fences in the array signal. + */ +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) +{ + struct fence_array *array; + size_t size = sizeof(*array); + + /* Allocate the callback structures behind the array. */ + size += num_fences * sizeof(struct fence_array_cb); + array = kzalloc(size, GFP_KERNEL); + if (!array) + return NULL; + + spin_lock_init(&array->lock); + fence_init(&array->base, &fence_array_ops, &array->lock, + context, seqno); + + array->num_fences = num_fences; + atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); + array->fences = fences; + + return array; +} +EXPORT_SYMBOL(fence_array_create);
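The kernel-doc above implies the calling convention; here is a hedged usage sketch (the helper name and error path are mine, the API calls are from this series) that merges two fences into an array signaling only once both have signaled:

static struct fence_array *merge_two_fences(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	/* signal_on_any == false: signal only when all fences signal */
	array = fence_array_create(2, fences, fence_context_alloc(1), 1, false);
	if (!array) {
		fence_put(a);
		fence_put(b);
		kfree(fences);
	}
	return array;	/* on success the fence_array owns 'fences' */
}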
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c index 7b05dbe9b296..4d51f9e83fa8 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/fence.c @@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit); * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ -static atomic_t fence_context_counter = ATOMIC_INIT(0); +static atomic64_t fence_context_counter = ATOMIC64_INIT(0); /** * fence_context_alloc - allocate an array of fence contexts @@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0); * This function will return the first index of the number of fences allocated. * The fence context is used for setting fence->context to a unique number. */ -unsigned fence_context_alloc(unsigned num) +u64 fence_context_alloc(unsigned num) { BUG_ON(!num); - return atomic_add_return(num, &fence_context_counter) - num; + return atomic64_add_return(num, &fence_context_counter) - num; } EXPORT_SYMBOL(fence_context_alloc); @@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout); */ void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, unsigned context, unsigned seqno) + spinlock_t *lock, u64 context, unsigned seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->wait || !ops->enable_signaling || diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index f08cf2d8309e..9aaa608dfe01 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence) sync_file->num_fences = 1; atomic_set(&sync_file->status, 1); - snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d", + snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, fence->seqno); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 8c98779a12b1..739f797b40d9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -339,6 +339,20 @@ config MV_XOR ---help--- Enable support for the Marvell XOR engine. +config MV_XOR_V2 + bool "Marvell XOR engine version 2 support" + depends on ARM64 + select DMA_ENGINE + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + select GENERIC_MSI_IRQ_DOMAIN + ---help--- + Enable support for the Marvell version 2 XOR engine. + + This engine provides acceleration for copy, XOR and RAID6 + operations, and is available on Marvell Armada 7K and 8K + platforms. + config MXS_DMA bool "MXS DMA support" depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL @@ -519,19 +533,31 @@ config XGENE_DMA help Enable support for the APM X-Gene SoC DMA engine. -config XILINX_VDMA - tristate "Xilinx AXI VDMA Engine" +config XILINX_DMA + tristate "Xilinx AXI DMA Engine" depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) select DMA_ENGINE help Enable support for Xilinx AXI VDMA Soft IP. - This engine provides high-bandwidth direct memory access + AXI VDMA engine provides high-bandwidth direct memory access between memory and AXI4-Stream video type target peripherals including peripherals which support AXI4-Stream Video Protocol. It has two stream interfaces/channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. + AXI CDMA engine provides high-bandwidth direct memory access + between a memory-mapped source address and a memory-mapped + destination address. + AXI DMA engine provides high-bandwidth one dimensional direct + memory access between memory and AXI4-Stream target peripherals. + +config XILINX_ZYNQMP_DMA + tristate "Xilinx ZynqMP DMA Engine" + depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) + select DMA_ENGINE + help + Enable support for Xilinx ZynqMP DMA controller. 
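For orientation, a sketch of a config fragment that would enable the engines added above (symbol names are from the entries themselves; MV_XOR_V2 is a bool, so it cannot be built as a module):

CONFIG_MV_XOR_V2=y
CONFIG_XILINX_DMA=m
CONFIG_XILINX_ZYNQMP_DMA=m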
config ZX_DMA tristate "ZTE ZX296702 DMA support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 614f28b0b739..e4dc9cac7ee8 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o +obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 81db1c4811ce..939a7c31f760 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { pl08x_free_txd(pl08x, txd); - dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", - __func__); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); @@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, */ for (i = 0; i < channels; i++) { chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(&pl08x->adev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; @@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { - dev_err(&adev->dev, "%s failed to allocate " - "physical channel holders\n", - __func__); ret = -ENOMEM; goto out_no_phychans; } diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 75bd6621dc5d..e434ffe7bc5c 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } -void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) { memset(&desc->lld, 0, sizeof(desc->lld)); INIT_LIST_HEAD(&desc->descs_list); @@ -1195,14 +1195,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6149b27c33ad..e18dc596cf24 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg( unsigned int sg_len) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); - size_t max_len = bcm2835_dma_max_frame_length(c); - unsigned int i, len; + size_t len, max_len; + unsigned int i; dma_addr_t addr; struct scatterlist *sgent; + max_len = bcm2835_dma_max_frame_length(c); for_each_sg(sgl, sgent, sg_len, i) { for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); len > 0; @@ -613,7 +614,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&c->vc.lock, 
flags); } -struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) { diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 180fedb418cc..7ce843723003 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op) /* Get a clean struct */ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); if (!bcom_eng) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't allocate state structure\n"); rv = -ENOMEM; goto error_sramclean; } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index c340ca9bd2b5..e4acd63e42aa 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = { COH901318_CX_CTRL_DDMA_LEGACY | \ COH901318_CX_CTRL_PRDD_SOURCE) -const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { +static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { { .number = U300_DMA_MSL_TX_0, .name = "MSL TX 0", @@ -1280,6 +1280,7 @@ struct coh901318_desc { struct coh901318_base { struct device *dev; void __iomem *virtbase; + unsigned int irq; struct coh901318_pool pool; struct powersave pm; struct dma_device dma_slave; @@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, } static const struct file_operations coh901318_debugfs_status_operations = { - .owner = THIS_MODULE, .open = simple_open, .read = coh901318_debugfs_read, .llseek = default_llseek, @@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !txstate) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) return err; + base->irq = irq; + err = coh901318_pool_create(&base->pool, &pdev->dev, sizeof(struct coh901318_lli), 32); @@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev) coh901318_pool_destroy(&base->pool); return err; } +static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans) +{ + int chans_i; + int i = 0; + struct coh901318_chan *cohc; + + for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { + for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { + cohc = &base->chans[i]; + + tasklet_kill(&cohc->tasklet); + } + } + +} static int coh901318_remove(struct platform_device *pdev) { struct coh901318_base *base = platform_get_drvdata(pdev); + devm_free_irq(&pdev->dev, base->irq, base); + + coh901318_base_remove(base, dma_slave_channels); + coh901318_base_remove(base, dma_memcpy_channels); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&base->dma_memcpy); dma_async_device_unregister(&base->dma_slave); @@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = { }, }; -int __init coh901318_init(void) +static int __init coh901318_init(void) { return platform_driver_probe(&coh901318_driver, coh901318_probe); } subsys_initcall(coh901318_init); -void __exit coh901318_exit(void) +static void __exit coh901318_exit(void) { platform_driver_unregister(&coh901318_driver); } diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 
ceedafbd23e0..4b2317426c8e 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; - unsigned int num; - num = 0; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { u32 addr; u32 len; /* We need to use more than one desc once musb supports sg */ - BUG_ON(num > 0); addr = lower_32_bits(sg_dma_address(sg)); len = sg_dma_len(sg); diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index c3468094393e..7f0b9aa15867 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) unsigned int pending; pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); + if (!pending) + return IRQ_NONE; + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending); spin_lock(&dmac->chan.vchan.lock); @@ -579,7 +582,9 @@ static int axi_dmac_probe(struct platform_device *pdev) return -ENOMEM; dmac->irq = platform_get_irq(pdev, 0); - if (dmac->irq <= 0) + if (dmac->irq < 0) + return dmac->irq; + if (dmac->irq == 0) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -683,6 +688,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = { { .compatible = "adi,axi-dmac-1.00.a" }, { }, }; +MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table); static struct platform_driver axi_dmac_driver = { .driver = { diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 7638b24ce8d0..9689b36c005a 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -573,12 +573,26 @@ err_unregister: return ret; } +static void jz4740_cleanup_vchan(struct dma_device *dmadev) +{ + struct jz4740_dmaengine_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + + static int jz4740_dma_remove(struct platform_device *pdev) { struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, dmadev); + + jz4740_cleanup_vchan(&dmadev->ddev); dma_async_device_unregister(&dmadev->ddev); clk_disable_unprepare(dmadev->clk); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b8576fd6bd0e..1245db5438e1 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); +static unsigned int sg_buffers = 1; +module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(sg_buffers, + "Number of scatter gather buffers (default: 1)"); + +static unsigned int dmatest = 1; +module_param(dmatest, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dmatest, + "dmatest 0-memcpy 1-slave_sg (default: 1)"); + static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(xor_sources, @@ -431,6 +441,8 @@ static int dmatest_func(void *data) dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; + else if (thread->type == DMA_SG) + src_cnt = dst_cnt = sg_buffers; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); @@ -485,6 +497,8 @@ static int dmatest_func(void *data) dma_addr_t *dsts; unsigned int src_off, dst_off, len; u8 align = 0; + struct scatterlist tx_sg[src_cnt]; + struct 
scatterlist rx_sg[src_cnt]; total_tests++; @@ -577,10 +591,22 @@ static int dmatest_func(void *data) um->bidi_cnt++; } + sg_init_table(tx_sg, src_cnt); + sg_init_table(rx_sg, src_cnt); + for (i = 0; i < src_cnt; i++) { + sg_dma_address(&rx_sg[i]) = srcs[i]; + sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off; + sg_dma_len(&tx_sg[i]) = len; + sg_dma_len(&rx_sg[i]) = len; + } + if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dsts[0] + dst_off, srcs[0], len, flags); + else if (thread->type == DMA_SG) + tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt, + rx_sg, src_cnt, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dsts[0] + dst_off, @@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info, if (type == DMA_MEMCPY) op = "copy"; + else if (type == DMA_SG) + op = "sg"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) @@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info, INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); - thread_count += cnt > 0 ? cnt : 0; + if (dmatest == 0) { + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); + thread_count += cnt > 0 ? cnt : 0; + } } + + if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) { + if (dmatest == 1) { + cnt = dmatest_add_threads(info, dtc, DMA_SG); + thread_count += cnt > 0 ? cnt : 0; + } + } + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; @@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info) request_channels(info, DMA_MEMCPY); request_channels(info, DMA_XOR); + request_channels(info, DMA_SG); request_channels(info, DMA_PQ); } diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 8181ed131386..3d277fa76c1a 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -239,6 +239,9 @@ struct edma_cc { bool chmap_exist; enum dma_event_q default_queue; + unsigned int ccint; + unsigned int ccerrint; + /* * The slot_inuse bit for each PaRAM slot is clear unless the slot is * in use by Linux or if it is allocated to be used by DSP. @@ -1069,10 +1072,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->pset_nr = sg_len; edesc->residue = 0; @@ -1114,14 +1115,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edesc->absync = ret; edesc->residue += sg_dma_len(sg); - /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ - if (!((i+1) % MAX_NR_SG)) - edesc->pset[i].param.opt |= TCINTEN; - - /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) + /* Enable completion interrupt */ edesc->pset[i].param.opt |= TCINTEN; + else if (!((i+1) % MAX_NR_SG)) + /* + * Enable early completion interrupt for the + * intermediate set. In this case the driver will be + * notified when the PaRAM set is submitted to TC. This + * will allow more time to set up the next set of slots. 
+ */ + edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); } edesc->residue_stat = edesc->residue; @@ -1173,10 +1177,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_dbg(dev, "Failed to allocate a descriptor\n"); + if (!edesc) return NULL; - } edesc->pset_nr = nslots; edesc->residue = edesc->residue_stat = len; @@ -1298,10 +1300,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->cyclic = 1; edesc->pset_nr = nslots; @@ -2207,10 +2207,8 @@ static int edma_probe(struct platform_device *pdev) return ret; ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); - if (!ecc) { - dev_err(dev, "Can't allocate controller\n"); + if (!ecc) return -ENOMEM; - } ecc->dev = dev; ecc->id = pdev->id; @@ -2288,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccint = irq; } irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); @@ -2303,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccerrint = irq; } ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); @@ -2393,11 +2393,27 @@ err_reg1: return ret; } +static void edma_cleanup_vchan(struct dma_device *dmadev) +{ + struct edma_chan *echan, *_echan; + + list_for_each_entry_safe(echan, _echan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&echan->vchan.chan.device_node); + tasklet_kill(&echan->vchan.task); + } +} + static int edma_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct edma_cc *ecc = dev_get_drvdata(dev); + devm_free_irq(dev, ecc->ccint, ecc); + devm_free_irq(dev, ecc->ccerrint, ecc); + + edma_cleanup_vchan(&ecc->dma_slave); + if (dev->of_node) of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index be2e62b87948..6775f2c74e25 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -852,6 +852,25 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma return 0; } +static void fsl_edma_irq_exit( + struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) +{ + if (fsl_edma->txirq == fsl_edma->errirq) { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + } else { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); + } +} + +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) +{ + int i; + + for (i = 0; i < DMAMUX_NR; i++) + clk_disable_unprepare(fsl_edma->muxclk[i]); +} + static int fsl_edma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -897,6 +916,10 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = clk_prepare_enable(fsl_edma->muxclk[i]); if (ret) { + /* disable only clks which were enabled on error */ + for (; i >= 0; i--) + clk_disable_unprepare(fsl_edma->muxclk[i]); + dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); return ret; } @@ -951,14 +974,18 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = dma_async_device_register(&fsl_edma->dma_dev); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n"); + 
dev_err(&pdev->dev, + "Can't register Freescale eDMA engine. (%d)\n", ret); + fsl_disable_clocks(fsl_edma); return ret; } ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n"); + dev_err(&pdev->dev, + "Can't register Freescale eDMA of_dma. (%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_disable_clocks(fsl_edma); return ret; } @@ -968,17 +995,27 @@ static int fsl_edma_probe(struct platform_device *pdev) return 0; } +static void fsl_edma_cleanup_vchan(struct dma_device *dmadev) +{ + struct fsl_edma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + static int fsl_edma_remove(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); - int i; + fsl_edma_irq_exit(pdev, fsl_edma); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - - for (i = 0; i < DMAMUX_NR; i++) - clk_disable_unprepare(fsl_edma->muxclk[i]); + fsl_disable_clocks(fsl_edma); return 0; } diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 4d9470f16552..aad167eaaee8 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "genq tx length %lu, max length %d\n", + dev_err(re_chan->dev, "genq tx length %zu, max length %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev) static void fsl_re_remove_chan(struct fsl_re_chan *chan) { + tasklet_kill(&chan->irqtask); + dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, chan->inb_phys_addr); @@ -890,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = { static struct platform_driver fsl_re_driver = { .driver = { .name = "fsl-raideng", - .owner = THIS_MODULE, .of_match_table = fsl_re_ids, }, .probe = fsl_re_probe, diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index a8828ed639b3..911b7177eb50 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, /* alloc channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(fdev->dev, "no free memory for DMA channels!\n"); err = -ENOMEM; goto out_return; } @@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op) fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { - dev_err(&op->dev, "No enough memory for 'priv'\n"); err = -ENOMEM; goto 
out_return; } diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 48d85f8b95fe..a960608c0a4d 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -167,6 +167,7 @@ struct imxdma_channel { u32 ccr_to_device; bool enabled_2d; int slot_2d; + unsigned int irq; }; enum imx_dma_type { @@ -186,6 +187,9 @@ struct imxdma_engine { struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; enum imx_dma_type devtype; + unsigned int irq; + unsigned int irq_err; + }; struct imxdma_filter_data { @@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, } static int __init imxdma_probe(struct platform_device *pdev) - { +{ struct imxdma_engine *imxdma; struct resource *res; const struct of_device_id *of_id; @@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq = irq; irq_err = platform_get_irq(pdev, 1); if (irq_err < 0) { @@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq_err = irq_err; } /* enable DMA module */ @@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev) irq + i, i); goto disable_dma_ahb_clk; } + + imxdmac->irq = irq + i; init_timer(&imxdmac->watchdog); imxdmac->watchdog.function = &imxdma_watchdog; imxdmac->watchdog.data = (unsigned long)imxdmac; @@ -1217,10 +1225,31 @@ disable_dma_ipg_clk: return ret; } +static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma) +{ + int i; + + if (is_imx1_dma(imxdma)) { + disable_irq(imxdma->irq); + disable_irq(imxdma->irq_err); + } + + for (i = 0; i < IMX_DMA_CHANNELS; i++) { + struct imxdma_channel *imxdmac = &imxdma->channel[i]; + + if (!is_imx1_dma(imxdma)) + disable_irq(imxdmac->irq); + + tasklet_kill(&imxdmac->dma_tasklet); + } +} + static int imxdma_remove(struct platform_device *pdev) { struct imxdma_engine *imxdma = platform_get_drvdata(pdev); + imxdma_free_irq(pdev, imxdma); + dma_async_device_unregister(&imxdma->dma_device); if (pdev->dev.of_node) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0f6fd42f55ca..03ec76fc22ff 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -18,6 +18,7 @@ */ #include <linux/init.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/types.h> #include <linux/bitops.h> @@ -385,6 +386,7 @@ struct sdma_engine { const struct sdma_driver_data *drvdata; u32 spba_start_addr; u32 spba_end_addr; + unsigned int irq; }; static struct sdma_driver_data sdma_imx31 = { @@ -571,28 +573,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) static int sdma_run_channel0(struct sdma_engine *sdma) { int ret; - unsigned long timeout = 500; + u32 reg; sdma_enable_channel(sdma, 0); - while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { - if (timeout-- <= 0) - break; - udelay(1); - } - - if (ret) { - /* Clear the interrupt status */ - writel_relaxed(ret, sdma->regs + SDMA_H_INTR); - } else { + ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP, + reg, !(reg & 1), 1, 500); + if (ret) dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); - } /* Set bits of CONFIG register with dynamic context switching */ if (readl(sdma->regs + SDMA_H_CONFIG) == 0) writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); - return ret ? 
0 : -ETIMEDOUT; + return ret; } static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, @@ -727,9 +721,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) unsigned long stat; stat = readl_relaxed(sdma->regs + SDMA_H_INTR); - /* not interested in channel 0 interrupts */ - stat &= ~1; writel_relaxed(stat, sdma->regs + SDMA_H_INTR); + /* channel 0 is special and not handled here, see run_channel0() */ + stat &= ~1; while (stat) { int channel = fls(stat) - 1; @@ -758,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac, * These are needed once we start to support transfers between * two peripherals or memory-to-memory transfers */ - int per_2_per = 0, emi_2_emi = 0; + int per_2_per = 0; sdmac->pc_from_device = 0; sdmac->pc_to_device = 0; @@ -766,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac, switch (peripheral_type) { case IMX_DMATYPE_MEMORY: - emi_2_emi = sdma->script_addrs->ap_2_ap_addr; break; case IMX_DMATYPE_DSP: emi_2_per = sdma->script_addrs->bp_2_ap_addr; @@ -999,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan) } else __set_bit(sdmac->event_id0, sdmac->event_mask); - /* Watermark Level */ - sdmac->watermark_level |= sdmac->watermark_level; /* Address */ sdmac->shp_addr = sdmac->per_address; sdmac->per_addr = sdmac->per_address2; @@ -1715,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev) if (ret) return ret; + sdma->irq = irq; + sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); if (!sdma->script_addrs) return -ENOMEM; @@ -1840,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev) struct sdma_engine *sdma = platform_get_drvdata(pdev); int i; + devm_free_irq(&pdev->dev, sdma->irq, sdma); dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); /* Kill the tasklet */ diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index d406056e8892..7145f7716a92 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev) ioat_disable_interrupts(ioat_dma); } -void ioat_resume(struct ioatdma_device *ioat_dma) +static void ioat_resume(struct ioatdma_device *ioat_dma) { struct ioatdma_chan *ioat_chan; u32 chanerr; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 1ba2fd73852d..39de8980128c 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -102,6 +102,7 @@ struct k3_dma_dev { struct clk *clk; u32 dma_channels; u32 dma_requests; + unsigned int irq; }; #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) @@ -425,10 +426,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( num = DIV_ROUND_UP(len, DMA_MAX_SIZE); ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->size = len; ds->desc_num = num; @@ -481,10 +481,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( } ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->desc_num = num; num = 0; @@ -705,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op) if (ret) return ret; + d->irq = irq; + /* init phy channel */ d->phy = devm_kzalloc(&op->dev, d->dma_channels * sizeof(struct 
k3_dma_phy), GFP_KERNEL); @@ -759,7 +760,7 @@ static int k3_dma_probe(struct platform_device *op) ret = dma_async_device_register(&d->slave); if (ret) - return ret; + goto dma_async_register_fail; ret = of_dma_controller_register((&op->dev)->of_node, k3_of_dma_simple_xlate, d); @@ -776,6 +777,8 @@ static int k3_dma_probe(struct platform_device *op) of_dma_register_fail: dma_async_device_unregister(&d->slave); +dma_async_register_fail: + clk_disable_unprepare(d->clk); return ret; } @@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op) dma_async_device_unregister(&d->slave); of_dma_controller_free((&op->dev)->of_node); + devm_free_irq(&op->dev, d->irq, d); + list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { list_del(&c->vc.chan.device_node); tasklet_kill(&c->vc.task); diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 56f1fd68b620..f4b25fb0d040 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data) static int mmp_pdma_remove(struct platform_device *op) { struct mmp_pdma_device *pdev = platform_get_drvdata(op); + struct mmp_pdma_phy *phy; + int i, irq = 0, irq_num = 0; + + + for (i = 0; i < pdev->dma_channels; i++) { + if (platform_get_irq(op, i) > 0) + irq_num++; + } + + if (irq_num != pdev->dma_channels) { + irq = platform_get_irq(op, 0); + devm_free_irq(&op->dev, irq, pdev); + } else { + for (i = 0; i < pdev->dma_channels; i++) { + phy = &pdev->phy[i]; + irq = platform_get_irq(op, i); + devm_free_irq(&op->dev, irq, phy); + } + } dma_async_device_unregister(&pdev->device); return 0; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 3df0422607d5..b3441f57a364 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan) return; } -struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) +static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) { struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); @@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, /* alloc channel */ tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); - if (!tdmac) { - dev_err(tdev->dev, "no free memory for DMA channels!\n"); + if (!tdmac) return -ENOMEM; - } + if (irq) tdmac->irq = irq; tdmac->dev = tdev->dev; @@ -593,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) return true; } -struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, +static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct mmp_tdma_device *tdev = ofdma->of_dma_data; diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 631c4435e075..a6e642792e5a 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -148,6 +148,7 @@ struct moxart_chan { struct moxart_dmadev { struct dma_device dma_slave; struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL]; + unsigned int irq; }; struct moxart_filter_data { @@ -574,10 +575,8 @@ static int moxart_probe(struct platform_device *pdev) struct moxart_dmadev *mdc; mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); - if (!mdc) { - dev_err(dev, "can't allocate DMA container\n"); + if (!mdc) return -ENOMEM; - } irq = irq_of_parse_and_map(node, 0); if (irq == NO_IRQ) { @@ -617,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev) dev_err(dev, 
"devm_request_irq failed\n"); return ret; } + mdc->irq = irq; ret = dma_async_device_register(&mdc->dma_slave); if (ret) { @@ -640,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev) { struct moxart_dmadev *m = platform_get_drvdata(pdev); + devm_free_irq(&pdev->dev, m->irq, m); + dma_async_device_unregister(&m->dma_slave); if (pdev->dev.of_node) diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index ccadafa51d5e..fa86592c7ae1 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op) } free_irq(mdma->irq, mdma); irq_dispose_mapping(mdma->irq); + tasklet_kill(&mdma->tasklet); return 0; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index d0446a75990a..f4c9f98ec35e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1057,7 +1057,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, err_free_irq: free_irq(mv_chan->irq, mv_chan); - err_free_dma: +err_free_dma: dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); return ERR_PTR(ret); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c new file mode 100644 index 000000000000..a28a01fcba67 --- /dev/null +++ b/drivers/dma/mv_xor_v2.c @@ -0,0 +1,878 @@ +/* + * Copyright (C) 2015-2016 Marvell International Ltd. + + * This program is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include "dmaengine.h" + +/* DMA Engine Registers */ +#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000 +#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004 +#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008 +#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0 +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16 +#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F +#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C +#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014 +#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018 +#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF +#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0 +#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C + /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */ +#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16 +#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050 +#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054 +#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100 +#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1 +#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7 +#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800 +#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804 +#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808 + +/* XOR Global registers */ +#define MV_XOR_V2_GLOB_BW_CTRL 0x4 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_PAUSE 0x014 +#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8 +#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200 +#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204 +#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220 +#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224 + +#define MV_XOR_V2_MIN_DESC_SIZE 32 +#define MV_XOR_V2_EXT_DESC_SIZE 128 + +#define MV_XOR_V2_DESC_RESERVED_SIZE 12 +#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12 + +#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8 + +/* + * Descriptors queue size. With 32 bytes descriptors, up to 2^14 + * descriptors are allowed, with 128 bytes descriptors, up to 2^12 + * descriptors are allowed. This driver uses 128 bytes descriptors, + * but experimentation has shown that a set of 1024 descriptors is + * sufficient to reach a good level of performance. + */ +#define MV_XOR_V2_DESC_NUM 1024 + +/** + * struct mv_xor_v2_descriptor - DMA HW descriptor + * @desc_id: used by S/W and is not affected by H/W. 
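+ *	(the driver stores the SW descriptor index here, so the
+ *	completion tasklet can map a finished HW descriptor back to
+ *	its SW descriptor)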
+ * @flags: error and status flags
+ * @crc32_result: CRC32 calculation result
+ * @desc_ctrl: operation mode and control flags
+ * @buff_size: number of bytes to be processed
+ * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
+ * AW-Attributes
+ * @data_buff_addr: Source (and might be RAID6 destination)
+ * addresses of data buffers in RAID5 and RAID6
+ * @reserved: reserved
+ */
+struct mv_xor_v2_descriptor {
+	u16 desc_id;
+	u16 flags;
+	u32 crc32_result;
+	u32 desc_ctrl;
+
+	/* Definitions for desc_ctrl */
+#define DESC_NUM_ACTIVE_D_BUF_SHIFT	22
+#define DESC_OP_MODE_SHIFT		28
+#define DESC_OP_MODE_NOP		0	/* Idle operation */
+#define DESC_OP_MODE_MEMCPY		1	/* Pure-DMA operation */
+#define DESC_OP_MODE_MEMSET		2	/* Mem-Fill operation */
+#define DESC_OP_MODE_MEMINIT		3	/* Mem-Init operation */
+#define DESC_OP_MODE_MEM_COMPARE	4	/* Mem-Compare operation */
+#define DESC_OP_MODE_CRC32		5	/* CRC32 calculation */
+#define DESC_OP_MODE_XOR		6	/* RAID5 (XOR) operation */
+#define DESC_OP_MODE_RAID6		7	/* RAID6 P&Q-generation */
+#define DESC_OP_MODE_RAID6_REC		8	/* RAID6 Recovery */
+#define DESC_Q_BUFFER_ENABLE		BIT(16)
+#define DESC_P_BUFFER_ENABLE		BIT(17)
+#define DESC_IOD			BIT(27)
+
+	u32 buff_size;
+	u32 fill_pattern_src_addr[4];
+	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
+	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
+};
+
+/**
+ * struct mv_xor_v2_device - implements a xor device
+ * @lock: lock for the engine
+ * @dma_base: memory mapped DMA register base
+ * @glob_base: memory mapped global register base
+ * @clk: reference to the XOR engine clock
+ * @irq_tasklet: tasklet that handles descriptor completion callbacks
+ * @free_sw_desc: linked list of free SW descriptors
+ * @dmadev: dma device
+ * @dmachan: dma channel
+ * @hw_desq: HW descriptors queue
+ * @hw_desq_virt: virtual address of DESCQ
+ * @sw_desq: SW descriptors queue
+ * @desc_size: HW descriptor size
+ * @npendings: number of pending descriptors (for which tx_submit has
+ * been called, but not yet issue_pending)
+ */
+struct mv_xor_v2_device {
+	spinlock_t lock;
+	void __iomem *dma_base;
+	void __iomem *glob_base;
+	struct clk *clk;
+	struct tasklet_struct irq_tasklet;
+	struct list_head free_sw_desc;
+	struct dma_device dmadev;
+	struct dma_chan dmachan;
+	dma_addr_t hw_desq;
+	struct mv_xor_v2_descriptor *hw_desq_virt;
+	struct mv_xor_v2_sw_desc *sw_desq;
+	int desc_size;
+	unsigned int npendings;
+};
+
+/**
+ * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
+ * @idx: descriptor index
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @free_list: node of the free SW descriptors list
+ */
+struct mv_xor_v2_sw_desc {
+	int idx;
+	struct dma_async_tx_descriptor async_tx;
+	struct mv_xor_v2_descriptor hw_desc;
+	struct list_head free_list;
+};
+
+/*
+ * Fill the data buffers into a HW descriptor
+ */
+static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
+					struct mv_xor_v2_descriptor *desc,
+					dma_addr_t src, int index)
+{
+	int arr_index = ((index >> 1) * 3);
+
+	/*
+	 * Fill the buffer's addresses to the descriptor.
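+	 * Packing two 48-bit addresses into three 32-bit words (rather
+	 * than four) is what lets MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF (8)
+	 * buffers fit in the 12-word data_buff_addr array.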
+	 *
+	 * The format of the buffer addresses for two sequential buffers
+	 * X and X + 1:
+	 *
+	 *  First word:  Buffer-DX-Address-Low[31:0]
+	 *  Second word: Buffer-DX+1-Address-Low[31:0]
+	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
+	 *               DX-Buffer-Address-High[47:32] [15:0]
+	 */
+	if ((index & 0x1) == 0) {
+		desc->data_buff_addr[arr_index] = lower_32_bits(src);
+
+		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
+		desc->data_buff_addr[arr_index + 2] |=
+			upper_32_bits(src) & 0xFFFF;
+	} else {
+		desc->data_buff_addr[arr_index + 1] =
+			lower_32_bits(src);
+
+		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
+		desc->data_buff_addr[arr_index + 2] |=
+			(upper_32_bits(src) & 0xFFFF) << 16;
+	}
+}
+
+/*
+ * Return the next available index in the DESQ.
+ */
+static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+{
+	/* read the index for the next available descriptor in the DESQ */
+	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+
+	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+}
+
+/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+				       int num_of_desc)
+{
+	/* write the number of new descriptors in the DESQ. */
+	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
+}
+
+/*
+ * free HW descriptors
+ */
+static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
+					  int num_of_desc)
+{
+	/* write the number of descriptors to free from the DESQ. */
+	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
+}
+
+/*
+ * Set descriptor size
+ * Return the HW descriptor size in bytes
+ */
+static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
+{
+	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
+
+	return MV_XOR_V2_EXT_DESC_SIZE;
+}
+
+/*
+ * Set the IMSG threshold
+ */
+static inline
+void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+{
+	u32 reg;
+
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+	reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+	reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+
+	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+}
+
+static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+{
+	struct mv_xor_v2_device *xor_dev = data;
+	unsigned int ndescs;
+	u32 reg;
+
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+
+	/* No descriptors to process */
+	if (!ndescs)
+		return IRQ_NONE;
+
+	/*
+	 * Update IMSG threshold, to disable new IMSG interrupts until
+	 * end of the tasklet
+	 */
+	mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+
+	/* schedule a tasklet to handle descriptor callbacks */
+	tasklet_schedule(&xor_dev->irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * submit a descriptor to the DMA engine
+ */
+static dma_cookie_t
+mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	int desq_ptr;
+	void *dest_hw_desc;
+	dma_cookie_t cookie;
+	struct mv_xor_v2_sw_desc *sw_desc =
+		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
+	struct mv_xor_v2_device *xor_dev =
+		container_of(tx->chan, struct mv_xor_v2_device, dmachan);
+
+	dev_dbg(xor_dev->dmadev.dev,
+		"%s sw_desc %p: async_tx %p\n",
+		__func__, sw_desc, &sw_desc->async_tx);
+
+	/* assign cookie */
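+	/*
+	 * the lock below also serializes the DESQ write pointer and
+	 * npendings, which are shared with mv_xor_v2_issue_pending()
+	 */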
+	spin_lock_bh(&xor_dev->lock);
+	cookie = dma_cookie_assign(tx);
+
+	/* get the next available slot in the DESQ */
+	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+
+	/* copy the HW descriptor from the SW descriptor to the DESQ */
+	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+
+	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+	xor_dev->npendings++;
+
+	spin_unlock_bh(&xor_dev->lock);
+
+	return cookie;
+}
+
+/*
+ * Prepare a SW descriptor
+ */
+static struct mv_xor_v2_sw_desc *
+mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+{
+	struct mv_xor_v2_sw_desc *sw_desc;
+
+	/* Lock the channel */
+	spin_lock_bh(&xor_dev->lock);
+
+	if (list_empty(&xor_dev->free_sw_desc)) {
+		spin_unlock_bh(&xor_dev->lock);
+		/* schedule tasklet to free some descriptors */
+		tasklet_schedule(&xor_dev->irq_tasklet);
+		return NULL;
+	}
+
+	/* get a free SW descriptor from the SW DESQ */
+	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+				   struct mv_xor_v2_sw_desc, free_list);
+	list_del(&sw_desc->free_list);
+
+	/* Release the channel */
+	spin_unlock_bh(&xor_dev->lock);
+
+	/* set the async tx descriptor */
+	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+	async_tx_ack(&sw_desc->async_tx);
+
+	return sw_desc;
+}
+
+/*
+ * Prepare a HW descriptor for a memcpy operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			  dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct mv_xor_v2_sw_desc *sw_desc;
+	struct mv_xor_v2_descriptor *hw_descriptor;
+	struct mv_xor_v2_device *xor_dev;
+
+	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
+
+	dev_dbg(xor_dev->dmadev.dev,
+		"%s len: %zu src %pad dest %pad flags: %ld\n",
+		__func__, len, &src, &dest, flags);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->async_tx.flags = flags;
+
+	/* set the HW descriptor */
+	hw_descriptor = &sw_desc->hw_desc;
+
+	/* save the SW descriptor ID to restore when operation is done */
+	hw_descriptor->desc_id = sw_desc->idx;
+
+	/* Set the MEMCPY control word */
+	hw_descriptor->desc_ctrl =
+		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		hw_descriptor->desc_ctrl |= DESC_IOD;
+
+	/* Set source address */
+	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
+	hw_descriptor->fill_pattern_src_addr[1] =
+		upper_32_bits(src) & 0xFFFF;
+
+	/* Set Destination address */
+	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+	hw_descriptor->fill_pattern_src_addr[3] =
+		upper_32_bits(dest) & 0xFFFF;
+
+	/* Set buffers size */
+	hw_descriptor->buff_size = len;
+
+	/* return the async tx descriptor */
+	return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for a XOR operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		       unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	struct mv_xor_v2_sw_desc *sw_desc;
+	struct mv_xor_v2_descriptor *hw_descriptor;
+	struct mv_xor_v2_device *xor_dev =
+		container_of(chan, struct mv_xor_v2_device, dmachan);
+	int i;
+
+	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
+		return NULL;
+
+	dev_dbg(xor_dev->dmadev.dev,
+		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
+		__func__, src_cnt, len, &dest, flags);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->async_tx.flags = flags;
+
+	/* set the HW descriptor */
+	hw_descriptor = &sw_desc->hw_desc;
+
+	/* save the SW descriptor ID to restore when operation is done */
+	hw_descriptor->desc_id = sw_desc->idx;
+
+	/* Set the XOR control word */
+	hw_descriptor->desc_ctrl =
+		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
+	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		hw_descriptor->desc_ctrl |= DESC_IOD;
+
+	/* Set the data buffers */
+	for (i = 0; i < src_cnt; i++)
+		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
+
+	hw_descriptor->desc_ctrl |=
+		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
+
+	/* Set Destination address */
+	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+	hw_descriptor->fill_pattern_src_addr[3] =
+		upper_32_bits(dest) & 0xFFFF;
+
+	/* Set buffers size */
+	hw_descriptor->buff_size = len;
+
+	/* return the async tx descriptor */
+	return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for interrupt operation.
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+	struct mv_xor_v2_sw_desc *sw_desc;
+	struct mv_xor_v2_descriptor *hw_descriptor;
+	struct mv_xor_v2_device *xor_dev =
+		container_of(chan, struct mv_xor_v2_device, dmachan);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+	/* set the HW descriptor */
+	hw_descriptor = &sw_desc->hw_desc;
+
+	/* save the SW descriptor ID to restore when operation is done */
+	hw_descriptor->desc_id = sw_desc->idx;
+
+	/* Set the INTERRUPT control word */
+	hw_descriptor->desc_ctrl =
+		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
+	hw_descriptor->desc_ctrl |= DESC_IOD;
+
+	/* return the async tx descriptor */
+	return &sw_desc->async_tx;
+}
+
+/*
+ * push pending transactions to hardware
+ */
+static void mv_xor_v2_issue_pending(struct dma_chan *chan)
+{
+	struct mv_xor_v2_device *xor_dev =
+		container_of(chan, struct mv_xor_v2_device, dmachan);
+
+	spin_lock_bh(&xor_dev->lock);
+
+	/*
+	 * update the engine with the number of descriptors to
+	 * process
+	 */
+	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
+	xor_dev->npendings = 0;
+
+	/* Activate the channel */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+	spin_unlock_bh(&xor_dev->lock);
+}
+
+static inline
+int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
+				 int *pending_ptr)
+{
+	u32 reg;
+
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+	/* get the next pending descriptor index */
+	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
+			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
+
+	/* get the number of descriptors pending handling */
+	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+}
+
+/*
+ * handle the descriptors after HW processing
+ */
+static void mv_xor_v2_tasklet(unsigned long data)
+{
+	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+	int pending_ptr, num_of_pending, i;
+	struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+
+	/* get the pending descriptors parameters */
+	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+	/* next HW descriptor */
+	next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+
+	/* loop over the completed descriptors */
+	for (i = 0; i < num_of_pending; i++) {
+
+		/* wrap around when the read pointer reaches the DESQ end */
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM) {
+			pending_ptr = 0;
+			next_pending_hw_desc = xor_dev->hw_desq_virt;
+		} else if (next_pending_sw_desc != NULL) {
+			/* not the first iteration: advance the HW descriptor */
+			next_pending_hw_desc++;
+		}
+
+		/* get the SW descriptor related to the HW descriptor */
+		next_pending_sw_desc =
+			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];
+
+		/* call the callback */
+		if (next_pending_sw_desc->async_tx.cookie > 0) {
+			/*
+			 * update the channel's completed cookie - no
+			 * lock is required; the IMSG threshold provides
+			 * the locking
+			 */
+			dma_cookie_complete(&next_pending_sw_desc->async_tx);
+
+			if (next_pending_sw_desc->async_tx.callback)
+				next_pending_sw_desc->async_tx.callback(
+				next_pending_sw_desc->async_tx.callback_param);
+
+			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+		}
+
+		dma_run_dependencies(&next_pending_sw_desc->async_tx);
+
+		/* Lock the channel */
+		spin_lock_bh(&xor_dev->lock);
+
+		/* add the SW descriptor to the free descriptors list */
+		list_add(&next_pending_sw_desc->free_list,
+			 &xor_dev->free_sw_desc);
+
+		/* Release the channel */
+		spin_unlock_bh(&xor_dev->lock);
+
+		/* increment the next descriptor */
+		pending_ptr++;
+	}
+
+	if (num_of_pending != 0) {
+		/* free the descriptors */
+		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+	}
+
+	/* Update IMSG threshold, to enable new IMSG interrupts */
+	mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+}
+
+/*
+ * Set DMA Interrupt-message (IMSG) parameters
+ */
+static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
+
+	writel(msg->address_lo,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
+	writel(msg->address_hi & 0xFFFF,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
+	writel(msg->data,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
+}
+
+static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+{
+	u32 reg;
+
+	/* write the DESQ size to the DMA engine */
+	writel(MV_XOR_V2_DESC_NUM,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
+
+	/* write the DESQ address to the DMA engine */
+	writel(xor_dev->hw_desq & 0xFFFFFFFF,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
+	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+	/* enable the DMA engine */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+	/*
+	 * This is a temporary solution, until we activate the
+	 * SMMU. Set the attributes for reading & writing data buffers
+	 * & descriptors to:
+	 *
+	 *  - OuterShareable - Snoops will be performed on CPU caches
+	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
+	 *    and Allocate
+	 */
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+	       MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+
+	/* BW CTRL - set values to optimize the XOR performance:
+	 *
+	 *  - Set WrBurstLen & RdBurstLen - the unit will issue a
+	 *    maximum of 256B write/read transactions.
+	 *  - Limit the number of outstanding write & read data
+	 *    (OBB/IBB) requests to the maximal value.
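+	 *
+	 * With these values the register written below is 0x44840:
+	 * 64 outstanding reads, 8 outstanding writes, and burst
+	 * length code 4 (256B) in each direction.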
+	 */
+	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
+		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
+	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
+		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
+	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
+		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
+	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
+		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
+	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
+
+	/* Disable the AXI timer feature */
+	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
+	return 0;
+}
+
+static int mv_xor_v2_probe(struct platform_device *pdev)
+{
+	struct mv_xor_v2_device *xor_dev;
+	struct resource *res;
+	int i, ret = 0;
+	struct dma_device *dma_dev;
+	struct mv_xor_v2_sw_desc *sw_desc;
+	struct msi_desc *msi_desc;
+
+	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
+		     MV_XOR_V2_EXT_DESC_SIZE);
+
+	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
+	if (!xor_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xor_dev->dma_base))
+		return PTR_ERR(xor_dev->dma_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xor_dev->glob_base))
+		return PTR_ERR(xor_dev->glob_base);
+
+	platform_set_drvdata(pdev, xor_dev);
+
+	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (!IS_ERR(xor_dev->clk)) {
+		ret = clk_prepare_enable(xor_dev->clk);
+		if (ret)
+			return ret;
+	}
+
+	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
+					     mv_xor_v2_set_msi_msg);
+	if (ret)
+		goto disable_clk;
+
+	msi_desc = first_msi_entry(&pdev->dev);
+	if (!msi_desc) {
+		ret = -ENODEV;
+		goto free_msi_irqs;
+	}
+
+	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+			       mv_xor_v2_interrupt_handler, 0,
+			       dev_name(&pdev->dev), xor_dev);
+	if (ret)
+		goto free_msi_irqs;
+
+	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
+		     (unsigned long) xor_dev);
+
+	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
+
+	dma_cookie_init(&xor_dev->dmachan);
+
+	/*
+	 * allocate coherent memory for hardware descriptors
+	 * note: writecombine gives slightly better performance, but
+	 * requires that we explicitly flush the writes
+	 */
+	xor_dev->hw_desq_virt =
+		dma_alloc_coherent(&pdev->dev,
+				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+				   &xor_dev->hw_desq, GFP_KERNEL);
+	if (!xor_dev->hw_desq_virt) {
+		ret = -ENOMEM;
+		goto free_msi_irqs;
+	}
+
+	/* alloc memory for the SW descriptors */
+	xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
+					MV_XOR_V2_DESC_NUM, GFP_KERNEL);
+	if (!xor_dev->sw_desq) {
+		ret = -ENOMEM;
+		goto free_hw_desq;
+	}
+
+	spin_lock_init(&xor_dev->lock);
+
+	/* init the free SW descriptors list */
+	INIT_LIST_HEAD(&xor_dev->free_sw_desc);
+
+	/* add all SW descriptors to the free list */
+	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+		xor_dev->sw_desq[i].idx = i;
+		list_add(&xor_dev->sw_desq[i].free_list,
+			 &xor_dev->free_sw_desc);
+	}
+
+	dma_dev = &xor_dev->dmadev;
+
+	/* set DMA capabilities */
+	dma_cap_zero(dma_dev->cap_mask);
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+	/* init dma link list */
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	/* set base routines */ +
dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_issue_pending = mv_xor_v2_issue_pending; + dma_dev->dev = &pdev->dev; + + dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy; + dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt; + dma_dev->max_xor = 8; + dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor; + + xor_dev->dmachan.device = dma_dev; + + list_add_tail(&xor_dev->dmachan.device_node, + &dma_dev->channels); + + mv_xor_v2_descq_init(xor_dev); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto free_hw_desq; + + dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n"); + + return 0; + +free_hw_desq: + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); +free_msi_irqs: + platform_msi_domain_free_irqs(&pdev->dev); +disable_clk: + if (!IS_ERR(xor_dev->clk)) + clk_disable_unprepare(xor_dev->clk); + return ret; +} + +static int mv_xor_v2_remove(struct platform_device *pdev) +{ + struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&xor_dev->dmadev); + + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); + + platform_msi_domain_free_irqs(&pdev->dev); + + clk_disable_unprepare(xor_dev->clk); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id mv_xor_v2_dt_ids[] = { + { .compatible = "marvell,xor-v2", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids); +#endif + +static struct platform_driver mv_xor_v2_driver = { + .probe = mv_xor_v2_probe, + .remove = mv_xor_v2_remove, + .driver = { + .name = "mv_xor_v2", + .of_match_table = of_match_ptr(mv_xor_v2_dt_ids), + }, +}; + +module_platform_driver(mv_xor_v2_driver); + +MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 2b5a198ac77e..08c45c185549 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -227,6 +227,7 @@ struct nbpf_device { void __iomem *base; struct clk *clk; const struct nbpf_config *config; + unsigned int eirq; struct nbpf_channel chan[]; }; @@ -1300,10 +1301,9 @@ static int nbpf_probe(struct platform_device *pdev) nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * sizeof(nbpf->chan[0]), GFP_KERNEL); - if (!nbpf) { - dev_err(dev, "Memory allocation failed\n"); + if (!nbpf) return -ENOMEM; - } + dma_dev = &nbpf->dma_dev; dma_dev->dev = dev; @@ -1376,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev) IRQF_SHARED, "dma error", nbpf); if (ret < 0) return ret; + nbpf->eirq = eirq; INIT_LIST_HEAD(&dma_dev->channels); @@ -1447,6 +1448,17 @@ e_clk_off: static int nbpf_remove(struct platform_device *pdev) { struct nbpf_device *nbpf = platform_get_drvdata(pdev); + int i; + + devm_free_irq(&pdev->dev, nbpf->eirq, nbpf); + + for (i = 0; i < nbpf->config->num_channels; i++) { + struct nbpf_channel *chan = nbpf->chan + i; + + devm_free_irq(&pdev->dev, chan->irq, chan); + + tasklet_kill(&chan->tasklet); + } of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&nbpf->dma_dev); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 1e984e18c126..d99ca2b511c4 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -59,6 +59,8 @@ struct omap_sg { dma_addr_t addr; uint32_t en; /* number of elements (24-bit) */ uint32_t fn; /* number of frames (16-bit) */ + int32_t fi; /* for double indexing */ + int16_t ei; /* for 
double indexing */ }; struct omap_desc { @@ -66,7 +68,8 @@ struct omap_desc { enum dma_transfer_direction dir; dma_addr_t dev_addr; - int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */ + int16_t ei; /* for double indexing */ uint8_t es; /* CSDP_DATA_TYPE_xxx */ uint32_t ccr; /* CCR value */ uint16_t clnk_ctrl; /* CLNK_CTRL value */ @@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, } omap_dma_chan_write(c, cxsa, sg->addr); - omap_dma_chan_write(c, cxei, 0); - omap_dma_chan_write(c, cxfi, 0); + omap_dma_chan_write(c, cxei, sg->ei); + omap_dma_chan_write(c, cxfi, sg->fi); omap_dma_chan_write(c, CEN, sg->en); omap_dma_chan_write(c, CFN, sg->fn); @@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c) } omap_dma_chan_write(c, cxsa, d->dev_addr); - omap_dma_chan_write(c, cxei, 0); + omap_dma_chan_write(c, cxei, d->ei); omap_dma_chan_write(c, cxfi, d->fi); omap_dma_chan_write(c, CSDP, d->csdp); omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); @@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } +static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_desc *d; + struct omap_sg *sg; + uint8_t data_type; + size_t src_icg, dst_icg; + + /* Slave mode is not supported */ + if (is_slave_direction(xt->dir)) + return NULL; + + if (xt->frame_size != 1 || xt->numf == 0) + return NULL; + + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size)); + if (data_type > CSDP_DATA_TYPE_32) + data_type = CSDP_DATA_TYPE_32; + + sg = &d->sg[0]; + d->dir = DMA_MEM_TO_MEM; + d->dev_addr = xt->src_start; + d->es = data_type; + sg->en = xt->sgl[0].size / BIT(data_type); + sg->fn = xt->numf; + sg->addr = xt->dst_start; + d->sglen = 1; + d->ccr = c->ccr; + + src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); + dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); + if (src_icg) { + d->ccr |= CCR_SRC_AMODE_DBLIDX; + d->ei = 1; + d->fi = src_icg; + } else if (xt->src_inc) { + d->ccr |= CCR_SRC_AMODE_POSTINC; + d->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: SRC constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + if (dst_icg) { + d->ccr |= CCR_DST_AMODE_DBLIDX; + sg->ei = 1; + sg->fi = dst_icg; + } else if (xt->dst_inc) { + d->ccr |= CCR_DST_AMODE_POSTINC; + sg->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: DST constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + d->cicr = CICR_DROP_IE | CICR_FRAME_IE; + + d->csdp = data_type; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; + } else { + d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct omap_chan *c = to_omap_dma_chan(chan); @@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); + 
dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; od->ddev.device_tx_status = omap_dma_tx_status; @@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; + od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved; od->ddev.device_config = omap_dma_slave_config; od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; @@ -1204,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev) static int omap_dma_remove(struct platform_device *pdev) { struct omap_dmadev *od = platform_get_drvdata(pdev); + int irq; if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); + irq = platform_get_irq(pdev, 1); + devm_free_irq(&pdev->dev, irq, od); + dma_async_device_unregister(&od->ddev); if (!od->legacy) { diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 372b4359da97..4fc3ffbd5ca0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) /* Allocate a new DMAC and its Channels */ pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(&adev->dev, "unable to allocate mem\n"); + if (!pl330) return -ENOMEM; - } pd = &pl330->ddma; pd->dev = &adev->dev; @@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); if (!pl330->peripherals) { ret = -ENOMEM; - dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); goto probe_err2; } @@ -3005,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev) { struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; + int i, irq; pm_runtime_get_noresume(pl330->ddma.dev); if (adev->dev.of_node) of_dma_controller_free(adev->dev.of_node); + for (i = 0; i < AMBA_NR_IRQS; i++) { + irq = adev->irq[i]; + devm_free_irq(&adev->dev, irq, pl330); + } + dma_async_device_unregister(&pl330->ddma); /* Idle the DMAC */ diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 9217f893b0d1..da3688b94bdc 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a device */ adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) { - dev_err(&ofdev->dev, "failed to allocate device\n"); initcode = PPC_ADMA_INIT_ALLOC; ret = -ENOMEM; goto err_adev_alloc; @@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(&ofdev->dev, "can't allocate channel structure\n"); initcode = PPC_ADMA_INIT_CHANNEL; ret = -ENOMEM; goto err_chan_alloc; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index e756a30ccba2..dc7850a422b8 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -21,6 +21,7 @@ #include <linux/of_device.h> #include <linux/of_dma.h> #include <linux/of.h> +#include <linux/wait.h> #include <linux/dma/pxa-dma.h> #include "dmaengine.h" @@ -118,6 +119,8 @@ struct pxad_chan { struct pxad_phy *phy; struct dma_pool *desc_pool; /* Descriptors pool */ dma_cookie_t bus_error; + + wait_queue_head_t 
wq_state; }; struct pxad_device { @@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \ return single_open(file, dbg_show_##name, inode->i_private); \ } \ static const struct file_operations dbg_fops_##name = { \ - .owner = THIS_MODULE, \ .open = dbg_open_##name, \ .llseek = seq_lseek, \ .read = seq_read, \ @@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan, */ phy_writel(chan->phy, desc->first, DDADR); phy_enable(chan->phy, chan->misaligned); + wake_up(&chan->wq_state); } static void set_updater_desc(struct pxad_desc_sw *sw_desc, @@ -717,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) } } spin_unlock_irqrestore(&chan->vc.lock, flags); + wake_up(&chan->wq_state); return IRQ_HANDLED; } @@ -1268,6 +1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan, return ret; } +static void pxad_synchronize(struct dma_chan *dchan) +{ + struct pxad_chan *chan = to_pxad_chan(dchan); + + wait_event(chan->wq_state, !is_chan_running(chan)); + vchan_synchronize(&chan->vc); +} + static void pxad_free_channels(struct dma_device *dmadev) { struct pxad_chan *c, *cn; @@ -1372,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op, pdev->slave.device_tx_status = pxad_tx_status; pdev->slave.device_issue_pending = pxad_issue_pending; pdev->slave.device_config = pxad_config; + pdev->slave.device_synchronize = pxad_synchronize; pdev->slave.device_terminate_all = pxad_terminate_all; if (op->dev.coherent_dma_mask) @@ -1389,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op, return -ENOMEM; c->vc.desc_free = pxad_free_desc; vchan_init(&c->vc, &pdev->slave); + init_waitqueue_head(&c->wq_state); } return dma_async_device_register(&pdev->slave); diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 969b48176745..03c4eb3fd314 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -48,6 +48,7 @@ #include <linux/of_dma.h> #include <linux/clk.h> #include <linux/dmaengine.h> +#include <linux/pm_runtime.h> #include "../dmaengine.h" #include "../virt-dma.h" @@ -58,6 +59,8 @@ struct bam_desc_hw { __le16 flags; }; +#define BAM_DMA_AUTOSUSPEND_DELAY 100 + #define DESC_FLAG_INT BIT(15) #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) @@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan) struct bam_device *bdev = bchan->bdev; u32 val; unsigned long flags; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; vchan_free_chan_resources(to_virt_chan(chan)); if (bchan->curr_txd) { dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); - return; + goto err; } spin_lock_irqsave(&bchan->vc.lock, flags); @@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan) /* disable irq */ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); + +err: + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 1; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan) struct bam_chan 
*bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 0; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) { struct bam_device *bdev = data; u32 clr_mask = 0, srcs = 0; + int ret; srcs |= process_channel_irqs(bdev); @@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data) if (srcs & P_IRQ) tasklet_schedule(&bdev->task); + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; + if (srcs & BAM_IRQ) { clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); @@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data) writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); } + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); + return IRQ_HANDLED; } @@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan) struct bam_desc_hw *desc; struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, sizeof(struct bam_desc_hw)); + int ret; lockdep_assert_held(&bchan->vc.lock); @@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan) async_desc = container_of(vd, struct bam_async_desc, vd); bchan->curr_txd = async_desc; + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; + /* on first use, initialize the channel hardware */ if (!bchan->initialized) bam_chan_init_hw(bchan, async_desc->dir); @@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan) wmb(); writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data) bam_start_dma(bchan); spin_unlock_irqrestore(&bchan->vc.lock, flags); } + } /** @@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev) if (ret) goto err_unregister_dma; + pm_runtime_irq_safe(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; err_unregister_dma: @@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev) struct bam_device *bdev = platform_get_drvdata(pdev); u32 i; + pm_runtime_force_suspend(&pdev->dev); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&bdev->common); @@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused bam_dma_runtime_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + clk_disable(bdev->bamclk); + + return 0; +} + +static int __maybe_unused bam_dma_runtime_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(bdev->bamclk); + if (ret < 0) { + dev_err(dev, "clk_enable failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int __maybe_unused bam_dma_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + pm_runtime_force_suspend(dev); + + clk_unprepare(bdev->bamclk); + + return 0; +} + +static int __maybe_unused 
bam_dma_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare(bdev->bamclk); + if (ret) + return ret; + + pm_runtime_force_resume(dev); + + return 0; +} + +static const struct dev_pm_ops bam_dma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume) + SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume, + NULL) +}; + static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", + .pm = &bam_dma_pm_ops, .of_match_table = bam_of_match, }, }; diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 41b5c6dee713..b2374cd91e45 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev) pm_runtime_get_sync(dmadev->ddev.dev); dma_async_device_unregister(&dmadev->ddev); devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); + tasklet_kill(&dmadev->task); hidma_debug_uninit(dmadev); hidma_ll_uninit(dmadev->lldev); hidma_free(dmadev); diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index f3929001539b..ad20dfb64c71 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev) required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; tasklet_kill(&lldev->task); + tasklet_kill(&lldev->rst_task); memset(lldev->trepool, 0, required_bytes); lldev->trepool = NULL; lldev->pending_tre_count = 0; diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index c0e365321310..82f36e466083 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) pdevinfo.size_data = 0; pdevinfo.dma_mask = DMA_BIT_MASK(64); new_pdev = platform_device_register_full(&pdevinfo); - if (!new_pdev) { - ret = -ENODEV; + if (IS_ERR(new_pdev)) { + ret = PTR_ERR(new_pdev); goto out; } of_dma_configure(&new_pdev->dev, child); @@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void) #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) struct device_node *child; - for (child = of_find_matching_node(NULL, hidma_mgmt_match); child; - child = of_find_matching_node(child, hidma_mgmt_match)) { + for_each_matching_node(child, hidma_mgmt_match) { /* device tree based firmware here */ hidma_mgmt_of_populate_channels(child); of_node_put(child); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 17ccdfd28f37..ce67075589f5 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&s3cchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) { - spin_unlock_irqrestore(&s3cchan->vc.lock, flags); - return ret; - } /* * There's no point calculating the residue if there's * no txstate to store the value. 
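 * A completed cookie likewise has no residue to report, so that
 * case takes the same early return.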
*/ - if (!txstate) { + if (ret == DMA_COMPLETE || !txstate) { spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } @@ -1105,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, */ for (i = 0; i < channels; i++) { chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(dmadev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->id = i; chan->host = s3cdma; @@ -1143,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) struct s3c24xx_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, vc.chan.device_node) + next, &dmadev->channels, vc.chan.device_node) { list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } } /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ @@ -1366,6 +1361,18 @@ err_memcpy: return ret; } +static void s3c24xx_dma_free_irq(struct platform_device *pdev, + struct s3c24xx_dma_engine *s3cdma) +{ + int i; + + for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { + struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; + + devm_free_irq(&pdev->dev, phy->irq, phy); + } +} + static int s3c24xx_dma_remove(struct platform_device *pdev) { const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); @@ -1376,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&s3cdma->slave); dma_async_device_unregister(&s3cdma->memcpy); + s3c24xx_dma_free_irq(pdev, s3cdma); + s3c24xx_dma_free_virtual_channels(&s3cdma->slave); s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index dfb17926297b..0dd953884d1d 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); - return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE; + return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)); } static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) @@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, spin_lock_irqsave(&chan->lock, flags); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); - list_add_tail(&desc->node, &chan->desc.free); + list_add(&desc->node, &chan->desc.free); spin_unlock_irqrestore(&chan->lock, flags); } @@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) list_splice_init(&rchan->desc.done, &list); list_splice_init(&rchan->desc.wait, &list); + rchan->desc.running = NULL; + list_for_each_entry(desc, &list, node) rcar_dmac_realloc_hwdesc(rchan, desc, 0); @@ -1143,6 +1145,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc = chan->desc.running; struct rcar_dmac_xfer_chunk *running = NULL; struct rcar_dmac_xfer_chunk *chunk; + enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; @@ -1150,12 +1153,38 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, return 0; /* + * If the cookie corresponds to a descriptor that has been completed + * there is no residue. The same check has already been performed by the + * caller but without holding the channel lock, so the descriptor could + * now be complete. 
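+	 * dma_cookie_status() just compares the cookie against the
+	 * channel's completed/used counters and takes no locks, so
+	 * calling it here with the channel lock held is safe.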
+ */ + status = dma_cookie_status(&chan->chan, cookie, NULL); + if (status == DMA_COMPLETE) + return 0; + + /* * If the cookie doesn't correspond to the currently running transfer * then the descriptor hasn't been processed yet, and the residue is * equal to the full descriptor size. */ - if (cookie != desc->async_tx.cookie) - return desc->size; + if (cookie != desc->async_tx.cookie) { + list_for_each_entry(desc, &chan->desc.pending, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + list_for_each_entry(desc, &chan->desc.active, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + + /* + * No descriptor found for the cookie, there's thus no residue. + * This shouldn't happen if the calling driver passes a correct + * cookie value. + */ + WARN(1, "No descriptor for cookie!"); + return 0; + } /* * In descriptor mode the descriptor running pointer is not maintained @@ -1202,6 +1231,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, residue = rcar_dmac_chan_get_residue(rchan, cookie); spin_unlock_irqrestore(&rchan->lock, flags); + /* if there's no residue, the cookie is complete */ + if (!residue) + return DMA_COMPLETE; + dma_set_residue(txstate, residue); return status; diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 80d86402490e..c94ffab0d25c 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), GFP_KERNEL); - if (!sh_chan) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sh_chan) return -ENOMEM; - } schan = &sh_chan->shdma_chan; schan->max_xfer_len = SH_DMA_TCR_MAX + 1; @@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev) shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), GFP_KERNEL); - if (!shdev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!shdev) return -ENOMEM; - } dma_dev = &shdev->shdma_dev.dma_dev; diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index 6da2eaa6c294..69b9564dc9d9 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c @@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, int err; sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); - if (!sc) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sc) return -ENOMEM; - } schan = &sc->shdma_chan; schan->max_xfer_len = 64 * 1024 * 1024 - 1; @@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev) err = -ENOMEM; su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), GFP_KERNEL); - if (!su_dev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!su_dev) return err; - } dma_dev = &su_dev->shdma_dev.dma_dev; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index e48350e65089..d8bc3f2a71db 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) int ret, i; sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); - if (!sdma) { - dev_err(dev, "Memory exhausted!\n"); + if (!sdma) return -ENOMEM; - } + data = (struct sirfsoc_dmadata *) (of_match_device(op->dev.driver->of_match_table, &op->dev)->data); @@ -981,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) of_dma_controller_free(op->dev.of_node); 
dma_async_device_unregister(&sdma->dma); free_irq(sdma->irq, sdma); + tasklet_kill(&sdma->tasklet); irq_dispose_mapping(sdma->irq); pm_runtime_disable(&op->dev); if (!pm_runtime_status_suspended(&op->dev)) @@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) }; -struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { .exec = sirfsoc_dma_execute_hw_a6, .type = SIRFSOC_DMA_VER_A6, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { .exec = sirfsoc_dma_execute_hw_a7v1, .type = SIRFSOC_DMA_VER_A7V1, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { .exec = sirfsoc_dma_execute_hw_a7v2, .type = SIRFSOC_DMA_VER_A7V2, }; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6fb8307468ab..8b18e44a02d5 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_COMPLETE) + if (ret != DMA_COMPLETE && txstate) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) @@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) (num_phy_chans + num_log_chans + num_memcpy_chans) * sizeof(struct d40_chan), GFP_KERNEL); - if (base == NULL) { - d40_err(&pdev->dev, "Out of memory\n"); + if (base == NULL) goto failure; - } base->rev = rev; base->clk = clk; diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 27b818dee7c7..13b42dd9900c 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -10,7 +10,7 @@ #include "ste_dma40_ll.h" -u8 d40_width_to_bits(enum dma_slave_buswidth width) +static u8 d40_width_to_bits(enum dma_slave_buswidth width) { if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) return STEDMA40_ESIZE_8_BIT; diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 5065ca43face..3835fcde3545 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, state); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !state) return ret; spin_lock_irqsave(&vchan->vc.lock, flags); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 01e316f73559..6ab9eb98588a 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Allocate DMA desc */ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT); - if (!dma_desc) { - dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); + if (!dma_desc) return NULL; - } dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); dma_desc->txd.tx_submit = tegra_dma_tx_submit; @@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get( spin_unlock_irqrestore(&tdc->lock, flags); sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT); - if (!sg_req) - dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); + return sg_req; } @@ -484,7 +481,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, * load new configuration. 
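The ste_dma40 and sun6i changes above repeat a pattern enforced throughout this series: device_tx_status() must tolerate a NULL dma_tx_state, and residue work can be skipped once dma_cookie_status() reports completion. A minimal sketch of the resulting callback shape (foo_* names are placeholders; dma_cookie_status() and dma_set_residue() come from drivers/dma/dmaengine.h):

static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;	/* done, or caller wants no residue */

	dma_set_residue(txstate, foo_channel_residue(chan, cookie));
	return ret;
}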
*/ tegra_dma_pause(tdc, false); - status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); /* * If interrupt is pending then do nothing as the ISR will handle @@ -822,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, /* Check on wait_ack desc status */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } @@ -836,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { dma_desc = sg_req->dma_desc; if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } - dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); + dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); + dma_desc = NULL; + +found: + if (dma_desc && txstate) { + residual = dma_desc->bytes_requested - + (dma_desc->bytes_transferred % + dma_desc->bytes_requested); + dma_set_residue(txstate, residual); + } + spin_unlock_irqrestore(&tdc->lock, flags); return ret; } @@ -905,7 +902,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, enum dma_slave_buswidth *slave_bw) { - switch (direction) { case DMA_MEM_TO_DEV: *apb_addr = tdc->dma_sconfig.dst_addr; @@ -948,8 +944,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc; - unsigned int i; - struct scatterlist *sg; + unsigned int i; + struct scatterlist *sg; unsigned long csr, ahb_seq, apb_ptr, apb_seq; struct list_head req_list; struct tegra_dma_sg_req *sg_req = NULL; @@ -1062,7 +1058,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc = NULL; - struct tegra_dma_sg_req *sg_req = NULL; + struct tegra_dma_sg_req *sg_req = NULL; unsigned long csr, ahb_seq, apb_ptr, apb_seq; int len; size_t remain_len; @@ -1204,7 +1200,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma *tdma = tdc->tdma; - struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; struct list_head dma_desc_list; @@ -1305,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = { static int tegra_dma_probe(struct platform_device *pdev) { - struct resource *res; + struct resource *res; struct tegra_dma *tdma; int ret; int i; @@ -1319,10 +1314,8 @@ static int tegra_dma_probe(struct platform_device *pdev) tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * sizeof(struct tegra_dma_channel), GFP_KERNEL); - if (!tdma) { - dev_err(&pdev->dev, "Error: memory allocation failed\n"); + if (!tdma) return -ENOMEM; - } tdma->dev = &pdev->dev; tdma->chip_data = cdata; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index e107779b1a2e..5ae294b256a7 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c 
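The s3c24xx hunks earlier and the txx9dmac hunks below fix the same teardown race: a devm-managed IRQ is released only after remove() returns, so it could re-schedule a tasklet that remove() had already killed. The shared ordering, sketched with placeholder foo_* names:

static int foo_dma_remove(struct platform_device *pdev)
{
	struct foo_dma_dev *fd = platform_get_drvdata(pdev);

	dma_async_device_unregister(&fd->ddev);

	/* Release the IRQ before killing the tasklet it may schedule;
	 * devm would otherwise free it only after remove() returns. */
	devm_free_irq(&pdev->dev, fd->irq, fd);
	tasklet_kill(&fd->tasklet);

	return 0;
}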
@@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = { .probe = ti_dma_xbar_probe, }; -int omap_dmaxbar_init(void) +static int omap_dmaxbar_init(void) { return platform_driver_register(&ti_dma_xbar_driver); } diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 559cd4073698..e82745aa42a8 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) int err; td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); - if (!td_desc) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc) goto out; - } td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); - if (!td_desc->desc_list) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc->desc_list) goto err; - } dma_async_tx_descriptor_init(&td_desc->txd, chan); td_desc->txd.tx_submit = td_tx_submit; diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 8849318b32b7..7632290e7c14 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev) { struct txx9dmac_chan *dc = platform_get_drvdata(pdev); + dma_async_device_unregister(&dc->dma); - if (dc->irq >= 0) + if (dc->irq >= 0) { + devm_free_irq(&pdev->dev, dc->irq, dc); tasklet_kill(&dc->tasklet); + } dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; return 0; } @@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev) struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); txx9dmac_off(ddev); - if (ddev->irq >= 0) + if (ddev->irq >= 0) { + devm_free_irq(&pdev->dev, ddev->irq, ddev); tasklet_kill(&ddev->tasklet); + } return 0; } diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile index 3c4e9f2fea28..9e91f8f5b087 100644 --- a/drivers/dma/xilinx/Makefile +++ b/drivers/dma/xilinx/Makefile @@ -1 +1,2 @@ -obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o +obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_dma.c index df9118540b91..4e223d094433 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -45,6 +45,7 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "../dmaengine.h" @@ -113,7 +114,7 @@ #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -157,12 +158,25 @@ /* AXI DMA Specific Masks/Bit fields */ #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) #define XILINX_DMA_CR_COALESCE_SHIFT 16 #define XILINX_DMA_BD_SOP BIT(27) #define XILINX_DMA_BD_EOP BIT(26) #define XILINX_DMA_COALESCE_MAX 255 #define XILINX_DMA_NUM_APP_WORDS 5 +/* Multi-Channel DMA Descriptor offsets*/ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define 
XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -194,22 +208,22 @@ struct xilinx_vdma_desc_hw { /** * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 - * @pad2: Reserved @0x0C - * @pad3: Reserved @0x10 - * @pad4: Reserved @0x14 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @pad1: Reserved @0x10 + * @pad2: Reserved @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 */ struct xilinx_axidma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 buf_addr; - u32 pad2; - u32 pad3; - u32 pad4; + u32 buf_addr_msb; + u32 mcdma_control; + u32 vsize_stride; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -218,21 +232,21 @@ struct xilinx_axidma_desc_hw { /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_descmsb: Next Descriptor Pointer MSB @0x04 * @src_addr: Source address @0x08 - * @pad2: Reserved @0x0C + * @src_addrmsb: Source address MSB @0x0C * @dest_addr: Destination address @0x10 - * @pad3: Reserved @0x14 + * @dest_addrmsb: Destination address MSB @0x14 * @control: Control field @0x18 * @status: Status field @0x1C */ struct xilinx_cdma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 src_addr; - u32 pad2; + u32 src_addr_msb; u32 dest_addr; - u32 pad3; + u32 dest_addr_msb; u32 control; u32 status; } __aligned(64); @@ -278,11 +292,13 @@ struct xilinx_cdma_tx_segment { * @async_tx: Async transaction descriptor * @segments: TX segments list * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; + bool cyclic; }; /** @@ -302,6 +318,7 @@ struct xilinx_dma_tx_descriptor { * @direction: Transfer direction * @num_frms: Number of frames * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers. 
* @genlock: Support genlock mode * @err: Channel has errors * @tasklet: Cleanup work after irq @@ -312,6 +329,7 @@ struct xilinx_dma_tx_descriptor { * @desc_submitcount: Descriptor h/w submitted count * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @start_transfer: Differentiate b/w DMA IP's transfer */ struct xilinx_dma_chan { @@ -330,6 +348,7 @@ struct xilinx_dma_chan { enum dma_transfer_direction direction; int num_frms; bool has_sg; + bool cyclic; bool genlock; bool err; struct tasklet_struct tasklet; @@ -340,7 +359,9 @@ struct xilinx_dma_chan { u32 desc_submitcount; u32 residue; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; }; struct xilinx_dma_config { @@ -357,6 +378,7 @@ struct xilinx_dma_config { * @common: DMA device structure * @chan: Driver specific DMA channel * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not * @flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -366,6 +388,8 @@ struct xilinx_dma_config { * @txs_clk: DMA mm2s stream clock * @rx_clk: DMA s2mm clock * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier */ struct xilinx_dma_device { void __iomem *regs; @@ -373,6 +397,7 @@ struct xilinx_dma_device { struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; bool has_sg; + bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -382,6 +407,8 @@ struct xilinx_dma_device { struct clk *txs_clk; struct clk *rx_clk; struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; }; /* Macros */ @@ -454,6 +481,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); } +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -491,11 +546,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_cdma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -513,11 +567,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_axidma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, 
&phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -660,13 +713,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) dev_dbg(chan->dev, "Free all channel resources.\n"); xilinx_dma_free_descriptors(chan); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); xilinx_dma_free_tx_segment(chan, chan->seg_v); + } dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } /** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** * xilinx_dma_chan_desc_cleanup - Clean channel descriptors * @chan: Driver specific DMA channel */ @@ -681,6 +758,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) dma_async_tx_callback callback; void *callback_param; + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + /* Remove from the list of running transactions */ list_del(&desc->node); @@ -757,7 +839,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { /* * For AXI DMA case after submitting a pending_list, keep * an extra segment allocated so that the "next descriptor" @@ -768,6 +850,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) */ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. 
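From the client side, the ring terminated by this reserved segment is driven through the standard cyclic API; the completion callback fires once per period until the channel is terminated. A sketch assuming chan, buf_phys, buf_len, period_len, period_done and priv are supplied by the caller:

struct dma_async_tx_descriptor *txd;
dma_cookie_t cookie;

txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txd)
	return -ENOMEM;

txd->callback = period_done;		/* invoked once per period */
txd->callback_param = priv;

cookie = dmaengine_submit(txd);
dma_async_issue_pending(chan);		/* ring runs until terminated */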
+ */ + chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); + } + dma_cookie_init(dchan); if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { @@ -1065,12 +1156,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); /* Update tail ptr register which will start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { /* In simple mode */ struct xilinx_cdma_tx_segment *segment; @@ -1082,8 +1173,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) hw = &segment->hw; - dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1124,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; + if (chan->has_sg && !chan->xdev->mcdma) { + old_head = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, node); + new_head = chan->seg_v; + /* Copy Buffer Descriptor fields. */ + new_head->hw = old_head->hw; - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; + /* Swap and save new reserve */ + list_replace_init(&old_head->node, &new_head->node); + chan->seg_v = old_head; - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; + tail_segment->hw.next_desc = chan->seg_v->phys; + head_desc->async_tx.phys = new_head->phys; + } reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); @@ -1146,9 +1239,25 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg) - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + if (chan->has_sg && !chan->xdev->mcdma) + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } xilinx_dma_start(chan); @@ -1156,9 +1265,27 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) return; /* Start the transfer */ - if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->cyclic) + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); + } else { + if 
(!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1168,7 +1295,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) node); hw = &segment->hw; - dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1209,7 +1336,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) list_for_each_entry_safe(desc, next, &chan->active_list, node) { list_del(&desc->node); - dma_cookie_complete(&desc->async_tx); + if (!desc->cyclic) + dma_cookie_complete(&desc->async_tx); list_add_tail(&desc->node, &chan->done_list); } } @@ -1397,6 +1525,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; int err; + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + if (chan->err) { /* * If reset fails, need to hard reset the system. @@ -1414,6 +1547,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* Put this transaction onto the tail of the pending queue */ append_desc_queue(chan, desc); + if (desc->cyclic) + chan->cyclic = true; + spin_unlock_irqrestore(&chan->lock, flags); return cookie; @@ -1541,6 +1677,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, hw->control = len; hw->src_addr = dma_src; hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } /* Fill the previous next descriptor with current */ prev = list_last_entry(&desc->segments, @@ -1623,7 +1763,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( hw = &segment->hw; /* Fill in the descriptor */ - hw->buf_addr = sg_dma_address(sg) + sg_used; + xilinx_axidma_buf(chan, hw, sg_dma_address(sg), + sg_used, 0); hw->control = copy; @@ -1669,12 +1810,204 @@ error: } /** + * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction + * @dchan: DMA channel + * @buf_addr: Physical address of the buffer + * @buf_len: Total length of the cyclic buffers + * @period_len: length of individual cyclic buffer + * @direction: DMA direction + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; + size_t copy, sg_used; + unsigned int num_periods; + int i; + u32 reg; + + if (!period_len) + return NULL; + + num_periods = buf_len / period_len; + + if (!num_periods) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. 
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.next_desc = (u32) head_segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + head_segment->hw.control |= XILINX_DMA_BD_SOP; + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. 
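The template fields consumed above map onto the multi-channel descriptor as hsize = sgl[0].size, stride = sgl[0].icg + sgl[0].size and vsize = numf. A client-side sketch of a 2D MEM_TO_DEV transfer respecting the frame_size == 1 restriction, assuming src_phys, lines, line_bytes and stride are provided by the caller and that the driver consumes the template during prep, as this one does:

struct dma_interleaved_template *xt;
struct dma_async_tx_descriptor *txd;

xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
if (!xt)
	return -ENOMEM;

xt->dir = DMA_MEM_TO_DEV;
xt->src_start = src_phys;
xt->numf = lines;			/* vsize: frames per transfer */
xt->frame_size = 1;			/* this driver allows one chunk */
xt->sgl[0].size = line_bytes;		/* hsize */
xt->sgl[0].icg = stride - line_bytes;	/* gap between lines */

txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
kfree(xt);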
+ */ + list_add_tail(&segment->node, &desc->segments); + + segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (xt->dir == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific DMA Channel pointer */ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 reg; + + if (chan->cyclic) + xilinx_dma_chan_reset(chan); /* Halt the DMA engine */ xilinx_dma_halt(chan); @@ -1682,6 +2015,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); + if (chan->cyclic) { + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + chan->cyclic = false; + } + return 0; } @@ -1972,7 +2312,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) * Return: '0' on success and failure value on error */ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, - struct device_node *node) + struct device_node *node, int chan_id) { struct xilinx_dma_chan *chan; bool has_dre = false; @@ -2014,9 +2354,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (!has_dre) xdev->common.copy_align = fls(width - 1); - if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; - chan->id = 0; + chan->id = chan_id; + chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2027,9 +2370,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->flush_on_fsync = true; } } else if (of_device_is_compatible(node, - "xlnx,axi-vdma-s2mm-channel")) { + "xlnx,axi-vdma-s2mm-channel") || + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; - chan->id = 1; + chan->id = chan_id; + chan->tdest = chan_id - xdev->nr_channels; chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2084,6 +2430,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /** + * xilinx_dma_child_probe - Per child node probe + * It gets the number of dma-channels per child node from + * the device tree and initializes all the channels. + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: 0 always. 
+ */ +static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) { + int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) + xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + + xdev->nr_channels += nr_channels; + + return 0; +} + +/** * of_dma_xilinx_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data @@ -2096,7 +2468,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct xilinx_dma_device *xdev = ofdma->of_dma_data; int chan_id = dma_spec->args[0]; - if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) + if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) return NULL; return dma_get_slave_channel(&xdev->chan[chan_id]->common); @@ -2172,6 +2544,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Retrieve the DMA engine properties from the device tree */ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { err = of_property_read_u32(node, "xlnx,num-fstores", @@ -2218,7 +2592,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; /* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; @@ -2234,13 +2613,13 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { - err = xilinx_dma_chan_probe(xdev, child); + err = xilinx_dma_child_probe(xdev, child); if (err < 0) goto disable_clks; } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xdev->chan[i]->num_frms = num_frames; } @@ -2263,7 +2642,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) disable_clks: xdma_disable_allclks(xdev); error: - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); @@ -2285,7 +2664,7 @@ static int xilinx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&xdev->common); - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c new file mode 100644 index 000000000000..6d221e5c72ee --- /dev/null +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -0,0 +1,1151 @@ +/* + * DMA driver for Xilinx ZynqMP DMA Engine + * + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dma/xilinx_dma.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_dma.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "../dmaengine.h" + +/* Register Offsets */ +#define ZYNQMP_DMA_ISR 0x100 +#define ZYNQMP_DMA_IMR 0x104 +#define ZYNQMP_DMA_IER 0x108 +#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_CTRL0 0x110 +#define ZYNQMP_DMA_CTRL1 0x114 +#define ZYNQMP_DMA_DATA_ATTR 0x120 +#define ZYNQMP_DMA_DSCR_ATTR 0x124 +#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 +#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C +#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 +#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 +#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 +#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C +#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 +#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 +#define ZYNQMP_DMA_SRC_START_LSB 0x158 +#define ZYNQMP_DMA_SRC_START_MSB 0x15C +#define ZYNQMP_DMA_DST_START_LSB 0x160 +#define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_RATE_CTRL 0x18C +#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 +#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 +#define ZYNQMP_DMA_CTRL2 0x200 + +/* Interrupt registers bit field definitions */ +#define ZYNQMP_DMA_DONE BIT(10) +#define ZYNQMP_DMA_AXI_WR_DATA BIT(9) +#define ZYNQMP_DMA_AXI_RD_DATA BIT(8) +#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) +#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) +#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) +#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) +#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) +#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) +#define ZYNQMP_DMA_INV_APB BIT(0) + +/* Control 0 register bit field definitions */ +#define ZYNQMP_DMA_OVR_FETCH BIT(7) +#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) +#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) + +/* Control 1 register bit field definitions */ +#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) + +/* Data Attribute register bit field definitions */ +#define ZYNQMP_DMA_ARBURST GENMASK(27, 26) +#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) +#define ZYNQMP_DMA_ARCACHE_OFST 22 +#define ZYNQMP_DMA_ARQOS GENMASK(21, 18) +#define ZYNQMP_DMA_ARQOS_OFST 18 +#define ZYNQMP_DMA_ARLEN GENMASK(17, 14) +#define ZYNQMP_DMA_ARLEN_OFST 14 +#define ZYNQMP_DMA_AWBURST GENMASK(13, 12) +#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) +#define ZYNQMP_DMA_AWCACHE_OFST 8 +#define ZYNQMP_DMA_AWQOS GENMASK(7, 4) +#define ZYNQMP_DMA_AWQOS_OFST 4 +#define ZYNQMP_DMA_AWLEN GENMASK(3, 0) +#define ZYNQMP_DMA_AWLEN_OFST 0 + +/* Descriptor Attribute register bit field definitions */ +#define ZYNQMP_DMA_AXCOHRNT BIT(8) +#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) +#define ZYNQMP_DMA_AXCACHE_OFST 4 +#define ZYNQMP_DMA_AXQOS GENMASK(3, 0) +#define ZYNQMP_DMA_AXQOS_OFST 0 + +/* Control register 2 bit field definitions */ +#define ZYNQMP_DMA_ENABLE BIT(0) + +/* Buffer Descriptor definitions */ +#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 +#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 +#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 +#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 + +/* Interrupt Mask specific definitions */ +#define ZYNQMP_DMA_INT_ERR 
(ZYNQMP_DMA_AXI_RD_DATA | \ + ZYNQMP_DMA_AXI_WR_DATA | \ + ZYNQMP_DMA_AXI_RD_DST_DSCR | \ + ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ + ZYNQMP_DMA_INV_APB) +#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ + ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ + ZYNQMP_DMA_IRQ_DST_ACCT_ERR) +#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) +#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ + ZYNQMP_DMA_INT_ERR | \ + ZYNQMP_DMA_INT_OVRFL | \ + ZYNQMP_DMA_DST_DSCR_DONE) + +/* Max number of descriptors per channel */ +#define ZYNQMP_DMA_NUM_DESCS 32 + +/* Max transfer size per descriptor */ +#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + +/* Reset values for data attributes */ +#define ZYNQMP_DMA_AXCACHE_VAL 0xF +#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF +#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF + +#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF + +/* Bus width in bits */ +#define ZYNQMP_DMA_BUS_WIDTH_64 64 +#define ZYNQMP_DMA_BUS_WIDTH_128 128 + +#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) + +#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ + common) +#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ + async_tx) + +/** + * struct zynqmp_dma_desc_ll - Hw linked list descriptor + * @addr: Buffer address + * @size: Size of the buffer + * @ctrl: Control word + * @nxtdscraddr: Next descriptor base address + * @rsvd: Reserved field, for Hw internal use. + */ +struct zynqmp_dma_desc_ll { + u64 addr; + u32 size; + u32 ctrl; + u64 nxtdscraddr; + u64 rsvd; +} __aligned(64); + +/** + * struct zynqmp_dma_desc_sw - Per Transaction structure + * @src: Source address for simple mode dma + * @dst: Destination address for simple mode dma + * @len: Transfer length for simple mode dma + * @node: Node in the channel descriptor list + * @tx_list: List head for the current transfer + * @async_tx: Async transaction descriptor + * @src_v: Virtual address of the src descriptor + * @src_p: Physical address of the src descriptor + * @dst_v: Virtual address of the dst descriptor + * @dst_p: Physical address of the dst descriptor + */ +struct zynqmp_dma_desc_sw { + u64 src; + u64 dst; + u32 len; + struct list_head node; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + struct zynqmp_dma_desc_ll *src_v; + dma_addr_t src_p; + struct zynqmp_dma_desc_ll *dst_v; + dma_addr_t dst_p; +}; + +/** + * struct zynqmp_dma_chan - Driver specific DMA channel structure + * @zdev: Driver specific device structure + * @regs: Control registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @free_list: Descriptors free + * @active_list: Descriptors active + * @sw_desc_pool: SW descriptor pool + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool_v: Statically allocated descriptor base + * @desc_pool_p: Physical allocated descriptor base + * @desc_free_cnt: Descriptor available count + * @dev: The dma device + * @irq: Channel IRQ + * @is_dmacoherent: Tells whether dma operations are coherent or not + * @tasklet: Cleanup work after irq + * @idle: Channel status + * @desc_size: Size of the low level descriptor + * @err: Channel has errors + * @bus_width: Bus width + * @src_burst_len: Source burst length + * @dst_burst_len: Dest burst length + * @clk_main: Pointer to main clock + * @clk_apb: Pointer to apb clock + */ +struct zynqmp_dma_chan { + struct zynqmp_dma_device *zdev; + void __iomem *regs; + spinlock_t lock; + struct list_head pending_list; + struct 
list_head free_list; + struct list_head active_list; + struct zynqmp_dma_desc_sw *sw_desc_pool; + struct list_head done_list; + struct dma_chan common; + void *desc_pool_v; + dma_addr_t desc_pool_p; + u32 desc_free_cnt; + struct device *dev; + int irq; + bool is_dmacoherent; + struct tasklet_struct tasklet; + bool idle; + u32 desc_size; + bool err; + u32 bus_width; + u32 src_burst_len; + u32 dst_burst_len; + struct clk *clk_main; + struct clk *clk_apb; +}; + +/** + * struct zynqmp_dma_device - DMA device structure + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + */ +struct zynqmp_dma_device { + struct device *dev; + struct dma_device common; + struct zynqmp_dma_chan *chan; +}; + +static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, + u64 value) +{ + lo_hi_writeq(value, chan->regs + reg); +} + +/** + * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller + * @chan: ZynqMP DMA DMA channel pointer + * @desc: Transaction descriptor pointer + */ +static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *desc) +{ + dma_addr_t addr; + + addr = desc->src_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr); + addr = desc->dst_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr); +} + +/** + * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor + * @chan: ZynqMP DMA channel pointer + * @desc: Hw descriptor pointer + */ +static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan, + void *desc) +{ + struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc; + + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP; + hw++; + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP; +} + +/** + * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Hw descriptor pointer + * @src: Source buffer address + * @dst: Destination buffer address + * @len: Transfer length + * @prev: Previous hw descriptor pointer + */ +static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_ll *sdesc, + dma_addr_t src, dma_addr_t dst, size_t len, + struct zynqmp_dma_desc_ll *prev) +{ + struct zynqmp_dma_desc_ll *ddesc = sdesc + 1; + + sdesc->size = ddesc->size = len; + sdesc->addr = src; + ddesc->addr = dst; + + sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256; + if (chan->is_dmacoherent) { + sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + } + + if (prev) { + dma_addr_t addr = chan->desc_pool_p + + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); + ddesc = prev + 1; + prev->nxtdscraddr = addr; + ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); + } +} + +/** + * zynqmp_dma_init - Initialize the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) +{ + u32 val; + + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + val = readl(chan->regs + ZYNQMP_DMA_ISR); + writel(val, chan->regs + ZYNQMP_DMA_ISR); + + if (chan->is_dmacoherent) { + val = ZYNQMP_DMA_AXCOHRNT; + val = (val & ~ZYNQMP_DMA_AXCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR); + } + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + if (chan->is_dmacoherent) { + val = (val & ~ZYNQMP_DMA_ARCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST); + val = (val & ~ZYNQMP_DMA_AWCACHE) | 
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST); + } + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); + + /* Clear the interrupt accounting registers */ + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + chan->idle = true; +} + +/** + * zynqmp_dma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor pointer + * + * Return: cookie value + */ +static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct zynqmp_dma_chan *chan = to_chan(tx->chan); + struct zynqmp_dma_desc_sw *desc, *new; + dma_cookie_t cookie; + + new = tx_to_desc(tx); + spin_lock_bh(&chan->lock); + cookie = dma_cookie_assign(tx); + + if (!list_empty(&chan->pending_list)) { + desc = list_last_entry(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!list_empty(&desc->tx_list)) + desc = list_last_entry(&desc->tx_list, + struct zynqmp_dma_desc_sw, node); + desc->src_v->nxtdscraddr = new->src_p; + desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + desc->dst_v->nxtdscraddr = new->dst_p; + desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP; + } + + list_add_tail(&new->node, &chan->pending_list); + spin_unlock_bh(&chan->lock); + + return cookie; +} + +/** + * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool + * @chan: ZynqMP DMA channel pointer + * + * Return: The sw descriptor + */ +static struct zynqmp_dma_desc_sw * +zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + spin_lock_bh(&chan->lock); + desc = list_first_entry(&chan->free_list, + struct zynqmp_dma_desc_sw, node); + list_del(&desc->node); + spin_unlock_bh(&chan->lock); + + INIT_LIST_HEAD(&desc->tx_list); + /* Clear the src and dst descriptor memory */ + memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); + + return desc; +} + +/** + * zynqmp_dma_free_descriptor - Free descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Transaction descriptor pointer + */ +static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *sdesc) +{ + struct zynqmp_dma_desc_sw *child, *next; + + chan->desc_free_cnt++; + list_add_tail(&sdesc->node, &chan->free_list); + list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { + chan->desc_free_cnt++; + list_move_tail(&child->node, &chan->free_list); + } +} + +/** + * zynqmp_dma_free_desc_list - Free descriptors list + * @chan: ZynqMP DMA channel pointer + * @list: List to parse and delete the descriptor + */ +static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan, + struct list_head *list) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, list, node) + zynqmp_dma_free_descriptor(chan, desc); +} + +/** + * zynqmp_dma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: Number of descriptors on success and failure value on error + */ +static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + struct zynqmp_dma_desc_sw *desc; + int i; + + chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, + GFP_KERNEL); + if (!chan->sw_desc_pool) + return -ENOMEM; + + chan->idle = true; + chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS; + + INIT_LIST_HEAD(&chan->free_list); + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + dma_async_tx_descriptor_init(&desc->async_tx, 
&chan->common); + desc->async_tx.tx_submit = zynqmp_dma_tx_submit; + list_add_tail(&desc->node, &chan->free_list); + } + + chan->desc_pool_v = dma_zalloc_coherent(chan->dev, + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + &chan->desc_pool_p, GFP_KERNEL); + if (!chan->desc_pool_v) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2)); + desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); + desc->src_p = chan->desc_pool_p + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); + desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); + } + + return ZYNQMP_DMA_NUM_DESCS; +} + +/** + * zynqmp_dma_start - Start DMA channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); + chan->idle = false; + writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); +} + +/** + * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt + * @chan: ZynqMP DMA channel pointer + * @status: Interrupt status value + */ +static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) +{ + u32 val; + + if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); +} + +static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) +{ + u32 val; + + val = readl(chan->regs + ZYNQMP_DMA_CTRL0); + val |= ZYNQMP_DMA_POINT_TYPE_SG; + writel(val, chan->regs + ZYNQMP_DMA_CTRL0); + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + val = (val & ~ZYNQMP_DMA_ARLEN) | + (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + val = (val & ~ZYNQMP_DMA_AWLEN) | + (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); +} + +/** + * zynqmp_dma_device_config - Zynqmp dma device configuration + * @dchan: DMA channel + * @config: DMA device config + */ +static int zynqmp_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + chan->src_burst_len = config->src_maxburst; + chan->dst_burst_len = config->dst_maxburst; + + return 0; +} + +/** + * zynqmp_dma_start_transfer - Initiate the new transfer + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + if (!chan->idle) + return; + + zynqmp_dma_config(chan); + + desc = list_first_entry_or_null(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + zynqmp_dma_update_desc_to_ctrlr(chan, desc); + zynqmp_dma_start(chan); +} + + +/** + * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors + * @chan: ZynqMP DMA channel + */ +static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + list_del(&desc->node); + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock(&chan->lock); + callback(callback_param); + spin_lock(&chan->lock); + } + + /* Run any dependencies, then free the descriptor */ + 
zynqmp_dma_free_descriptor(chan, desc); + } +} + +/** + * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + desc = list_first_entry_or_null(&chan->active_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + list_del(&desc->node); + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &chan->done_list); +} + +/** + * zynqmp_dma_issue_pending - Issue pending transactions + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_issue_pending(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_start_transfer(chan); + spin_unlock_bh(&chan->lock); +} + +/** + * zynqmp_dma_free_descriptors - Free channel descriptors + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) +{ + zynqmp_dma_free_desc_list(chan, &chan->active_list); + zynqmp_dma_free_desc_list(chan, &chan->pending_list); + zynqmp_dma_free_desc_list(chan, &chan->done_list); +} + +/** + * zynqmp_dma_free_chan_resources - Free channel resources + * @dchan: DMA channel pointer + */ +static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + dma_free_coherent(chan->dev, + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), + chan->desc_pool_v, chan->desc_pool_p); + kfree(chan->sw_desc_pool); +} + +/** + * zynqmp_dma_reset - Reset the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + zynqmp_dma_free_descriptors(chan); + zynqmp_dma_init(chan); +} + +/** + * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the ZynqMP DMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 isr, imr, status; + irqreturn_t ret = IRQ_NONE; + + isr = readl(chan->regs + ZYNQMP_DMA_ISR); + imr = readl(chan->regs + ZYNQMP_DMA_IMR); + status = isr & ~imr; + + writel(isr, chan->regs + ZYNQMP_DMA_ISR); + if (status & ZYNQMP_DMA_INT_DONE) { + tasklet_schedule(&chan->tasklet); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_DONE) + chan->idle = true; + + if (status & ZYNQMP_DMA_INT_ERR) { + chan->err = true; + tasklet_schedule(&chan->tasklet); + dev_err(chan->dev, "Channel %p has errors\n", chan); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_INT_OVRFL) { + zynqmp_dma_handle_ovfl_int(chan, status); + dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); + ret = IRQ_HANDLED; + } + + return ret; +} + +/** + * zynqmp_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the ZynqMP DMA channel structure + */ +static void zynqmp_dma_do_tasklet(unsigned long data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 count; + + spin_lock(&chan->lock); + + if (chan->err) { + zynqmp_dma_reset(chan); + chan->err = false; + goto unlock; + } + + count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + while (count) { + 
zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + count--; + } + + if (chan->idle) + zynqmp_dma_start_transfer(chan); + +unlock: + spin_unlock(&chan->lock); +} + +/** + * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel + * @dchan: DMA channel pointer + * + * Return: Always '0' + */ +static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + + return 0; +} + +/** + * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction + * @dchan: DMA channel + * @dma_dst: Destination buffer address + * @dma_src: Source buffer address + * @len: Transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, ulong flags) +{ + struct zynqmp_dma_chan *chan; + struct zynqmp_dma_desc_sw *new, *first = NULL; + void *desc = NULL, *prev = NULL; + size_t copy; + u32 desc_cnt; + + chan = to_chan(dchan); + + if (len > ZYNQMP_DMA_MAX_TRANS_LEN) + return NULL; + + desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > chan->desc_free_cnt) { + spin_unlock_bh(&chan->lock); + dev_dbg(chan->dev, "chan %p descs are not available\n", chan); + return NULL; + } + chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; + spin_unlock_bh(&chan->lock); + + do { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + + copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, + dma_dst, copy, prev); + prev = desc; + len -= copy; + dma_src += copy; + dma_dst += copy; + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); + } while (len); + + zynqmp_dma_desc_config_eod(chan, desc); + async_tx_ack(&first->async_tx); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction + * @dchan: DMA channel + * @dst_sg: Destination scatter list + * @dst_sg_len: Number of entries in destination scatter list + * @src_sg: Source scatter list + * @src_sg_len: Number of entries in source scatter list + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg( + struct dma_chan *dchan, struct scatterlist *dst_sg, + unsigned int dst_sg_len, struct scatterlist *src_sg, + unsigned int src_sg_len, unsigned long flags) +{ + struct zynqmp_dma_desc_sw *new, *first = NULL; + struct zynqmp_dma_chan *chan = to_chan(dchan); + void *desc = NULL, *prev = NULL; + size_t len, dst_avail, src_avail; + dma_addr_t dma_dst, dma_src; + u32 desc_cnt = 0, i; + struct scatterlist *sg; + + for_each_sg(src_sg, sg, src_sg_len, i) + desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), + ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > chan->desc_free_cnt) { + spin_unlock_bh(&chan->lock); + dev_dbg(chan->dev, "chan %p descs are not available\n", chan); + return NULL; + } + chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; + spin_unlock_bh(&chan->lock); + + dst_avail = 
sg_dma_len(dst_sg); + src_avail = sg_dma_len(src_sg); + + /* Run until we are out of scatterlist entries */ + while (true) { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + len = min_t(size_t, src_avail, dst_avail); + len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + if (len == 0) + goto fetch; + dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - + dst_avail; + dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - + src_avail; + + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, + len, prev); + prev = desc; + dst_avail -= len; + src_avail -= len; + + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); +fetch: + /* Fetch the next dst scatterlist entry */ + if (dst_avail == 0) { + if (dst_sg_len == 0) + break; + dst_sg = sg_next(dst_sg); + if (dst_sg == NULL) + break; + dst_sg_len--; + dst_avail = sg_dma_len(dst_sg); + } + /* Fetch the next src scatterlist entry */ + if (src_avail == 0) { + if (src_sg_len == 0) + break; + src_sg = sg_next(src_sg); + if (src_sg == NULL) + break; + src_sg_len--; + src_avail = sg_dma_len(src_sg); + } + } + + zynqmp_dma_desc_config_eod(chan, desc); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_chan_remove - Channel remove function + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) +{ + if (!chan) + return; + + devm_free_irq(chan->zdev->dev, chan->irq, chan); + tasklet_kill(&chan->tasklet); + list_del(&chan->common.device_node); + clk_disable_unprepare(chan->clk_apb); + clk_disable_unprepare(chan->clk_main); +} + +/** + * zynqmp_dma_chan_probe - Per Channel Probing + * @zdev: Driver specific device structure + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, + struct platform_device *pdev) +{ + struct zynqmp_dma_chan *chan; + struct resource *res; + struct device_node *node = pdev->dev.of_node; + int err; + + chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = zdev->dev; + chan->zdev = zdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chan->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(chan->regs)) + return PTR_ERR(chan->regs); + + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; + chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; + chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); + if (err < 0) { + dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); + return err; + } + + if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 && + chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) { + dev_err(zdev->dev, "invalid bus-width value"); + return -EINVAL; + } + + chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); + zdev->chan = chan; + tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan); + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + INIT_LIST_HEAD(&chan->free_list); + + dma_cookie_init(&chan->common); + chan->common.device = &zdev->common; + list_add_tail(&chan->common.device_node, &zdev->common.channels); + + zynqmp_dma_init(chan); + chan->irq = platform_get_irq(pdev, 0); + if (chan->irq < 0) + return -ENXIO; + err = devm_request_irq(&pdev->dev, 
chan->irq, zynqmp_dma_irq_handler, 0, + "zynqmp-dma", chan); + if (err) + return err; + chan->clk_main = devm_clk_get(&pdev->dev, "clk_main"); + if (IS_ERR(chan->clk_main)) { + dev_err(&pdev->dev, "main clock not found.\n"); + return PTR_ERR(chan->clk_main); + } + + chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); + if (IS_ERR(chan->clk_apb)) { + dev_err(&pdev->dev, "apb clock not found.\n"); + return PTR_ERR(chan->clk_apb); + } + + err = clk_prepare_enable(chan->clk_main); + if (err) { + dev_err(&pdev->dev, "Unable to enable main clock.\n"); + return err; + } + + err = clk_prepare_enable(chan->clk_apb); + if (err) { + clk_disable_unprepare(chan->clk_main); + dev_err(&pdev->dev, "Unable to enable apb clock.\n"); + return err; + } + + chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); + chan->idle = true; + return 0; +} + +/** + * of_zynqmp_dma_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct zynqmp_dma_device *zdev = ofdma->of_dma_data; + + return dma_get_slave_channel(&zdev->chan->common); +} + +/** + * zynqmp_dma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_probe(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev; + struct dma_device *p; + int ret; + + zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; + + zdev->dev = &pdev->dev; + INIT_LIST_HEAD(&zdev->common.channels); + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_cap_set(DMA_SG, zdev->common.cap_mask); + dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); + + p = &zdev->common; + p->device_prep_dma_sg = zynqmp_dma_prep_sg; + p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; + p->device_terminate_all = zynqmp_dma_device_terminate_all; + p->device_issue_pending = zynqmp_dma_issue_pending; + p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; + p->device_free_chan_resources = zynqmp_dma_free_chan_resources; + p->device_tx_status = dma_cookie_status; + p->device_config = zynqmp_dma_device_config; + p->dev = &pdev->dev; + + platform_set_drvdata(pdev, zdev); + + ret = zynqmp_dma_chan_probe(zdev, pdev); + if (ret) { + dev_err(&pdev->dev, "Probing channel failed\n"); + goto free_chan_resources; + } + + p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); + p->src_addr_widths = BIT(zdev->chan->bus_width / 8); + + dma_async_device_register(&zdev->common); + + ret = of_dma_controller_register(pdev->dev.of_node, + of_zynqmp_dma_xlate, zdev); + if (ret) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&zdev->common); + goto free_chan_resources; + } + + dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); + + return 0; + +free_chan_resources: + zynqmp_dma_chan_remove(zdev->chan); + return ret; +} + +/** + * zynqmp_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int zynqmp_dma_remove(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&zdev->common); + + zynqmp_dma_chan_remove(zdev->chan); + + return 0; +} + +static const struct 
of_device_id zynqmp_dma_of_match[] = { + { .compatible = "xlnx,zynqmp-dma-1.0", }, + {} +}; +MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match); + +static struct platform_driver zynqmp_dma_driver = { + .driver = { + .name = "xilinx-zynqmp-dma", + .of_match_table = zynqmp_dma_of_match, + }, + .probe = zynqmp_dma_probe, + .remove = zynqmp_dma_remove, +}; + +module_platform_driver(zynqmp_dma_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver"); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 6664f1108c7c..0e22f241403b 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -10,7 +10,7 @@ config ARM_PSCI_FW config ARM_SCPI_PROTOCOL tristate "ARM System Control and Power Interface (SCPI) Message Protocol" - depends on ARM_MHU + depends on MAILBOX help System Control and Power Interface (SCPI) Message Protocol is defined for the purpose of communication between the Application @@ -27,6 +27,15 @@ config ARM_SCPI_PROTOCOL This protocol library provides interface for all the client drivers making use of the features offered by the SCP. +config ARM_SCPI_POWER_DOMAIN + tristate "SCPI power domain driver" + depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF) + default y + select PM_GENERIC_DOMAINS if PM + help + This enables support for the SCPI power domains which can be + enabled or disabled via the SCP firmware + config EDD tristate "BIOS Enhanced Disk Drive calls determine boot disk" depends on X86 @@ -184,6 +193,7 @@ config FW_CFG_SYSFS_CMDLINE config QCOM_SCM bool depends on ARM || ARM64 + select RESET_CONTROLLER config QCOM_SCM_32 def_bool y diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 474bada56fcd..44a59dcfc398 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_ARM_PSCI_FW) += psci.o obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o +obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o obj-$(CONFIG_DMI) += dmi_scan.o obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_EDD) += edd.o diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 7e3e595c9f30..438893762076 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -210,10 +210,6 @@ struct dvfs_info { } opps[MAX_DVFS_OPPS]; } __packed; -struct dvfs_get { - u8 index; -} __packed; - struct dvfs_set { u8 domain; u8 index; @@ -235,6 +231,11 @@ struct sensor_value { __le32 hi_val; } __packed; +struct dev_pstate_set { + u16 dev_id; + u8 pstate; +} __packed; + static struct scpi_drvinfo *scpi_info; static int scpi_linux_errmap[SCPI_ERR_MAX] = { @@ -431,11 +432,11 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate) static int scpi_dvfs_get_idx(u8 domain) { int ret; - struct dvfs_get dvfs; + u8 dvfs_idx; ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain), - &dvfs, sizeof(dvfs)); - return ret ? ret : dvfs.index; + &dvfs_idx, sizeof(dvfs_idx)); + return ret ? 
ret : dvfs_idx; } static int scpi_dvfs_set_idx(u8 domain, u8 index) @@ -526,7 +527,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info) return ret; } -int scpi_sensor_get_value(u16 sensor, u64 *val) +static int scpi_sensor_get_value(u16 sensor, u64 *val) { __le16 id = cpu_to_le16(sensor); struct sensor_value buf; @@ -541,6 +542,29 @@ int scpi_sensor_get_value(u16 sensor, u64 *val) return ret; } +static int scpi_device_get_power_state(u16 dev_id) +{ + int ret; + u8 pstate; + __le16 id = cpu_to_le16(dev_id); + + ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id, + sizeof(id), &pstate, sizeof(pstate)); + return ret ? ret : pstate; +} + +static int scpi_device_set_power_state(u16 dev_id, u8 pstate) +{ + int stat; + struct dev_pstate_set dev_set = { + .dev_id = cpu_to_le16(dev_id), + .pstate = pstate, + }; + + return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set, + sizeof(dev_set), &stat, sizeof(stat)); +} + static struct scpi_ops scpi_ops = { .get_version = scpi_get_version, .clk_get_range = scpi_clk_get_range, @@ -552,6 +576,8 @@ static struct scpi_ops scpi_ops = { .sensor_get_capability = scpi_sensor_get_capability, .sensor_get_info = scpi_sensor_get_info, .sensor_get_value = scpi_sensor_get_value, + .device_get_power_state = scpi_device_get_power_state, + .device_set_power_state = scpi_device_set_power_state, }; struct scpi_ops *get_scpi_ops(void) diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 0883292f640f..c6aeedbdcbb0 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -23,8 +23,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/qcom_scm.h> - -#include <asm/cacheflush.h> +#include <linux/dma-mapping.h> #include "qcom_scm.h" @@ -97,44 +96,6 @@ struct qcom_scm_response { }; /** - * alloc_qcom_scm_command() - Allocate an SCM command - * @cmd_size: size of the command buffer - * @resp_size: size of the response buffer - * - * Allocate an SCM command, including enough room for the command - * and response headers as well as the command and response buffers. - * - * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails. - */ -static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size) -{ - struct qcom_scm_command *cmd; - size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size + - resp_size; - u32 offset; - - cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); - if (cmd) { - cmd->len = cpu_to_le32(len); - offset = offsetof(struct qcom_scm_command, buf); - cmd->buf_offset = cpu_to_le32(offset); - cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size); - } - return cmd; -} - -/** - * free_qcom_scm_command() - Free an SCM command - * @cmd: command to free - * - * Free an SCM command. 
- */ -static inline void free_qcom_scm_command(struct qcom_scm_command *cmd) -{ - kfree(cmd); -} - -/** * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response * @cmd: command * @@ -168,23 +129,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response return (void *)rsp + le32_to_cpu(rsp->buf_offset); } -static int qcom_scm_remap_error(int err) -{ - pr_err("qcom_scm_call failed with error code %d\n", err); - switch (err) { - case QCOM_SCM_ERROR: - return -EIO; - case QCOM_SCM_EINVAL_ADDR: - case QCOM_SCM_EINVAL_ARG: - return -EINVAL; - case QCOM_SCM_EOPNOTSUPP: - return -EOPNOTSUPP; - case QCOM_SCM_ENOMEM: - return -ENOMEM; - } - return -EINVAL; -} - static u32 smc(u32 cmd_addr) { int context_id; @@ -209,45 +153,9 @@ static u32 smc(u32 cmd_addr) return r0; } -static int __qcom_scm_call(const struct qcom_scm_command *cmd) -{ - int ret; - u32 cmd_addr = virt_to_phys(cmd); - - /* - * Flush the command buffer so that the secure world sees - * the correct data. - */ - secure_flush_area(cmd, cmd->len); - - ret = smc(cmd_addr); - if (ret < 0) - ret = qcom_scm_remap_error(ret); - - return ret; -} - -static void qcom_scm_inv_range(unsigned long start, unsigned long end) -{ - u32 cacheline_size, ctr; - - asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); - cacheline_size = 4 << ((ctr >> 16) & 0xf); - - start = round_down(start, cacheline_size); - end = round_up(end, cacheline_size); - outer_inv_range(start, end); - while (start < end) { - asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) - : "memory"); - start += cacheline_size; - } - dsb(); - isb(); -} - /** * qcom_scm_call() - Send an SCM command + * @dev: struct device * @svc_id: service identifier * @cmd_id: command identifier * @cmd_buf: command buffer @@ -264,42 +172,59 @@ static void qcom_scm_inv_range(unsigned long start, unsigned long end) * and response buffers is taken care of by qcom_scm_call; however, callers are * responsible for any other cached buffers passed over to the secure world. 
*/ -static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, - size_t cmd_len, void *resp_buf, size_t resp_len) +static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, + const void *cmd_buf, size_t cmd_len, void *resp_buf, + size_t resp_len) { int ret; struct qcom_scm_command *cmd; struct qcom_scm_response *rsp; - unsigned long start, end; + size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; + dma_addr_t cmd_phys; - cmd = alloc_qcom_scm_command(cmd_len, resp_len); + cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); if (!cmd) return -ENOMEM; + cmd->len = cpu_to_le32(alloc_len); + cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); + cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); + cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); if (cmd_buf) memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); + rsp = qcom_scm_command_to_response(cmd); + + cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, cmd_phys)) { + kfree(cmd); + return -ENOMEM; + } + mutex_lock(&qcom_scm_lock); - ret = __qcom_scm_call(cmd); + ret = smc(cmd_phys); + if (ret < 0) + ret = qcom_scm_remap_error(ret); mutex_unlock(&qcom_scm_lock); if (ret) goto out; - rsp = qcom_scm_command_to_response(cmd); - start = (unsigned long)rsp; do { - qcom_scm_inv_range(start, start + sizeof(*rsp)); + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, + sizeof(*rsp), DMA_FROM_DEVICE); } while (!rsp->is_complete); - end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len; - qcom_scm_inv_range(start, end); - - if (resp_buf) - memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len); + if (resp_buf) { + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + + le32_to_cpu(rsp->buf_offset), + resp_len, DMA_FROM_DEVICE); + memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), + resp_len); + } out: - free_qcom_scm_command(cmd); + dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); + kfree(cmd); return ret; } @@ -342,6 +267,41 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) return r0; } +/** + * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments + * @svc: service identifier + * @cmd: command identifier + * @arg1: first argument + * @arg2: second argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptible, atomic and SMP safe. + */ +static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) +{ + int context_id; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2); + register u32 r1 asm("r1") = (u32)&context_id; + register u32 r2 asm("r2") = arg1; + register u32 r3 asm("r3") = arg2; + + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r0") + __asmeq("%2", "r1") + __asmeq("%3", "r2") + __asmeq("%4", "r3") +#ifdef REQUIRES_SEC + ".arch_extension sec\n" +#endif + "smc #0 @ switch to secure world\n" + : "=r" (r0) + : "r" (r0), "r" (r1), "r" (r2), "r" (r3) + ); + return r0; +} + u32 qcom_scm_get_version(void) { int context_id; @@ -378,22 +338,6 @@ u32 qcom_scm_get_version(void) } EXPORT_SYMBOL(qcom_scm_get_version); -/* - * Set the cold/warm boot address for one of the CPU cores. 
- */ -static int qcom_scm_set_boot_addr(u32 addr, int flags) -{ - struct { - __le32 flags; - __le32 addr; - } cmd; - - cmd.addr = cpu_to_le32(addr); - cmd.flags = cpu_to_le32(flags); - return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, - &cmd, sizeof(cmd), NULL, 0); -} - /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus @@ -423,7 +367,8 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) set_cpu_present(cpu, false); } - return qcom_scm_set_boot_addr(virt_to_phys(entry), flags); + return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, + flags, virt_to_phys(entry)); } /** @@ -434,11 +379,16 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) * Set the Linux entry point for the SCM to transfer control to when coming * out of a power down. CPU power down may be executed on cpuidle or hotplug. */ -int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, + const cpumask_t *cpus) { int ret; int flags = 0; int cpu; + struct { + __le32 flags; + __le32 addr; + } cmd; /* * Reassign only if we are switching from hotplug entry point @@ -454,7 +404,10 @@ int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) if (!flags) return 0; - ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags); + cmd.addr = cpu_to_le32(virt_to_phys(entry)); + cmd.flags = cpu_to_le32(flags); + ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, + &cmd, sizeof(cmd), NULL, 0); if (!ret) { for_each_cpu(cpu, cpus) qcom_scm_wb[cpu].entry = entry; @@ -477,25 +430,133 @@ void __qcom_scm_cpu_power_down(u32 flags) flags & QCOM_SCM_FLUSH_FLAG_MASK); } -int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) { int ret; __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id); __le32 ret_val = 0; - ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, - sizeof(svc_cmd), &ret_val, sizeof(ret_val)); + ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, + &svc_cmd, sizeof(svc_cmd), &ret_val, + sizeof(ret_val)); if (ret) return ret; return le32_to_cpu(ret_val); } -int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, + u32 req_cnt, u32 *resp) { if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) return -ERANGE; - return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, + return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, req, req_cnt * sizeof(*req), resp, sizeof(*resp)); } + +void __qcom_scm_init(void) +{ +} + +bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) +{ + __le32 out; + __le32 in; + int ret; + + in = cpu_to_le32(peripheral); + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_IS_SUPPORTED_CMD, + &in, sizeof(in), + &out, sizeof(out)); + + return ret ? false : !!out; +} + +int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, + dma_addr_t metadata_phys) +{ + __le32 scm_ret; + int ret; + struct { + __le32 proc; + __le32 image_addr; + } request; + + request.proc = cpu_to_le32(peripheral); + request.image_addr = cpu_to_le32(metadata_phys); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_INIT_IMAGE_CMD, + &request, sizeof(request), + &scm_ret, sizeof(scm_ret)); + + return ret ? 
: le32_to_cpu(scm_ret); +} + +int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, + phys_addr_t addr, phys_addr_t size) +{ + __le32 scm_ret; + int ret; + struct { + __le32 proc; + __le32 addr; + __le32 len; + } request; + + request.proc = cpu_to_le32(peripheral); + request.addr = cpu_to_le32(addr); + request.len = cpu_to_le32(size); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_MEM_SETUP_CMD, + &request, sizeof(request), + &scm_ret, sizeof(scm_ret)); + + return ret ? : le32_to_cpu(scm_ret); +} + +int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) +{ + __le32 out; + __le32 in; + int ret; + + in = cpu_to_le32(peripheral); + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_AUTH_AND_RESET_CMD, + &in, sizeof(in), + &out, sizeof(out)); + + return ret ? : le32_to_cpu(out); +} + +int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) +{ + __le32 out; + __le32 in; + int ret; + + in = cpu_to_le32(peripheral); + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_SHUTDOWN_CMD, + &in, sizeof(in), + &out, sizeof(out)); + + return ret ? : le32_to_cpu(out); +} + +int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) +{ + __le32 out; + __le32 in = cpu_to_le32(reset); + int ret; + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, + &in, sizeof(in), + &out, sizeof(out)); + + return ret ? : le32_to_cpu(out); +} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index bb6555f6d63b..4a0f5ead4fb5 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -12,7 +12,150 @@ #include <linux/io.h> #include <linux/errno.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/types.h> #include <linux/qcom_scm.h> +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> + +#include "qcom_scm.h" + +#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) + +#define MAX_QCOM_SCM_ARGS 10 +#define MAX_QCOM_SCM_RETS 3 + +enum qcom_scm_arg_types { + QCOM_SCM_VAL, + QCOM_SCM_RO, + QCOM_SCM_RW, + QCOM_SCM_BUFVAL, +}; + +#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\ + (((a) & 0x3) << 4) | \ + (((b) & 0x3) << 6) | \ + (((c) & 0x3) << 8) | \ + (((d) & 0x3) << 10) | \ + (((e) & 0x3) << 12) | \ + (((f) & 0x3) << 14) | \ + (((g) & 0x3) << 16) | \ + (((h) & 0x3) << 18) | \ + (((i) & 0x3) << 20) | \ + (((j) & 0x3) << 22) | \ + ((num) & 0xf)) + +#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + +/** + * struct qcom_scm_desc - Argument descriptor for an SCM call + * @arginfo: Metadata describing the arguments in args[] + * @args: The array of arguments for the secure syscall + */ +struct qcom_scm_desc { + u32 arginfo; + u64 args[MAX_QCOM_SCM_ARGS]; +}; + +static u64 qcom_smccc_convention = -1; +static DEFINE_MUTEX(qcom_scm_lock); + +#define QCOM_SCM_EBUSY_WAIT_MS 30 +#define QCOM_SCM_EBUSY_MAX_RETRY 20 + +#define N_EXT_QCOM_SCM_ARGS 7 +#define FIRST_EXT_ARG_IDX 3 +#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) + +/** + * qcom_scm_call() - Invoke a syscall in the secure world + * @dev: device + * @svc_id: service identifier + * @cmd_id: command identifier + * @desc: Descriptor structure containing arguments and return values + * + * Sends a command to the SCM and waits for the command to finish processing. + * This should *only* be called in pre-emptible context. 
+ */ +static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, + const struct qcom_scm_desc *desc, + struct arm_smccc_res *res) +{ + int arglen = desc->arginfo & 0xf; + int retry_count = 0, i; + u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id); + u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX]; + dma_addr_t args_phys = 0; + void *args_virt = NULL; + size_t alloc_len; + + if (unlikely(arglen > N_REGISTER_ARGS)) { + alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); + args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); + + if (!args_virt) + return -ENOMEM; + + if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { + __le32 *args = args_virt; + + for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) + args[i] = cpu_to_le32(desc->args[i + + FIRST_EXT_ARG_IDX]); + } else { + __le64 *args = args_virt; + + for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) + args[i] = cpu_to_le64(desc->args[i + + FIRST_EXT_ARG_IDX]); + } + + args_phys = dma_map_single(dev, args_virt, alloc_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, args_phys)) { + kfree(args_virt); + return -ENOMEM; + } + + x5 = args_phys; + } + + do { + mutex_lock(&qcom_scm_lock); + + cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, + qcom_smccc_convention, + ARM_SMCCC_OWNER_SIP, fn_id); + + do { + arm_smccc_smc(cmd, desc->arginfo, desc->args[0], + desc->args[1], desc->args[2], x5, 0, 0, + res); + } while (res->a0 == QCOM_SCM_INTERRUPTED); + + mutex_unlock(&qcom_scm_lock); + + if (res->a0 == QCOM_SCM_V2_EBUSY) { + if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) + break; + msleep(QCOM_SCM_EBUSY_WAIT_MS); + } + } while (res->a0 == QCOM_SCM_V2_EBUSY); + + if (args_virt) { + dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); + kfree(args_virt); + } + + if (res->a0 < 0) + return qcom_scm_remap_error(res->a0); + + return 0; +} /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus @@ -29,13 +172,15 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) /** * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @dev: Device pointer * @entry: Entry point function for the cpus * @cpus: The cpumask of cpus that will use the entry point * * Set the Linux entry point for the SCM to transfer control to when coming * out of a power down. CPU power down may be executed on cpuidle or hotplug. */ -int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, + const cpumask_t *cpus) { return -ENOTSUPP; } @@ -52,12 +197,164 @@ void __qcom_scm_cpu_power_down(u32 flags) { } -int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) { - return -ENOTSUPP; + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.arginfo = QCOM_SCM_ARGS(1); + desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) | + (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, + &desc, &res); + + return ret ? 
: res.a1; } -int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, + u32 req_cnt, u32 *resp) { - return -ENOTSUPP; + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) + return -ERANGE; + + desc.args[0] = req[0].addr; + desc.args[1] = req[0].val; + desc.args[2] = req[1].addr; + desc.args[3] = req[1].val; + desc.args[4] = req[2].addr; + desc.args[5] = req[2].val; + desc.args[6] = req[3].addr; + desc.args[7] = req[3].val; + desc.args[8] = req[4].addr; + desc.args[9] = req[4].val; + desc.arginfo = QCOM_SCM_ARGS(10); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc, + &res); + *resp = res.a1; + + return ret; +} + +void __qcom_scm_init(void) +{ + u64 cmd; + struct arm_smccc_res res; + u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD); + + /* First try a SMC64 call */ + cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, + ARM_SMCCC_OWNER_SIP, function); + + arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)), + 0, 0, 0, 0, 0, &res); + + if (!res.a0 && res.a1) + qcom_smccc_convention = ARM_SMCCC_SMC_64; + else + qcom_smccc_convention = ARM_SMCCC_SMC_32; +} + +bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = peripheral; + desc.arginfo = QCOM_SCM_ARGS(1); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_IS_SUPPORTED_CMD, + &desc, &res); + + return ret ? false : !!res.a1; +} + +int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, + dma_addr_t metadata_phys) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = peripheral; + desc.args[1] = metadata_phys; + desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD, + &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, + phys_addr_t addr, phys_addr_t size) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = peripheral; + desc.args[1] = addr; + desc.args[2] = size; + desc.arginfo = QCOM_SCM_ARGS(3); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD, + &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = peripheral; + desc.arginfo = QCOM_SCM_ARGS(1); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_AUTH_AND_RESET_CMD, + &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = peripheral; + desc.arginfo = QCOM_SCM_ARGS(1); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD, + &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = reset; + desc.args[1] = 0; + desc.arginfo = QCOM_SCM_ARGS(2); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc, + &res); + + return ret ? 
: res.a1; } diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 45c008d68891..e64a501adbf4 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -10,19 +10,64 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. */ - +#include <linux/platform_device.h> +#include <linux/module.h> #include <linux/cpumask.h> #include <linux/export.h> +#include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/qcom_scm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/clk.h> +#include <linux/reset-controller.h> #include "qcom_scm.h" +struct qcom_scm { + struct device *dev; + struct clk *core_clk; + struct clk *iface_clk; + struct clk *bus_clk; + struct reset_controller_dev reset; +}; + +static struct qcom_scm *__scm; + +static int qcom_scm_clk_enable(void) +{ + int ret; + + ret = clk_prepare_enable(__scm->core_clk); + if (ret) + goto bail; + + ret = clk_prepare_enable(__scm->iface_clk); + if (ret) + goto disable_core; + + ret = clk_prepare_enable(__scm->bus_clk); + if (ret) + goto disable_iface; + + return 0; + +disable_iface: + clk_disable_unprepare(__scm->iface_clk); +disable_core: + clk_disable_unprepare(__scm->core_clk); +bail: + return ret; +} + +static void qcom_scm_clk_disable(void) +{ + clk_disable_unprepare(__scm->core_clk); + clk_disable_unprepare(__scm->iface_clk); + clk_disable_unprepare(__scm->bus_clk); +} + /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus @@ -47,7 +92,7 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); */ int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) { - return __qcom_scm_set_warm_boot_addr(entry, cpus); + return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus); } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); @@ -72,12 +117,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down); */ bool qcom_scm_hdcp_available(void) { - int ret; + int ret = qcom_scm_clk_enable(); - ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP, - QCOM_SCM_CMD_HDCP); + if (ret) + return false; - return (ret > 0) ? true : false; + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, + QCOM_SCM_CMD_HDCP); + + qcom_scm_clk_disable(); + + return ret > 0 ? true : false; } EXPORT_SYMBOL(qcom_scm_hdcp_available); @@ -91,6 +141,287 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available); */ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { - return __qcom_scm_hdcp_req(req, req_cnt, resp); + int ret = qcom_scm_clk_enable(); + + if (ret) + return ret; + + ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp); + qcom_scm_clk_disable(); + return ret; } EXPORT_SYMBOL(qcom_scm_hdcp_req); + +/** + * qcom_scm_pas_supported() - Check if the peripheral authentication service is + * available for the given peripheral + * @peripheral: peripheral id + * + * Returns true if PAS is supported for this peripheral, otherwise false. 
+ */ +bool qcom_scm_pas_supported(u32 peripheral) +{ + int ret; + + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PAS_IS_SUPPORTED_CMD); + if (ret <= 0) + return false; + + return __qcom_scm_pas_supported(__scm->dev, peripheral); +} +EXPORT_SYMBOL(qcom_scm_pas_supported); + +/** + * qcom_scm_pas_init_image() - Initialize peripheral authentication service + * state machine for a given peripheral, using the + * metadata + * @peripheral: peripheral id + * @metadata: pointer to memory containing ELF header, program header table + * and optional blob of data used for authenticating the metadata + * and the rest of the firmware + * @size: size of the metadata + * + * Returns 0 on success. + */ +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) +{ + dma_addr_t mdata_phys; + void *mdata_buf; + int ret; + + /* + * During the scm call memory protection will be enabled for the + * metadata blob, so make sure it's physically contiguous, 4K aligned + * and non-cacheable to avoid XPU violations. + */ + mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, + GFP_KERNEL); + if (!mdata_buf) { + dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); + return -ENOMEM; + } + memcpy(mdata_buf, metadata, size); + + ret = qcom_scm_clk_enable(); + if (ret) + goto free_metadata; + + ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys); + + qcom_scm_clk_disable(); + +free_metadata: + dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); + + return ret; +} +EXPORT_SYMBOL(qcom_scm_pas_init_image); + +/** + * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral + * for firmware loading + * @peripheral: peripheral id + * @addr: start address of memory area to prepare + * @size: size of the memory area to prepare + * + * Returns 0 on success. + */ +int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) +{ + int ret; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size); + qcom_scm_clk_disable(); + + return ret; +} +EXPORT_SYMBOL(qcom_scm_pas_mem_setup); + +/** + * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware + * and reset the remote processor + * @peripheral: peripheral id + * + * Returns 0 on success. + */ +int qcom_scm_pas_auth_and_reset(u32 peripheral) +{ + int ret; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral); + qcom_scm_clk_disable(); + + return ret; +} +EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); + +/** + * qcom_scm_pas_shutdown() - Shut down the remote processor + * @peripheral: peripheral id + * + * Returns 0 on success. 
+ */ +int qcom_scm_pas_shutdown(u32 peripheral) +{ + int ret; + + ret = qcom_scm_clk_enable(); + if (ret) + return ret; + + ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral); + qcom_scm_clk_disable(); + + return ret; +} +EXPORT_SYMBOL(qcom_scm_pas_shutdown); + +static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + if (idx != 0) + return -EINVAL; + + return __qcom_scm_pas_mss_reset(__scm->dev, 1); +} + +static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + if (idx != 0) + return -EINVAL; + + return __qcom_scm_pas_mss_reset(__scm->dev, 0); +} + +static const struct reset_control_ops qcom_scm_pas_reset_ops = { + .assert = qcom_scm_pas_reset_assert, + .deassert = qcom_scm_pas_reset_deassert, +}; + +/** + * qcom_scm_is_available() - Checks if SCM is available + */ +bool qcom_scm_is_available(void) +{ + return !!__scm; +} +EXPORT_SYMBOL(qcom_scm_is_available); + +static int qcom_scm_probe(struct platform_device *pdev) +{ + struct qcom_scm *scm; + int ret; + + scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); + if (!scm) + return -ENOMEM; + + scm->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(scm->core_clk)) { + if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) + return PTR_ERR(scm->core_clk); + + scm->core_clk = NULL; + } + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { + scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(scm->iface_clk)) { + if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to acquire iface clk\n"); + return PTR_ERR(scm->iface_clk); + } + + scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(scm->bus_clk)) { + if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to acquire bus clk\n"); + return PTR_ERR(scm->bus_clk); + } + } + + scm->reset.ops = &qcom_scm_pas_reset_ops; + scm->reset.nr_resets = 1; + scm->reset.of_node = pdev->dev.of_node; + reset_controller_register(&scm->reset); + + /* vote for max clk rate for highest performance */ + ret = clk_set_rate(scm->core_clk, INT_MAX); + if (ret) + return ret; + + __scm = scm; + __scm->dev = &pdev->dev; + + __qcom_scm_init(); + + return 0; +} + +static const struct of_device_id qcom_scm_dt_match[] = { + { .compatible = "qcom,scm-apq8064",}, + { .compatible = "qcom,scm-msm8660",}, + { .compatible = "qcom,scm-msm8960",}, + { .compatible = "qcom,scm",}, + {} +}; + +MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); + +static struct platform_driver qcom_scm_driver = { + .driver = { + .name = "qcom_scm", + .of_match_table = qcom_scm_dt_match, + }, + .probe = qcom_scm_probe, +}; + +static int __init qcom_scm_init(void) +{ + struct device_node *np, *fw_np; + int ret; + + fw_np = of_find_node_by_name(NULL, "firmware"); + + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, qcom_scm_dt_match); + + if (!np) { + of_node_put(fw_np); + return -ENODEV; + } + + of_node_put(np); + + ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL); + + of_node_put(fw_np); + + if (ret) + return ret; + + return platform_driver_register(&qcom_scm_driver); +} + +subsys_initcall(qcom_scm_init); + +static void __exit qcom_scm_exit(void) +{ + platform_driver_unregister(&qcom_scm_driver); +} +module_exit(qcom_scm_exit); + +MODULE_DESCRIPTION("Qualcomm SCM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 2cce75c08b99..3584b00fe7e6 100644 --- a/drivers/firmware/qcom_scm.h +++ 
b/drivers/firmware/qcom_scm.h @@ -19,7 +19,8 @@ #define QCOM_SCM_FLAG_HLOS 0x01 #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 #define QCOM_SCM_FLAG_WARMBOOT_MC 0x04 -extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); +extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, + const cpumask_t *cpus); extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); #define QCOM_SCM_CMD_TERMINATE_PC 0x2 @@ -29,14 +30,34 @@ extern void __qcom_scm_cpu_power_down(u32 flags); #define QCOM_SCM_SVC_INFO 0x6 #define QCOM_IS_CALL_AVAIL_CMD 0x1 -extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id); +extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, + u32 cmd_id); #define QCOM_SCM_SVC_HDCP 0x11 #define QCOM_SCM_CMD_HDCP 0x01 -extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); +extern int __qcom_scm_hdcp_req(struct device *dev, + struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); + +extern void __qcom_scm_init(void); + +#define QCOM_SCM_SVC_PIL 0x2 +#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1 +#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2 +#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5 +#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6 +#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7 +#define QCOM_SCM_PAS_MSS_RESET 0xa +extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral); +extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, + dma_addr_t metadata_phys); +extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, + phys_addr_t addr, phys_addr_t size); +extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral); +extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral); +extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset); /* common error codes */ +#define QCOM_SCM_V2_EBUSY -12 #define QCOM_SCM_ENOMEM -5 #define QCOM_SCM_EOPNOTSUPP -4 #define QCOM_SCM_EINVAL_ADDR -3 @@ -44,4 +65,22 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, #define QCOM_SCM_ERROR -1 #define QCOM_SCM_INTERRUPTED 1 +static inline int qcom_scm_remap_error(int err) +{ + switch (err) { + case QCOM_SCM_ERROR: + return -EIO; + case QCOM_SCM_EINVAL_ADDR: + case QCOM_SCM_EINVAL_ARG: + return -EINVAL; + case QCOM_SCM_EOPNOTSUPP: + return -EOPNOTSUPP; + case QCOM_SCM_ENOMEM: + return -ENOMEM; + case QCOM_SCM_V2_EBUSY: + return -EBUSY; + } + return -EINVAL; +} + #endif diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c new file mode 100644 index 000000000000..f395dec27113 --- /dev/null +++ b/drivers/firmware/scpi_pm_domain.c @@ -0,0 +1,163 @@ +/* + * SCPI Generic power domain support. + * + * Copyright (C) 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/pm_domain.h> +#include <linux/scpi_protocol.h> + +struct scpi_pm_domain { + struct generic_pm_domain genpd; + struct scpi_ops *ops; + u32 domain; + char name[30]; +}; + +/* + * These device power state values are not well-defined in the specification. + * In case different implementations use different values, we can make these + * specific to compatibles rather than getting these values from the device + * tree. + */ +enum scpi_power_domain_state { + SCPI_PD_STATE_ON = 0, + SCPI_PD_STATE_OFF = 3, +}; + +#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd) + +static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on) +{ + int ret; + enum scpi_power_domain_state state; + + if (power_on) + state = SCPI_PD_STATE_ON; + else + state = SCPI_PD_STATE_OFF; + + ret = pd->ops->device_set_power_state(pd->domain, state); + if (ret) + return ret; + + return !(state == pd->ops->device_get_power_state(pd->domain)); +} + +static int scpi_pd_power_on(struct generic_pm_domain *domain) +{ + struct scpi_pm_domain *pd = to_scpi_pd(domain); + + return scpi_pd_power(pd, true); +} + +static int scpi_pd_power_off(struct generic_pm_domain *domain) +{ + struct scpi_pm_domain *pd = to_scpi_pd(domain); + + return scpi_pd_power(pd, false); +} + +static int scpi_pm_domain_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct scpi_pm_domain *scpi_pd; + struct genpd_onecell_data *scpi_pd_data; + struct generic_pm_domain **domains; + struct scpi_ops *scpi_ops; + int ret, num_domains, i; + + scpi_ops = get_scpi_ops(); + if (!scpi_ops) + return -EPROBE_DEFER; + + if (!np) { + dev_err(dev, "device tree node not found\n"); + return -ENODEV; + } + + if (!scpi_ops->device_set_power_state || + !scpi_ops->device_get_power_state) { + dev_err(dev, "power domains not supported in the firmware\n"); + return -ENODEV; + } + + ret = of_property_read_u32(np, "num-domains", &num_domains); + if (ret) { + dev_err(dev, "number of domains not found\n"); + return -EINVAL; + } + + scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL); + if (!scpi_pd) + return -ENOMEM; + + scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL); + if (!scpi_pd_data) + return -ENOMEM; + + domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL); + if (!domains) + return -ENOMEM; + + for (i = 0; i < num_domains; i++, scpi_pd++) { + domains[i] = &scpi_pd->genpd; + + scpi_pd->domain = i; + scpi_pd->ops = scpi_ops; + snprintf(scpi_pd->name, sizeof(scpi_pd->name), "%s.%d", + np->name, i); + scpi_pd->genpd.name = scpi_pd->name; + scpi_pd->genpd.power_off = scpi_pd_power_off; + scpi_pd->genpd.power_on = scpi_pd_power_on; + + /* + * Treat all power domains as off at boot. + * + * The SCP firmware itself may have switched on some domains, + * but for reference-counting purposes, keep it this way. 
+ */ + pm_genpd_init(&scpi_pd->genpd, NULL, true); + } + + scpi_pd_data->domains = domains; + scpi_pd_data->num_domains = num_domains; + + of_genpd_add_provider_onecell(np, scpi_pd_data); + + return 0; +} + +static const struct of_device_id scpi_power_domain_ids[] = { + { .compatible = "arm,scpi-power-domains", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, scpi_power_domain_ids); + +static struct platform_driver scpi_power_domain_driver = { + .driver = { + .name = "scpi_power_domain", + .of_match_table = scpi_power_domain_ids, + }, + .probe = scpi_pm_domain_probe, +}; +module_platform_driver(scpi_power_domain_driver); + +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM SCPI power domain driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index be43afb08c69..e3dba6f44a79 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ drm_scatter.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ - drm_crtc.o drm_modes.o drm_edid.o \ + drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ @@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ - drm_kms_helper_common.o drm_dp_dual_mode_helper.o + drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ + drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e055d5be1c3c..8ebc5f1eb4c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -85,8 +85,12 @@ extern int amdgpu_vm_debug; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_powerplay; +extern int amdgpu_powercontainment; extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; +extern unsigned amdgpu_cg_mask; +extern unsigned amdgpu_pg_mask; +extern char *amdgpu_disable_cu; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int amdgpu_set_powergating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_powergating_state state); +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); struct amdgpu_ip_block_version { enum amd_ip_block_type type; @@ -298,13 +306,16 @@ struct amdgpu_ring_funcs { uint32_t oa_base, uint32_t oa_size); /* testing functions */ int (*test_ring)(struct amdgpu_ring *ring); - int (*test_ib)(struct amdgpu_ring *ring); + int (*test_ib)(struct amdgpu_ring *ring, long timeout); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); /* pad the indirect buffer to the necessary number of dw */ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); unsigned (*init_cond_exec)(struct amdgpu_ring *ring); void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); + /* note usage for clock and power 
gating */ + void (*begin_use)(struct amdgpu_ring *ring); + void (*end_use)(struct amdgpu_ring *ring); }; /* @@ -594,11 +605,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -bool amdgpu_sync_is_idle(struct amdgpu_sync *sync); -int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, - struct fence *fence); +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); -int amdgpu_sync_wait(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); void amdgpu_sync_fini(void); @@ -754,12 +763,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, struct amdgpu_job **job); +void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); -void amdgpu_job_free_func(struct kref *refcount); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f); -void amdgpu_job_timeout_func(struct work_struct *work); struct amdgpu_ring { struct amdgpu_device *adev; @@ -767,12 +775,9 @@ struct amdgpu_ring { struct amdgpu_fence_driver fence_drv; struct amd_gpu_scheduler sched; - spinlock_t fence_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; - u64 next_rptr_gpu_addr; - volatile u32 *next_rptr_cpu_addr; unsigned wptr; unsigned wptr_old; unsigned ring_size; @@ -791,14 +796,16 @@ struct amdgpu_ring { u32 doorbell_index; bool use_doorbell; unsigned wptr_offs; - unsigned next_rptr_offs; unsigned fence_offs; uint64_t current_ctx; enum amdgpu_ring_type type; char name[16]; unsigned cond_exe_offs; - u64 cond_exe_gpu_addr; - volatile u32 *cond_exe_cpu_addr; + u64 cond_exe_gpu_addr; + volatile u32 *cond_exe_cpu_addr; +#if defined(CONFIG_DEBUG_FS) + struct dentry *ent; +#endif }; /* @@ -861,6 +868,7 @@ struct amdgpu_vm { struct amdgpu_bo *page_directory; unsigned max_pde_used; struct fence *page_directory_fence; + uint64_t last_eviction_counter; /* array of page tables, one for each page directory entry */ struct amdgpu_vm_pt *page_tables; @@ -883,13 +891,14 @@ struct amdgpu_vm_id { struct fence *first; struct amdgpu_sync active; struct fence *last_flush; - struct amdgpu_ring *last_user; atomic64_t owner; uint64_t pd_gpu_addr; /* last flushed PD/PT update */ struct fence *flushed_updates; + uint32_t current_gpu_reset_count; + uint32_t gds_base; uint32_t gds_size; uint32_t gws_base; @@ -905,6 +914,10 @@ struct amdgpu_vm_manager { struct list_head ids_lru; struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; + /* Handling of VM fences */ + u64 fence_context; + unsigned seqno[AMDGPU_MAX_RINGS]; + uint32_t max_pfn; /* vram base address for page table entry */ u64 vram_base_offset; @@ -926,17 +939,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - 
unsigned *vm_id, uint64_t *vm_pd_addr); -int amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size); + struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, @@ -1142,6 +1152,12 @@ struct amdgpu_cu_info { uint32_t bitmap[4][4]; }; +struct amdgpu_gfx_funcs { + /* get the gpu clock counter */ + uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); + void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); +}; + struct amdgpu_gfx { struct mutex gpu_clock_mutex; struct amdgpu_gca_config config; @@ -1178,6 +1194,7 @@ struct amdgpu_gfx { /* ce ram size*/ unsigned ce_ram_size; struct amdgpu_cu_info cu_info; + const struct amdgpu_gfx_funcs *funcs; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -1195,10 +1212,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data); -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, u32 nop, u32 align_mask, struct amdgpu_irq_src *irq_src, unsigned irq_type, @@ -1250,6 +1263,7 @@ struct amdgpu_job { uint32_t num_ibs; void *owner; uint64_t ctx; + bool vm_needs_flush; unsigned vm_id; uint64_t vm_pd_addr; uint32_t gds_base, gds_size; @@ -1257,8 +1271,7 @@ struct amdgpu_job { uint32_t oa_base, oa_size; /* user fence handling */ - struct amdgpu_bo *uf_bo; - uint32_t uf_offset; + uint64_t uf_addr; uint64_t uf_sequence; }; @@ -1560,6 +1573,12 @@ struct amdgpu_dpm_funcs { u32 (*get_fan_control_mode)(struct amdgpu_device *adev); int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); + int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); + int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(struct amdgpu_device *adev); + int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); + int (*get_mclk_od)(struct amdgpu_device *adev); + int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); }; struct amdgpu_dpm { @@ -1662,6 +1681,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + bool use_ctx_buf; struct amd_sched_entity entity; }; @@ -1683,6 +1703,7 @@ struct amdgpu_vce { struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; struct delayed_work idle_work; + struct mutex idle_mutex; const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; @@ -1767,6 +1788,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor); void amdgpu_debugfs_cleanup(struct drm_minor *minor); #endif +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); + /* * amdgpu smumgr functions */ @@ -1811,12 +1834,8 @@ struct 
amdgpu_asic_funcs { u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); int (*reset)(struct amdgpu_device *adev); - /* wait for mc_idle */ - int (*wait_for_mc_idle)(struct amdgpu_device *adev); /* get the reference clock */ u32 (*get_xclk)(struct amdgpu_device *adev); - /* get the gpu clock counter */ - uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); @@ -2003,6 +2022,10 @@ struct amdgpu_device { spinlock_t didt_idx_lock; amdgpu_rreg_t didt_rreg; amdgpu_wreg_t didt_wreg; + /* protects concurrent gc_cac register access */ + spinlock_t gc_cac_idx_lock; + amdgpu_rreg_t gc_cac_rreg; + amdgpu_wreg_t gc_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -2028,6 +2051,7 @@ struct amdgpu_device { atomic64_t vram_vis_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic64_t num_evictions; atomic_t gpu_reset_counter; /* display */ @@ -2038,7 +2062,7 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - unsigned fence_context; + u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2131,6 +2155,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) +#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) +#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ @@ -2206,12 +2232,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) */ #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) -#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) -#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) @@ -2222,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) -#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) 
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) @@ -2264,6 +2288,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) +#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_dpm_get_temperature(adev) \ ((adev)->pp_enabled ? \ @@ -2342,6 +2368,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_force_clock_level(adev, type, level) \ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) +#define amdgpu_dpm_get_sclk_od(adev) \ + (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) + +#define amdgpu_dpm_set_sclk_od(adev, value) \ + (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) + +#define amdgpu_dpm_get_mclk_od(adev) \ + ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_set_mclk_od(adev, value) \ + ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2383,9 +2421,13 @@ bool amdgpu_device_is_px(struct drm_device *dev); #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); +bool amdgpu_has_atpx_dgpu_power_cntl(void); +bool amdgpu_is_atpx_hybrid(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} +static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool amdgpu_is_atpx_hybrid(void) { return false; } #endif /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 32809f749903..d080d0807a5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) { struct amdgpu_device *rdev = (struct amdgpu_device *)kgd; - if (rdev->asic_funcs->get_gpu_clock_counter) - return rdev->asic_funcs->get_gpu_clock_counter(rdev); + if (rdev->gfx.funcs->get_gpu_clock_counter) + return rdev->gfx.funcs->get_gpu_clock_counter(rdev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9df1bcb35bf0..983175363b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) le16_to_cpu(firmware_info->info.usReferenceClock); ppll->reference_div = 0; - if (crev < 2) - ppll->pll_out_min = - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); - else - ppll->pll_out_min = - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); + ppll->pll_out_min = + 
le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); ppll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { - ppll->lcd_pll_out_min = - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_min == 0) - ppll->lcd_pll_out_min = ppll->pll_out_min; - ppll->lcd_pll_out_max = - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_max == 0) - ppll->lcd_pll_out_max = ppll->pll_out_max; - } else { + ppll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_min == 0) ppll->lcd_pll_out_min = ppll->pll_out_min; + ppll->lcd_pll_out_max = + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_max == 0) ppll->lcd_pll_out_max = ppll->pll_out_max; - } if (ppll->pll_out_min == 0) ppll->pll_out_min = 64800; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 35a1248aaa77..49de92600074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "amd_acpi.h" @@ -27,6 +28,7 @@ struct amdgpu_atpx_functions { struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; + bool is_hybrid; }; static struct amdgpu_atpx_priv { @@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) { return amdgpu_atpx_priv.atpx_detected; } +bool amdgpu_has_atpx_dgpu_power_cntl(void) { + return amdgpu_atpx_priv.atpx.functions.power_cntl; +} + +bool amdgpu_is_atpx_hybrid(void) { + return amdgpu_atpx_priv.atpx.is_hybrid; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas */ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - if (atpx->functions.power_cntl == false) { - printk("ATPX dGPU power cntl not present, forcing\n"); - atpx->functions.power_cntl = true; - } + u32 valid_bits = 0; if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; size_t size; - u32 valid_bits; info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) @@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; kfree(info); } + + /* if separate mux flag is set, mux controls are required */ + if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { + atpx->functions.i2c_mux_cntl = true; + atpx->functions.disp_mux_cntl = true; + } + /* if any outputs are muxed, mux controls are required */ + if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | + ATPX_TV_SIGNAL_MUXED | + ATPX_DFP_SIGNAL_MUXED)) + atpx->functions.disp_mux_cntl = true; + + + /* some bioses set these bits rather than flagging power_cntl as supported */ + if 
(valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | + ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) + atpx->functions.power_cntl = true; + + atpx->is_hybrid = false; + if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { + printk("ATPX Hybrid Graphics\n"); +#if 1 + /* This is a temporary hack until the D3 cold support + * makes it upstream. The ATPX power_control method seems + * to still work on even if the system should be using + * the new standardized hybrid D3 cold ACPI interface. + */ + atpx->functions.power_cntl = true; +#else + atpx->functions.power_cntl = false; +#endif + atpx->is_hybrid = true; + } + return 0; } @@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } @@ -507,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .switchto = amdgpu_atpx_switchto, .power_state = amdgpu_atpx_power_state, - .init = amdgpu_atpx_init, .get_client_id = amdgpu_atpx_get_client_id, }; @@ -542,6 +573,7 @@ static bool amdgpu_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; + amdgpu_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 99ca75baa47d..2b6afe123f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) uint16_t tmp, bios_header_start; r = amdgpu_atrm_get_bios(adev); - if (r == false) + if (!r) r = amdgpu_acpi_vfct_bios(adev); - if (r == false) + if (!r) r = igp_read_bios_from_vram(adev); - if (r == false) + if (!r) r = amdgpu_read_bios(adev); - if (r == false) { + if (!r) { r = amdgpu_read_bios_from_rom(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_disabled_bios(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_platform_bios(adev); } - if (r == false || adev->bios == NULL) { + if (!r || adev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); adev->bios = NULL; return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 823bf5e0b0c8..651115dcce12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, unsigned last_entry = 0, first_userptr = num_entries; unsigned i; int r; + unsigned long total_size = 0; array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); if (!array) @@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; + total_size += amdgpu_bo_size(entry->robj); trace_amdgpu_bo_list_set(list, entry->robj); } @@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, list->array = array; list->num_entries = num_entries; + trace_amdgpu_cs_bo_status(list->num_entries, total_size); return 0; error_free: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index cf6f49fc1c75..bc0440f7a31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -312,6 +312,8 @@ static uint32_t 
amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, return RREG32_UVD_CTX(index); case CGS_IND_REG__DIDT: return RREG32_DIDT(index); + case CGS_IND_REG_GC_CAC: + return RREG32_GC_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, return WREG32_UVD_CTX(index, value); case CGS_IND_REG__DIDT: return WREG32_DIDT(index, value); + case CGS_IND_REG_GC_CAC: + return WREG32_GC_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; @@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { + case CHIP_TOPAZ: + strcpy(fw_name, "amdgpu/topaz_smc.bin"); + break; case CHIP_TONGA: strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; @@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, } hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); @@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = adev->pm.fw_version; info->image_size = ucode_size; + info->ucode_start_address = ucode_start_address; info->kptr = (void *)src; } return 0; } static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, - struct cgs_system_info *sys_info) + struct cgs_system_info *sys_info) { CGS_FUNC_ADEV; @@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_PCIE_DEV: + sys_info->value = adev->pdev->device; + break; + case CGS_SYSTEM_INFO_PCIE_REV: + sys_info->value = adev->pdev->revision; + break; case CGS_SYSTEM_INFO_CG_FLAGS: sys_info->value = adev->cg_flags; break; @@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_GFX_CU_INFO: sys_info->value = adev->gfx.cu_info.number; break; + case CGS_SYSTEM_INFO_GFX_SE_INFO: + sys_info->value = adev->gfx.config.max_shader_engines; + break; default: return -ENODEV; } @@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, acpi_handle handle; struct acpi_object_list input; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *params = NULL; - union acpi_object *obj = NULL; + union acpi_object *params, *obj; uint8_t name[5] = {'\0'}; - struct cgs_acpi_method_argument *argument = NULL; + struct cgs_acpi_method_argument *argument; uint32_t i, count; acpi_status status; - int result = 0; - uint32_t func_no = 0xFFFFFFFF; + int result; handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) @@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (info->pinput_argument == NULL) return -EINVAL; argument = info->pinput_argument; - func_no = argument->value; for (i = 0; i < info->input_count; i++) { if (((argument->type == ACPI_TYPE_STRING) || (argument->type == ACPI_TYPE_BUFFER)) && @@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, params->integer.value = argument->value; break; case ACPI_TYPE_STRING: - params->string.length = 
argument->method_length; + params->string.length = argument->data_length; params->string.pointer = argument->pointer; break; case ACPI_TYPE_BUFFER: - params->buffer.length = argument->method_length; + params->buffer.length = argument->data_length; params->buffer.pointer = argument->pointer; break; default: @@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (ACPI_FAILURE(status)) { result = -EIO; - goto error; + goto free_input; } /* return the output info */ @@ -1006,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((obj->type != ACPI_TYPE_PACKAGE) || (obj->package.count != count)) { result = -EIO; - goto error; + goto free_obj; } params = obj->package.elements; } else @@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (params == NULL) { result = -EIO; - goto error; + goto free_obj; } for (i = 0; i < count; i++) { if (argument->type != params->type) { result = -EIO; - goto error; + goto free_obj; } switch (params->type) { case ACPI_TYPE_INTEGER: @@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((params->string.length != argument->data_length) || (params->string.pointer == NULL)) { result = -EIO; - goto error; + goto free_obj; } strncpy(argument->pointer, params->string.pointer, @@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, case ACPI_TYPE_BUFFER: if (params->buffer.pointer == NULL) { result = -EIO; - goto error; + goto free_obj; } memcpy(argument->pointer, params->buffer.pointer, @@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, params++; } -error: - if (obj != NULL) - kfree(obj); + result = 0; +free_obj: + kfree(obj); +free_input: kfree((void *)input.pointer); return result; } @@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, } #endif -int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, +static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, uint32_t acpi_method, uint32_t acpi_function, void *pinput, void *poutput, @@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, struct cgs_acpi_method_info info = {0}; acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; - acpi_input[0].method_length = sizeof(uint32_t); acpi_input[0].data_length = sizeof(uint32_t); acpi_input[0].value = acpi_function; acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; - acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_input[1].data_length = input_size; acpi_input[1].pointer = pinput; acpi_output.type = CGS_ACPI_TYPE_BUFFER; - acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE; acpi_output.data_length = output_size; acpi_output.pointer = poutput; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index cb07da41152b..ff0b55a65ca3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev, } if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = 
DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9bc8f1d99733..0307ff5887c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) if (ret) goto free_all_kdata; - if (p->uf_entry.robj) { - p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj); - p->job->uf_offset = uf_offset; - } - + if (p->uf_entry.robj) + p->job->uf_addr = uf_offset; kfree(chunk_array); return 0; @@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); + amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates); p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); p->bytes_moved = 0; @@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (r) goto error_validate; + fpriv->vm.last_eviction_counter = + atomic64_read(&p->adev->num_evictions); + if (p->bo_list) { struct amdgpu_bo *gds = p->bo_list->gds_obj; struct amdgpu_bo *gws = p->bo_list->gws_obj; @@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } } + if (p->uf_entry.robj) + p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj); + error_validate: if (r) { amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); @@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, /* Only for UVD/VCE VM emulation */ if (ring->funcs->parse_cs) { + p->job->vm = NULL; for (i = 0; i < p->job->num_ibs; i++) { r = amdgpu_ring_parse_cs(ring, p, i); if (r) return r; } - } + } else { + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); - r = amdgpu_bo_vm_update_pte(p, vm); - if (!r) - amdgpu_cs_sync_rings(p); + r = amdgpu_bo_vm_update_pte(p, vm); + if (r) + return r; + } - return r; + return amdgpu_cs_sync_rings(p); } static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) @@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } /* UVD & VCE fw doesn't support user fences */ - if (parser->job->uf_bo && ( + if (parser->job->uf_addr && ( parser->job->ring->type == AMDGPU_RING_TYPE_UVD || parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) return -EINVAL; @@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, { struct amdgpu_ring *ring = p->job->ring; struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; - struct fence *fence; struct amdgpu_job *job; int r; job = p->job; p->job = NULL; - r = amd_sched_job_init(&job->base, &ring->sched, - entity, amdgpu_job_timeout_func, - amdgpu_job_free_func, - p->filp, &fence); + r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); if (r) { amdgpu_job_free(job); return r; @@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->ctx = entity->fence_context; - p->fence = fence_get(fence); - cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); + p->fence = fence_get(&job->base.s_fence->finished); + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; + amdgpu_job_free_resources(job); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e920086af46..df7ab2458e50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/kthread.h> #include <linux/console.h> #include <linux/slab.h> #include <linux/debugfs.h> @@ -35,6 +36,7 @@ #include <linux/vga_switcheroo.h> #include <linux/efi.h> #include "amdgpu.h" +#include "amdgpu_trace.h" #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_atombios.h" @@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev) uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, bool always_indirect) { + uint32_t ret; + if ((reg * 4) < adev->rmmio_size && !always_indirect) - return readl(((void __iomem *)adev->rmmio) + (reg * 4)); + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); else { unsigned long flags; - uint32_t ret; spin_lock_irqsave(&adev->mmio_idx_lock, flags); writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - - return ret; } + trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + return ret; } void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, state); if (r) return r; + break; } } return r; @@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; if (adev->ip_blocks[i].type == block_type) { r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, state); if (r) return r; + break; } } return r; } +int amdgpu_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); + if (r) + return r; + break; + } + } + return 0; + +} + +bool amdgpu_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].type == block_type) + return adev->ip_blocks[i].funcs->is_idle((void *)adev); + } + return true; + +} + const struct amdgpu_ip_block_version * amdgpu_get_ip_block( struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } } + adev->cg_flags &= amdgpu_cg_mask; + adev->pg_flags &= amdgpu_pg_mask; + return 0; } @@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; adev->didt_wreg = &amdgpu_invalid_wreg; + adev->gc_cac_rreg = &amdgpu_invalid_rreg; + adev->gc_cac_wreg = &amdgpu_invalid_wreg; adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 
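/*
 * Editor's sketch (not part of the patch): the gc_cac_rreg/gc_cac_wreg hooks
 * initialized to the invalid stubs above are meant to be overridden by the
 * ASIC code with an index/data pair accessor serialized by the new
 * gc_cac_idx_lock, following the existing DIDT pattern. A minimal example of
 * such a callback is sketched below; the mmGC_CAC_IND_INDEX and
 * mmGC_CAC_IND_DATA register names are assumptions, since the ASIC side is
 * not part of this hunk.
 */
static u32 example_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, reg);	/* select the indirect register */
	r = RREG32(mmGC_CAC_IND_DATA);		/* read back its value */
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}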
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); @@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->pcie_idx_lock); spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); + spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); adev->rmmio_base = pci_resource_start(adev->pdev, 5); @@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev, vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); /* Read BIOS */ - if (!amdgpu_get_bios(adev)) - return -EINVAL; + if (!amdgpu_get_bios(adev)) { + r = -EINVAL; + goto failed; + } /* Must be an ATOMBIOS */ if (!adev->is_atom_bios) { dev_err(adev->dev, "Expecting atombios for GPU\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); - return r; + goto failed; } /* See if the asic supports SR-IOV */ @@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); - return -EINVAL; + r = -EINVAL; + goto failed; } DRM_INFO("GPU not posted. posting now...\n"); amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); - return r; + goto failed; } /* init the mode config */ @@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); amdgpu_fini(adev); - return r; + goto failed; } adev->accel_working = true; @@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); - return r; + goto failed; } r = amdgpu_ib_ring_tests(adev); @@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_ERROR("registering register debugfs failed (%d).\n", r); } + r = amdgpu_debugfs_firmware_init(adev); + if (r) { + DRM_ERROR("registering firmware debugfs failed (%d).\n", r); + return r; + } + if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); - return r; + goto failed; } return 0; + +failed: + if (runtime) + vga_switcheroo_fini_domain_pm_ops(adev->dev); + return r; } static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); @@ -1645,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_bo_evict_vram(adev); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); + drm_crtc_force_disable_all(adev->ddev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); kfree(adev->ip_block_status); @@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) 
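/*
 * Editor's sketch (not part of the patch): intended use of the
 * amdgpu_wait_for_idle() and amdgpu_is_idle() helpers introduced above in
 * amdgpu_device.c. The GMC case mirrors the call this series adds to
 * amdgpu_gpu_reset() before the ASIC reset; the wrapper name here is
 * illustrative only.
 */
static int example_quiesce_gmc(struct amdgpu_device *adev)
{
	/* cheap non-blocking query of the IP block's is_idle hook */
	if (amdgpu_is_idle(adev, AMD_IP_BLOCK_TYPE_GMC))
		return 0;

	/* otherwise block until the IP block's wait_for_idle hook succeeds */
	return amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
}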
kfree(adev->bios); adev->bios = NULL; vga_switcheroo_unregister_client(adev->pdev); + if (adev->flags & AMD_IS_PX) + vga_switcheroo_fini_domain_pm_ops(adev->dev); vga_client_register(adev->pdev, NULL, NULL, NULL); if (adev->rio_mem) pci_iounmap(adev->pdev, adev->rio_mem); @@ -1841,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + /* + * Most of the connector probing functions try to acquire runtime pm + * refs to ensure that the GPU is powered on when connector polling is + * performed. Since we're calling this from a runtime PM callback, + * trying to acquire rpm refs will cause us to deadlock. + * + * Since we're guaranteed to be holding the rpm lock, it's safe to + * temporarily disable the rpm helpers so this doesn't deadlock us. + */ +#ifdef CONFIG_PM + dev->dev->power.disable_depth++; +#endif drm_helper_hpd_irq_event(dev); +#ifdef CONFIG_PM + dev->dev->power.disable_depth--; +#endif if (fbcon) { amdgpu_fbdev_set_suspend(adev, 0); @@ -1861,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) */ int amdgpu_gpu_reset(struct amdgpu_device *adev) { - unsigned ring_sizes[AMDGPU_MAX_RINGS]; - uint32_t *ring_data[AMDGPU_MAX_RINGS]; - - bool saved = false; - int i, r; int resched; @@ -1874,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - r = amdgpu_suspend(adev); - + /* block scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) continue; - - ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); - if (ring_sizes[i]) { - saved = true; - dev_info(adev->dev, "Saved %d dwords of commands " - "on ring %d.\n", ring_sizes[i], i); - } + kthread_park(ring->sched.thread); + amd_sched_hw_job_reset(&ring->sched); } + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ + amdgpu_fence_driver_force_completion(adev); + + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); retry: + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } + r = amdgpu_asic_reset(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1898,32 +1987,29 @@ retry: dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); } - + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); if (!r) { + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_suspend(adev); + goto retry; + } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; - - amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); - ring_sizes[i] = 0; - ring_data[i] = NULL; - } - - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - if (saved) { - saved = false; - r = amdgpu_suspend(adev); - goto retry; - } + amd_sched_job_recovery(&ring->sched); + kthread_unpark(ring->sched.thread); } } else { - amdgpu_fence_driver_force_completion(adev); + dev_err(adev->dev, "asic resume failed (%d).\n", r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (adev->rings[i]) - kfree(ring_data[i]); + if (adev->rings[i]) { + kthread_unpark(adev->rings[i]->sched.thread); + } } } @@ -1934,13 +2020,11 @@ retry: /* bad 
news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); } + amdgpu_irq_gpu_reset_resume_helper(adev); return r; } -#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ -#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ - void amdgpu_get_pcie_info(struct amdgpu_device *adev) { u32 mask; @@ -2094,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, struct amdgpu_device *adev = f->f_inode->i_private; ssize_t result = 0; int r; + bool use_bank; + unsigned instance_bank, sh_bank, se_bank; if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (*pos & (1ULL << 62)) { + se_bank = (*pos >> 24) & 0x3FF; + sh_bank = (*pos >> 34) & 0x3FF; + instance_bank = (*pos >> 44) & 0x3FF; + use_bank = 1; + *pos &= 0xFFFFFF; + } else { + use_bank = 0; + } + + if (use_bank) { + if (sh_bank >= adev->gfx.config.max_sh_per_se || + se_bank >= adev->gfx.config.max_shader_engines) + return -EINVAL; + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se_bank, + sh_bank, instance_bank); + } + while (size) { uint32_t value; if (*pos > adev->rmmio_size) - return result; + goto end; value = RREG32(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) - return r; + if (r) { + result = r; + goto end; + } result += 4; buf += 4; @@ -2115,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, size -= 4; } +end: + if (use_bank) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + return result; } @@ -2314,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * return result; } +static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + uint32_t *config, no_regs = 0; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + config = kmalloc(256 * sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + /* version, increment each time something is added */ + config[no_regs++] = 0; + config[no_regs++] = adev->gfx.config.max_shader_engines; + config[no_regs++] = adev->gfx.config.max_tile_pipes; + config[no_regs++] = adev->gfx.config.max_cu_per_sh; + config[no_regs++] = adev->gfx.config.max_sh_per_se; + config[no_regs++] = adev->gfx.config.max_backends_per_se; + config[no_regs++] = adev->gfx.config.max_texture_channel_caches; + config[no_regs++] = adev->gfx.config.max_gprs; + config[no_regs++] = adev->gfx.config.max_gs_threads; + config[no_regs++] = adev->gfx.config.max_hw_contexts; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; + config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.num_tile_pipes; + config[no_regs++] = adev->gfx.config.backend_enable_mask; + config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; + config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; + config[no_regs++] = adev->gfx.config.shader_engine_tile_size; + config[no_regs++] = adev->gfx.config.num_gpus; + config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; + config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; + config[no_regs++] = adev->gfx.config.gb_addr_config; + config[no_regs++] = adev->gfx.config.num_rbs; + + while (size && (*pos < no_regs * 
4)) { + uint32_t value; + + value = config[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + kfree(config); + return r; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + kfree(config); + return result; +} + + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -2339,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gca_config_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gca_config_read, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, + &amdgpu_debugfs_gca_config_fops, }; static const char *debugfs_regs_names[] = { @@ -2351,6 +2533,7 @@ static const char *debugfs_regs_names[] = { "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", + "amdgpu_gca_config", }; static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b0832da2ef7e..76f96028313d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work) spin_unlock_irqrestore(&crtc->dev->event_lock, flags); usleep_range(min_udelay, 2 * min_udelay); spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; + } if (!repcnt) DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " @@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); - goto cleanup; + goto unreserve; } r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(new_rbo); DRM_ERROR("failed to get fences for buffer\n"); - goto cleanup; + goto unpin; } amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); @@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->base = base; - r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } +unpin: if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } +unreserve: amdgpu_bo_unreserve(new_rbo); cleanup: @@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - if (amdgpu_fb->obj) { - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); - } + drm_gem_object_unreference_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f888c015f76c..9aa533cf4ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -52,9 +52,10 @@ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. + * - 3.3.0 - Add VM support for UVD on supported hardware. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 2 +#define KMS_DRIVER_MINOR 3 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -82,8 +83,12 @@ int amdgpu_exp_hw_support = 0; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; +int amdgpu_powercontainment = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; +unsigned amdgpu_cg_mask = 0xffffffff; +unsigned amdgpu_pg_mask = 0xffffffff; +char *amdgpu_disable_cu = NULL; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -160,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); #ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); + +MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)"); +module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); #endif MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); @@ -168,6 +176,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); +MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)"); +module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); + +MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); +module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); + +MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); +module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -413,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); - pci_set_power_state(pdev, PCI_D3cold); + if (amdgpu_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return 0; @@ -430,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(pdev, PCI_D0); + if (amdgpu_is_atpx_hybrid() || + !amdgpu_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) @@ -515,7 +537,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME | DRIVER_RENDER, + DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, .dev_priv_size = 0, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, @@ -590,7 +612,6 @@ static int __init amdgpu_init(void) DRM_INFO("amdgpu kernel modesetting enabled.\n"); driver = &kms_driver; pdriver = &amdgpu_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = amdgpu_max_kms_ioctl; 
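/*
 * Editor's sketch (not part of the patch): userspace view of the banked
 * register read path added in amdgpu_debugfs_regs_read() above. Setting
 * bit 62 of the file offset enables bank selection, with the SE, SH and
 * instance indices taken from bits 24-33, 34-43 and 44-53 respectively,
 * and the register byte offset from the low 24 bits. The debugfs path and
 * the register offset used below are illustrative assumptions.
 */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t se = 0, sh = 0, instance = 0;
	uint64_t reg = 0x2000;	/* hypothetical register byte offset */
	off_t pos = (off_t)((1ULL << 62) | (se << 24) | (sh << 34) |
			    (instance << 44) | reg);
	uint32_t value;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), pos) == (ssize_t)sizeof(value))
		printf("reg 0x%llx = 0x%08x\n",
		       (unsigned long long)reg, value);
	close(fd);
	return 0;
}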
amdgpu_register_atpx_handler(); /* let modprobe override vga console setting */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d1558768cfb7..0b109aebfec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) if (seq != ring->fence_drv.sync_seq) amdgpu_fence_schedule_fallback(ring); - while (last_seq != seq) { + if (unlikely(seq == last_seq)) + return; + + last_seq &= drv->num_fences_mask; + seq &= drv->num_fences_mask; + + do { struct fence *fence, **ptr; - ptr = &drv->fences[++last_seq & drv->num_fences_mask]; + ++last_seq; + last_seq &= drv->num_fences_mask; + ptr = &drv->fences[last_seq]; /* There is always exactly one thread signaling this fence slot */ fence = rcu_dereference_protected(*ptr, 1); RCU_INIT_POINTER(*ptr, NULL); - BUG_ON(!fence); + if (!fence) + continue; r = fence_signal(fence); if (!r) @@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); fence_put(fence); - } + } while (last_seq != seq); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8fab6486064f..88fbed2389c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_print; - amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); + amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates); list_for_each_entry(entry, &list, head) { domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); /* if anything is swapped out don't swap it in here, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9f95da4f0536..a074edd95c70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) } } } + +/** + * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter + * + * @mask: array in which the per-shader array disable masks will be stored + * @max_se: number of SEs + * @max_sh: number of SHs + * + * The bitmask of CUs to be disabled in the shader array determined by se and + * sh is stored in mask[se * max_sh + sh]. 
+ */ +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh) +{ + unsigned se, sh, cu; + const char *p; + + memset(mask, 0, sizeof(*mask) * max_se * max_sh); + + if (!amdgpu_disable_cu || !*amdgpu_disable_cu) + return; + + p = amdgpu_disable_cu; + for (;;) { + char *next; + int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); + if (ret < 3) { + DRM_ERROR("amdgpu: could not parse disable_cu\n"); + return; + } + + if (se < max_se && sh < max_sh && cu < 16) { + DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu); + mask[se * max_sh + sh] |= 1u << cu; + } else { + DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n", + se, sh, cu); + } + + next = strchr(p, ','); + if (!next) + break; + p = next + 1; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index dc06cbda7be6..51321e154c09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -27,4 +27,6 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 34e35423b78e..a31d7ef3032c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -33,6 +33,8 @@ #include "amdgpu.h" #include "atom.h" +#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) + /* * IB * IBs (Indirect Buffers) and areas of GPU accessible memory where @@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; - struct fence *hwf; uint64_t ctx; unsigned i; @@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, patch_offset = amdgpu_ring_init_cond_exec(ring); if (vm) { - r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, - job->gds_base, job->gds_size, - job->gws_base, job->gws_size, - job->oa_base, job->oa_size); + r = amdgpu_vm_flush(ring, job); if (r) { amdgpu_ring_undo(ring); return r; @@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_hdp_invalidate) amdgpu_ring_emit_hdp_invalidate(ring); - r = amdgpu_fence_emit(ring, &hwf); + r = amdgpu_fence_emit(ring, f); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) @@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } /* wrap the last IB with fence */ - if (job && job->uf_bo) { - uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo); - - addr += job->uf_offset; - amdgpu_ring_emit_fence(ring, addr, job->uf_sequence, + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, AMDGPU_FENCE_FLAG_64BIT); } - if (f) - *f = fence_get(hwf); + if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); @@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring); + r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); if (r) { ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 835a3fa8d8df..278708f5a744 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev, return r; } +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) +{ + int i, j; + for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) { + struct amdgpu_irq_src *src = adev->irq.sources[i]; + if (!src) + continue; + for (j = 0; j < src->num_types; j++) + amdgpu_irq_update(adev, src, j); + } +} + /** * amdgpu_irq_get - enable interrupt * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index e124b59f39c1..7ef09352e534 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); +void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index f0dafa514fe4..6674d40eb3ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,21 +28,15 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_free_handler(struct work_struct *ws) +static void amdgpu_job_timedout(struct amd_sched_job *s_job) { - struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job); - amd_sched_job_put(&job->base); -} + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); -void amdgpu_job_timeout_func(struct work_struct *work) -{ - struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work); DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", - job->base.sched->name, - (uint32_t)atomic_read(&job->ring->fence_drv.last_seq), - job->ring->fence_drv.sync_seq); - - amd_sched_job_put(&job->base); + job->base.sched->name, + atomic_read(&job->ring->fence_drv.last_seq), + job->ring->fence_drv.sync_seq); + amdgpu_gpu_reset(job->adev); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, @@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->vm = vm; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; - INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler); amdgpu_sync_create(&(*job)->sync); @@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, return r; } -void amdgpu_job_free(struct amdgpu_job *job) +void amdgpu_job_free_resources(struct amdgpu_job *job) { - unsigned i; struct fence *f; + unsigned i; + /* use sched fence if available */ - f = (job->base.s_fence)? &job->base.s_fence->base : job->fence; + f = job->base.s_fence ? 
&job->base.s_fence->finished : job->fence; for (i = 0; i < job->num_ibs; ++i) - amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); - fence_put(job->fence); + amdgpu_ib_free(job->adev, &job->ibs[i], f); +} - amdgpu_bo_unref(&job->uf_bo); - amdgpu_sync_free(&job->sync); +void amdgpu_job_free_cb(struct amd_sched_job *s_job) +{ + struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); - if (!job->base.use_sched) - kfree(job); + fence_put(job->fence); + amdgpu_sync_free(&job->sync); + kfree(job); } -void amdgpu_job_free_func(struct kref *refcount) +void amdgpu_job_free(struct amdgpu_job *job) { - struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount); + amdgpu_job_free_resources(job); + + fence_put(job->fence); + amdgpu_sync_free(&job->sync); kfree(job); } @@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f) { - struct fence *fence; int r; job->ring = ring; if (!f) return -EINVAL; - r = amd_sched_job_init(&job->base, &ring->sched, - entity, amdgpu_job_timeout_func, - amdgpu_job_free_func, owner, &fence); + r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); if (r) return r; job->owner = owner; job->ctx = entity->fence_context; - *f = fence_get(fence); + *f = fence_get(&job->base.s_fence->finished); + amdgpu_job_free_resources(job); amd_sched_entity_push_job(&job->base); return 0; @@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) int r; r = amdgpu_vm_grab_id(vm, ring, &job->sync, - &job->base.s_fence->base, - &job->vm_id, &job->vm_pd_addr); + &job->base.s_fence->finished, + job); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); @@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) } job = to_amdgpu_job(sched_job); - r = amdgpu_sync_wait(&job->sync); - if (r) { - DRM_ERROR("failed to sync wait (%d)\n", r); - return NULL; - } + BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->sync.last_vm_update, job, &fence); - if (r) { + if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } -err: - job->fence = fence; - amdgpu_job_free(job); + /* if gpu reset, hw fence will be replaced here */ + fence_put(job->fence); + job->fence = fence_get(fence); + amdgpu_job_free_resources(job); return fence; } const struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, - .begin_job = amd_sched_job_begin, - .finish_job = amd_sched_job_finish, + .timedout_job = amdgpu_job_timedout, + .free_job = amdgpu_job_free_cb }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d851ea15059f..d942654a1de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (amdgpu_device_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } amdgpu_amdkfd_device_fini(adev); @@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } out: - if (r) + if (r) { + /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ + if (adev->rmmio && amdgpu_device_is_px(dev)) + pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); - + } 
return r; } +static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, + struct drm_amdgpu_query_fw *query_fw, + struct amdgpu_device *adev) +{ + switch (query_fw->fw_type) { + case AMDGPU_INFO_FW_VCE: + fw_info->ver = adev->vce.fw_version; + fw_info->feature = adev->vce.fb_version; + break; + case AMDGPU_INFO_FW_UVD: + fw_info->ver = adev->uvd.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GMC: + fw_info->ver = adev->mc.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_GFX_ME: + fw_info->ver = adev->gfx.me_fw_version; + fw_info->feature = adev->gfx.me_feature_version; + break; + case AMDGPU_INFO_FW_GFX_PFP: + fw_info->ver = adev->gfx.pfp_fw_version; + fw_info->feature = adev->gfx.pfp_feature_version; + break; + case AMDGPU_INFO_FW_GFX_CE: + fw_info->ver = adev->gfx.ce_fw_version; + fw_info->feature = adev->gfx.ce_feature_version; + break; + case AMDGPU_INFO_FW_GFX_RLC: + fw_info->ver = adev->gfx.rlc_fw_version; + fw_info->feature = adev->gfx.rlc_feature_version; + break; + case AMDGPU_INFO_FW_GFX_MEC: + if (query_fw->index == 0) { + fw_info->ver = adev->gfx.mec_fw_version; + fw_info->feature = adev->gfx.mec_feature_version; + } else if (query_fw->index == 1) { + fw_info->ver = adev->gfx.mec2_fw_version; + fw_info->feature = adev->gfx.mec2_feature_version; + } else + return -EINVAL; + break; + case AMDGPU_INFO_FW_SMC: + fw_info->ver = adev->pm.fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_SDMA: + if (query_fw->index >= adev->sdma.num_instances) + return -EINVAL; + fw_info->ver = adev->sdma.instance[query_fw->index].fw_version; + fw_info->feature = adev->sdma.instance[query_fw->index].feature_version; + break; + default: + return -EINVAL; + } + return 0; +} + /* * Userspace get information ioctl */ @@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; } case AMDGPU_INFO_TIMESTAMP: - ui64 = amdgpu_asic_get_gpu_clock_counter(adev); + ui64 = amdgpu_gfx_get_gpu_clock_counter(adev); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_FW_VERSION: { struct drm_amdgpu_info_firmware fw_info; + int ret; /* We only support one instance of each IP block right now. 
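 * The per-type lookup now lives in amdgpu_firmware_info() above, shared
 * between this ioctl and the debugfs dump added below. As a rough
 * userspace sketch (assuming the usual struct drm_amdgpu_info layout
 * from amdgpu_drm.h; the field names are taken from that header, not
 * from this patch):
 *
 *   struct drm_amdgpu_info req = {0};
 *   struct drm_amdgpu_info_firmware fw = {0};
 *
 *   req.return_pointer = (__u64)(uintptr_t)&fw;
 *   req.return_size = sizeof(fw);
 *   req.query = AMDGPU_INFO_FW_VERSION;
 *   req.query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 *   req.query_fw.index = 0;    (index 1 selects MEC2)
 *   ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
 *
 * On success fw.ver and fw.feature hold the firmware and feature versions.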
*/ if (info->query_fw.ip_instance != 0) return -EINVAL; - switch (info->query_fw.fw_type) { - case AMDGPU_INFO_FW_VCE: - fw_info.ver = adev->vce.fw_version; - fw_info.feature = adev->vce.fb_version; - break; - case AMDGPU_INFO_FW_UVD: - fw_info.ver = adev->uvd.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GMC: - fw_info.ver = adev->mc.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_GFX_ME: - fw_info.ver = adev->gfx.me_fw_version; - fw_info.feature = adev->gfx.me_feature_version; - break; - case AMDGPU_INFO_FW_GFX_PFP: - fw_info.ver = adev->gfx.pfp_fw_version; - fw_info.feature = adev->gfx.pfp_feature_version; - break; - case AMDGPU_INFO_FW_GFX_CE: - fw_info.ver = adev->gfx.ce_fw_version; - fw_info.feature = adev->gfx.ce_feature_version; - break; - case AMDGPU_INFO_FW_GFX_RLC: - fw_info.ver = adev->gfx.rlc_fw_version; - fw_info.feature = adev->gfx.rlc_feature_version; - break; - case AMDGPU_INFO_FW_GFX_MEC: - if (info->query_fw.index == 0) { - fw_info.ver = adev->gfx.mec_fw_version; - fw_info.feature = adev->gfx.mec_feature_version; - } else if (info->query_fw.index == 1) { - fw_info.ver = adev->gfx.mec2_fw_version; - fw_info.feature = adev->gfx.mec2_feature_version; - } else - return -EINVAL; - break; - case AMDGPU_INFO_FW_SMC: - fw_info.ver = adev->pm.fw_version; - fw_info.feature = 0; - break; - case AMDGPU_INFO_FW_SDMA: - if (info->query_fw.index >= adev->sdma.num_instances) - return -EINVAL; - fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version; - fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version; - break; - default: - return -EINVAL; - } + ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev); + if (ret) + return ret; + return copy_to_user(out, &fw_info, min((size_t)size, sizeof(fw_info))) ? 
-EFAULT : 0; } @@ -566,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_uvd_free_handles(adev, file_priv); + amdgpu_vce_free_handles(adev, file_priv); + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) @@ -590,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; - - amdgpu_uvd_free_handles(adev, file_priv); - amdgpu_vce_free_handles(adev, file_priv); } /* @@ -756,3 +773,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); + +/* + * Debugfs info + */ +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct drm_amdgpu_info_firmware fw_info; + struct drm_amdgpu_query_fw query_fw; + int ret, i; + + /* VCE */ + query_fw.fw_type = AMDGPU_INFO_FW_VCE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* UVD */ + query_fw.fw_type = AMDGPU_INFO_FW_UVD; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* GMC */ + query_fw.fw_type = AMDGPU_INFO_FW_GMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* ME */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* PFP */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* CE */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* RLC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC; + query_fw.index = 0; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* MEC2 */ + if (adev->asic_type == CHIP_KAVERI || + (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) { + query_fw.index = 1; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + } + + /* 
SMC */ + query_fw.fw_type = AMDGPU_INFO_FW_SMC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* SDMA */ + query_fw.fw_type = AMDGPU_INFO_FW_SDMA; + for (i = 0; i < adev->sdma.num_instances; i++) { + query_fw.index = i; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, fw_info.feature, fw_info.ver); + } + + return 0; +} + +static const struct drm_info_list amdgpu_firmware_info_list[] = { + {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL}, +}; +#endif + +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list, + ARRAY_SIZE(amdgpu_firmware_info_list)); +#else + return 0; +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 7ecea83ce453..6f0873c75a25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct amdgpu_bo *rbo; + struct ttm_mem_reg *old_mem = &bo->mem; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; @@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, /* move_notify is called before move happens */ amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); + + trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type); } int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 0e13d80d2a95..ff63b88b0ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -347,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); return size; } @@ -363,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -377,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); fail: return count; } @@ -391,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); return size; } @@ -407,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -421,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) 
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); fail: return count; } @@ -435,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); return size; } @@ -451,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -465,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_sclk_od(adev); + else if (adev->pm.funcs->get_sclk_od) + value = adev->pm.funcs->get_sclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_sclk_od) { + adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + +fail: + return count; +} + +static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_mclk_od(adev); + else if (adev->pm.funcs->get_mclk_od) + value = adev->pm.funcs->get_mclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_mclk_od) { + adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + fail: return count; } @@ -490,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_pcie, amdgpu_set_pp_dpm_pcie); +static 
DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_sclk_od, + amdgpu_set_pp_sclk_od); +static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_mclk_od, + amdgpu_set_pp_mclk_od); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -1108,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_table\n"); return ret; } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_pcie\n"); - return ret; - } } + + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_sclk_od\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_mclk_od\n"); + return ret; + } + ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -1146,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_cur_state); device_remove_file(adev->dev, &dev_attr_pp_force_state); device_remove_file(adev->dev, &dev_attr_pp_table); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); } + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + device_remove_file(adev->dev, &dev_attr_pp_sclk_od); + device_remove_file(adev->dev, &dev_attr_pp_mclk_od); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 82256558e0f5..c5738a22b690 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); + pp_init->powercontainment_enabled = amdgpu_powercontainment; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 870f9494252c..85aeb0a804bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -28,6 +28,7 @@ */ #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/debugfs.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -48,6 +49,7 @@ */ static int amdgpu_debugfs_ring_init(struct 
amdgpu_device *adev, struct amdgpu_ring *ring); +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer @@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) ring->count_dw = ndw; ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); + return 0; } @@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) mb(); amdgpu_ring_set_wptr(ring); + + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) void amdgpu_ring_undo(struct amdgpu_ring *ring) { ring->wptr = ring->wptr_old; -} - -/** - * amdgpu_ring_backup - Back up the content of a ring - * - * @ring: the ring we want to back up - * - * Saves all unprocessed commits from a ring, returns the number of dwords saved. - */ -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data) -{ - unsigned size, ptr, i; - - *data = NULL; - - if (ring->ring_obj == NULL) - return 0; - - /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) - return 0; - - ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); - - size = ring->wptr + (ring->ring_size / 4); - size -= ptr; - size &= ring->ptr_mask; - if (size == 0) - return 0; - - /* and then save the content of the ring */ - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) - return 0; - for (i = 0; i < size; ++i) { - (*data)[i] = ring->ring[ptr++]; - ptr &= ring->ptr_mask; - } - - return size; -} - -/** - * amdgpu_ring_restore - append saved commands to the ring again - * - * @ring: ring to append commands to - * @size: number of dwords we want to write - * @data: saved commands - * - * Allocates space on the ring and restore the previously saved commands. 
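 * (The whole backup/restore pair goes away together with next_rptr:
 * after a GPU reset, recovery is now driven through the scheduler
 * instead, see amdgpu_job_timedout() calling amdgpu_gpu_reset() above.)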
- */ -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data) -{ - int i, r; - - if (!size || !data) - return 0; - - /* restore the saved ring content */ - r = amdgpu_ring_alloc(ring, size); - if (r) - return r; - - for (i = 0; i < size; ++i) { - amdgpu_ring_write(ring, data[i]); - } - amdgpu_ring_commit(ring); - kfree(data); - return 0; + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - r = amdgpu_wb_get(adev, &ring->next_rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); - return r; - } - ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4; - ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - r = amdgpu_wb_get(adev, &ring->cond_exe_offs); if (r) { dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); @@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; - spin_lock_init(&ring->fence_lock); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); @@ -310,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } r = amdgpu_bo_kmap(ring->ring_obj, (void **)&ring->ring); + + memset((void *)ring->ring, 0, ring->ring_size); + amdgpu_bo_unreserve(ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring map failed\n", r); @@ -347,7 +281,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); - amdgpu_wb_free(ring->adev, ring->next_rptr_offs); if (ring_obj) { r = amdgpu_bo_reserve(ring_obj, false); @@ -358,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) } amdgpu_bo_unref(&ring_obj); } + amdgpu_debugfs_ring_fini(ring); } /* @@ -365,57 +299,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) +/* Layout of file is 12 bytes consisting of + * - rptr + * - wptr + * - driver's copy of wptr + * + * followed by n-words of ring data + */ +static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int roffset = (unsigned long)node->info_ent->data; - struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); - uint32_t rptr, wptr, rptr_next; - unsigned i; - - wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); - - rptr = amdgpu_ring_get_rptr(ring); - rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); - - seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); - - seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", - ring->wptr, ring->wptr); - - if (!ring->ready) - return 0; - - /* print 8 dw before current rptr as often it's the last executed - * packet that is the root issue - */ - i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - while (i != rptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, 
"\n"); - i = (i + 1) & ring->ptr_mask; + struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private; + int r, i; + uint32_t value, result, early[3]; + + if (*pos & 3 || size & 3) + return -EINVAL; + + result = 0; + + if (*pos < 12) { + early[0] = amdgpu_ring_get_rptr(ring); + early[1] = amdgpu_ring_get_wptr(ring); + early[2] = ring->wptr; + for (i = *pos / 4; i < 3 && size; i++) { + r = put_user(early[i], (uint32_t *)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; + } } - while (i != wptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + + while (size) { + if (*pos >= (ring->ring_size + 12)) + return result; + + value = ring->ring[(*pos - 12)/4]; + r = put_user(value, (uint32_t*)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; } - return 0; + + return result; } -static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS]; -static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32]; +static const struct file_operations amdgpu_debugfs_ring_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_ring_read, + .llseek = default_llseek +}; #endif @@ -423,28 +362,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - unsigned offset = (uint8_t*)ring - (uint8_t*)adev; - unsigned i; - struct drm_info_list *info; - char *name; - - for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { - info = &amdgpu_debugfs_ring_info_list[i]; - if (!info->data) - break; - } + struct drm_minor *minor = adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + char name[32]; - if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list)) - return -ENOSPC; - - name = &amdgpu_debugfs_ring_names[i][0]; sprintf(name, "amdgpu_ring_%s", ring->name); - info->name = name; - info->show = amdgpu_debugfs_ring_info; - info->driver_features = 0; - info->data = (void*)(uintptr_t)offset; - return amdgpu_debugfs_add_files(adev, info, 1); + ent = debugfs_create_file(name, + S_IFREG | S_IRUGO, root, + ring, &amdgpu_debugfs_ring_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + i_size_write(ent->d_inode, ring->ring_size + 12); + ring->ent = ent; #endif return 0; } + +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) +{ +#if defined(CONFIG_DEBUG_FS) + debugfs_remove(ring->ent); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 48618ee324eb..d8af37a845f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %d", + seq_printf(m, " protected by 0x%08x on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 34a92808bbd4..5c8d3022fb87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, } /** - * amdgpu_sync_is_idle - test if all fences are signaled + * amdgpu_sync_peek_fence - get the next fence not signaled yet * * @sync: the sync object + * @ring: optional ring to use for 
test * - * Returns true if all fences in the sync object are signaled. + * Returns the next fence not signaled yet without removing it from the sync + * object. */ -bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) hash_for_each_safe(sync->fences, i, tmp, e, node) { struct fence *f = e->fence; + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + + if (ring && s_fence) { + /* For fences from the same ring it is sufficient + * when they are scheduled. + */ + if (s_fence->sched == &ring->sched) { + if (fence_is_signaled(&s_fence->scheduled)) + continue; + + return &s_fence->scheduled; + } + } if (fence_is_signaled(f)) { hash_del(&e->node); @@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) continue; } - return false; + return f; } - return true; + return NULL; } /** - * amdgpu_sync_cycle_fences - move fences from one sync object into another + * amdgpu_sync_get_fence - get the next fence from the sync object * - * @dst: the destination sync object - * @src: the source sync object - * @fence: fence to add to source + * @sync: sync object to use * - * Remove all fences from source and put them into destination and add - * fence as new one into source. + * Get and removes the next fence from the sync object not signaled yet. */ -int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, - struct fence *fence) -{ - struct amdgpu_sync_entry *e, *newone; - struct hlist_node *tmp; - int i; - - /* Allocate the new entry before moving the old ones */ - newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL); - if (!newone) - return -ENOMEM; - - hash_for_each_safe(src->fences, i, tmp, e, node) { - struct fence *f = e->fence; - - hash_del(&e->node); - if (fence_is_signaled(f)) { - fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - if (amdgpu_sync_add_later(dst, f)) { - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - hash_add(dst->fences, &e->node, f->context); - } - - hash_add(src->fences, &newone->node, fence->context); - newone->fence = fence_get(fence); - - return 0; -} - struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; @@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) return NULL; } -int amdgpu_sync_wait(struct amdgpu_sync *sync) -{ - struct amdgpu_sync_entry *e; - struct hlist_node *tmp; - int i, r; - - hash_for_each_safe(sync->fences, i, tmp, e, node) { - r = fence_wait(e->fence, false); - if (r) - return r; - - hash_del(&e->node); - fence_put(e->fence); - kmem_cache_free(amdgpu_sync_slab, e); - } - - return 0; -} - /** * amdgpu_sync_free - free the sync object * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26a5f4acf584..0d8d65eb46cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -11,19 +11,68 @@ #define TRACE_SYSTEM amdgpu #define TRACE_INCLUDE_FILE amdgpu_trace +TRACE_EVENT(amdgpu_mm_rreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + 
(unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +TRACE_EVENT(amdgpu_mm_wreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + TRACE_EVENT(amdgpu_bo_create, TP_PROTO(struct amdgpu_bo *bo), TP_ARGS(bo), TP_STRUCT__entry( __field(struct amdgpu_bo *, bo) __field(u32, pages) + __field(u32, type) + __field(u32, prefer) + __field(u32, allow) + __field(u32, visible) ), TP_fast_assign( __entry->bo = bo; __entry->pages = bo->tbo.num_pages; + __entry->type = bo->tbo.mem.mem_type; + __entry->prefer = bo->prefered_domains; + __entry->allow = bo->allowed_domains; + __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) + + TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d", + __entry->bo, __entry->pages, __entry->type, + __entry->prefer, __entry->allow, __entry->visible) ); TRACE_EVENT(amdgpu_cs, @@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, - uint64_t pd_addr), - TP_ARGS(vm, ring, vmid, pd_addr), + TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job), + TP_ARGS(vm, ring, job), TP_STRUCT__entry( __field(struct amdgpu_vm *, vm) __field(u32, ring) __field(u32, vmid) __field(u64, pd_addr) + __field(u32, needs_flush) ), TP_fast_assign( __entry->vm = vm; __entry->ring = ring; - __entry->vmid = vmid; - __entry->pd_addr = pd_addr; + __entry->vmid = job->vm_id; + __entry->pd_addr = job->vm_pd_addr; + __entry->needs_flush = job->vm_needs_flush; ), - TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, - __entry->ring, __entry->vmid, __entry->pd_addr) + TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u", + __entry->vm, __entry->ring, __entry->vmid, + __entry->pd_addr, __entry->needs_flush) ); TRACE_EVENT(amdgpu_vm_bo_map, @@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_STRUCT__entry( __field(struct amdgpu_bo_list *, list) __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) ), TP_fast_assign( __entry->list = list; __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); ), - TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) + TP_printk("list=%p, bo=%p, bo_size = %Ld", + __entry->list, + __entry->bo, + __entry->bo_size) +); + +TRACE_EVENT(amdgpu_cs_bo_status, + TP_PROTO(uint64_t total_bo, uint64_t total_size), + TP_ARGS(total_bo, total_size), + TP_STRUCT__entry( + __field(u64, total_bo) + __field(u64, total_size) + ), + + TP_fast_assign( + __entry->total_bo = total_bo; + __entry->total_size = 
total_size; + ), + TP_printk("total bo size = %Ld, total bo count = %Ld", + __entry->total_bo, __entry->total_size) +); + +TRACE_EVENT(amdgpu_ttm_bo_move, + TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement), + TP_ARGS(bo, new_placement, old_placement), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) + __field(u32, new_placement) + __field(u32, old_placement) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + ), + TP_printk("bo=%p from:%d to %d with size = %Ld", + __entry->bo, __entry->old_placement, + __entry->new_placement, __entry->bo_size) ); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3b9053af4762..b7742e62972a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ bo->resv, &fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, fence, - evict, no_wait_gpu, new_mem); + if (r) + return r; + + r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); fence_put(fence); return r; } @@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, return -EINVAL; adev = amdgpu_get_adev(bo->bdev); + + /* remember the eviction */ + if (evict) + atomic64_inc(&adev->num_evictions); + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { amdgpu_move_null(bo, new_mem); return 0; @@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d9c88d13f8db..b11f4e8868d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -40,9 +40,16 @@ #include "uvd/uvd_4_2_d.h" /* 1 second timeout */ -#define UVD_IDLE_TIMEOUT_MS 1000 +#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) + +/* Firmware versions for VI */ +#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8)) +#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8)) +#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8)) +#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8)) + /* Polaris10/11 firmware version */ -#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) +#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) @@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + switch (adev->asic_type) { + case CHIP_TONGA: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; + break; + case CHIP_CARRIZO: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; + break; + case CHIP_FIJI: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; 
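	/* The FW_x_y_z macros pack (major << 24) | (minor << 16) | (rev << 8),
	 * the same layout as adev->uvd.fw_version, so a plain >= compare is
	 * enough. For example FW_1_87_12 = (1 << 24) | (87 << 16) | (12 << 8)
	 * = 0x01000000 | 0x00570000 | 0x00000c00 = 0x01570c00. */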
+ break; + case CHIP_STONEY: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; + break; + default: + adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; + } + return 0; } @@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) if (handle != 0 && adev->uvd.filp[i] == filp) { struct fence *fence; - amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { @@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, unsigned fs_in_mb = width_in_mb * height_in_mb; unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; - unsigned min_ctx_size = 0; + unsigned min_ctx_size = ~0; image_size = width * height; image_size += image_size / 2; @@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, /* reference picture buffer */ min_dpb_size = image_size * num_dpb_buffer; - if (adev->asic_type < CHIP_POLARIS10){ + if (!adev->uvd.use_ctx_buf){ /* macroblock context buffer */ min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192; @@ -662,7 +683,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, } DRM_ERROR("No more free UVD handles!\n"); - return -EINVAL; + return -ENOSPC; case 1: /* it's a decode msg, calc buffer sizes */ @@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } - amdgpu_uvd_note_usage(ctx.parser->adev); - return 0; } @@ -968,7 +987,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err_free; @@ -1106,24 +1125,18 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) if (fences == 0 && handles == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); - /* just work around for uvd clock remain high even - * when uvd dpm disabled on Polaris10 */ - if (adev->asic_type == CHIP_POLARIS10) - amdgpu_asic_set_uvd_clocks(adev, 0, 0); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); - set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, - msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); if (set_clocks) { if (adev->pm.dpm_enabled) { @@ -1133,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) } } } + +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); +} + +/** + * amdgpu_uvd_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct fence *fence; + long r; + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + goto error; + } + + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + 
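		/* fence_wait_timeout() returns 0 on timeout, a negative error
		 * code on failure and the remaining jiffies (> 0) on success,
		 * hence this three-way check. */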
DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + } else { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } + +error: + fence_put(fence); + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 9a3b449081a7..c850009602d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring); +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 875626a2eccb..05865ce35351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -36,7 +36,7 @@ #include "cikd.h" /* 1 second timeout */ -#define VCE_IDLE_TIMEOUT_MS 1000 +#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) unsigned ucode_version, version_major, version_minor, binary_id; int i, r; - INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); - switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: @@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) adev->vce.filp[i] = NULL; } + INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); + mutex_init(&adev->vce.idle_mutex); + return 0; } @@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->vce.ring[1]); release_firmware(adev->vce.fw); + mutex_destroy(&adev->vce.idle_mutex); return 0; } @@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_asic_set_vce_clocks(adev, 0, 0); } } else { - schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); } } /** - * amdgpu_vce_note_usage - power up VCE + * amdgpu_vce_ring_begin_use - power up VCE * - * @adev: amdgpu_device pointer + * @ring: amdgpu ring * * Make sure VCE is powerd up when we want to use it */ -static void amdgpu_vce_note_usage(struct amdgpu_device *adev) +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) { - bool streams_changed = false; - bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); - set_clocks &= schedule_delayed_work(&adev->vce.idle_work, - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); - - if (adev->pm.dpm_enabled) { - /* XXX figure out if the streams changed */ - streams_changed = false; - } + struct amdgpu_device *adev = ring->adev; + bool set_clocks; - if (set_clocks || streams_changed) { + mutex_lock(&adev->vce.idle_mutex); + set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); + if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); } } + mutex_unlock(&adev->vce.idle_mutex); +} + +/** + * amdgpu_vce_ring_end_use - power VCE down + * + * @ring: amdgpu ring + * + * Schedule work to power VCE down again 
+ */ +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); } /** @@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) int i, r; for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->vce.handles[i]); + if (!handle || adev->vce.filp[i] != filp) continue; - amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; - uint64_t dummy; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; /* stitch together a VCE destroy msg */ ib->length_dw = 0; @@ -485,11 +491,14 @@ ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000014; /* len */ - ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; - ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000020; /* len */ + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ + ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */ + ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */ + ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000008; /* len */ ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ @@ -499,7 +508,7 @@ if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = f; + job->fence = fence_get(f); if (r) goto err; @@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, * if we don't have another free session index. 
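 *
 * Note that allocated/created/destroyed are now per-session bitmasks
 * (1 << session_idx) rather than single bools, so a single IB can
 * destroy a handle and recreate it, and the parser can release exactly
 * the slots it touched on error.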
*/ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, - uint32_t handle, bool *allocated) + uint32_t handle, uint32_t *allocated) { unsigned i; - *allocated = false; - /* validate the handle */ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { if (atomic_read(&p->adev->vce.handles[i]) == handle) { @@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { p->adev->vce.filp[i] = p->filp; p->adev->vce.img_size[i] = 0; - *allocated = true; + *allocated |= 1 << i; return i; } } @@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; - bool destroyed = false; - bool created = false; - bool allocated = false; + uint32_t destroyed = 0; + uint32_t created = 0; + uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; int i, r = 0, idx = 0; - amdgpu_vce_note_usage(p->adev); - while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); @@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; } - if (destroyed) { - DRM_ERROR("No other command allowed after destroy!\n"); - r = -EINVAL; - goto out; - } - switch (cmd) { - case 0x00000001: // session + case 0x00000001: /* session */ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); session_idx = amdgpu_vce_validate_handle(p, handle, &allocated); - if (session_idx < 0) - return session_idx; + if (session_idx < 0) { + r = session_idx; + goto out; + } size = &p->adev->vce.img_size[session_idx]; break; - case 0x00000002: // task info + case 0x00000002: /* task info */ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); break; - case 0x01000001: // create - created = true; - if (!allocated) { + case 0x01000001: /* create */ + created |= 1 << session_idx; + if (destroyed & (1 << session_idx)) { + destroyed &= ~(1 << session_idx); + allocated |= 1 << session_idx; + + } else if (!(allocated & (1 << session_idx))) { DRM_ERROR("Handle already in use!\n"); r = -EINVAL; goto out; @@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) 8 * 3 / 2; break; - case 0x04000001: // config extension - case 0x04000002: // pic control - case 0x04000005: // rate control - case 0x04000007: // motion estimation - case 0x04000008: // rdo - case 0x04000009: // vui - case 0x05000002: // auxiliary buffer + case 0x04000001: /* config extension */ + case 0x04000002: /* pic control */ + case 0x04000005: /* rate control */ + case 0x04000007: /* motion estimation */ + case 0x04000008: /* rdo */ + case 0x04000009: /* vui */ + case 0x05000002: /* auxiliary buffer */ break; - case 0x03000001: // encode + case 0x03000001: /* encode */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, *size, 0); if (r) @@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x02000001: // destroy - destroyed = true; + case 0x02000001: /* destroy */ + destroyed |= 1 << session_idx; break; - case 0x05000001: // context buffer + case 0x05000001: /* context buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, *size * 2, 0); if (r) goto out; break; - case 0x05000004: // video bitstream buffer + case 0x05000004: /* video bitstream buffer */ tmp = amdgpu_get_ib_value(p, ib_idx, 
idx + 4); r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, tmp, bs_idx); @@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; - case 0x05000005: // feedback buffer + case 0x05000005: /* feedback buffer */ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 4096, fb_idx); if (r) @@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) idx += len / 4; } - if (allocated && !created) { + if (allocated & ~created) { DRM_ERROR("New session without create command!\n"); r = -ENOENT; } out: - if ((!r && destroyed) || (r && allocated)) { - /* - * IB contains a destroy msg or we have allocated an - * handle and got an error, anyway free the handle - */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); + if (!r) { + /* No error, free all destroyed handle slots */ + tmp = destroyed; + } else { + /* Error during parsing, free all allocated handle slots */ + tmp = allocated; } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) + if (tmp & (1 << i)) + atomic_set(&p->adev->vce.handles[i], 0); + return r; } @@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) * @ring: the engine to test on * */ -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct fence *fence = NULL; - int r; + long r; /* skip vce ring1 ib test for now, since it's not reliable */ if (ring == &ring->adev->vce.ring[1]) @@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); goto error; } r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; } - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } error: fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index f40cf761c66f..63f83d0d985c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout); +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9f36ed30ba11..8e642fc48df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/fence-array.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -114,16 +115,26 @@ void 
amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, /** * amdgpu_vm_get_bos - add the vm BOs to a duplicates list * + * @adev: amdgpu device pointer * @vm: vm providing the BOs * @duplicates: head of duplicates list * * Add the page directory to the BO duplicates list * for command submission. */ -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates) { + uint64_t num_evictions; unsigned i; + /* We only need to validate the page tables + * if they aren't already valid. + */ + num_evictions = atomic64_read(&adev->num_evictions); + if (num_evictions == vm->last_eviction_counter) + return; + /* add the vm page table to the list */ for (i = 0; i <= vm->max_pde_used; ++i) { struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; @@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, spin_unlock(&glob->lru_lock); } +static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) +{ + return id->current_gpu_reset_count != + atomic_read(&adev->gpu_reset_counter) ? true : false; +} + /** * amdgpu_vm_grab_id - allocate the next free VMID * @@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, */ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr) + struct amdgpu_job *job) { - uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_device *adev = ring->adev; + uint64_t fence_context = adev->fence_context + ring->idx; struct fence *updates = sync->last_vm_update; - struct amdgpu_vm_id *id; - unsigned i = ring->idx; - int r; + struct amdgpu_vm_id *id, *idle; + struct fence **fences; + unsigned i; + int r = 0; + + fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, + GFP_KERNEL); + if (!fences) + return -ENOMEM; mutex_lock(&adev->vm_manager.lock); + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); + if (!fences[i]) + break; + ++i; + } + + /* If we can't find a idle VMID to use, wait till one becomes available */ + if (&idle->list == &adev->vm_manager.ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct fence_array *array; + unsigned j; + + for (j = 0; j < i; ++j) + fence_get(fences[j]); + + array = fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + fence_put(fences[j]); + kfree(fences); + r = -ENOMEM; + goto error; + } + + + r = amdgpu_sync_fence(ring->adev, sync, &array->base); + fence_put(&array->base); + if (r) + goto error; + + mutex_unlock(&adev->vm_manager.lock); + return 0; + + } + kfree(fences); + + job->vm_needs_flush = true; /* Check if we can use a VMID already assigned to this VM */ + i = ring->idx; do { struct fence *flushed; @@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check all the prerequisites to using this VMID */ if (!id) continue; + if (amdgpu_vm_is_gpu_reset(adev, id)) + continue; if (atomic64_read(&id->owner) != vm->client_id) continue; - if (pd_addr != id->pd_gpu_addr) + if (job->vm_pd_addr != id->pd_gpu_addr) continue; - if (id->last_user != ring && - (!id->last_flush || !fence_is_signaled(id->last_flush))) + if (!id->last_flush) 
continue; - flushed = id->flushed_updates; - if (updates && (!flushed || fence_is_later(updates, flushed))) + if (id->last_flush->context != fence_context && + !fence_is_signaled(id->last_flush)) continue; - /* Good we can use this VMID */ - if (id->last_user == ring) { - r = amdgpu_sync_fence(ring->adev, sync, - id->first); - if (r) - goto error; - } + flushed = id->flushed_updates; + if (updates && + (!flushed || fence_is_later(updates, flushed))) + continue; - /* And remember this submission as user of the VMID */ + /* Good we can use this VMID. Remember this submission as + * user of the VMID. + */ r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = AMDGPU_VM_NO_FLUSH; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + job->vm_needs_flush = false; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); mutex_unlock(&adev->vm_manager.lock); return 0; } while (i != ring->idx); - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); - - if (!amdgpu_sync_is_idle(&id->active)) { - struct list_head *head = &adev->vm_manager.ids_lru; - struct amdgpu_vm_id *tmp; + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; - list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru, - list) { - if (amdgpu_sync_is_idle(&id->active)) { - list_move(&id->list, head); - head = &id->list; - } - } - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); - } - - r = amdgpu_sync_cycle_fences(sync, &id->active, fence); + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; @@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, fence_put(id->flushed_updates); id->flushed_updates = fence_get(updates); - id->pd_gpu_addr = pd_addr; - + id->pd_gpu_addr = job->vm_pd_addr; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); - id->last_user = ring; atomic64_set(&id->owner, vm->client_id); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = pd_addr; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + trace_amdgpu_vm_grab_id(vm, ring->idx, job); error: mutex_unlock(&adev->vm_manager.lock); return r; } +static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + const struct amdgpu_ip_block_version *ip_block; + + if (ring->type != AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + return false; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (!ip_block) + return false; + + if (ip_block->major <= 7) { + /* gfx7 has no workaround */ + return true; + } else if (ip_block->major == 8) { + if (adev->gfx.mec_fw_version >= 673) + /* gfx8 is fixed in MEC firmware 673 */ + return false; + else + return true; + } + return false; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -294,59 +370,52 @@ error: * * Emit a VM flush when it is necessary. 
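*
* With this rework the flush state moves into the job: a pipeline sync
* is emitted when the job needs a flush, when the GDS configuration
* changes, or when the ring is a compute ring hit by the MEC firmware
* bug checked above; the VM flush itself is emitted when
* job->vm_needs_flush is set or when amdgpu_vm_is_gpu_reset() reports
* that the VMID was invalidated by a GPU reset. The fence emitted after
* the flush is stored as id->last_flush so later submissions on the
* same VMID can skip redundant flushes.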
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; + struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( - id->gds_base != gds_base || - id->gds_size != gds_size || - id->gws_base != gws_base || - id->gws_size != gws_size || - id->oa_base != oa_base || - id->oa_size != oa_size); + id->gds_base != job->gds_base || + id->gds_size != job->gds_size || + id->gws_base != job->gws_base || + id->gws_size != job->gws_size || + id->oa_base != job->oa_base || + id->oa_size != job->oa_size); int r; if (ring->funcs->emit_pipeline_sync && ( - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || - ring->type == AMDGPU_RING_TYPE_COMPUTE)) + job->vm_needs_flush || gds_switch_needed || + amdgpu_vm_ring_has_compute_vm_bug(ring))) amdgpu_ring_emit_pipeline_sync(ring); - if (ring->funcs->emit_vm_flush && - pd_addr != AMDGPU_VM_NO_FLUSH) { + if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || + amdgpu_vm_is_gpu_reset(adev, id))) { struct fence *fence; - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); - amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); + trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + + r = amdgpu_fence_emit(ring, &fence); + if (r) + return r; mutex_lock(&adev->vm_manager.lock); - if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) { - r = amdgpu_fence_emit(ring, &fence); - if (r) { - mutex_unlock(&adev->vm_manager.lock); - return r; - } - fence_put(id->last_flush); - id->last_flush = fence; - } + fence_put(id->last_flush); + id->last_flush = fence; mutex_unlock(&adev->vm_manager.lock); } if (gds_switch_needed) { - id->gds_base = gds_base; - id->gds_size = gds_size; - id->gws_base = gws_base; - id->gws_size = gws_size; - id->oa_base = oa_base; - id->oa_size = oa_size; - amdgpu_ring_emit_gds_switch(ring, vm_id, - gds_base, gds_size, - gws_base, gws_size, - oa_base, oa_size); + id->gds_base = job->gds_base; + id->gds_size = job->gds_size; + id->gws_base = job->gws_base; + id->gws_size = job->gws_size; + id->oa_base = job->oa_base; + id->oa_size = job->oa_size; + amdgpu_ring_emit_gds_switch(ring, job->vm_id, + job->gds_base, job->gds_size, + job->gws_base, job->gws_size, + job->oa_base, job->oa_size); } return 0; @@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range - * @dst: destination address to map to + * @dst: destination address to map to, the next dst inside the function * @flags: mapping flags * * Update the page tables in the range @start - @end. 
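The following hunk restructures the amdgpu_vm_update_ptes() walk so that page-table windows whose GPU addresses turn out to be consecutive are merged into one pending range and flushed with a single amdgpu_vm_frag_ptes() call, instead of one call per page table. Below is a minimal, self-contained sketch of the same coalescing pattern; PTES_PER_PT, pt_gpu_offset() and emit_range() are hypothetical stand-ins for this illustration, not driver API:

#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PT 512ull		/* stand-in for AMDGPU_VM_PTE_COUNT */
#define GPU_PAGE    4096ull

/* hypothetical: GPU offset of page table 'idx'; tables 0..2 sit
 * back-to-back, table 3 lives elsewhere so the merge has to break */
static uint64_t pt_gpu_offset(uint64_t idx)
{
	return idx < 3 ? 0x10000ull + idx * PTES_PER_PT * 8 : 0x900000ull;
}

/* hypothetical: stands in for amdgpu_vm_frag_ptes() */
static void emit_range(uint64_t pe_start, uint64_t pe_end, uint64_t dst)
{
	printf("write PTEs [%#llx, %#llx) -> %#llx\n",
	       (unsigned long long)pe_start,
	       (unsigned long long)pe_end,
	       (unsigned long long)dst);
}

int main(void)
{
	const uint64_t mask = PTES_PER_PT - 1;
	uint64_t start = 100, end = 4 * PTES_PER_PT - 50;
	uint64_t addr = start, dst = 0;
	uint64_t nptes = ((addr & ~mask) == (end & ~mask))
			? end - addr : PTES_PER_PT - (addr & mask);
	uint64_t cur_pe_start = pt_gpu_offset(addr / PTES_PER_PT)
			+ (addr & mask) * 8;
	uint64_t cur_pe_end = cur_pe_start + 8 * nptes;
	uint64_t cur_dst = dst;

	addr += nptes;
	dst += nptes * GPU_PAGE;

	while (addr < end) {
		uint64_t next_pe_start;

		nptes = ((addr & ~mask) == (end & ~mask))
			? end - addr : PTES_PER_PT - (addr & mask);
		next_pe_start = pt_gpu_offset(addr / PTES_PER_PT)
			+ (addr & mask) * 8;

		if (next_pe_start == cur_pe_end) {
			/* consecutive window: widen the pending update */
			cur_pe_end += 8 * nptes;
		} else {
			/* gap: flush what we have, start a new range */
			emit_range(cur_pe_start, cur_pe_end, cur_dst);
			cur_pe_start = next_pe_start;
			cur_pe_end = next_pe_start + 8 * nptes;
			cur_dst = dst;
		}
		addr += nptes;
		dst += nptes * GPU_PAGE;
	}
	emit_range(cur_pe_start, cur_pe_end, cur_dst);	/* the tail */
	return 0;
}

Compiled with any C compiler this prints two merged ranges, one spanning the three contiguous tables and one for the relocated fourth, which is the behaviour the hunk below gives the real update path.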
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; - uint64_t addr; + uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t addr; /* next GPU address to be updated */ + uint64_t pt_idx; + struct amdgpu_bo *pt; + unsigned nptes; /* next number of ptes to be updated */ + uint64_t next_pe_start; + + /* initialize the variables */ + addr = start; + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + cur_pe_start = amdgpu_bo_gpu_offset(pt); + cur_pe_start += (addr & mask) * 8; + cur_pe_end = cur_pe_start + 8 * nptes; + cur_dst = dst; + + /* for next ptb*/ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; ) { - uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; - unsigned nptes; - uint64_t pe_start; + while (addr < end) { + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pe_start = amdgpu_bo_gpu_offset(pt); - pe_start += (addr & mask) * 8; - - if (last_pe_end != pe_start) { + next_pe_start = amdgpu_bo_gpu_offset(pt); + next_pe_start += (addr & mask) * 8; + if (cur_pe_end == next_pe_start) { + /* The next ptb is consecutive to current ptb. + * Don't call amdgpu_vm_frag_ptes now. + * Will update two ptbs together in future. + */ + cur_pe_end += 8 * nptes; + } else { amdgpu_vm_frag_ptes(adev, vm_update_params, - last_pe_start, last_pe_end, - last_dst, flags); + cur_pe_start, cur_pe_end, + cur_dst, flags); - last_pe_start = pe_start; - last_pe_end = pe_start + 8 * nptes; - last_dst = dst; - } else { - last_pe_end += 8 * nptes; + cur_pe_start = next_pe_start; + cur_pe_end = next_pe_start + 8 * nptes; + cur_dst = dst; } + /* for next ptb*/ addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, - last_pe_end, last_dst, flags); + amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, + cur_pe_end, cur_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @src: address where to copy page table entries from * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, * Returns 0 for success, -EINVAL for failure. 
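*
* The new @exclusive argument is the exclusive (write) fence taken from
* the BO's reservation object; amdgpu_vm_bo_update() passes it down and
* this function adds it to the job's sync object, so the page-table
* update is ordered after the last move or write of the buffer.
* amdgpu_vm_clear_freed() passes NULL here since the BO is already gone.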
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint64_t src, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, vm_update_params.ib = &job->ibs[0]; + r = amdgpu_sync_fence(adev, &job->sync, exclusive); + if (r) + goto error_free; + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, owner); if (r) @@ -889,6 +989,7 @@ error_free: * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -902,6 +1003,7 @@ error_free: * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint32_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, addr += mapping->offset; if (!pages_addr || src) - return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + return amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, mapping->it.last, flags, addr, fence); @@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t last; last = min((uint64_t)mapping->it.last, start + max_size - 1); - r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, last, flags, addr, fence); if (r) @@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct fence *exclusive; uint64_t addr; int r; @@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, default: break; } + + exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); } else { addr = 0; + exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, + gtt_flags, pages_addr, vm, mapping, flags, addr, &bo_va->last_pt_update); if (r) @@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping, + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, 0, 0, NULL); kfree(mapping); if (r) @@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unreserve(vm->page_directory); if (r) goto error_free_page_directory; + vm->last_eviction_counter = atomic64_read(&adev->num_evictions); return 0; @@ -1516,6 +1626,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) &adev->vm_manager.ids_lru); } + adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + adev->vm_manager.seqno[i] = 0; + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 48b6bd671cda..c32eca26155c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 5ec1f1e9c983..e2f0e5d58d5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -50,7 +50,9 @@ #include "gmc/gmc_7_1_sh_mask.h" MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt = { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }; +#if 0 static const struct ci_pt_defaults defaults_bonaire_pro = { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } }; +#endif static const struct ci_pt_defaults defaults_saturn_xt = { @@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt = { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } }; +#if 0 static const struct ci_pt_defaults defaults_saturn_pro = { 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } }; +#endif static const struct ci_pt_config_reg didt_config_ci[] = { @@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) if (pi->caps_sq_ramping || pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = ci_program_pt_config_registers(adev, didt_config_ci); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } ci_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -3030,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (memory_clock <= pi->mclk_stutter_mode_threshold) && - (pi->uvd_enabled == false) && + (!pi->uvd_enabled) && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) memory_level->StutterEnable = true; @@ -3636,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) ci_setup_default_pcie_tables(adev); + /* save a copy of the default DPM table */ + memcpy(&(pi->golden_dpm_table), &(pi->dpm_table), + sizeof(struct ci_dpm_table)); + return 0; } @@ -5754,10 +5764,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - chip_name = "bonaire"; + if 
((adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x665f)) + chip_name = "bonaire_k"; + else + chip_name = "bonaire"; break; case CHIP_HAWAII: - chip_name = "hawaii"; + if (adev->pdev->revision == 0x80) + chip_name = "hawaii_k"; + else + chip_name = "hawaii"; break; case CHIP_KAVERI: case CHIP_KABINI: @@ -6404,6 +6422,186 @@ static int ci_dpm_set_powergating_state(void *handle, return 0; } +static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, char *buf) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = ci_get_current_pcie_speed(adev); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + + return size; +} + +static int ci_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, uint32_t mask) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (adev->pm.dpm.forced_level + != AMDGPU_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!pi->sclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + + case PP_MCLK: + if (!pi->mclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + + case PP_PCIE: + { + uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!pi->pcie_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].sclk = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].mclk = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + const struct amd_ip_funcs ci_dpm_ip_funcs = { .name = "ci_dpm", .early_init = ci_dpm_early_init, @@ -6438,6 +6636,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { .get_fan_control_mode = &ci_dpm_get_fan_control_mode, .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, + 
.print_clock_levels = ci_dpm_print_clock_levels, + .force_clock_level = ci_dpm_force_clock_level, + .get_sclk_od = ci_dpm_get_sclk_od, + .set_sclk_od = ci_dpm_set_sclk_od, + .get_mclk_od = ci_dpm_get_mclk_od, + .set_mclk_od = ci_dpm_set_mclk_od, }; static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h index faccc30c93bf..91be2996ae7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h @@ -193,6 +193,7 @@ struct ci_pt_defaults { struct ci_power_info { struct ci_dpm_table dpm_table; + struct ci_dpm_table golden_dpm_table; u32 voltage_control; u32 mvdd_control; u32 vddci_control; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 910431808542..4efc901f658c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state) uint32_t tmp; tmp = RREG32(mmCONFIG_CNTL); - if (state == false) + if (!state) tmp |= CONFIG_CNTL__VGA_DIS_MASK; else tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; @@ -1035,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -1158,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) +static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; + int r = -EINVAL; dev_info(adev->dev, "GPU pci config reset\n"); @@ -1177,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + r = 0; break; + } udelay(1); } /* does asic init need to be run first??? 
*/ if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); + + return r; } static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -1210,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { + int r; cik_set_bios_scratch_engine_hung(adev, true); - cik_gpu_pci_config_reset(adev); + r = cik_gpu_pci_config_reset(adev); cik_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -2014,9 +2022,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, .get_virtual_caps = &cik_get_virtual_caps, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 9dc4e24e31e7..ee6466912497 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 extra_bits = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 4) - next_rptr++; - - next_rptr += 4; - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); /* number of DWs to follow */ - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); @@ -365,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable) u32 me_cntl; int i; - if (enable == false) { + if (!enable) { cik_sdma_gfx_stop(adev); cik_sdma_rlc_stop(adev); } @@ -628,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (CIK). * Returns 0 on success, error on failure. 
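*
* The rework below gives the IB test an explicit timeout: instead of
* busy-polling the writeback slot, it blocks on the IB's fence with
* fence_wait_timeout(), mapping a zero return to -ETIMEDOUT and a
* negative one to the fence error, and only then checks the slot once
* for the 0xDEADBEEF marker. The gfx_v7_0 and gfx_v8_0 IB tests later
* in this patch get the same treatment.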
*/ -static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) +static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -651,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } - ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; @@ -665,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 933e425a8154..2a11413ed54a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->mgcg_cgtt_local1 = 0x0; pi->clock_slow_down_step = 25000; pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = 0; + pi->enable_nb_ps_policy = false; pi->caps_power_containment = true; pi->caps_cac = true; pi->didt_enabled = false; @@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) } } } else { /*pi->caps_vce_pg*/ + pi->vce_power_gated = gate; cz_update_vce_dpm(adev); cz_enable_vce_dpm(adev, !gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 8227344d2ff6..c1b04e9aab57 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v10_0_crtc_load_lut(crtc); + + return 0; } static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) @@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v10_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index af26ec0bc59d..d4bf133908b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev, struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; u32 tmp; - /* flip at hsync for async, default is vsync */ - /* use UPDATE_IMMEDIATE_EN instead for async? */ + /* flip immediate for async, default is vsync */ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); + GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); /* update the scanout addresses */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v11_0_crtc_load_lut(crtc); + + return 0; } static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) @@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v11_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3fb65e41a6ef..4fdfab1e9200 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), CRTC_CONTROL, CRTC_MASTER_EN); if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - +#if 1 save->crtc_enabled[i] = true; tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + /*it is correct only for RGB ; black is 0*/ + WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); } + mdelay(20); #else /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); @@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { - u32 tmp, frame_count; - int i, j; + u32 tmp; + int i; /* update crtc base addresses */ for (i = 0; i < adev->mode_info.num_crtc; i++) { WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(adev->mc.vram_start)); 
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } } + mdelay(20); } WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); @@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v8_0_crtc_load_lut(crtc); + + return 0; } static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) @@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v8_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index b336c918d6a7..b3e19ba4c57f 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!fiji_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index fc8ff4d3ccf8..d869d058ef24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1583,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * registers are instanced per SE or SH. 0xffffffff means * broadcast to all SEs or SHs (CIK). 
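*
* The helper also becomes static and gains an @instance argument:
* 0xffffffff sets INSTANCE_BROADCAST_WRITES in GRBM_GFX_INDEX, while
* any other value programs INSTANCE_INDEX to address one instance.
* Code outside gfx_v7_0 now reaches it through amdgpu_gfx_select_se_sh(),
* as seen in the cik.c hunk above.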
*/ -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | @@ -1659,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1746,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -2050,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -2089,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); - - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - control |= ib->length_dw | (vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -2123,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, * Provides a basic gfx ring test to verify that IBs are working. * Returns 0 on success, error on failure. 
*/ -static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -2154,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); @@ -2176,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -3221,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } } adev->gfx.rlc.cs_data = ci_cs_data; - adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ src_ptr = adev->gfx.rlc.reg_list; dws = adev->gfx.rlc.reg_list_size; @@ -3379,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3387,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3434,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) return orig; } -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp, i, mask; @@ -3456,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) } } -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp; @@ -3471,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) * * Halt the RLC ME (MicroEngine) (CIK). 
*/ -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) { WREG32(mmRLC_CNTL, 0); @@ -3547,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3587,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3638,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3689,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -3867,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } +static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -4123,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. 
*/ -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -4183,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v7_0_select_se_sh, +}; + +static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { + .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, + .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode +}; + static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v7_0_gfx_funcs; + adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; gfx_v7_0_set_ring_funcs(adev); gfx_v7_0_set_irq_funcs(adev); gfx_v7_0_set_gds_init(adev); @@ -5032,16 +5037,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v7_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -5057,7 +5068,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index e747aa935c88..94e3ea147c26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev); -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index c2ef94511f70..bff8668e9e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -787,26 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } -static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - unsigned i; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg 
(%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -818,28 +817,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; goto err2; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } err2: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err1: @@ -1160,6 +1156,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, buffer[count++] = cpu_to_le32(0); } +static void cz_init_cp_jump_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_CARRIZO) + max_me = 5; + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 4) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + 
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { int r; @@ -1175,6 +1236,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } + + /* jump table block */ + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1231,6 +1304,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ + if (adev->gfx.rlc.cp_table_obj == NULL) { + r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + return r; + } + + cz_init_cp_jump_table(adev); + + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + } + return 0; } @@ -1612,7 +1725,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) RREG32(sec_ded_counter_registers[i]); fail: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); @@ -3339,9 +3451,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); @@ -3391,13 +3509,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 
0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -3501,7 +3619,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3524,7 +3642,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3532,7 +3650,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3693,13 +3811,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) WREG32(mmRLC_SRM_CNTL, data); } -static void polaris11_init_power_gating(struct amdgpu_device *adev) +static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) { uint32_t data; if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG)) { + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG)) { data = RREG32(mmCP_RB_WPTR_POLL_CNTL); data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); @@ -3724,6 +3842,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev) } } +static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; + else + data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + static void gfx_v8_0_init_pg(struct amdgpu_device *adev) { if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | @@ -3736,8 +3901,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); - if (adev->asic_type == CHIP_POLARIS11) - polaris11_init_power_gating(adev); + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); + gfx_v8_0_init_power_gating(adev); + 
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + } else if (adev->asic_type == CHIP_POLARIS11) { + gfx_v8_0_init_power_gating(adev); + } } } @@ -4976,7 +5158,7 @@ static int gfx_v8_0_soft_reset(void *handle) * Fetches a GPU clock counter snapshot. * Returns the 64 bit clock counter snapshot. */ -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -5036,12 +5218,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v8_0_select_se_sh, +}; + static int gfx_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v8_0_gfx_funcs; gfx_v8_0_set_ring_funcs(adev); gfx_v8_0_set_irq_funcs(adev); gfx_v8_0_set_gds_init(adev); @@ -5074,51 +5262,43 @@ static int gfx_v8_0_late_init(void *handle) return 0; } -static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - /* Send msg to SMU via Powerplay */ - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_SMC, - enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); + if (adev->asic_type == CHIP_POLARIS11) + /* Send msg to SMU via Powerplay */ + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + enable ? 
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - if (enable) { - /* Enable static MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable static MGPG */ + if (enable) data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } -static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - if (enable) { - /* Enable dynamic MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable dynamic MGPG */ + if (enable) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, @@ -5126,19 +5306,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade { uint32_t data, temp; - if (enable) { - /* Enable quick PG */ - temp = data = RREG32(mmRLC_PG_CNTL); - data |= 0x100000; + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable quick PG */ + if (enable) + data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); - data &= ~0x100000; + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); +static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + /* Read any GFX register to wake up GFX. */ + if (!enable) + data = RREG32(mmDB_RENDER_CONTROL); +} + +static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { + cz_enable_gfx_cg_power_gating(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + cz_enable_gfx_pipeline_power_gating(adev, true); + } else { + cz_enable_gfx_cg_power_gating(adev, false); + cz_enable_gfx_pipeline_power_gating(adev, false); } } @@ -5146,21 +5370,42 @@ static int gfx_v8_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + cz_update_gfx_cg_power_gating(adev, enable); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + break; case CHIP_POLARIS11: - if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) - polaris11_enable_gfx_static_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); - else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) - polaris11_enable_gfx_dynamic_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable) + polaris11_enable_gfx_quick_mg_power_gating(adev, true); else - polaris11_enable_gfx_quick_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + polaris11_enable_gfx_quick_mg_power_gating(adev, false); break; default: break; @@ -5174,7 +5419,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -5562,6 +5807,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev WREG32(mmRLC_CGCG_CGLS_CTRL, data); } + gfx_v8_0_wait_for_rlc_serdes(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, @@ -5687,17 +5934,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -5726,23 +5962,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - header = 
PACKET3(PACKET3_INDIRECT_BUFFER, 2); - - control |= ib->length_dw | (vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -6195,9 +6417,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - case CHIP_STONEY: adev->gfx.rlc.funcs = &iceland_rlc_funcs; break; + case CHIP_STONEY: case CHIP_CARRIZO: adev->gfx.rlc.funcs = &cz_rlc_funcs; break; @@ -6235,6 +6457,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } +static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -6255,16 +6491,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v8_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -6280,7 +6522,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index 16a49f53a2fa..bc82c794312c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,7 +26,6 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 1feb6439cb0b..d24a82bd0c7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -39,6 +39,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v7_0_wait_for_idle(void *handle); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); @@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc7_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
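The gmc_v7/gmc_v8 hunks around this point fold the exported *_mc_wait_for_idle() helpers into their drivers as static wait_for_idle() callbacks. The underlying idiom is a bounded busy-poll on a status register. Below is a minimal standalone sketch of that idiom; read_status(), udelay(), BUSY_MASK and USEC_TIMEOUT are stubbed stand-ins for the real MMIO accessor, kernel delay, SRBM busy bits and adev->usec_timeout, not the driver's actual symbols.

#include <stdint.h>

/* Stubbed stand-ins; in the driver these are a real MMIO read of
 * mmSRBM_STATUS and a real microsecond delay. */
static uint32_t read_status(void) { return 0; }   /* 0 == idle here */
static void udelay(unsigned usec) { (void)usec; }

#define BUSY_MASK    0x1F00u   /* busy bits the gmc_v7 loop checks */
#define USEC_TIMEOUT 100000    /* plays the role of adev->usec_timeout */

static int wait_for_idle(void)
{
	unsigned i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (!(read_status() & BUSY_MASK))
			return 0;      /* controller went idle */
		udelay(1);
	}
	return -1;                     /* timed out, as in the original */
}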
- */ -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & 0x1F00; - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v7_0_mc_resume(adev, &save); @@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v7_0_mc_stop(adev, &save); - if (gmc_v7_0_wait_for_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 36fcbbc46ada..0b386b5d2f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9945d5bbf1fe..717359d3ba8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -41,6 +41,7 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc8_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
- */ -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | - SRBM_STATUS__MCB_BUSY_MASK | - SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | - SRBM_STATUS__MCD_BUSY_MASK | - SRBM_STATUS__VMC1_BUSY_MASK); - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v8_0_mc_resume(adev, &save); @@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index 973436086b38..fc5001a8119d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 825ccd63f2dc..2f078ad6095c 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -24,7 +24,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 52ee08193295..211839913728 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -25,7 +25,7 @@ #include "drmP.h" #include "amdgpu.h" #include "ppsmc.h" -#include "iceland_smumgr.h" +#include "iceland_smum.h" #include "smu_ucode_xfer_vi.h" #include 
"amdgpu_ucode.h" @@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!iceland_is_smc_ram_running(adev)) - return -EINVAL;; + return -EINVAL; if (wait_smu_response(adev)) { DRM_ERROR("Failed to send previous message\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h index 1e0769e110fa..5983e3150cc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h @@ -21,8 +21,8 @@ * */ -#ifndef ICELAND_SMUMGR_H -#define ICELAND_SMUMGR_H +#ifndef ICELAND_SMUM_H +#define ICELAND_SMUM_H #include "ppsmc.h" diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a789a863d677..a845e883f5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, vid_mapping_table->num_entries = i; } +#if 0 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, @@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; +#endif static const struct kv_pt_config_reg didt_config_kv[] = { @@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } kv_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h index 7837f2ecc357..8463245f424f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h +++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h @@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result; #define PPSMC_StartFanControl ((uint8_t)0x5B) #define PPSMC_StopFanControl ((uint8_t)0x5C) #define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) +#define PPSMC_NoDisplay ((uint8_t)0x5D) #define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) +#define PPSMC_HasDisplay ((uint8_t)0x5E) #define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) #define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) #define PPSMC_MSG_EnableULV ((uint8_t)0x62) @@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_DisableDTE ((uint8_t)0x88) #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) /* CI/KV/KB */ #define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) @@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) #define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) #define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) #define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) #define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index b556bd0a8797..1351c7e834a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring 
*ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -406,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v2_4_gfx_stop(adev); sdma_v2_4_rlc_stop(adev); } @@ -580,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; - if (!adev->firmware.smu_load) { - r = sdma_v2_4_load_microcode(adev); - if (r) - return r; - } else { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA0); - if (r) - return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_SDMA1); - if (r) - return -EINVAL; + if (!adev->pp_enabled) { + if (!adev->firmware.smu_load) { + r = sdma_v2_4_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA0); + if (r) + return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } } /* halt the engine before programing */ @@ -679,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
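The ring_test_ib() rewrites in the gfx_v8 hunk earlier and the sdma hunks that follow all share one shape: the open-coded DRM_UDELAY() polling loop is dropped and a single fence_wait_timeout() call takes its place, with the tri-state return (negative = wait error, 0 = timeout, positive = signaled) mapped onto the old outcomes before the scratch or writeback slot is checked once. A condensed sketch of just that control flow; the fence and readback helpers are stubbed assumptions, not the driver's functions.

#include <errno.h>
#include <stdint.h>

/* Stand-ins: fence_wait_timeout() returns <0 on error, 0 on timeout,
 * >0 on success; read_back() models the scratch/writeback readback. */
static long fence_wait_timeout(void *fence, int intr, long timeout)
{ (void)fence; (void)intr; return timeout; }
static uint32_t read_back(void) { return 0xDEADBEEF; }

static long ib_test_wait(void *fence, long timeout)
{
	long r = fence_wait_timeout(fence, 0, timeout);

	if (r == 0)            /* fence never signaled in time */
		return -ETIMEDOUT;
	if (r < 0)             /* the wait itself failed */
		return r;
	/* signaled: check that the IB actually wrote the marker */
	return read_back() == 0xDEADBEEF ? 0 : -EINVAL;
}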
*/ -static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -702,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -721,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 532ea88da66a..653ce5ed55ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -616,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v3_0_gfx_stop(adev); sdma_v3_0_rlc_stop(adev); } @@ -908,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
*/ -static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; - unsigned i; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -931,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -950,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; goto err1; - } - for (i = 0; i < adev->usec_timeout; i++) { - tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i < adev->usec_timeout) { - DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ring->idx, i); + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } err1: - fence_put(f); amdgpu_ib_free(adev, &ib, NULL); fence_put(f); err0: diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 083893dd68c0..940de1836f8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!tonga_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f07551476a70..132e613ed674 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -34,6 +34,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_4_1_d.h" + static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); @@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
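The emit_hdp_flush/emit_hdp_invalidate callbacks added here for UVD v4, and in the matching v5/v6 hunks below, are each a single PACKET0 register write placed on the ring: one dword of header selecting the register, one dword of payload. A toy model of that two-dword emission follows; the PACKET0 encoding and ring structure are deliberately simplified assumptions (the real layouts come from the asic-specific headers).

#include <stdint.h>

/* Simplified PACKET0 model: register write, one dword payload.
 * The real encoding lives in the asic headers. */
#define PACKET0(reg) ((uint32_t)(reg) & 0xFFFF)

struct ring { uint32_t buf[64]; unsigned wptr; };

static void ring_write(struct ring *r, uint32_t dw)
{
	r->buf[r->wptr++ % 64] = dw;
}

/* Mirrors the shape of uvd_vX_0_ring_emit_hdp_flush():
 * one register select, one value. */
static void emit_reg_write(struct ring *r, uint32_t reg, uint32_t val)
{
	ring_write(r, PACKET0(reg));
	ring_write(r, val);
}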
+ */ +static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v4_2_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, } /** - * uvd_v4_2_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - -/** * uvd_v4_2_mc_resume - memory controller programming * * @adev: amdgpu_device pointer @@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, + .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate, .test_ring = uvd_v4_2_ring_test_ring, - .test_ib = uvd_v4_2_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e0a76a883d46..101de136ba63 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -31,6 +31,7 @@ #include "uvd/uvd_5_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_5_0_d.h" #include "vi.h" static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); @@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
+ */ +static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v5_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, + .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, - .test_ib = uvd_v5_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index c9929d665c01..7f21102bfb99 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -33,6 +33,8 @@ #include "oss/oss_2_0_sh_mask.h" #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" +#include "bif/bif_5_1_d.h" +#include "gmc/gmc_8_1_d.h" #include "vi.h" static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); @@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uint32_t mp_swap_cntl; int i, j, r; - /*disable DPG */ - WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + /* disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); /* disable byte swapping */ lmi_swap_cntl = 0; @@ -405,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) } /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); mdelay(1); /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + WREG32(mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); mdelay(5); @@ -424,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + WREG32(mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -447,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -484,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) return r; } /* enable master interrupt */ - WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + WREG32_P(mmUVD_MASTINT_EN, + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); /* clear the bit 4 of UVD_STATUS */ - WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); rb_bufsz = order_base_2(ring->ring_size); tmp = 0; @@ -581,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
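The uvd_v6_0_start() hunk just above trades magic shifts for named UVD_*_MASK macros fed to WREG32_P(reg, val, ~mask), which is a masked read-modify-write: bits covered by the mask argument are preserved from the current register value, the rest come from val. A standalone model of that helper over a fake register file; reg_read()/reg_write() are stand-ins for the real accessors.

#include <stdint.h>

static uint32_t regs[0x10000];                    /* fake register file */
static uint32_t reg_read(uint32_t reg) { return regs[reg & 0xFFFF]; }
static void reg_write(uint32_t reg, uint32_t v) { regs[reg & 0xFFFF] = v; }

/* Modeled on WREG32_P(reg, val, mask): bits selected by mask are
 * preserved, the remaining bits are taken from val. */
static void reg_write_masked(uint32_t reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = reg_read(reg);

	tmp &= mask;           /* keep the masked-in bits */
	tmp |= val & ~mask;    /* replace everything else */
	reg_write(reg, tmp);
}

Under this model a call like reg_write_masked(reg, FIELD_MASK, ~FIELD_MASK) sets exactly that field and leaves the rest of the register alone, which is the shape of every WREG32_P() line in the hunk.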
+ */ +static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v6_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -634,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); @@ -642,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v6_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) +static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct fence *fence = NULL; - int r; + uint32_t reg; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } + if (vm_id < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + else + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, pd_addr >> 12); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xC); +} + +static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xE); } static bool uvd_v6_0_is_idle(void *handle) @@ 
-847,7 +905,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? true : false; static int curstate = -1; - if (adev->asic_type == CHIP_FIJI) + if (adev->asic_type == CHIP_FIJI || + adev->asic_type == CHIP_POLARIS10) uvd_v6_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) @@ -912,22 +971,51 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_powergating_state = uvd_v6_0_set_powergating_state, }; -static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { +static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, .set_wptr = uvd_v6_0_ring_set_wptr, .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, + .test_ring = uvd_v6_0_ring_test_ring, + .test_ib = amdgpu_uvd_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, +}; + +static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { + .get_rptr = uvd_v6_0_ring_get_rptr, + .get_wptr = uvd_v6_0_ring_get_wptr, + .set_wptr = uvd_v6_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = uvd_v6_0_ring_emit_ib, + .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, + .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, .test_ring = uvd_v6_0_ring_test_ring, - .test_ib = uvd_v6_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; + if (adev->asic_type >= CHIP_POLARIS10) { + adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + DRM_INFO("UVD is enabled in VM mode\n"); + } else { + adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + DRM_INFO("UVD is enabled in physical mode\n"); + } } static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 45d92aceb485..80a37a602181 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 30e8099e94c5..c271abffd8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,6 +43,7 @@ #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -51,6 +52,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void 
vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); +static int vce_v3_0_wait_for_idle(void *handle); /** * vce_v3_0_ring_get_rptr - get read pointer @@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, vce_v3_0_override_vce_clock_gating(adev, false); } +static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v3_0_start - start VCE block * @@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, static int vce_v3_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int idx, i, j, r; + int idx, r; + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, ring->wptr); + WREG32(mmVCE_RB_WPTR, ring->wptr); + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, ring->wptr); + WREG32(mmVCE_RB_WPTR2, ring->wptr); + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { - if (adev->vce.harvest_config & (1 << idx)) continue; @@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev) vce_v3_0_mc_resume(adev, idx); - /* set BUSY flag */ - WREG32_P(mmVCE_STATUS, 1, ~1); + WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, + ~VCE_STATUS__JOB_BUSY_MASK); + if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - mdelay(100); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + mdelay(100); + + r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~1); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) @@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); mutex_unlock(&adev->grbm_idx_mutex); - ring = &adev->vce.ring[0]; - WREG32(mmVCE_RB_RPTR, ring->wptr); - WREG32(mmVCE_RB_WPTR, ring->wptr); - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 
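The vce_v3_0_firmware_loaded() helper factored out earlier in this hunk wraps a two-level retry: poll the status report for a bounded number of rounds, and between rounds pulse an ECPU soft reset before trying again. A compact sketch of that loop with the same 10 x 100 x 10ms bounds; fw_loaded(), reset_ecpu() and msleep() are stubbed stand-ins for the VCE_STATUS poll, the soft-reset sequence and the kernel delay.

#include <errno.h>

static int fw_loaded(void) { return 1; }   /* VCE_STATUS poll, stubbed */
static void reset_ecpu(void) { }           /* soft-reset pulse, stubbed */
static void msleep(unsigned ms) { (void)ms; }

static int wait_fw_loaded(void)
{
	int round, poll;

	for (round = 0; round < 10; ++round) {
		for (poll = 0; poll < 100; ++poll) {
			if (fw_loaded())
				return 0;
			msleep(10);
		}
		reset_ecpu();   /* not responding: pulse reset, retry */
	}
	return -ETIMEDOUT;
}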
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + return 0; +} - ring = &adev->vce.ring[1]; - WREG32(mmVCE_RB_RPTR2, ring->wptr); - WREG32(mmVCE_RB_WPTR2, ring->wptr); - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); +static int vce_v3_0_stop(struct amdgpu_device *adev) +{ + int idx; + + mutex_lock(&adev->grbm_idx_mutex); + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + WREG32_P(mmGRBM_GFX_INDEX, 0, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + else + WREG32_P(mmGRBM_GFX_INDEX, + GRBM_GFX_INDEX__VCE_INSTANCE_MASK, + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, 0, + ~VCE_VCPU_CNTL__CLK_EN_MASK); + /* hold on ECPU */ + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + /* clear BUSY flag */ + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + + /* Set Clock-Gating off */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) + vce_v3_0_set_vce_sw_clock_gating(adev, false); + } + + WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + mutex_unlock(&adev->grbm_idx_mutex); return 0; } @@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle) static int vce_v3_0_hw_fini(void *handle) { - return 0; + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vce_v3_0_wait_for_idle(handle); + if (r) + return r; + + return vce_v3_0_stop(adev); } static int vce_v3_0_suspend(void *handle) @@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); + + if (enable) + tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + else + tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + + WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); +} + static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; int i; + if (adev->asic_type == CHIP_POLARIS10) + vce_v3_set_bypass_mode(adev, enable); + if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; @@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index a65c96029476..03a31c53aec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -203,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + r = RREG32(mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + WREG32(mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + + static const u32 tonga_mgcg_cgcg_init[] = { mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, @@ -533,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -597,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) +static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) { u32 i; @@ -612,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) - break; + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + return 0; + } udelay(1); } - + return -EINVAL; } static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -642,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun */ static int vi_asic_reset(struct amdgpu_device *adev) { + int r; + vi_set_bios_scratch_engine_hung(adev, true); - vi_gpu_pci_config_reset(adev); + r = vi_gpu_pci_config_reset(adev); vi_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -1133,9 +1161,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, .get_virtual_caps = &vi_get_virtual_caps, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, }; 
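
The vi_gc_cac_rreg()/vi_gc_cac_wreg() helpers added above follow the driver's usual indexed-register idiom: one MMIO register (mmGC_CAC_IND_INDEX) selects an internal register and a second one (mmGC_CAC_IND_DATA) transfers the value, so the two accesses must be serialized under a dedicated spinlock taken irqsave. A minimal sketch of the idiom, with placeholder names (mmEXAMPLE_INDEX, mmEXAMPLE_DATA, example_idx_lock and example_ind_rreg are illustrative, not real amdgpu symbols):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative offsets only; each real bank defines its own INDEX/DATA pair. */
#define mmEXAMPLE_INDEX	0x100
#define mmEXAMPLE_DATA	0x101

static DEFINE_SPINLOCK(example_idx_lock);

static u32 example_ind_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	/* irqsave: an interrupt handler doing the same two-step access
	 * would otherwise clobber the index between the two MMIO ops */
	spin_lock_irqsave(&example_idx_lock, flags);
	WREG32(mmEXAMPLE_INDEX, reg);	/* select the internal register */
	r = RREG32(mmEXAMPLE_DATA);	/* read it through the data window */
	spin_unlock_irqrestore(&example_idx_lock, flags);

	return r;
}
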
static int vi_common_early_init(void *handle)
@@ -1156,6 +1181,8 @@ static int vi_common_early_init(void *handle)
 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
 	adev->didt_rreg = &vi_didt_rreg;
 	adev->didt_wreg = &vi_didt_wreg;
+	adev->gc_cac_rreg = &vi_gc_cac_rreg;
+	adev->gc_cac_wreg = &vi_gc_cac_wreg;
 
 	adev->asic_funcs = &vi_asic_funcs;
 
@@ -1229,12 +1256,17 @@ static int vi_common_early_init(void *handle)
 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
 			AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
 			AMD_CG_SUPPORT_BIF_LS |
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ec4036a09f3e..a625b9137da2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
 unsigned int get_first_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
-extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
 	return (pdd->lds_base >> 16) & 0xFF;
 }
 
-extern inline unsigned int
+static inline unsigned int
 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 {
 	return (pdd->lds_base >> 60) & 0x0E;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d0d5f4baf72d..80113c335966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
 int kfd_init_apertures(struct kfd_process *process);
 
 /* Queue Context Management */
-inline uint32_t lower_32(uint64_t x);
-inline uint32_t upper_32(uint64_t x);
 struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
 
 int init_queue(struct queue **q, struct queue_properties properties);
 void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7708d90b9da9..4f3849ac8c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
 void kfd_process_create_wq(void)
 {
 	if (!kfd_process_wq)
-		kfd_process_wq = create_workqueue("kfd_process_wq");
+		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
 }
 
 void kfd_process_destroy_wq(void)
 {
 	if (kfd_process_wq) {
-		flush_workqueue(kfd_process_wq);
 		destroy_workqueue(kfd_process_wq);
 		kfd_process_wq = NULL;
 	}
@@ -330,6 +329,7 @@ err_process_pqm_init:
 	synchronize_rcu();
 	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
 err_mmu_notifier:
+	mutex_destroy(&process->mutex);
 	kfd_pasid_free(process->pasid);
 err_alloc_pasid:
 	kfree(process->queues);
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 7c2a916c1e63..5eb895fd98bf 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ 
b/drivers/gpu/drm/amd/include/amd_pcie.h @@ -37,6 +37,13 @@ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0 +/* gen: chipset 1/2, asic 1/2/3 */ +#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) + /* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000 @@ -47,4 +54,11 @@ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16 +/* 1/2/4/8/16 lanes */ +#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + #endif diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index afce1edbe250..a74a0d2ff1ca 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -26,15 +26,6 @@ #define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ /* -* Supported GPU families (aligned with amdgpu_drm.h) -*/ -#define AMD_FAMILY_UNKNOWN 0 -#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */ -#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ -#define AMD_FAMILY_VI 130 /* Iceland, Tonga */ -#define AMD_FAMILY_CZ 135 /* Carrizo */ - -/* * Supported ASIC types */ enum amd_asic_type { @@ -120,6 +111,8 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_SDMA (1 << 8) #define AMD_PG_SUPPORT_ACP (1 << 9) #define AMD_PG_SUPPORT_SAMU (1 << 10) +#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11) +#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) enum amd_pm_state_type { /* not used for dpm */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 293329719bba..809759f7bb81 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 #define mmBUS_CNTL 0x1508 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h index ebaf67bb1589..90ff7c8a6011 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h @@ -2823,4 +2823,7 @@ #define mmDC_EDC_CSINVOC_CNT 0x3192 #define mmDC_EDC_RESTORE_CNT 0x3193 +#define mmGC_CAC_IND_INDEX 0x129a +#define mmGC_CAC_IND_DATA 0x129b + #endif /* GFX_8_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h index 7d722458d9f5..4070ca3a68eb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h @@ -8730,8 +8730,6 @@ #define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11 -#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000 -#define RLC_GPM_STAT__RESERVED__SHIFT 0x12 #define 
RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000 #define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18 #define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f @@ -8764,8 +8762,10 @@ #define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13 -#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000 -#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14 +#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000 +#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14 +#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000 +#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0 #define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00 @@ -9102,8 +9102,6 @@ #define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0 -#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00 -#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8 #define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff #define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0 #define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff @@ -9124,14 +9122,8 @@ #define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8 #define RLC_SRM_DEBUG__DATA_MASK 0xffffffff #define RLC_SRM_DEBUG__DATA__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00 -#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff #define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0 -#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0 #define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00 #define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff @@ -17946,8 +17938,6 @@ #define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18 #define VGT_TF_RING_SIZE__SIZE_MASK 0xffff #define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0 #define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1 @@ -20502,8 +20492,6 @@ #define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20558,8 +20546,6 @@ #define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20614,8 +20600,6 @@ #define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0 #define 
DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20670,8 +20654,6 @@ #define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20726,8 +20708,6 @@ #define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20773,4 +20753,84 @@ #define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000 #define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 
0x0003f000L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + #endif /* GFX_8_0_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index 6f6fb34742d2..ec69869c55ff 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -111,6 +111,8 @@ #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5 #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4 #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f +#define mmUVD_GP_SCRATCH8 0x3c0a +#define mmUVD_GP_SCRATCH9 0x3c0b #define mmUVD_GP_SCRATCH4 0x3d38 #endif /* UVD_6_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 7464daf89ca1..b86aba9d019f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -49,6 +49,7 @@ enum cgs_ind_reg { CGS_IND_REG__SMC, CGS_IND_REG__UVD_CTX, CGS_IND_REG__DIDT, + CGS_IND_REG_GC_CAC, CGS_IND_REG__AUDIO_ENDPT }; @@ -112,20 +113,23 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_PCIE_DEV, + CGS_SYSTEM_INFO_PCIE_REV, CGS_SYSTEM_INFO_CG_FLAGS, CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_GFX_CU_INFO, + CGS_SYSTEM_INFO_GFX_SE_INFO, CGS_SYSTEM_INFO_ID_MAXIMUM, }; struct cgs_system_info { - uint64_t size; - uint64_t info_id; + uint64_t size; + enum cgs_system_info_id info_id; union { - void *ptr; - uint64_t value; + void *ptr; + uint64_t value; }; - uint64_t padding[13]; + uint64_t padding[13]; }; /* @@ -158,6 +162,10 @@ struct cgs_firmware_info { uint16_t feature_version; uint32_t image_size; uint64_t mc_addr; + + /* only for smc firmware */ + uint32_t ucode_start_address; + void *kptr; }; @@ -189,7 +197,6 @@ typedef unsigned long cgs_handle_t; struct cgs_acpi_method_argument { uint32_t type; - uint32_t method_length; uint32_t data_length; union{ uint32_t value; diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 
e629f8a9fe93..abbb658bdc1e 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle) static bool pp_is_idle(void *handle) { - return 0; + return false; } static int pp_wait_for_idle(void *handle) @@ -536,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, case AMD_PP_EVENT_COMPLETE_INIT: ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); break; + case AMD_PP_EVENT_READJUST_POWER_STATE: + pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps; + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; default: break; } @@ -740,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->get_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; - } + if (!hwmgr->soft_pp_table) + return -EINVAL; - return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); + *table = (char *)hwmgr->soft_pp_table; + + return hwmgr->soft_pp_table_size; } static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) @@ -759,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->set_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; + if (!hwmgr->hardcode_pp_table) { + hwmgr->hardcode_pp_table = + kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + + if (!hwmgr->hardcode_pp_table) + return -ENOMEM; + + /* to avoid powerplay crash when hardcode pptable is empty */ + memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size); } - return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); + memcpy(hwmgr->hardcode_pp_table, buf, size); + + hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; + + return amd_powerplay_reset(handle); } static int pp_dpm_force_clock_level(void *handle, @@ -806,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle, return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); } +static int pp_dpm_get_sclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_sclk_od(hwmgr); +} + +static int pp_dpm_set_sclk_od(void *handle, uint32_t value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); +} + +static int pp_dpm_get_mclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_mclk_od(hwmgr); +} + +static int pp_dpm_set_mclk_od(void *handle, uint32_t value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", 
__func__); + return 0; + } + + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -828,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_pp_table = pp_dpm_set_pp_table, .force_clock_level = pp_dpm_force_clock_level, .print_clock_levels = pp_dpm_print_clock_levels, + .get_sclk_od = pp_dpm_get_sclk_od, + .set_sclk_od = pp_dpm_set_sclk_od, + .get_mclk_od = pp_dpm_get_mclk_od, + .set_mclk_od = pp_dpm_set_mclk_od, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, @@ -909,6 +1004,44 @@ int amd_powerplay_fini(void *handle) return 0; } +int amd_powerplay_reset(void *handle) +{ + struct pp_instance *instance = (struct pp_instance *)handle; + struct pp_eventmgr *eventmgr; + struct pem_event_data event_data = { {0} }; + int ret; + + if (instance == NULL) + return -EINVAL; + + eventmgr = instance->eventmgr; + if (!eventmgr || !eventmgr->pp_eventmgr_fini) + return -EINVAL; + + eventmgr->pp_eventmgr_fini(eventmgr); + + ret = pp_sw_fini(handle); + if (ret) + return ret; + + kfree(instance->hwmgr->ps); + + ret = pp_sw_init(handle); + if (ret) + return ret; + + hw_init_power_state_table(instance->hwmgr); + + if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) + return -EINVAL; + + ret = eventmgr->pp_eventmgr_init(eventmgr); + if (ret) + return ret; + + return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data); +} + /* export this function to DAL */ int amd_powerplay_display_configuration_change(void *handle, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index d6635cc4b0fc..635fc4b48184 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = { system_config_tasks, setup_asic_tasks, enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, get_2d_performance_state_tasks, set_performance_state_tasks, initialize_thermal_controller_tasks, @@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = { setup_asic_tasks, enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, enable_disable_bapm_tasks, initialize_thermal_controller_tasks, get_2d_performance_state_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c index 5cd123472db4..b6f45fd01fa6 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c @@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) { - /* TODO */ - return 0; + return phm_disable_dynamic_state_management(eventmgr->hwmgr); } int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 436fc16dabb6..2028980f1ed4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -177,12 
+177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerdown_uvd(hwmgr); } else { cz_dpm_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -206,25 +206,26 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); cz_enable_disable_vce_dpm(hwmgr, false); - /* TODO: to figure out why vce can't be poweroff*/ + cz_dpm_powerdown_vce(hwmgr); cz_hwmgr->vce_power_gated = true; } else { cz_dpm_powerup_vce(hwmgr); cz_hwmgr->vce_power_gated = false; - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; } } } else { + cz_hwmgr->vce_power_gated = bgate; cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, !bgate); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 1f14c477d15d..8cc0df9b534a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_ps->action = cz_current_ps->action; - if ((force_high == false) && (cz_ps->action == FORCE_HIGH)) + if (!force_high && (cz_ps->action == FORCE_HIGH)) cz_ps->action = CANCEL_FORCE_HIGH; - else if ((force_high == true) && (cz_ps->action != FORCE_HIGH)) + else if (force_high && (cz_ps->action != FORCE_HIGH)) cz_ps->action = FORCE_HIGH; else cz_ps->action = DO_NOTHING; @@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; + struct cz_hwmgr *data; + + data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { @@ -1649,7 +1656,7 @@ static void cz_hw_print_display_cfg( struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); uint32_t data = 0; - if (hw_data->cc6_settings.cc6_setting_changed == true) { + if (hw_data->cc6_settings.cc6_setting_changed) { hw_data->cc6_settings.cc6_setting_changed = false; @@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { int cz_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct cz_hwmgr *cz_hwmgr; - int ret = 0; - - cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); - if (cz_hwmgr == NULL) - return -ENOMEM; - - hwmgr->backend = cz_hwmgr; hwmgr->hwmgr_func = &cz_hwmgr_funcs; hwmgr->pptable_func = &pptable_funcs; - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c index e1b649bd5344..5afe82068b29 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c @@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) fiji_update_uvd_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, 
AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 92912ab20944..120a9e2c3152 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_hwmgr *data; uint32_t i; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); bool stay_in_boot; int result; + data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; @@ -699,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = FIJI_MAX_HARDWARE_POWERLEVELS; hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -734,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -743,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } else { @@ -1236,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. 
*
@@ -1363,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
 }
 
 /**
+* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success.
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
 * Initial switch from ARB F0->F1
 *
 * @param hwmgr the address of the powerplay hardware manager.
@@ -1375,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
 			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 }
 
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+	uint32_t tmp;
+
+	tmp = (cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+			0x0000ff00) >> 8;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return fiji_copy_and_switch_arb_sets(hwmgr,
+			tmp, MC_CG_ARB_FREQ_F0);
+}
+
 static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
 		struct fiji_single_dpm_table *dpm_table, uint32_t count)
 {
@@ -1397,7 +1450,7 @@ static void fiji_setup_pcie_table_entry(
 {
 	dpm_table->dpm_levels[index].value = pcie_gen;
 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
-	dpm_table->dpm_levels[index].enabled = 1;
+	dpm_table->dpm_levels[index].enabled = true;
 }
 
 static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
@@ -1609,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 {
 	uint32_t count;
 	uint8_t index;
-	int result = 0;
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1631,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 				VOLTAGE_SCALE)) / 25);
 	}
 
-	return result;
+	return 0;
 }
 
 /**
@@ -3177,6 +3229,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+	struct fiji_ulv_parm *ulv = &(data->ulv);
+
+	if (ulv->ulv_supported)
+		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+	return 0;
+}
+
 static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3197,6 +3260,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep)) {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to disable Master Deep Sleep switch failed!",
+					return -1);
+		}
+	}
+
+	return 0;
+}
+
 static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3357,6 +3435,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+	/* disable SCLK dpm */
+	if (!data->sclk_dpm_key_disabled)
+		PP_ASSERT_WITH_CODE(
+				(smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_DPM_Disable) == 0),
+				"Failed to disable SCLK DPM!",
+				return -1);
+
+	/* disable MCLK dpm */
+	if (!data->mclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 
0), + "Failed to force MCLK DPM0!", + return -1); + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static int fiji_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -1); + } + + if (fiji_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Disable) == 0), + "Failed to disable voltage DPM during DPM Stop Function!", + return -1); + + return 0; +} + static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { @@ -3415,6 +3557,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -3529,6 +3688,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return result; } +static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (fiji_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = fiji_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = fiji_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = fiji_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = fiji_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = fiji_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = fiji_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = fiji_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = fiji_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; +} + static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -4171,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", @@ -4182,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", @@ -4353,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct fiji_power_state *fiji_ps) { - int result = 0; struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4373,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr 
*hwmgr, fiji_ps->performance_levels[0].memory_clock, fiji_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int fiji_generate_dpm_level_enable_mask( @@ -4632,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", @@ -4643,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", @@ -5071,42 +5291,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -5276,12 +5460,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h return is_update_required; } +static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct fiji_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = 
cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &fiji_hwmgr_backend_fini, .asic_setup = &fiji_setup_asic_task, .dynamic_state_management_enable = &fiji_enable_dpm_tasks, + .dynamic_state_management_disable = &fiji_disable_dpm_tasks, .force_dpm_level = &fiji_dpm_force_dpm_level, .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, .get_power_state_size = &fiji_get_power_state_size, @@ -5314,24 +5582,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .get_fan_control_mode = fiji_get_fan_control_mode, .check_states_equal = fiji_check_states_equal, .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, - .get_pp_table = fiji_get_pp_table, - .set_pp_table = fiji_set_pp_table, .force_clock_level = fiji_force_clock_level, .print_clock_levels = fiji_print_clock_levels, + .get_sclk_od = fiji_get_sclk_od, + .set_sclk_od = fiji_set_sclk_od, + .get_mclk_od = fiji_get_mclk_od, + .set_mclk_od = fiji_set_mclk_od, }; int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data; - int ret = 0; - - data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = &fiji_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_fiji_thermal_initialize(hwmgr); - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h index 170edf5a772d..bf67c2a92c68 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h @@ -302,9 +302,6 @@ struct fiji_hwmgr { bool pg_acp_init; bool frtc_enabled; bool frtc_status_changed; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; /* To convert to Q8.8 format for firmware */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index db23a4068baf..44658451a8d2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) if (!tmp) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); fiji_hwmgr->fast_watermark_threshold = 100; - tmp = 1; - fiji_hwmgr->enable_dte_feature = tmp ? false : true; - fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + if (hwmgr->powercontainment_enabled) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + tmp = 1; + fiji_hwmgr->enable_dte_feature = tmp ? false : true; + fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } } } @@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h index 55e58200f33a..fec772421733 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h @@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type { #define 
POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d + struct fiji_pt_config_reg { uint32_t offset; uint32_t mask; @@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr); int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr); int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c index 7a705cee0cc2..a6abe81bc843 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c @@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, struct phm_runtime_table_header *rt_table, void *input, void *output) { - int result = 0; - void *temp_storage = NULL; + int result; + void *temp_storage; if (hwmgr == NULL || rt_table == NULL) { printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); @@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n"); return -ENOMEM; } + } else { + temp_storage = NULL; } result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); - if (NULL != temp_storage) - kfree(temp_storage); + kfree(temp_storage); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index efb77eda7508..789f98ad2615 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) return ret; } +int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) +{ + int ret = -1; + bool enabled; + + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (hwmgr->hwmgr_func->dynamic_state_management_disable) + ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); + } else { + ret = phm_dispatch_table(hwmgr, + &(hwmgr->disable_dynamic_state_management), + NULL, NULL); + } + + enabled = ret == 0 ? 
false : true; + + cgs_notify_dpm_enabled(hwmgr->device, enabled); + + return ret; +} + int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { PHM_FUNC_CHECK(hwmgr); @@ -314,7 +338,7 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, if (hwmgr->hwmgr_func->store_cc6_data == NULL) return -EINVAL; - /* to do pass other display configuration in furture */ + /* TODO: pass other display configuration in the future */ if (hwmgr->hwmgr_func->store_cc6_data) hwmgr->hwmgr_func->store_cc6_data(hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 20f20e075588..27e07624ac28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -24,6 +24,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "cgs_common.h" #include "power_state.h" #include "hwmgr.h" @@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->hw_revision = pp_init->rev_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; + hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; switch (hwmgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_hwmgr_init(hwmgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TONGA: tonga_hwmgr_init(hwmgr); @@ -94,6 +96,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) return -EINVAL; /* do hwmgr finish*/ + kfree(hwmgr->hardcode_pp_table); + kfree(hwmgr->backend); kfree(hwmgr->start_thermal_controller.function_list); @@ -530,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! 
\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c index 8f142a74ad08..b5edb5105986 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c @@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); polaris10_update_uvd_dpm(hwmgr, true); polaris10_phm_powerdown_uvd(hwmgr); } else { polaris10_phm_powerup_uvd(hwmgr); polaris10_update_uvd_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); } return 0; @@ -125,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) data->vce_power_gated = bgate; - if (bgate) + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + polaris10_update_vce_dpm(hwmgr, true); polaris10_phm_powerdown_vce(hwmgr); - else + } else { polaris10_phm_powerup_vce(hwmgr); - + polaris10_update_vce_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 91e25f942d90..769636a0c5b5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. 
* @@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, return 0; } +static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + /** * Initial switch from ARB F0->F1 * @@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); } +static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return polaris10_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -1356,9 +1404,9 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) return result; } - /* in order to prevent MC activity from stutter mode to push DPM up. + /* In order to prevent MC activity from stutter mode to push DPM up, * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not effected by - * up threshold or and MCLK DPM latency. + * a higher state by default such that we are not affected by + * up threshold or MCLK DPM latency. */ levels[0].ActivityLevel = 0x1f; @@ -1425,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, * already converted to SMC_UL */ - sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_sclk, sclk_frequency, @@ -1461,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_mclk, table->MemoryACPILevel.MclkFrequency, @@ -1780,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1831,11 +1878,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? 
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 && + stretch_amount != 4 && stretch_amount != 5) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, @@ -1890,9 +1934,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { config = VR_SVI2_PLANE_2; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); } else { config = VR_STATIC_VOLTAGE; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); @@ -2262,6 +2305,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -2282,6 +2336,21 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2379,6 +2448,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -1); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable 
pcie DPM during DPM Stop Function!", + return -1); + } + + if (polaris10_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + return 0; +} + static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { bool protection; @@ -2436,6 +2557,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2530,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable deep sleep master switch!", result = tmp_result); + tmp_result = polaris10_enable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable deep sleep master switch!", result = tmp_result); + tmp_result = polaris10_start_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to start DPM!", result = tmp_result); @@ -2559,8 +2701,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { + int tmp_result, result = 0; - return 0; + tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 
@@ -2559,8 +2701,60 @@ int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { + int tmp_result, result = 0; - return 0; + tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = polaris10_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = polaris10_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = polaris10_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = polaris10_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = polaris10_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = polaris10_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = polaris10_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force switch to arbf0!", result = tmp_result); + + return result; } int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) @@ -2571,13 +2765,6 @@ int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -2624,17 +2811,22 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicUVDState); - /* power tune caps Assume disabled */ + /* power tune caps: assume enabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (hwmgr->powercontainment_enabled) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC);
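Note: the next hunk replaces a PP_ASSERT_WITH_CODE(..., continue) around atomctrl_get_voltage_evv_on_sclk_ai() with an explicit if/printk/continue. Assuming the usual do-while(0) wrapper in such assert macros (the real definition lives in pp_debug.h), this is more than a style cleanup: a `continue` passed as the macro's code argument only terminates the macro's internal do-while and never reaches the enclosing for loop. A standalone demonstration under that assumption:

#include <stdio.h>

/* Simplified stand-in for PP_ASSERT_WITH_CODE, assuming the usual
 * do-while(0) wrapper; not the verbatim kernel definition. */
#define PP_ASSERT_WITH_CODE(cond, msg, code)            \
        do {                                            \
                if (!(cond)) {                          \
                        fprintf(stderr, "%s\n", (msg)); \
                        code;                           \
                }                                       \
        } while (0)

int main(void)
{
        for (int vv_id = 0; vv_id < 3; vv_id++) {
                /* Intended: skip this iteration when the lookup fails.
                 * Actual: 'continue' only exits the macro's do-while(0),
                 * so control falls through to the line below. */
                PP_ASSERT_WITH_CODE(vv_id != 1,
                                "Error retrieving EVV voltage value!",
                                continue);
                printf("processing vv_id %d\n", vv_id); /* runs even for vv_id == 1 */
        }
        return 0;
}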
@@ -2706,12 +2898,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) } } - - PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, - VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), - "Error retrieving EVV voltage value!", - continue); - + if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, + VOLTAGE_TYPE_VDDC, + sclk, vv_id, &vddc) != 0) { + printk(KERN_WARNING "failed to retrieve EVV voltage!\n"); + continue; + } /* need to make sure vddc is less than 2v or else, it could burn the ASIC. * real voltage level in unit of 0.01mv */ @@ -2968,13 +3160,19 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_hwmgr *data; struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; uint32_t temp_reg; int result; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; data->mclk_dpm0_activity_target = 0xa; @@ -3063,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = POLARIS10_MAX_HARDWARE_POWERLEVELS; @@ -3148,7 +3346,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -3157,7 +3355,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; @@ -3446,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + disable_mclk_switching = (1 < info.display_count) || disable_mclk_switching_for_frame_lock; @@ -3950,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), @@ -3962,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), @@ -4123,7 
+4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct polaris10_power_state *polaris10_ps) { - int result = 0; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4143,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, polaris10_ps->performance_levels[0].memory_clock, polaris10_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int polaris10_generate_dpm_level_enable_mask( @@ -4226,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); } -static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) { - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_nps = - cast_const_phw_polaris10_power_state(states->pnew_state); - const struct polaris10_power_state *polaris10_cps = - cast_const_phw_polaris10_power_state(states->pcurrent_state); - uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (polaris10_nps->vce_clks.evclk > 0 && - (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) { - - data->smc_state_table.VceBootLevel = + if (!bgate) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + data->smc_state_table.VceBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); + else + data->smc_state_table.VceBootLevel = 0; mm_boot_level_offset = data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); @@ -4257,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << data->smc_state_table.VceBootLevel); - - polaris10_enable_disable_vce_dpm(hwmgr, true); - } else if (polaris10_nps->vce_clks.evclk == 0 && - polaris10_cps != NULL && - polaris10_cps->vce_clks.evclk > 0) - polaris10_enable_disable_vce_dpm(hwmgr, false); } + polaris10_enable_disable_vce_dpm(hwmgr, !bgate); + return 0; } @@ -4353,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4365,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM 
when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4422,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; } + + static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) { int tmp_result, result = 0; @@ -4455,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i "Failed to generate DPM level enabled mask!", result = tmp_result); - tmp_result = polaris10_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update VCE DPM!", - result = tmp_result); - tmp_result = polaris10_update_sclk_threshold(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", @@ -4530,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ polaris10_notify_smc_display_change(hwmgr, false); + return 0; } @@ -4579,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + return 0; } @@ -4820,42 +5008,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr) return result; } -static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -4998,6 +5150,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct polaris10_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_sclk_table = + 
&(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .backend_init = &polaris10_hwmgr_backend_init, .backend_fini = &polaris10_hwmgr_backend_fini, @@ -5036,22 +5271,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .check_states_equal = polaris10_check_states_equal, .set_fan_control_mode = polaris10_set_fan_control_mode, .get_fan_control_mode = polaris10_get_fan_control_mode, - .get_pp_table = polaris10_get_pp_table, - .set_pp_table = polaris10_set_pp_table, .force_clock_level = polaris10_force_clock_level, .print_clock_levels = polaris10_print_clock_levels, .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, + .get_sclk_od = polaris10_get_sclk_od, + .set_sclk_od = polaris10_set_sclk_od, + .get_mclk_od = polaris10_get_mclk_od, + .set_mclk_od = polaris10_set_mclk_od, }; int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data; - - data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_polaris10_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index afc3434822d1..33c33947e827 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h @@ -309,10 +309,6 @@ struct polaris10_hwmgr { uint32_t up_hyst; uint32_t disable_dpm_mask; bool apply_optimized_settings; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; - uint32_t 
avfs_vdroop_override_setting; bool apply_avfs_cks_off_voltage; uint32_t frame_time_x2; @@ -356,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); - +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index ae96f14b827c..b9cb240a135d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c @@ -28,10 +28,360 @@ #include "polaris10_smumgr.h" #include "smu74_discrete.h" #include "pp_debug.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "oss/oss_3_0_sh_mask.h" #define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 +uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + +struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, 
DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, 
DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, 
DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, 
DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, 
DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + 
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, 
DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, 
DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND 
}, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ @@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) +{ + + uint32_t en = enable ? 
1 : 0; + int32_t result = 0; + uint32_t data; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + DIDTBlock_Info &= ~SQ_Enable_MASK; + DIDTBlock_Info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + DIDTBlock_Info &= ~DB_Enable_MASK; + DIDTBlock_Info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + DIDTBlock_Info &= ~TD_Enable_MASK; + DIDTBlock_Info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + DIDTBlock_Info &= ~TCP_Enable_MASK; + DIDTBlock_Info |= en << TCP_Enable_SHIFT; + } + + if (enable) + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); + + return result; +} + +static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr, + struct polaris10_pt_config_reg *cac_config_regs) +{ + struct polaris10_pt_config_reg *config_regs = cac_config_regs; + uint32_t cache = 0; + uint32_t data = 0; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == POLARIS10_CONFIGREG_CACHE) + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + else { + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_DIDT_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + break; + + default: + data = cgs_read_register(hwmgr->device, config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); + break; + + case POLARIS10_CONFIGREG_DIDT_IND: + cgs_write_ind_register(hwmgr->device, 
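/*
 * polaris10_enable_didt() above repeats one idiom per ramping block
 * (SQ, DB, TD, TCP): rewrite the DIDT_CTRL_EN field of that block's
 * CTRL0 register. A condensed sketch of a single iteration, shown for
 * illustration only; the driver keeps the four blocks open-coded:
 */
static void didt_toggle_block(void *device, uint32_t ctrl0,
                              uint32_t en_mask, uint32_t en_shift,
                              bool enable)
{
        uint32_t data = cgs_read_ind_register(device, CGS_IND_REG__DIDT, ctrl0);

        data &= ~en_mask;
        data |= ((enable ? 1 : 0) << en_shift) & en_mask;
        cgs_write_ind_register(device, CGS_IND_REG__DIDT, ctrl0, data);
}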
CGS_IND_REG__DIDT, config_regs->offset, data); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + + default: + cgs_write_register(hwmgr->device, config_regs->offset, data); + break; + } + cache = 0; + } + + config_regs++; + } + + return 0; +} + +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, value, value2; + struct cgs_system_info sys_info = {0}; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + if (result == 0) + num_se = sys_info.value; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + + /* TODO: pre-DIDT disable clock gating */ + value = 0; + value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK + | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK + | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); + + if (hwmgr->chip_id == CHIP_POLARIS10) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } + } + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); + + result = polaris10_enable_didt(hwmgr, true); + PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); + + /* TODO: post-DIDT enable clock gating */ + } + + return 0; +} + +int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + /* TODO: pre-DIDT disable clock gating */ + + result = polaris10_enable_didt(hwmgr, false); + PP_ASSERT_WITH_CODE((result == 0), "DisableDiDt failed.", return result); + /* TODO: post-DIDT enable clock gating */ + } + + return 0; +} + + static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -312,6 +843,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr
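/*
 * Note on the register loop above: POLARIS10_CONFIGREG_CACHE rows never
 * touch hardware; they OR their shifted value into 'cache', and the
 * next non-cache row carries the staged bits out in a single write,
 * after which the cache resets to 0. A sketch of the net effect for one
 * cached row followed by one direct row on the same register; read_reg()
 * and write_reg() are assumed stubs:
 */
uint32_t read_reg(uint32_t offset);                 /* assumed accessor */
void write_reg(uint32_t offset, uint32_t data);     /* assumed accessor */

static void apply_cached_pair(uint32_t offset,
                              uint32_t mask1, uint32_t shift1, uint32_t value1,
                              uint32_t mask2, uint32_t shift2, uint32_t value2)
{
        uint32_t cache = 0;
        uint32_t data;

        cache |= (value1 << shift1) & mask1;  /* CACHE row: staged only */

        data = read_reg(offset);              /* next non-cache row ... */
        data &= ~mask2;
        data |= (value2 << shift2) & mask2;
        data |= cache;                        /* ... flushes the stage  */
        write_reg(offset, data);
}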
*)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -373,6 +921,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h index 68bc1cb6d40c..bc78e28f010d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h @@ -27,15 +27,37 @@ enum polaris10_pt_config_reg_type { POLARIS10_CONFIGREG_MMR = 0, POLARIS10_CONFIGREG_SMC_IND, POLARIS10_CONFIGREG_DIDT_IND, + POLARIS10_CONFIGREG_GC_CAC_IND, POLARIS10_CONFIGREG_CACHE, POLARIS10_CONFIGREG_MAX }; +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_DTE 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define ixGC_CAC_CNTL 0x0000 +#define ixDIDT_SQ_STALL_CTRL 0x0004 +#define ixDIDT_SQ_TUNING_CTRL 0x0005 +#define ixDIDT_TD_STALL_CTRL 0x0044 +#define ixDIDT_TD_TUNING_CTRL 0x0045 +#define ixDIDT_TCP_STALL_CTRL 0x0064 +#define 
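/*
 * polaris10_disable_power_containment() above checks three feature bits
 * and sends one SMC message per bit that is set, then clears the mask.
 * The same mapping written out as a table, purely as an illustration of
 * the flow (not the driver's structure):
 */
static const struct {
        uint32_t feature;
        uint16_t msg;
} pc_disable_map[] = {
        { POWERCONTAINMENT_FEATURE_TDCLimit,    PPSMC_MSG_TDCLimitDisable },
        { POWERCONTAINMENT_FEATURE_DTE,         PPSMC_MSG_DisableDTE },
        { POWERCONTAINMENT_FEATURE_PkgPwrLimit, PPSMC_MSG_PkgPwrLimitDisable },
};
/*
 * For each entry: if (power_containment_features & feature), send msg
 * via smum_send_msg_to_smc(); finally set power_containment_features
 * back to 0 so a later enable starts clean.
 */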
ixDIDT_TCP_TUNING_CTRL 0x0065 + struct polaris10_pt_config_reg { uint32_t offset; uint32_t mask; @@ -62,9 +84,11 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); - +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr); #endif /* POLARIS10_POWERTUNE_H */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index a3c38bbd1e94..1944d289f846 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c @@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) int result; struct cgs_system_info info = {0}; - if( 0 != acpi_atcs_notify_pcie_device_ready(device)) + if (acpi_atcs_notify_pcie_device_ready(device)) return -EINVAL; info.size = sizeof(struct cgs_system_info); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 90b35c5c10a4..26f3e30d0fef 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770( /* They are both in 10KHz Units. */ engine_clock_parameters.ulTargetEngineClock = - (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; - engine_clock_parameters.ulTargetEngineClock |= - (COMPUTE_ENGINE_PLL_PARAM << 24); + cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | + ((COMPUTE_ENGINE_PLL_PARAM << 24))); /* in 10 khz units.*/ engine_clock_parameters.sReserved.ulClock = - (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; + cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); return cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), &engine_clock_parameters); @@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si( COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; int result; - mpll_parameters.ulClock = (uint32_t) clock_value; + mpll_parameters.ulClock = cpu_to_le32(clock_value); mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 
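/*
 * The new UNUSED_0 masks and shifts in polaris10_powertune.h above obey
 * the usual relation: the shift is the bit index of the mask's lowest
 * set bit (0xfffc0000 pairs with 18 == 0x12, 0xc0000000 with 30 ==
 * 0x1e). A compile-time check one could add, shown as an illustration:
 */
_Static_assert(((DIDT_SQ_CTRL0__UNUSED_0_MASK >>
                 DIDT_SQ_CTRL0__UNUSED_0__SHIFT) <<
                DIDT_SQ_CTRL0__UNUSED_0__SHIFT) ==
               DIDT_SQ_CTRL0__UNUSED_0_MASK,
               "DIDT_SQ_CTRL0 UNUSED_0 mask/shift mismatch");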
1 : 0); result = cgs_atom_exec_cmd_table @@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si( if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = - mpll_parameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); mpll_param->mpll_fb_divider.cl_kf = - mpll_parameters.ulFbDiv.usFbDiv; + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); mpll_param->mpll_post_divider = (uint32_t)mpll_parameters.ucPostDiv; mpll_param->vco_mode = @@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; int result; - mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), @@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; int result; - pll_parameters.ulClock = clock_value; + pll_parameters.ulClock = cpu_to_le32(clock_value); result = cgs_atom_exec_cmd_table (hwmgr->device, @@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; - dividers->real_clock = pll_parameters.ulClock; + dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); } return result; @@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; result = cgs_atom_exec_cmd_table @@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi( COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; - pll_patameters.ulClock.ulClock = clock_value; + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; @@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi( dividers->pll_post_divider = pll_patameters.ulClock.ucPostDiv; dividers->real_clock = - pll_patameters.ulClock.ulClock; + le32_to_cpu(pll_patameters.ulClock.ulClock); dividers->ul_fb_div.ul_fb_div_frac = - pll_patameters.ulFbDiv.usFbDivFrac; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); dividers->ul_fb_div.ul_fb_div = - pll_patameters.ulFbDiv.usFbDiv; + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); dividers->uc_pll_ref_div = pll_patameters.ucPllRefDiv; @@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3( for (i = 
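/*
 * Every conversion in these ppatomctrl.c hunks follows one rule: ATOM
 * BIOS command tables store multi-byte fields little-endian, so values
 * are converted with cpu_to_leNN() on the way into a table and
 * leNN_to_cpu() on the way out. A self-contained sketch of the pattern
 * with a hypothetical parameter block (needs <linux/types.h> and
 * <asm/byteorder.h>):
 */
struct atom_clock_param {
        __le32 ulClock;                    /* little-endian in the table */
        __le16 usLevel;
};

static void param_set(struct atom_clock_param *p, u32 clock, u16 level)
{
        p->ulClock = cpu_to_le32(clock);   /* CPU -> table byte order */
        p->usLevel = cpu_to_le16(level);
}

static u32 param_get_clock(const struct atom_clock_param *p)
{
        return le32_to_cpu(p->ulClock);    /* table -> CPU byte order */
}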
0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { voltage_table->entries[i].value = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; + le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); voltage_table->entries[i].smio_low = - voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; + le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); } voltage_table->mask_low = - voltage_object->asGpioVoltageObj.ulGpioMaskVal; + le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); voltage_table->count = voltage_object->asGpioVoltageObj.ucGpioEntryNum; voltage_table->phase_delay = @@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin( pin_assignment->ucGpioPinBitShift; gpio_pin_assignment->us_gpio_pin_aindex = le16_to_cpu(pin_assignment->usGpioPin_AIndex); - return false; + return true; } offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; } - return true; + return false; } /** @@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin( const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) { - bool bRet = 0; + bool bRet = false; ATOM_GPIO_PIN_LUT *gpio_lookup_table = get_gpio_lookup_table(hwmgr->device); PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), - "Could not find GPIO lookup Table in BIOS.", return -1); + "Could not find GPIO lookup Table in BIOS.", return false); bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, gpio_pin_assignment); @@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk( return -1; if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) + (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && + getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) return -1; /*----------------------------------------------------------- @@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk( switch (dpm_level) { case 1: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); break; case 2: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); break; case 3: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); break; case 4: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); break; case 5: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); - fDerateTDP = 
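/*
 * With the return values corrected above, atomctrl_lookup_gpio_pin()
 * and atomctrl_get_pp_assign_pin() now report true on success, so
 * callers can use them directly as predicates. Illustrative usage; the
 * pin ID macro and the consumer function are hypothetical:
 */
static void example_setup_gpio(struct pp_hwmgr *hwmgr)
{
        pp_atomctrl_gpio_pin_assignment pin;

        if (atomctrl_get_pp_assign_pin(hwmgr, EXAMPLE_VRHOT_PIN_ID, &pin))
                configure_vrhot_gpio(&pin);   /* hypothetical consumer */
}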
GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); break; case 6: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); break; case 7: - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7)); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); break; default: printk(KERN_ERR "DPM Level not supported\n"); fPowerDPMx = Convert_ULONG_ToFraction(1); - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); } /*------------------------- @@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( return result; /* Finally, the actual fuse value */ - ul_RO_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); - fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); + ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); + fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); sCACm_fuse = getASICProfilingInfo->sCACm; @@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); + ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); @@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_CACb_fused = sOutput_FuseValues.ulEfuseValue; - fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); - fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); + ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); + fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); @@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); + ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); fKt_Beta_fused = 
fDecodeLogisticFuse(ul_Kt_Beta_fused, fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); @@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); + ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); fRange = fMultiply(fRange, ConvertToFraction(-1)); fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, @@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; - fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); - fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); + ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); + fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, fAverage, fRange, sKv_b_fuse.ucEfuseLength); @@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( if (result) return result; - ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; - fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); - fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); + ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); + fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); + fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); @@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk( * PART 2 - Grabbing all required values *------------------------------------------- */ - fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), + fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), + fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), + fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), + fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000), + fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), + fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), ConvertToFraction(uPow(-1, 
getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), + fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), + fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); - fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); - fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); + fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); + fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); + fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); + fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); fMargin_FMAX_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); fMargin_Plat_mean = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_mean, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); fMargin_FMAX_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); fMargin_Plat_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_plat_sigma, 10000); + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); fMargin_DC_sigma = GetScaledFraction( - getASICProfilingInfo->ulMargin_DC_sigma, 100); + le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); @@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk( fSclk = GetScaledFraction(sclk, 100); fV_max = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); - fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); - fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); + le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); + fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); + fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); + fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); fV_FT = fDivide(GetScaledFraction( - getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); + le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); fV_min = fDivide(GetScaledFraction( - getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); + le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); /*----------------------- * PART 3 @@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0,fLkg_FT), + fAdd(fMultiply(fSM_A0, fLkg_FT), fAdd(fMultiply(fSM_A1, 
fMultiply(fLkg_FT, fSclk)), fAdd(fMultiply(fSM_A3, fSclk), fSubtract(fSM_A7, fRO_fused))))); @@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk( get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; get_voltage_info_param_space.usVoltageLevel = - virtual_voltage_Id; + cpu_to_le16(virtual_voltage_Id); get_voltage_info_param_space.ulSCLKFreq = - sclk; + cpu_to_le32(sclk); + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), + &get_voltage_info_param_space); + + if (0 != result) + return result; + + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); + + return result; +} + +/** + * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. + * @param hwmgr input: pointer to hwManager + * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 + * @param voltage output: real voltage level in unit of mv + */ +int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, + uint16_t virtual_voltage_id, + uint16_t *voltage) +{ + int result; + int entry_id; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ + for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { + /* found */ + break; + } + } + + PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "Can't find requested voltage id in vddc_dependency_on_sclk table!", + return -EINVAL; + ); + + get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; + get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; + get_voltage_info_param_space.ulSCLKFreq = + cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), @@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk( if (0 != result) return result; - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) - (&get_voltage_info_param_space))->usVoltageLevel; + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + (&get_voltage_info_param_space))->usVoltageLevel); return result; } @@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, if (entry_found) { ssEntry->speed_spectrum_percentage = - ssInfo->usSpreadSpectrumPercentage; - ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; + le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); + ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || @@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, int result; READ_EFUSE_VALUE_PARAMETER efuse_param; - efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4; + efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4); efuse_param.sEfuse.ucBitShift = (uint8_t) (start_index - ((start_index / 32) * 32)); efuse_param.sEfuse.ucBitLength = (uint8_t) @@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), &efuse_param); if (!result) - *efuse = efuse_param.ulEfuseValue & mask; + 
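/*
 * atomctrl_get_voltage_evv() above resolves "virtual" (leakage) voltage
 * IDs, which per the patch's own comment occupy 0xff01..0xff08. A small
 * predicate a caller might use before taking the EVV path; illustrative
 * only, not part of the patch:
 */
static bool is_virtual_voltage_id(uint16_t vid)
{
        return vid >= 0xff01 && vid <= 0xff08;
}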
*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; return result; } int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, - uint8_t level) + uint8_t level) { DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; int result; - memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK; - memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM; + memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = + memory_clock & SET_CLOCK_FREQ_MASK; + memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = + ADJUST_MC_SETTING_PARAM; memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; result = cgs_atom_exec_cmd_table @@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ get_voltage_info_param_space.ucVoltageType = voltage_type; get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; - get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id; - get_voltage_info_param_space.ulSCLKFreq = sclk; + get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); + get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); result = cgs_atom_exec_cmd_table(hwmgr->device, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), @@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ if (0 != result) return result; - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; + *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); return result; } @@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; - table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc; - table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper; - table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower; + table->entry[i].usFcw_pcc = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc); + table->entry[i].usFcw_trans_upper = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper); + table->entry[i].usRcw_trans_lower = + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower); } return 0; } -int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) +int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, + struct pp_atom_ctrl__avfs_parameters *param) { ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; @@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a if (!profile) return -1; - param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; - param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; - param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; - param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; - param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; - param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; - param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; - 
param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; - param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; - param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; - param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; - param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; - param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; - param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; - param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; - param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; - param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; - param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; - param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; + param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0); + param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1); + param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2); + param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma); + param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean); + param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma); + param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0); + param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1); + param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2); + param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0); + param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1); + param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2); + param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2); + param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b); + param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1); + param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2); + param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b); + param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv); param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; - param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; + param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor); param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index 1e35a9625baf..fc898afce002 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters { extern bool atomctrl_get_pp_assign_pin(struct 
pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); +extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage); extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo); extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h index 009bd5963ed8..8f50a038396c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -50,55 +50,45 @@ typedef union _fInt { * Function Declarations * ------------------------------------------------------------------------------- */ -fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ - -fInt fNegate(fInt); /* Returns -1 * input fInt value */ -fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -fInt fDivide (fInt A, fInt B); /* Returns A/B */ -fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -int uAbs(int); /* Returns the Absolute value of the Int */ -fInt fAbs(fInt); /* Returns the Absolute value of the fInt */ -int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ +static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ +static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ +static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ +static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. 
last 3 digits are the decimal digits) */ + +static fInt fNegate(fInt); /* Returns -1 * input fInt value */ +static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ +static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ +static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ +static fInt fDivide (fInt A, fInt B); /* Returns A/B */ +static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ +static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ + +static int uAbs(int); /* Returns the Absolute value of the Int */ +static int uPow(int base, int exponent); /* Returns base^exponent an INT */ + +static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ +static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ +static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ + +static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ +static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ /* Fuse decoding functions * ------------------------------------------------------------------------------------- */ -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); /* Internal Support Functions - Use these ONLY for testing or adding to internal functions * ------------------------------------------------------------------------------------- * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. */ -fInt Add (int, int); /* Add two INTs and return Sum as FINT */ -fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */ -fInt Divide (int, int); /* You get the idea... */ -fInt fNegate(fInt); +static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ +static fInt fNegate(fInt); -int uGetScaledDecimal (fInt); /* Internal function */ -int GetReal (fInt A); /* Internal function */ - -/* Future Additions and Incomplete Functions - * ------------------------------------------------------------------------------------- - */ -int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */ - /* Let us say we have 2.126 but can only handle 2 decimal points. 
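For context on the fInt type these newly static helpers operate on: it is a binary fixed-point number held in a plain int, with the low SHIFT_AMOUNT bits carrying the fraction. A freestanding sketch of the core operations, assuming a Q16.16 split (the real header defines its own SHIFT_AMOUNT and also rounds the multiply):

#include <stdint.h>

#define SHIFT_AMOUNT 16			/* assumed Q16.16 split for this sketch */

typedef union { int full; } fInt;

static fInt ConvertToFraction(int x)	/* int -> fixed point */
{
	fInt t;

	t.full = x << SHIFT_AMOUNT;	/* mirrors the driver's approach */
	return t;
}

static fInt fAdd(fInt a, fInt b)	/* same scale, so a plain add works */
{
	fInt s;

	s.full = a.full + b.full;
	return s;
}

static fInt fMultiply(fInt a, fInt b)	/* product carries the scale twice */
{
	fInt p;
	int64_t wide = (int64_t)a.full * b.full;

	p.full = (int)(wide >> SHIFT_AMOUNT);	/* renormalize; the in-tree
						 * version also rounds */
	return p;
}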
We could */ - /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */ +static int uGetScaledDecimal (fInt); /* Internal function */ +static int GetReal (fInt A); /* Internal function */ /* ------------------------------------------------------------------------------------- * TROUBLESHOOTING INFORMATION @@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef * START OF CODE * ------------------------------------------------------------------------------------- */ -fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ +static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ { uint32_t i; bool bNegated = false; @@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ return solution; } -fInt fNaturalLog(fInt value) +static fInt fNaturalLog(fInt value) { uint32_t i; fInt upper_bound = Divide(8, 1000); @@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value) return (fAdd(solution, error_term)); } -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b } -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint return f_decoded_value; } -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) { fInt fLeakage; fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, return fLeakage; } -fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ +static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ { fInt temp; @@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. 
Is it possible to m return temp; } -fInt fNegate(fInt X) +static fInt fNegate(fInt X) { fInt CONSTANT_NEGONE = ConvertToFraction(-1); return (fMultiply(X, CONSTANT_NEGONE)); } -fInt Convert_ULONG_ToFraction(uint32_t X) +static fInt Convert_ULONG_ToFraction(uint32_t X) { fInt temp; @@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X) return temp; } -fInt GetScaledFraction(int X, int factor) +static fInt GetScaledFraction(int X, int factor) { int times_shifted, factor_shifted; bool bNEGATED; @@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor) } /* Addition using two fInts */ -fInt fAdd (fInt X, fInt Y) +static fInt fAdd (fInt X, fInt Y) { fInt Sum; @@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y) } /* Addition using two fInts */ -fInt fSubtract (fInt X, fInt Y) +static fInt fSubtract (fInt X, fInt Y) { fInt Difference; @@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y) return Difference; } -bool Equal(fInt A, fInt B) +static bool Equal(fInt A, fInt B) { if (A.full == B.full) return true; @@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B) return false; } -bool GreaterThan(fInt A, fInt B) +static bool GreaterThan(fInt A, fInt B) { if (A.full > B.full) return true; @@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B) return false; } -fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ +static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ { fInt Product; int64_t tempProduct; @@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ return Product; } -fInt fDivide (fInt X, fInt Y) +static fInt fDivide (fInt X, fInt Y) { fInt fZERO, fQuotient; int64_t longlongX, longlongY; @@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y) return fQuotient; } -int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ +static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ { fInt fullNumber, scaledDecimal, scaledReal; @@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch return fullNumber.full; } -fInt fGetSquare(fInt A) +static fInt fGetSquare(fInt A) { return fMultiply(A,A); } /* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -fInt fSqrt(fInt num) +static fInt fSqrt(fInt num) { fInt F_divide_Fprime, Fprime; fInt test; @@ -460,7 +450,7 @@ fInt fSqrt(fInt num) return (x_new); } -void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) +static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) { fInt *pRoots = &Roots[0]; fInt temp, root_first, root_second; @@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) * ----------------------------------------------------------------------------- */ -/* Addition using two normal ints - Temporary - Use only for testing purposes?. 
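The fSqrt() shown above carries the hint x_new = x_old - (x_old^2 - C) / (2 * x_old), which is Newton-Raphson on f(x) = x^2 - C and simplifies to x <- (x + C/x) / 2. A plain floating-point sketch of the same iteration, with an arbitrary iteration count (the driver's version works on fInt and picks its own starting point):

/* Newton-Raphson square root: x <- (x + c/x) / 2.  Each step roughly
 * doubles the number of correct digits once the guess is close.
 * Assumes c >= 0. */
static double newton_sqrt(double c)
{
	double x = c > 1.0 ? c * 0.5 : 1.0;	/* crude initial guess */
	int i;

	for (i = 0; i < 20; i++)
		x = 0.5 * (x + c / x);	/* == x - (x*x - c) / (2*x) */
	return x;
}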
*/ -fInt Add (int X, int Y) -{ - fInt A, B, Sum; - - A.full = (X << SHIFT_AMOUNT); - B.full = (Y << SHIFT_AMOUNT); - - Sum.full = A.full + B.full; - - return Sum; -} - /* Conversion Functions */ -int GetReal (fInt A) +static int GetReal (fInt A) { return (A.full >> SHIFT_AMOUNT); } -/* Temporarily Disabled */ -int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */ -{ - /* ROUNDING TEMPORARLY DISABLED - int temp = A.full; - int decimal_cutoff, decimal_mask = 0x000001FF; - decimal_cutoff = temp & decimal_mask; - if (decimal_cutoff > 0x147) { - temp += 673; - }*/ - - return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */ -} - -fInt Multiply (int X, int Y) -{ - fInt A, B, Product; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Product = fMultiply(A, B); - - return Product; -} - -fInt Divide (int X, int Y) +static fInt Divide (int X, int Y) { fInt A, B, Quotient; @@ -555,7 +506,7 @@ fInt Divide (int X, int Y) return Quotient; } -int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ +static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ { int dec[PRECISION]; int i, scaledDecimal = 0, tmp = A.partial.decimal; @@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege return scaledDecimal; } -int uPow(int base, int power) +static int uPow(int base, int power) { if (power == 0) return 1; @@ -578,15 +529,7 @@ int uPow(int base, int power) return (base)*uPow(base, power - 1); } -fInt fAbs(fInt A) -{ - if (A.partial.real < 0) - return (fMultiply(A, ConvertToFraction(-1))); - else - return A; -} - -int uAbs(int X) +static int uAbs(int X) { if (X < 0) return (X * -1); @@ -594,7 +537,7 @@ int uAbs(int X) return X; } -fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) +static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) { fInt solution; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 2f1a14fe05b1..6c321b0d8a1e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( struct pp_hwmgr *hwmgr) { - const void *table_addr = NULL; + const void *table_addr = hwmgr->soft_pp_table; uint8_t frev, crev; uint16_t size; - table_addr = cgs_atom_get_data_table(hwmgr->device, - GetIndexIntoMasterTable(DATA, PowerPlayInfo), - &size, &frev, &crev); + if (!table_addr) { + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); - hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table_size = size; + } return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; } +int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, + uint32_t *vol_rep_time, uint32_t *bb_rep_time) +{ + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE(NULL != powerplay_tab, + "Missing PowerPlay Table!", return -EINVAL); + + *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime); + *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime); + + return 0; +} int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, unsigned long *num_of_entries) @@ -1499,7 
+1515,7 @@ int get_number_of_vce_state_table_entries( const ATOM_PPLIB_VCE_State_Table *vce_table = get_vce_state_table(hwmgr, table); - if (vce_table > 0) + if (vce_table) return vce_table->numEntries; return 0; @@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); - hwmgr->soft_pp_table = NULL; - } - if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h index 30434802417e..baddaa75693b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h @@ -32,16 +32,19 @@ struct pp_hw_power_state; extern const struct pp_table_func pptable_funcs; typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps, - unsigned int index, - const void *clock_info); + struct pp_hw_power_state *hw_ps, + unsigned int index, + const void *clock_info); int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, - unsigned long *num_of_entries); + unsigned long *num_of_entries); int pp_tables_get_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, - struct pp_power_state *ps, - pp_tables_hw_clock_info_callback func); + unsigned long entry_index, + struct pp_power_state *ps, + pp_tables_hw_clock_info_callback func); + +int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, + uint32_t *vol_rep_time, uint32_t *bb_rep_time); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 5d0f655bf160..c7dc111221c2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable SCLK DPM when DPM is disabled", return -1 ); @@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable MCLK DPM when DPM is disabled", return -1 ); @@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) if (0 == data->pcie_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable PCIE DPM when DPM is disabled", return -1 ); @@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable Voltage CNTL when DPM is disabled", return -1 ); @@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. 
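Several hunks here replace 0 == tonga_is_dpm_running(hwmgr) with !tonga_is_dpm_running(hwmgr), which is equivalent but idiomatic for a boolean helper. For readers outside the driver, PP_ASSERT_WITH_CODE behaves roughly as sketched below: on a failed condition it logs the message and then executes the recovery statement, typically a return. dpm_running() is a hypothetical stand-in:

#include <linux/types.h>
#include <linux/printk.h>

/* Close approximation of the helper from the driver's pp_debug.h. */
#define PP_ASSERT_WITH_CODE(cond, msg, code)		\
	do {						\
		if (!(cond)) {				\
			printk("%s\n", msg);		\
			code;				\
		}					\
	} while (0)

bool dpm_running(void);	/* hypothetical stand-in for tonga_is_dpm_running() */

static int disable_dpm_example(void)
{
	/* On failure: log the message, then run the third argument. */
	PP_ASSERT_WITH_CODE(!dpm_running(),
			"Trying to disable DPM when DPM is disabled",
			return -1);
	return 0;
}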
If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", + return -1;); if (0 == data->sclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force MCLK when DPM is disabled", + return -1;); if (0 == data->mclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force PCIE level when DPM is disabled", + return -1;); if (0 == data->pcie_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) uint32_t tmp; int result; - bool error = 0; + bool error = false; result = tonga_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + @@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - data->uvd_power_gated = 0; - data->vce_power_gated = 0; - data->samu_power_gated = 0; - data->acp_power_gated = 0; - data->pg_acp_init = 1; + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + data->acp_power_gated = false; + data->pg_acp_init = true; return 0; } @@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, * whereas voltage control is a fundemental change that will not be disabled */ - return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1); + return (!tonga_is_dpm_running(hwmgr) ? 
0 : 1); } /** @@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - if (0 != tonga_is_dpm_running(hwmgr)) { + if (tonga_is_dpm_running(hwmgr)) { /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ if (!data->dpm_table_start) { return 1; @@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, { uint32_t table_size, i, j; uint16_t vvalue; - bool bVoltageFound = 0; + bool bVoltageFound = false; pp_atomctrl_voltage_table *table; PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); @@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, for (i = 0; i < voltage_table->count; i++) { vvalue = voltage_table->entries[i].value; - bVoltageFound = 0; + bVoltageFound = false; for (j = 0; j < table->count; j++) { if (vvalue == table->entries[j].value) { - bVoltageFound = 1; + bVoltageFound = true; break; } } @@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, { uint32_t count; uint8_t index; - int result = 0; tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; @@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, } } - return result; + return 0; } @@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level( if ((data->mclk_stutter_mode_threshold != 0) && (memory_clock <= data->mclk_stutter_mode_threshold) && - (data->is_uvd_enabled == 0) + (!data->is_uvd_enabled) && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) && (data->display_timing.num_existing_displays <= 2) && (data->display_timing.num_existing_displays != 0)) @@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table( dpm_table->count = count; for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = 0; + dpm_table->dpm_levels[i].enabled = false; } return 0; @@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry( { dpm_table->dpm_levels[index].value = pcie_gen; dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; + dpm_table->dpm_levels[index].enabled = true; } static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) @@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_sclk_table->entries[i].clk) { data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */ data->dpm_table.sclk_table.count++; } } @@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_mclk_table->entries[i].clk) { data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 
1 : 0; */ data->dpm_table.mclk_table.count++; } } @@ -3026,8 +3028,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment))) { table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot); @@ -3040,8 +3042,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* ACDC Switch GPIO */ reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment))) { table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition); @@ -3063,8 +3065,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) } reg_value = 0; - if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, + if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalOutGPIO); @@ -3135,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { @@ -3150,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { @@ -3261,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); @@ -3336,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); int result = 1; - PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return result); + PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. 
Returning without sending SMC message.", + return result); if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( @@ -3742,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) { - bool result = 1; + bool result = true; switch (inReg) { case mmMC_SEQ_RAS_TIMING: @@ -3826,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) break; default: - result = 0; + result = false; break; } @@ -4422,13 +4423,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -4442,7 +4436,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; SMU72_Discrete_DpmTable *table = NULL; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + tonga_hwmgr *data; pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); phw_tonga_ulv_parm *ulv; @@ -4451,7 +4445,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != hwmgr), "Invalid Parameter!", return -1;); - data->dll_defaule_on = 0; + data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + data->dll_defaule_on = false; data->sram_end = SMC_RAM_END; data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; @@ -4557,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* ULV Support*/ ulv = &(data->ulv); - ulv->ulv_supported = 0; + ulv->ulv_supported = false; /* Initalize Dynamic State Adjustment Rule Settings*/ result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); if (result) printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - data->uvd_enabled = 0; + data->uvd_enabled = false; table = &(data->smc_state_table); @@ -4571,7 +4571,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, * Peak Current Control feature is enabled and we should program PCC HW register */ - if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); @@ -4610,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SMU7); - data->vddc_phase_shed_control = 0; + data->vddc_phase_shed_control = false; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); @@ -4629,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } if (0 == result) { - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = TONGA_MAX_HARDWARE_POWERLEVELS; hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -4639,7 +4639,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = 
AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -4648,7 +4648,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } else { @@ -5310,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5324,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5460,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) { - int result = 0; struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -5480,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe hw_state->performance_levels[0].memory_clock, hw_state->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) @@ -5627,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5640,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -6031,42 +6028,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int 
tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -6174,11 +6135,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .backend_init = &tonga_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, .asic_setup = &tonga_setup_asic_task, .dynamic_state_management_enable = &tonga_enable_dpm_tasks, + .dynamic_state_management_disable = &tonga_disable_dpm_tasks, .apply_state_adjust_rules = 
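The new tonga_get_sclk_od()/tonga_set_sclk_od() pair (and the mclk twins) express overdrive as a percentage over the golden, i.e. stock, top DPM level: get computes (current - golden) * 100 / golden, and set computes golden + golden * value / 100, capped at 20. A worked example of the arithmetic, with an assumed stock clock:

#include <stdint.h>

/* Assumed stock value, purely for the worked example. */
#define GOLDEN_TOP_SCLK	100000	/* 1000.00 MHz in 10 kHz units */

static int get_od_percent(uint32_t current_top)	/* assumes current >= stock */
{
	return (current_top - GOLDEN_TOP_SCLK) * 100 / GOLDEN_TOP_SCLK;
}

static uint32_t apply_od_percent(uint32_t value)
{
	if (value > 20)		/* same 20% cap as the driver */
		value = 20;
	return GOLDEN_TOP_SCLK + GOLDEN_TOP_SCLK * value / 100;
}

/* apply_od_percent(5) == 105000 (1050.00 MHz), and
 * get_od_percent(105000) == 5, so the pair round-trips. */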
tonga_apply_state_adjust_rules, .force_dpm_level = &tonga_force_dpm_level, .power_state_set = tonga_set_power_state_tasks, @@ -6212,22 +6258,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .check_states_equal = tonga_check_states_equal, .set_fan_control_mode = tonga_set_fan_control_mode, .get_fan_control_mode = tonga_get_fan_control_mode, - .get_pp_table = tonga_get_pp_table, - .set_pp_table = tonga_set_pp_table, .force_clock_level = tonga_force_clock_level, .print_clock_levels = tonga_print_clock_levels, + .get_sclk_od = tonga_get_sclk_od, + .set_sclk_od = tonga_set_sclk_od, + .get_mclk_od = tonga_get_mclk_od, + .set_mclk_od = tonga_set_mclk_od, }; int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) { - tonga_hwmgr *data; - - data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - memset(data, 0x00, sizeof(tonga_hwmgr)); - - hwmgr->backend = data; hwmgr->hwmgr_func = &tonga_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_tonga_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h index 573cd39fe78d..3961884bfa9b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -352,9 +352,6 @@ struct tonga_hwmgr { bool samu_power_gated; /* 1: gated, 0:not gated */ bool acp_power_gated; /* 1: gated, 0:not gated */ bool pg_acp_init; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; typedef struct tonga_hwmgr tonga_hwmgr; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index dccc859f638c..cfb647f76cbe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -167,8 +167,7 @@ static int get_vddc_lookup_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; - table = (phm_ppt_v1_voltage_lookup_table *) - kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -327,7 +326,7 @@ static int get_valid_clk( table_size = sizeof(uint32_t) + sizeof(uint32_t) * clk_volt_pp_table->count; - table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * mclk_dep_table->ucNumEntries; - mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mclk_table) return -ENOMEM; @@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * tonga_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * polaris_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -504,7 +500,7 @@ 
static int get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -541,7 +537,7 @@ static int get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * mm_dependency_table->ucNumEntries; - mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mm_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mm_table) return -ENOMEM; @@ -1073,13 +1068,9 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - int result = 0; struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (NULL != hwmgr->soft_pp_table) - hwmgr->soft_pp_table = NULL; - kfree(pp_table_information->vdd_dep_on_sclk); pp_table_information->vdd_dep_on_sclk = NULL; @@ -1116,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) kfree(hwmgr->pptable); hwmgr->pptable = NULL; - return result; + return 0; } const struct pp_table_func tonga_pptable_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 50b367d44307..b764c8c05ec8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,6 +132,7 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; + bool powercontainment_enabled; }; enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, @@ -342,6 +343,10 @@ struct amd_powerplay_funcs { int (*set_pp_table)(void *handle, const char *buf, size_t size); int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(void *handle); + int (*set_sclk_od)(void *handle, uint32_t value); + int (*get_mclk_od)(void *handle); + int (*set_mclk_od)(void *handle, uint32_t value); }; struct amd_powerplay { @@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init, int amd_powerplay_fini(void *handle); +int amd_powerplay_reset(void *handle); + int amd_powerplay_display_configuration_change(void *handle, const struct amd_pp_display_configuration *input); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 56f712c7d07a..962cb5385951 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); extern int 
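A recurring cleanup in tonga_processpptables.c above drops the explicit casts on kzalloc(): it returns void *, which C converts implicitly, so the casts add nothing and can hide a missing prototype. These call sites also share a header-plus-N-records sizing idiom, sketched here with an invented record type:

#include <linux/slab.h>
#include <linux/types.h>

/* Invented record type; the shape mirrors the call sites above. */
struct example_record {
	uint32_t clk;
	uint16_t vddc;
};

struct example_table {
	uint32_t count;
	struct example_record entries[];	/* flexible array member */
};

static struct example_table *alloc_example_table(unsigned int n)
{
	size_t size = sizeof(uint32_t) + sizeof(struct example_record) * n;
	struct example_table *t = kzalloc(size, GFP_KERNEL);	/* no cast */

	if (t)
		t->count = n;
	return t;
}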
phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 77e8e33d5870..bf0d2accf7bf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -278,6 +278,8 @@ struct pp_hwmgr_func { int (*dynamic_state_management_enable)( struct pp_hwmgr *hw_mgr); + int (*dynamic_state_management_disable)( + struct pp_hwmgr *hw_mgr); int (*patch_boot_state)(struct pp_hwmgr *hwmgr, struct pp_hw_power_state *hw_ps); @@ -333,11 +335,13 @@ struct pp_hwmgr_func { int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); - int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); - int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); + int (*get_sclk_od)(struct pp_hwmgr *hwmgr); + int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*get_mclk_od)(struct pp_hwmgr *hwmgr); + int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); }; struct pp_table_func { @@ -580,6 +584,7 @@ struct pp_hwmgr { struct pp_smumgr *smumgr; const void *soft_pp_table; uint32_t soft_pp_table_size; + void *hardcode_pp_table; bool need_pp_table_upload; enum amd_dpm_forced_level dpm_level; bool block_hw_access; @@ -609,6 +614,7 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; + bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index fc9e3d1dd409..3c235f0177cd 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle); smum_wait_on_indirect_register(smumgr, \ mm##port##_INDEX, index, value, mask) +#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + +#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field) ) #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ index, value, mask) \ @@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle); (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ SMUM_FIELD_SHIFT(reg, field)))) +#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ + SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field) + #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ port, index, value, mask) \ smum_wait_on_indirect_register(smumgr, \ @@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle); SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) + +#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ + cgs_write_ind_register(device, port, ix##reg, \ + 
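The new SMUM_WAIT_INDIRECT_* and SMUM_*_INDIRECT_FIELD macros compose three tricks: token pasting (ix##reg and mm##port##_INDEX derive register indices from names), field extraction via SMUM_FIELD_MASK/SMUM_FIELD_SHIFT, and delegation to a generic wait routine. The pasting trick in miniature, with made-up constants so it stands alone:

/* Minimal model of the ix##reg / mm##port##_INDEX pasting. */
#define ixFOO_CTRL	0x123	/* indirect index for a fictional FOO_CTRL */
#define mmSMC_IND_INDEX	0x80	/* fictional index register */

int wait_on_indirect(int index_reg, int index, int value, int mask);

#define WAIT_INDIRECT(port, reg, value, mask) \
	wait_on_indirect(mm##port##_INDEX, ix##reg, (value), (mask))

/* WAIT_INDIRECT(SMC_IND, FOO_CTRL, 1, 1) expands to
 * wait_on_indirect(mmSMC_IND_INDEX, ixFOO_CTRL, 1, 1). */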
SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ + reg, field, fieldval)) + + #define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ @@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle); SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) + +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \ + smum_wait_for_indirect_register_unequal(smumgr, \ + mm##port##_INDEX, index, value, mask) + +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) + +#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ + SMUM_FIELD_MASK(reg, field) ) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 0728c1e3d97a..7723473e51a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -23,6 +23,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) handle->smu_mgr = smumgr; switch (smumgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_smum_init(smumgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { case CHIP_TONGA: tonga_smum_init(smumgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b22722eabafc..f42c536b3af1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(smumgr->backend); uint16_t fw_to_load; - int result = 0; struct SMU_DRAMData_TOC *toc; /** * First time this gets called during SmuMgr init, @@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), "Fail to Request SMU Load uCode", return 0); - return result; + return 0; } static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index c89dc777768f..b961a1c6caf3 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; __entry->sched_job = sched_job; - __entry->fence = &sched_job->s_fence->base; + __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); @@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job, ), TP_fast_assign( - __entry->fence = &fence->base; + __entry->fence = &fence->finished; ), TP_printk("fence=%p signaled", __entry->fence) ); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index c16248cee779..ef312bb75fda 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -32,6 +32,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); +static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); struct kmem_cache *sched_fence_slab; atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); @@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, return r; atomic_set(&entity->fence_seq, 0); - entity->fence_context = fence_context_alloc(1); + entity->fence_context = fence_context_alloc(2); return 0; } @@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) s_fence = to_amd_sched_fence(fence); if (s_fence && s_fence->sched == sched) { - /* Fence is from the same scheduler */ - if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { - /* Ignore it when it is already scheduled */ - fence_put(entity->dependency); - return false; - } - /* Wait for fence to be scheduled */ - entity->cb.func = amd_sched_entity_clear_dep; - list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); - return true; + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = fence_get(&s_fence->scheduled); + fence_put(entity->dependency); + entity->dependency = fence; + if (!fence_add_callback(fence, &entity->cb, + amd_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + fence_put(fence); + return false; } if (!fence_add_callback(entity->dependency, &entity->cb, @@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) return added; } -static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) { - struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job); - schedule_work(&job->work_free_job); -} - /* job_finish is called after hw fence signaled, and * the job had already been deleted from ring_mirror_list */ -void amd_sched_job_finish(struct amd_sched_job *s_job) +static void amd_sched_job_finish(struct work_struct *work) { - struct amd_sched_job *next; + struct amd_sched_job *s_job = container_of(work, struct amd_sched_job, + finish_work); struct amd_gpu_scheduler *sched = s_job->sched; + /* remove job from ring_mirror_list */ + spin_lock(&sched->job_list_lock); + list_del_init(&s_job->node); if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - if (cancel_delayed_work(&s_job->work_tdr)) - amd_sched_job_put(s_job); + struct amd_sched_job *next; + + spin_unlock(&sched->job_list_lock); + cancel_delayed_work_sync(&s_job->work_tdr); + spin_lock(&sched->job_list_lock); /* queue TDR for next job */ next = list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node); - if (next) { - INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback); - amd_sched_job_get(next); + if (next) schedule_delayed_work(&next->work_tdr, sched->timeout); - } } + spin_unlock(&sched->job_list_lock); + sched->ops->free_job(s_job); } -void amd_sched_job_begin(struct amd_sched_job *s_job) +static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) +{ + struct amd_sched_job *job = container_of(cb, struct amd_sched_job, + finish_cb); + schedule_work(&job->finish_work); +} + +static void amd_sched_job_begin(struct amd_sched_job *s_job) { struct amd_gpu_scheduler *sched = s_job->sched; + 
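The reworked dependency path above leans on a fence API convention worth spelling out: fence_add_callback() returns -ENOENT when the fence has already signaled, in which case no callback will ever fire and the caller must complete synchronously, exactly as the run_job error paths in this file do. A sketch using this kernel's pre-dma_fence API names:

#include <linux/fence.h>
#include <drm/drmP.h>

static void on_done(struct fence *f, struct fence_cb *cb)
{
	/* completion work shared by both paths would go here */
}

static void arm_or_complete(struct fence *f, struct fence_cb *cb)
{
	int r = fence_add_callback(f, cb, on_done);

	if (r == -ENOENT)	/* already signaled: no callback will fire */
		on_done(f, cb);
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
}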
spin_lock(&sched->job_list_lock); + list_add_tail(&s_job->node, &sched->ring_mirror_list); if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) - { - INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); - amd_sched_job_get(s_job); + list_first_entry_or_null(&sched->ring_mirror_list, + struct amd_sched_job, node) == s_job) + schedule_delayed_work(&s_job->work_tdr, sched->timeout); + spin_unlock(&sched->job_list_lock); +} + +static void amd_sched_job_timedout(struct work_struct *work) +{ + struct amd_sched_job *job = container_of(work, struct amd_sched_job, + work_tdr.work); + + job->sched->ops->timedout_job(job); +} + +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_job *s_job; + + spin_lock(&sched->job_list_lock); + list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { + if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { + fence_put(s_job->s_fence->parent); + s_job->s_fence->parent = NULL; + } + } + atomic_set(&sched->hw_rq_count, 0); + spin_unlock(&sched->job_list_lock); +} + +void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_job *s_job, *tmp; + int r; + + spin_lock(&sched->job_list_lock); + s_job = list_first_entry_or_null(&sched->ring_mirror_list, + struct amd_sched_job, node); + if (s_job) schedule_delayed_work(&s_job->work_tdr, sched->timeout); + + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + struct amd_sched_fence *s_fence = s_job->s_fence; + struct fence *fence; + + spin_unlock(&sched->job_list_lock); + fence = sched->ops->run_job(s_job); + atomic_inc(&sched->hw_rq_count); + if (fence) { + s_fence->parent = fence_get(fence); + r = fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); + if (r == -ENOENT) + amd_sched_process_job(fence, &s_fence->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", + r); + fence_put(fence); + } else { + DRM_ERROR("Failed to run job!\n"); + amd_sched_process_job(NULL, &s_fence->cb); + } + spin_lock(&sched->job_list_lock); } + spin_unlock(&sched->job_list_lock); } /** @@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; - sched_job->use_sched = 1; - fence_add_callback(&sched_job->s_fence->base, - &sched_job->cb_free_job, amd_sched_free_job); trace_amd_sched_job(sched_job); + fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, + amd_sched_job_finish_cb); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); } /* init a sched_job with basic field */ int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void (*timeout_cb)(struct work_struct *work), - void (*free_cb)(struct kref *refcount), - void *owner, struct fence **fence) + struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + void *owner) { - INIT_LIST_HEAD(&job->node); - kref_init(&job->refcount); job->sched = sched; job->s_entity = entity; job->s_fence = amd_sched_fence_create(entity, owner); if (!job->s_fence) return -ENOMEM; - job->s_fence->s_job = job; - job->timeout_callback = timeout_cb; - job->free_callback = free_cb; + INIT_WORK(&job->finish_work, amd_sched_job_finish); + INIT_LIST_HEAD(&job->node); + INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout); - if (fence) - *fence = &job->s_fence->base; return 0; } @@ -450,23 +516,25 
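The new TDR flow keeps at most one watchdog armed per scheduler: amd_sched_job_begin() schedules work_tdr only for the job at the head of ring_mirror_list, and amd_sched_job_finish() cancels it with cancel_delayed_work_sync() before re-arming the next job's timer. The underlying delayed-work pattern, reduced to essentials:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct watched_job {
	struct delayed_work work_tdr;
};

static void job_timedout(struct work_struct *work)
{
	struct watched_job *job =
		container_of(work, struct watched_job, work_tdr.work);

	/* the driver's timedout_job() hook runs recovery here */
	(void)job;
}

static void job_submit(struct watched_job *job, unsigned long timeout)
{
	INIT_DELAYED_WORK(&job->work_tdr, job_timedout);
	schedule_delayed_work(&job->work_tdr, timeout);	/* arm the watchdog */
}

static void job_done(struct watched_job *job)
{
	cancel_delayed_work_sync(&job->work_tdr);  /* disarm; waits if running */
}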
@@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) struct amd_sched_fence *s_fence = container_of(cb, struct amd_sched_fence, cb); struct amd_gpu_scheduler *sched = s_fence->sched; - unsigned long flags; atomic_dec(&sched->hw_rq_count); - - /* remove job from ring_mirror_list */ - spin_lock_irqsave(&sched->job_list_lock, flags); - list_del_init(&s_fence->s_job->node); - sched->ops->finish_job(s_fence->s_job); - spin_unlock_irqrestore(&sched->job_list_lock, flags); - - amd_sched_fence_signal(s_fence); + amd_sched_fence_finished(s_fence); trace_amd_sched_process_job(s_fence); - fence_put(&s_fence->base); + fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } +static bool amd_sched_blocked(struct amd_gpu_scheduler *sched) +{ + if (kthread_should_park()) { + kthread_parkme(); + return true; + } + + return false; +} + static int amd_sched_main(void *param) { struct sched_param sparam = {.sched_priority = 1}; @@ -476,14 +544,15 @@ static int amd_sched_main(void *param) sched_setscheduler(current, SCHED_FIFO, &sparam); while (!kthread_should_stop()) { - struct amd_sched_entity *entity; + struct amd_sched_entity *entity = NULL; struct amd_sched_fence *s_fence; struct amd_sched_job *sched_job; struct fence *fence; wait_event_interruptible(sched->wake_up_worker, - (entity = amd_sched_select_entity(sched)) || - kthread_should_stop()); + (!amd_sched_blocked(sched) && + (entity = amd_sched_select_entity(sched))) || + kthread_should_stop()); if (!entity) continue; @@ -495,16 +564,19 @@ static int amd_sched_main(void *param) s_fence = sched_job->s_fence; atomic_inc(&sched->hw_rq_count); - amd_sched_job_pre_schedule(sched, sched_job); + amd_sched_job_begin(sched_job); + fence = sched->ops->run_job(sched_job); amd_sched_fence_scheduled(s_fence); if (fence) { + s_fence->parent = fence_get(fence); r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); + DRM_ERROR("fence add callback failed (%d)\n", + r); fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 070095a9433c..7cbbbfb502ef 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,8 +27,6 @@ #include <linux/kfifo.h> #include <linux/fence.h> -#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS - struct amd_gpu_scheduler; struct amd_sched_rq; @@ -68,36 +66,34 @@ struct amd_sched_rq { }; struct amd_sched_fence { - struct fence base; + struct fence scheduled; + struct fence finished; struct fence_cb cb; - struct list_head scheduled_cb; + struct fence *parent; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; - struct amd_sched_job *s_job; }; struct amd_sched_job { - struct kref refcount; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - bool use_sched; /* true if the job goes to scheduler */ - struct fence_cb cb_free_job; - struct work_struct work_free_job; - struct list_head node; - struct delayed_work work_tdr; - void (*timeout_callback) (struct work_struct *work); - void (*free_callback)(struct kref *refcount); + struct fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; }; -extern const struct fence_ops amd_sched_fence_ops; +extern const struct fence_ops 
amd_sched_fence_ops_scheduled; +extern const struct fence_ops amd_sched_fence_ops_finished; static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) { - struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base); + if (f->ops == &amd_sched_fence_ops_scheduled) + return container_of(f, struct amd_sched_fence, scheduled); - if (__f->base.ops == &amd_sched_fence_ops) - return __f; + if (f->ops == &amd_sched_fence_ops_finished) + return container_of(f, struct amd_sched_fence, finished); return NULL; } @@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) struct amd_sched_backend_ops { struct fence *(*dependency)(struct amd_sched_job *sched_job); struct fence *(*run_job)(struct amd_sched_job *sched_job); - void (*begin_job)(struct amd_sched_job *sched_job); - void (*finish_job)(struct amd_sched_job *sched_job); + void (*timedout_job)(struct amd_sched_job *sched_job); + void (*free_job)(struct amd_sched_job *sched_job); }; enum amd_sched_priority { @@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); void amd_sched_fence_scheduled(struct amd_sched_fence *fence); -void amd_sched_fence_signal(struct amd_sched_fence *fence); +void amd_sched_fence_finished(struct amd_sched_fence *fence); int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void (*timeout_cb)(struct work_struct *work), - void (*free_cb)(struct kref* refcount), - void *owner, struct fence **fence); -void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , - struct amd_sched_job *s_job); -void amd_sched_job_finish(struct amd_sched_job *s_job); -void amd_sched_job_begin(struct amd_sched_job *s_job); -static inline void amd_sched_job_get(struct amd_sched_job *job) { - if (job) - kref_get(&job->refcount); -} - -static inline void amd_sched_job_put(struct amd_sched_job *job) { - if (job) - kref_put(&job->refcount, job->free_callback); -} - + struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + void *owner); +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched); +void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 2a732c490375..6b63beaf7574 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -27,7 +27,8 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" -struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner) +struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, + void *owner) { struct amd_sched_fence *fence = NULL; unsigned seq; @@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity if (fence == NULL) return NULL; - INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; - fence->sched = s_entity->sched; + fence->sched = entity->sched; spin_lock_init(&fence->lock); - seq = atomic_inc_return(&s_entity->fence_seq); - fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock, - s_entity->fence_context, seq); + seq = atomic_inc_return(&entity->fence_seq); + fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, seq); + fence_init(&fence->finished, &amd_sched_fence_ops_finished, + &fence->lock, 
entity->fence_context + 1, seq); return fence; } -void amd_sched_fence_signal(struct amd_sched_fence *fence) +void amd_sched_fence_scheduled(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->base); + int ret = fence_signal(&fence->scheduled); + if (!ret) - FENCE_TRACE(&fence->base, "signaled from irq context\n"); + FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); else - FENCE_TRACE(&fence->base, "was already signaled\n"); -} - -void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , - struct amd_sched_job *s_job) -{ - unsigned long flags; - spin_lock_irqsave(&sched->job_list_lock, flags); - list_add_tail(&s_job->node, &sched->ring_mirror_list); - sched->ops->begin_job(s_job); - spin_unlock_irqrestore(&sched->job_list_lock, flags); + FENCE_TRACE(&fence->scheduled, "was already signaled\n"); } -void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +void amd_sched_fence_finished(struct amd_sched_fence *fence) { - struct fence_cb *cur, *tmp; + int ret = fence_signal(&fence->finished); - set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); - list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { - list_del_init(&cur->node); - cur->func(&s_fence->base, cur); - } + if (!ret) + FENCE_TRACE(&fence->finished, "signaled from irq context\n"); + else + FENCE_TRACE(&fence->finished, "was already signaled\n"); } static const char *amd_sched_fence_get_driver_name(struct fence *fence) @@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu) { struct fence *f = container_of(rcu, struct fence, rcu); struct amd_sched_fence *fence = to_amd_sched_fence(f); + + fence_put(fence->parent); kmem_cache_free(sched_fence_slab, fence); } @@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu) * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence. */ -static void amd_sched_fence_release(struct fence *f) +static void amd_sched_fence_release_scheduled(struct fence *f) { - call_rcu(&f->rcu, amd_sched_fence_free); + struct amd_sched_fence *fence = to_amd_sched_fence(f); + + call_rcu(&fence->finished.rcu, amd_sched_fence_free); } -const struct fence_ops amd_sched_fence_ops = { +/** + * amd_sched_fence_release_scheduled - drop extra reference + * + * @f: fence + * + * Drop the extra reference from the scheduled fence to the base fence. 
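With begin_job()/finish_job() replaced by timedout_job()/free_job(), and amd_sched_job_init() trimmed to four arguments, a backend now wires up roughly as follows. This is a hypothetical consumer sketch (my_job, my_hw_submit and my_trigger_reset are invented for illustration, not amdgpu code):

#include <linux/slab.h>
#include "gpu_scheduler.h"

struct my_job {
	struct amd_sched_job base;
	/* ... backend state ... */
};

static struct fence *my_hw_submit(struct my_job *job);
static void my_trigger_reset(struct amd_gpu_scheduler *sched);

static struct fence *my_dependency(struct amd_sched_job *sched_job)
{
	return NULL;	/* nothing extra to wait on */
}

static struct fence *my_run_job(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* the returned hardware fence is kept by the scheduler as s_fence->parent */
	return my_hw_submit(job);
}

static void my_timedout_job(struct amd_sched_job *sched_job)
{
	/* the job sat on the hardware longer than the scheduler's timeout */
	my_trigger_reset(sched_job->sched);
}

static void my_free_job(struct amd_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_sched_ops = {
	.dependency   = my_dependency,
	.run_job      = my_run_job,
	.timedout_job = my_timedout_job,
	.free_job     = my_free_job,
};

Submission then becomes amd_sched_job_init(&job->base, sched, entity, owner) followed by amd_sched_entity_push_job(&job->base); the scheduler itself now maintains the ring mirror list and frees the job through ->free_job().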
+ */ +static void amd_sched_fence_release_finished(struct fence *f) +{ + struct amd_sched_fence *fence = to_amd_sched_fence(f); + + fence_put(&fence->scheduled); +} + +const struct fence_ops amd_sched_fence_ops_scheduled = { + .get_driver_name = amd_sched_fence_get_driver_name, + .get_timeline_name = amd_sched_fence_get_timeline_name, + .enable_signaling = amd_sched_fence_enable_signaling, + .signaled = NULL, + .wait = fence_default_wait, + .release = amd_sched_fence_release_scheduled, +}; + +const struct fence_ops amd_sched_fence_ops_finished = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, .wait = fence_default_wait, - .release = amd_sched_fence_release, + .release = amd_sched_fence_release_finished, }; diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig index f9a13b658fea..f47d88ba4fa5 100644 --- a/drivers/gpu/drm/arc/Kconfig +++ b/drivers/gpu/drm/arc/Kconfig @@ -2,7 +2,6 @@ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF select DRM_KMS_CMA_HELPER - select DRM_KMS_FB_HELPER select DRM_KMS_HELPER help Choose this option if you have an ARC PGU controller. diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile index d48fda70f857..73de56a0139a 100644 --- a/drivers/gpu/drm/arc/Makefile +++ b/drivers/gpu/drm/arc/Makefile @@ -1,2 +1,2 @@ -arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o +arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h index 86574b698a78..e8fcf3ab1d9a 100644 --- a/drivers/gpu/drm/arc/arcpgu.h +++ b/drivers/gpu/drm/arc/arcpgu.h @@ -22,7 +22,6 @@ struct arcpgu_drm_private { struct clk *clk; struct drm_fbdev_cma *fbdev; struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; }; @@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu, int arc_pgu_setup_crtc(struct drm_device *dev); int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np); +int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np); struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 92f8beff8e60..ee0a61c2861b 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc, static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &arcpgu->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 
76e187a5bde0..6d4ff34737cb 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -28,21 +28,14 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev) { struct arcpgu_drm_private *arcpgu = dev->dev_private; - if (arcpgu->fbdev) - drm_fbdev_cma_hotplug_event(arcpgu->fbdev); -} - -static int arcpgu_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool async) -{ - return drm_atomic_helper_commit(dev, state, false); + drm_fbdev_cma_hotplug_event(arcpgu->fbdev); } static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = arcpgu_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = arcpgu_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void arcpgu_setup_mode_config(struct drm_device *drm) @@ -55,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm) drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs; } -int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma) +static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; @@ -81,22 +74,6 @@ static const struct file_operations arcpgu_drm_ops = { .mmap = arcpgu_gem_mmap, }; -static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file) -{ - struct arcpgu_drm_private *arcpgu = drm->dev_private; - struct drm_pending_vblank_event *e, *t; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) { - if (e->base.file_priv != file) - continue; - list_del(&e->base.link); - e->base.destroy(&e->base); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static void arcpgu_lastclose(struct drm_device *drm) { struct arcpgu_drm_private *arcpgu = drm->dev_private; @@ -122,16 +99,12 @@ static int arcpgu_load(struct drm_device *drm) if (IS_ERR(arcpgu->clk)) return PTR_ERR(arcpgu->clk); - INIT_LIST_HEAD(&arcpgu->event_list); - arcpgu_setup_mode_config(drm); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); arcpgu->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(arcpgu->regs)) { - dev_err(drm->dev, "Could not remap IO mem\n"); + if (IS_ERR(arcpgu->regs)) return PTR_ERR(arcpgu->regs); - } dev_info(drm->dev, "arc_pgu ID: 0x%x\n", arc_pgu_read(arcpgu, ARCPGU_REG_ID)); @@ -149,15 +122,17 @@ static int arcpgu_load(struct drm_device *drm) /* find the encoder node and initialize it */ encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); - if (!encoder_node) { - dev_err(drm->dev, "failed to get an encoder slave node\n"); - return -ENODEV; + if (encoder_node) { + ret = arcpgu_drm_hdmi_init(drm, encoder_node); + of_node_put(encoder_node); + if (ret < 0) + return ret; + } else { + ret = arcpgu_drm_sim_init(drm, NULL); + if (ret < 0) + return ret; } - ret = arcpgu_drm_hdmi_init(drm, encoder_node); - if (ret < 0) - return ret; - drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); @@ -174,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm) return 0; } -int arcpgu_unload(struct drm_device *drm) +static int arcpgu_unload(struct drm_device *drm) { struct arcpgu_drm_private *arcpgu = drm->dev_private; @@ -192,7 +167,6 @@ int arcpgu_unload(struct drm_device *drm) static struct drm_driver arcpgu_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .preclose = arcpgu_preclose, .lastclose = arcpgu_lastclose, .name = "drm-arcpgu", .desc = "ARC PGU Controller", @@ -207,7 
+181,7 @@ static struct drm_driver arcpgu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, @@ -235,15 +209,8 @@ static int arcpgu_probe(struct platform_device *pdev) if (ret) goto err_unload; - ret = drm_connector_register_all(drm); - if (ret) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(drm); - err_unload: arcpgu_unload(drm); @@ -257,7 +224,6 @@ static int arcpgu_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); - drm_connector_unregister_all(drm); drm_dev_unregister(drm); arcpgu_unload(drm); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index 08b6baeb320d..b7a8b2ac4055 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) return sfuncs->get_modes(&slave->base, connector); } -struct drm_encoder * -arcpgu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct drm_encoder_slave *slave; - struct arcpgu_drm_connector *con = - container_of(connector, struct arcpgu_drm_connector, connector); - - slave = con->encoder_slave; - if (slave == NULL) { - dev_err(connector->dev->dev, - "connector_best_encoder: cannot find slave encoder for connector\n"); - return NULL; - } - - return &slave->base; -} - static enum drm_connector_status arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) { @@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs arcpgu_drm_connector_helper_funcs = { .get_modes = arcpgu_drm_connector_get_modes, - .best_encoder = arcpgu_drm_connector_best_encoder, }; static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c new file mode 100644 index 000000000000..2bf06d71556a --- /dev/null +++ b/drivers/gpu/drm/arc/arcpgu_sim.c @@ -0,0 +1,128 @@ +/* + * ARC PGU DRM driver. + * + * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
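Dropping drm_connector_register_all() from probe (and its counterpart from remove) follows the core change that makes drm_dev_register() register all modeset objects itself; likewise, the ->best_encoder() hook removed from the HDMI connector can go because the atomic helpers default to the connector's first (and here only) encoder. A probe function therefore shrinks to the usual alloc/load/register ladder — a condensed sketch, where my_driver, my_load and my_unload stand in for the driver's own pieces:

#include <linux/platform_device.h>
#include <drm/drmP.h>

extern struct drm_driver my_driver;
static int my_load(struct drm_device *drm);	/* create CRTCs, encoders, connectors */
static void my_unload(struct drm_device *drm);

static int my_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&my_driver, &pdev->dev);
	if (!drm)
		return -ENOMEM;

	ret = my_load(drm);
	if (ret)
		goto err_unref;

	ret = drm_dev_register(drm, 0);	/* also registers the connectors now */
	if (ret)
		goto err_unload;

	return 0;

err_unload:
	my_unload(drm);
err_unref:
	drm_dev_unref(drm);
	return ret;
}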
+ * + */ + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder_slave.h> +#include <drm/drm_atomic_helper.h> + +#include "arcpgu.h" + +#define XRES_DEF 640 +#define YRES_DEF 480 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + + +struct arcpgu_drm_connector { + struct drm_connector connector; + struct drm_encoder_slave *encoder_slave; +}; + +static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + return count; +} + +static enum drm_connector_status +arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void arcpgu_drm_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_helper_funcs +arcpgu_drm_connector_helper_funcs = { + .get_modes = arcpgu_drm_connector_get_modes, +}; + +static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = arcpgu_drm_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = arcpgu_drm_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np) +{ + struct arcpgu_drm_connector *arcpgu_connector; + struct drm_encoder_slave *encoder; + struct drm_connector *connector; + int ret; + + encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); + if (encoder == NULL) + return -ENOMEM; + + encoder->base.possible_crtcs = 1; + encoder->base.possible_clones = 0; + + ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) + return ret; + + arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), + GFP_KERNEL); + if (!arcpgu_connector) { + ret = -ENOMEM; + goto error_encoder_cleanup; + } + + connector = &arcpgu_connector->connector; + drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); + + ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret < 0) { + dev_err(drm->dev, "failed to initialize drm connector\n"); + goto error_encoder_cleanup; + } + + ret = drm_mode_connector_attach_encoder(connector, &encoder->base); + if (ret < 0) { + dev_err(drm->dev, "could not attach connector to encoder\n"); + drm_connector_unregister(connector); + goto error_connector_cleanup; + } + + arcpgu_connector->encoder_slave = encoder; + + return 0; + +error_connector_cleanup: + drm_connector_cleanup(connector); + +error_encoder_cleanup: + drm_encoder_cleanup(&encoder->base); + return ret; +} diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index eaed454e043c..9a18e1bd57b4 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -9,7 +9,6 @@ config DRM_HDLCD depends on COMMON_CLK select DRM_ARM select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER help Choose this option if you have an ARM High Definition Colour LCD @@ -25,3 +24,19 @@ config DRM_HDLCD_SHOW_UNDERRUN Enable this option to show in red colour the pixels that the 
HDLCD device did not fetch from framebuffer due to underrun conditions. + +config DRM_MALI_DISPLAY + tristate "ARM Mali Display Processor" + depends on DRM && OF && (ARM || ARM64) + depends on COMMON_CLK + select DRM_ARM + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + select VIDEOMODE_HELPERS + help + Choose this option if you want to compile the ARM Mali Display + Processor driver. It supports the DP500, DP550 and DP650 variants + of the hardware. + + If compiled as a module it will be called mali-dp. diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile index 89dcb7bab93a..bb8b158ff90d 100644 --- a/drivers/gpu/drm/arm/Makefile +++ b/drivers/gpu/drm/arm/Makefile @@ -1,2 +1,4 @@ hdlcd-y := hdlcd_drv.o hdlcd_crtc.o obj-$(CONFIG_DRM_HDLCD) += hdlcd.o +mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o +obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 0813c2f06931..48019ae22ddb 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } } -static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ -} - -static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .mode_fixup = hdlcd_crtc_mode_fixup, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .mode_set_nofb = hdlcd_crtc_mode_set_nofb, .enable = hdlcd_crtc_enable, .disable = hdlcd_crtc_disable, - .prepare = hdlcd_crtc_disable, - .commit = hdlcd_crtc_enable, .atomic_check = hdlcd_crtc_atomic_check, .atomic_begin = hdlcd_crtc_atomic_begin, - .atomic_flush = hdlcd_crtc_atomic_flush, }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index a6ca36f0096f..d83b46a30327 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -102,21 +102,14 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm) { struct hdlcd_drm_private *hdlcd = drm->dev_private; - if (hdlcd->fbdev) - drm_fbdev_cma_hotplug_event(hdlcd->fbdev); -} - -static int hdlcd_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool nonblock) -{ - return drm_atomic_helper_commit(dev, state, false); + drm_fbdev_cma_hotplug_event(hdlcd->fbdev); } static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = hdlcd_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = hdlcd_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void hdlcd_setup_mode_config(struct drm_device *drm) @@ -296,7 +289,7 @@ static struct drm_driver hdlcd_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = hdlcd_enable_vblank, .disable_vblank = hdlcd_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c new file mode 100644 index 000000000000..08e6a71f5d05 --- /dev/null +++ 
b/drivers/gpu/drm/arm/malidp_crtc.c @@ -0,0 +1,216 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 driver (crtc operations) + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <linux/clk.h> +#include <video/videomode.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" + +static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + /* + * check that the hardware can drive the required clock rate, + * but skip the check if the clock is meant to be disabled (req_rate = 0) + */ + long rate, req_rate = mode->crtc_clock * 1000; + + if (req_rate) { + rate = clk_round_rate(hwdev->mclk, req_rate); + if (rate < req_rate) { + DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n", + mode->crtc_clock); + return false; + } + + rate = clk_round_rate(hwdev->pxlclk, req_rate); + if (rate != req_rate) { + DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", + req_rate); + return false; + } + } + + return true; +} + +static void malidp_crtc_enable(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct videomode vm; + + drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); + + clk_prepare_enable(hwdev->pxlclk); + + /* mclk needs to be set to the same or higher rate than pxlclk */ + clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); + clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); + + hwdev->modeset(hwdev, &vm); + hwdev->leave_config_mode(hwdev); + drm_crtc_vblank_on(crtc); +} + +static void malidp_crtc_disable(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + drm_crtc_vblank_off(crtc); + hwdev->enter_config_mode(hwdev); + clk_disable_unprepare(hwdev->pxlclk); +} + +static int malidp_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + u32 rot_mem_free, rot_mem_usable; + int rotated_planes = 0; + + /* + * check if there is enough rotation memory available for planes + * that need 90° and 270° rotation. Each plane has set its required + * memory size in the ->plane_check() callback, here we only make + * sure that the sums are less that the total usable memory. + * + * The rotation memory allocation algorithm (for each plane): + * a. If no more rotated planes exist, all remaining rotate + * memory in the bank is available for use by the plane. + * b. If other rotated planes exist, and plane's layer ID is + * DE_VIDEO1, it can use all the memory from first bank if + * secondary rotation memory bank is available, otherwise it can + * use up to half the bank's memory. + * c. 
If other rotated planes exist, and plane's layer ID is not + * DE_VIDEO1, it can use half of the available memory + * + * Note: this algorithm assumes that the order in which the planes are + * checked always has DE_VIDEO1 plane first in the list if it is + * rotated. Because that is how we create the planes in the first + * place, under current DRM version things work, but if ever the order + * in which drm_atomic_crtc_state_for_each_plane() iterates over planes + * changes, we need to pre-sort the planes before validation. + */ + + /* first count the number of rotated planes */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + if (pstate->rotation & MALIDP_ROTATED_MASK) + rotated_planes++; + } + + rot_mem_free = hwdev->rotation_memory[0]; + /* + * if we have more than 1 plane using rotation memory, use the second + * block of rotation memory as well + */ + if (rotated_planes > 1) + rot_mem_free += hwdev->rotation_memory[1]; + + /* now validate the rotation memory requirements */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(pstate); + + if (pstate->rotation & MALIDP_ROTATED_MASK) { + /* process current plane */ + rotated_planes--; + + if (!rotated_planes) { + /* no more rotated planes, we can use what's left */ + rot_mem_usable = rot_mem_free; + } else { + if ((mp->layer->id != DE_VIDEO1) || + (hwdev->rotation_memory[1] == 0)) + rot_mem_usable = rot_mem_free / 2; + else + rot_mem_usable = hwdev->rotation_memory[0]; + } + + rot_mem_free -= rot_mem_usable; + + if (ms->rotmem_size > rot_mem_usable) + return -EINVAL; + } + } + + return 0; +} + +static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { + .mode_fixup = malidp_crtc_mode_fixup, + .enable = malidp_crtc_enable, + .disable = malidp_crtc_disable, + .atomic_check = malidp_crtc_atomic_check, +}; + +static const struct drm_crtc_funcs malidp_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +int malidp_crtc_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct drm_plane *primary = NULL, *plane; + int ret; + + ret = malidp_de_planes_init(drm); + if (ret < 0) { + DRM_ERROR("Failed to initialise planes\n"); + return ret; + } + + drm_for_each_plane(plane, drm) { + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + primary = plane; + break; + } + } + + if (!primary) { + DRM_ERROR("no primary plane found\n"); + ret = -EINVAL; + goto crtc_cleanup_planes; + } + + ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, + &malidp_crtc_funcs, NULL); + + if (!ret) { + drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); + return 0; + } + +crtc_cleanup_planes: + malidp_de_planes_destroy(drm); + + return ret; +} diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c new file mode 100644 index 000000000000..82171d223f2d --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -0,0 +1,519 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
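The bank-splitting rules spelled out in the malidp_crtc_atomic_check() comment above are easier to follow with concrete numbers. Below is a standalone userspace re-implementation of the per-plane split (mirroring rules a-c; not driver code) for a DP550-style device with two 64 KB banks and two rotated planes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the per-plane split in malidp_crtc_atomic_check() */
static uint32_t usable(uint32_t *free, int planes_left, bool is_video1,
		       bool have_bank2, uint32_t bank0)
{
	uint32_t mem;

	if (planes_left == 0)			/* rule a: last plane takes the rest */
		mem = *free;
	else if (is_video1 && have_bank2)	/* rule b: DE_VIDEO1 may take bank 0 */
		mem = bank0;
	else					/* rule c: half of what is free */
		mem = *free / 2;
	*free -= mem;
	return mem;
}

int main(void)
{
	uint32_t bank0 = 64 * 1024;
	uint32_t free = bank0 + 64 * 1024;	/* two rotated planes: both banks */

	/* DE_VIDEO1 is created (and thus checked) first */
	printf("DE_VIDEO1: %u bytes\n", usable(&free, 1, true, true, bank0));	/* 65536 */
	printf("other:     %u bytes\n", usable(&free, 0, false, true, bank0));	/* 65536 */
	return 0;
}

With both banks in play, DE_VIDEO1 gets the whole first bank and the remaining rotated plane the whole second one, which is exactly what the a/b/c rules are arranged to produce.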
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver + */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> + +#include "malidp_drv.h" +#include "malidp_regs.h" +#include "malidp_hw.h" + +#define MALIDP_CONF_VALID_TIMEOUT 250 + +/* + * set the "config valid" bit and wait until the hardware acts on it + */ +static int malidp_set_and_wait_config_valid(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + hwdev->set_config_valid(hwdev); + /* don't wait for config_valid flag if we are in config mode */ + if (hwdev->in_config_mode(hwdev)) + return 0; + + ret = wait_event_interruptible_timeout(malidp->wq, + atomic_read(&malidp->config_valid) == 1, + msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT)); + + return (ret > 0) ? 0 : -ETIMEDOUT; +} + +static void malidp_output_poll_changed(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + + drm_fbdev_cma_hotplug_event(malidp->fbdev); +} + +static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_pending_vblank_event *event; + struct drm_device *drm = state->dev; + struct malidp_drm *malidp = drm->dev_private; + int ret = malidp_set_and_wait_config_valid(drm); + + if (ret) + DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + + event = malidp->crtc.state->event; + if (event) { + malidp->crtc.state->event = NULL; + + spin_lock_irq(&drm->event_lock); + if (drm_crtc_vblank_get(&malidp->crtc) == 0) + drm_crtc_arm_vblank_event(&malidp->crtc, event); + else + drm_crtc_send_vblank_event(&malidp->crtc, event); + spin_unlock_irq(&drm->event_lock); + } + drm_atomic_helper_commit_hw_done(state); +} + +static void malidp_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *drm = state->dev; + + drm_atomic_helper_commit_modeset_disables(drm, state); + drm_atomic_helper_commit_modeset_enables(drm, state); + drm_atomic_helper_commit_planes(drm, state, true); + + malidp_atomic_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); +} + +static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = { + .atomic_commit_tail = malidp_atomic_commit_tail, +}; + +static const struct drm_mode_config_funcs malidp_mode_config_funcs = { + .fb_create = drm_fb_cma_create, + .output_poll_changed = malidp_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.vsync_irq); + return 0; +} + +static void malidp_disable_vblank(struct 
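malidp_set_and_wait_config_valid() above is only half of a handshake: the other half lives in the DE interrupt handler further down, which sets config_valid and wakes malidp->wq when the hardware raises its config-valid interrupt. Boiled down to the two cooperating sides, as a simplified sketch around a hypothetical cfg_ctx (not the driver's exact flow):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct cfg_ctx {
	wait_queue_head_t wq;
	atomic_t config_valid;
};

static void hw_kick_config_valid(struct cfg_ctx *ctx);	/* sets the CONFIG_VALID bit */

static int commit_and_wait(struct cfg_ctx *ctx)
{
	long ret;

	atomic_set(&ctx->config_valid, 0);
	hw_kick_config_valid(ctx);

	ret = wait_event_interruptible_timeout(ctx->wq,
			atomic_read(&ctx->config_valid) == 1,
			msecs_to_jiffies(250));
	return (ret > 0) ? 0 : -ETIMEDOUT;
}

/* called from the IRQ path when the config-valid interrupt fires */
static void config_valid_irq(struct cfg_ctx *ctx)
{
	atomic_set(&ctx->config_valid, 1);
	wake_up(&ctx->wq);
}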
drm_device *drm, unsigned int pipe) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.vsync_irq); +} + +static int malidp_init(struct drm_device *drm) +{ + int ret; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + drm_mode_config_init(drm); + + drm->mode_config.min_width = hwdev->min_line_size; + drm->mode_config.min_height = hwdev->min_line_size; + drm->mode_config.max_width = hwdev->max_line_size; + drm->mode_config.max_height = hwdev->max_line_size; + drm->mode_config.funcs = &malidp_mode_config_funcs; + drm->mode_config.helper_private = &malidp_mode_config_helpers; + + ret = malidp_crtc_init(drm); + if (ret) { + drm_mode_config_cleanup(drm); + return ret; + } + + return 0; +} + +static int malidp_irq_init(struct platform_device *pdev) +{ + int irq_de, irq_se, ret = 0; + struct drm_device *drm = dev_get_drvdata(&pdev->dev); + + /* fetch the interrupts from DT */ + irq_de = platform_get_irq_byname(pdev, "DE"); + if (irq_de < 0) { + DRM_ERROR("no 'DE' IRQ specified!\n"); + return irq_de; + } + irq_se = platform_get_irq_byname(pdev, "SE"); + if (irq_se < 0) { + DRM_ERROR("no 'SE' IRQ specified!\n"); + return irq_se; + } + + ret = malidp_de_irq_init(drm, irq_de); + if (ret) + return ret; + + ret = malidp_se_irq_init(drm, irq_se); + if (ret) { + malidp_de_irq_fini(drm); + return ret; + } + + return 0; +} + +static void malidp_lastclose(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + + drm_fbdev_cma_restore_mode(malidp->fbdev); +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, + .mmap = drm_gem_cma_mmap, +}; + +static struct drm_driver malidp_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | + DRIVER_PRIME, + .lastclose = malidp_lastclose, + .get_vblank_counter = drm_vblank_no_hw_counter, + .enable_vblank = malidp_enable_vblank, + .disable_vblank = malidp_disable_vblank, + .gem_free_object_unlocked = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = drm_gem_cma_dumb_create, + .dumb_map_offset = drm_gem_cma_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .fops = &fops, + .name = "mali-dp", + .desc = "ARM Mali Display Processor driver", + .date = "20160106", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id malidp_drm_of_match[] = { + { + .compatible = "arm,mali-dp500", + .data = &malidp_device[MALIDP_500] + }, + { + .compatible = "arm,mali-dp550", + .data = &malidp_device[MALIDP_550] + }, + { + .compatible = "arm,mali-dp650", + .data = &malidp_device[MALIDP_650] + }, + {}, +}; +MODULE_DEVICE_TABLE(of, malidp_drm_of_match); + +#define MAX_OUTPUT_CHANNELS 3 + +static int malidp_bind(struct device *dev) +{ + struct resource *res; + struct 
drm_device *drm; + struct device_node *ep; + struct malidp_drm *malidp; + struct malidp_hw_device *hwdev; + struct platform_device *pdev = to_platform_device(dev); + /* number of lines for the R, G and B output */ + u8 output_width[MAX_OUTPUT_CHANNELS]; + int ret = 0, i; + u32 version, out_depth = 0; + + malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL); + if (!malidp) + return -ENOMEM; + + hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + /* + * copy the associated data from malidp_drm_of_match to avoid + * having to keep a reference to the OF node after binding + */ + memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); + malidp->dev = hwdev; + + INIT_LIST_HEAD(&malidp->event_list); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hwdev->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hwdev->regs)) + return PTR_ERR(hwdev->regs); + + hwdev->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(hwdev->pclk)) + return PTR_ERR(hwdev->pclk); + + hwdev->aclk = devm_clk_get(dev, "aclk"); + if (IS_ERR(hwdev->aclk)) + return PTR_ERR(hwdev->aclk); + + hwdev->mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(hwdev->mclk)) + return PTR_ERR(hwdev->mclk); + + hwdev->pxlclk = devm_clk_get(dev, "pxlclk"); + if (IS_ERR(hwdev->pxlclk)) + return PTR_ERR(hwdev->pxlclk); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + + drm = drm_dev_alloc(&malidp_driver, dev); + if (!drm) { + ret = -ENOMEM; + goto alloc_fail; + } + + /* Enable APB clock in order to get access to the registers */ + clk_prepare_enable(hwdev->pclk); + /* + * Enable AXI clock and main clock so that prefetch can start once + * the registers are set + */ + clk_prepare_enable(hwdev->aclk); + clk_prepare_enable(hwdev->mclk); + + ret = hwdev->query_hw(hwdev); + if (ret) { + DRM_ERROR("Invalid HW configuration\n"); + goto query_hw_fail; + } + + version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID); + DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, + (version >> 12) & 0xf, (version >> 8) & 0xf); + + /* set the number of lines used for output of RGB data */ + ret = of_property_read_u8_array(dev->of_node, + "arm,malidp-output-port-lines", + output_width, MAX_OUTPUT_CHANNELS); + if (ret) + goto query_hw_fail; + + for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) + out_depth = (out_depth << 8) | (output_width[i] & 0xf); + malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); + + drm->dev_private = malidp; + dev_set_drvdata(dev, drm); + atomic_set(&malidp->config_valid, 0); + init_waitqueue_head(&malidp->wq); + + ret = malidp_init(drm); + if (ret < 0) + goto init_fail; + + ret = drm_dev_register(drm, 0); + if (ret) + goto register_fail; + + /* Set the CRTC's port so that the encoder component can find it */ + ep = of_graph_get_next_endpoint(dev->of_node, NULL); + if (!ep) { + ret = -EINVAL; + goto port_fail; + } + malidp->crtc.port = of_get_next_parent(ep); + + ret = component_bind_all(dev, drm); + if (ret) { + DRM_ERROR("Failed to bind all components\n"); + goto bind_fail; + } + + ret = malidp_irq_init(pdev); + if (ret < 0) + goto irq_init_fail; + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + DRM_ERROR("failed to initialise vblank\n"); + goto vblank_fail; + } + + drm_mode_config_reset(drm); + + malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + + if (IS_ERR(malidp->fbdev)) { + ret = 
PTR_ERR(malidp->fbdev); + malidp->fbdev = NULL; + goto fbdev_fail; + } + + drm_kms_helper_poll_init(drm); + return 0; + +fbdev_fail: + drm_vblank_cleanup(drm); +vblank_fail: + malidp_se_irq_fini(drm); + malidp_de_irq_fini(drm); +irq_init_fail: + component_unbind_all(dev, drm); +bind_fail: + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; +port_fail: + drm_dev_unregister(drm); +register_fail: + malidp_de_planes_destroy(drm); + drm_mode_config_cleanup(drm); +init_fail: + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); +query_hw_fail: + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + drm_dev_unref(drm); +alloc_fail: + of_reserved_mem_device_release(dev); + + return ret; +} + +static void malidp_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + if (malidp->fbdev) { + drm_fbdev_cma_fini(malidp->fbdev); + malidp->fbdev = NULL; + } + drm_kms_helper_poll_fini(drm); + malidp_se_irq_fini(drm); + malidp_de_irq_fini(drm); + drm_vblank_cleanup(drm); + component_unbind_all(dev, drm); + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; + drm_dev_unregister(drm); + malidp_de_planes_destroy(drm); + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + drm_dev_unref(drm); + of_reserved_mem_device_release(dev); +} + +static const struct component_master_ops malidp_master_ops = { + .bind = malidp_bind, + .unbind = malidp_unbind, +}; + +static int malidp_compare_dev(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static int malidp_platform_probe(struct platform_device *pdev) +{ + struct device_node *port, *ep; + struct component_match *match = NULL; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* there is only one output port inside each device, find it */ + ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (!ep) + return -ENODEV; + + if (!of_device_is_available(ep)) { + of_node_put(ep); + return -ENODEV; + } + + /* add the remote encoder port as component */ + port = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (!port || !of_device_is_available(port)) { + of_node_put(port); + return -EAGAIN; + } + + component_match_add(&pdev->dev, &match, malidp_compare_dev, port); + return component_master_add_with_match(&pdev->dev, &malidp_master_ops, + match); +} + +static int malidp_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &malidp_master_ops); + return 0; +} + +static struct platform_driver malidp_platform_driver = { + .probe = malidp_platform_probe, + .remove = malidp_platform_remove, + .driver = { + .name = "mali-dp", + .of_match_table = malidp_drm_of_match, + }, +}; + +module_platform_driver(malidp_platform_driver); + +MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>"); +MODULE_DESCRIPTION("ARM Mali DP DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h new file mode 100644 index 000000000000..95558fde214b --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -0,0 +1,54 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
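malidp_bind()/malidp_unbind() above only run once every device added to the component match has registered itself; the piece not shown in this patch is the encoder driver's side of that contract. A generic sketch of it (my_encoder_* names are invented; note how the drm_device arrives as the data argument that malidp_bind() passes to component_bind_all()):

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>

static int my_encoder_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;

	/* create the drm_encoder/connector against @drm here */
	return 0;
}

static void my_encoder_unbind(struct device *dev, struct device *master,
			      void *data)
{
	/* tear the encoder down */
}

static const struct component_ops my_encoder_component_ops = {
	.bind   = my_encoder_bind,
	.unbind = my_encoder_unbind,
};

static int my_encoder_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &my_encoder_component_ops);
}

static int my_encoder_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &my_encoder_component_ops);
	return 0;
}

Until that component_add() happens, malidp_platform_probe()'s component_master_add_with_match() simply leaves the master unbound, which is why the probe can return -EAGAIN-style deferrals without special handling.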
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures + */ + +#ifndef __MALIDP_DRV_H__ +#define __MALIDP_DRV_H__ + +#include <linux/mutex.h> +#include <linux/wait.h> +#include "malidp_hw.h" + +struct malidp_drm { + struct malidp_hw_device *dev; + struct drm_fbdev_cma *fbdev; + struct list_head event_list; + struct drm_crtc crtc; + wait_queue_head_t wq; + atomic_t config_valid; +}; + +#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) + +struct malidp_plane { + struct drm_plane base; + struct malidp_hw_device *hwdev; + const struct malidp_layer *layer; +}; + +struct malidp_plane_state { + struct drm_plane_state base; + + /* size of the required rotation memory if plane is rotated */ + u32 rotmem_size; +}; + +#define to_malidp_plane(x) container_of(x, struct malidp_plane, base) +#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base) + +int malidp_de_planes_init(struct drm_device *drm); +void malidp_de_planes_destroy(struct drm_device *drm); +int malidp_crtc_init(struct drm_device *drm); + +/* often used combination of rotational bits */ +#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)) + +#endif /* __MALIDP_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c new file mode 100644 index 000000000000..a6132f1d58c1 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -0,0 +1,691 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 hardware manipulation routines. 
This is where + * the difference between various versions of the hardware is being dealt with + * in an attempt to provide to the rest of the driver code a unified view + */ + +#include <linux/types.h> +#include <linux/io.h> +#include <drm/drmP.h> +#include <video/videomode.h> +#include <video/display_timing.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" + +static const struct malidp_input_format malidp500_de_formats[] = { + /* fourcc, layers supporting the format, internal id */ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 }, + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 }, + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 }, + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 }, + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 }, + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 }, + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 }, + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 }, + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 }, + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 }, + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 }, + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 }, + { DRM_FORMAT_UYVY, DE_VIDEO1, 12 }, + { DRM_FORMAT_YUYV, DE_VIDEO1, 13 }, + { DRM_FORMAT_NV12, DE_VIDEO1, 14 }, + { DRM_FORMAT_YUV420, DE_VIDEO1, 15 }, +}; + +#define MALIDP_ID(__group, __format) \ + ((((__group) & 0x7) << 3) | ((__format) & 0x7)) + +#define MALIDP_COMMON_FORMATS \ + /* fourcc, layers supporting the format, internal id */ \ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \ + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \ + { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \ + { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \ + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \ + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \ + { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \ + { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \ + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \ + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \ + { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \ + { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \ + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \ + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \ + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \ + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \ + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \ + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \ + { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \ + { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \ + { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ + { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) } + 
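The MALIDP_ID() macro above packs a 3-bit format group and a 3-bit index within the group into the 6-bit hardware format id used by the DP550/DP650 table. The encoding is easy to spot-check in plain C:

#include <assert.h>

#define MALIDP_ID(g, f) ((((g) & 0x7) << 3) | ((f) & 0x7))

int main(void)
{
	assert(MALIDP_ID(1, 0) == 0x08);	/* ARGB8888 */
	assert(MALIDP_ID(4, 2) == 0x22);	/* RGB565   */
	assert(MALIDP_ID(5, 7) == 0x2f);	/* YUV420   */
	return 0;
}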
+static const struct malidp_input_format malidp550_de_formats[] = { + MALIDP_COMMON_FORMATS, +}; + +static const struct malidp_layer malidp500_layers[] = { + { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE }, + { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE }, + { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE }, +}; + +static const struct malidp_layer malidp550_layers[] = { + { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE }, + { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE }, + { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE }, +}; + +#define MALIDP_DE_DEFAULT_PREFETCH_START 5 + +static int malidp500_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID); + /* bit 4 of the CONFIG_ID register holds the line size multiplier */ + u8 ln_size_mult = conf & 0x10 ? 2 : 1; + + hwdev->min_line_size = 2; + hwdev->max_line_size = SZ_2K * ln_size_mult; + hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult; + hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */ + + return 0; +} + +static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp500_set_config_valid(struct malidp_hw_device *hwdev) +{ + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); +} + +static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode) +{ + u32 val = 0; + + malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP500_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP500_VSYNCPOL; + val |= MALIDP_DE_DEFAULT_PREFETCH_START; + malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL); + + /* + * Mali-DP500 encodes the background color like this: + * - red @ MALIDP500_BGND_COLOR[12:0] + * - green @ MALIDP500_BGND_COLOR[27:16] + * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0] + */ + val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) | + (MALIDP_BGND_COLOR_R & 0xfff); + malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR); + malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + 
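The enter/leave config-mode helpers above are instances of the usual bounded register poll: set or clear a request bit, then re-read a status register with a sleep between reads and a hard cap on iterations. Reduced to one hypothetical helper (poll_status is not a driver function):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* poll until (status & mask) == want, or give up after @tries iterations */
static int poll_status(void __iomem *reg, u32 mask, u32 want, unsigned int tries)
{
	while (tries--) {
		if ((readl(reg) & mask) == want)
			return 0;
		/* entering config mode can take a full frame, hence the sleep */
		usleep_range(1000, 10000);
	}
	return -ETIMEDOUT;
}

The driver WARN()s on timeout instead of propagating an error, since by that point in a modeset there is little the caller could do but carry on.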
MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +{ + unsigned int depth; + int bpp; + + /* RGB888 or BGR888 can't be rotated */ + if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888)) + return -EINVAL; + + /* + * Each layer needs enough rotation memory to fit 8 lines + * worth of pixel data. Required size is then: + * size = rotated_width * (bpp / 8) * 8; + */ + drm_fb_get_bpp_depth(fmt, &depth, &bpp); + + return w * bpp; +} + +static int malidp550_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 2; + + switch (ln_size) { + case 0: + hwdev->max_line_size = SZ_2K; + /* two banks of 64KB for rotation memory */ + rsize = 64; + break; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 2: + hwdev->max_line_size = 1280; + /* two banks of 40KB for rotation memory */ + rsize = 40; + break; + case 3: + /* reserved value */ + hwdev->max_line_size = 0; + return -EINVAL; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp550_set_config_valid(struct malidp_hw_device *hwdev) +{ + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); +} + +static void malidp550_modeset(struct malidp_hw_device *hwdev, struct 
videomode *mode) +{ + u32 val = MALIDP_DE_DEFAULT_PREFETCH_START; + + malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL); + /* + * Mali-DP550 and Mali-DP650 encode the background color like this: + * - red @ MALIDP550_DE_BGND_COLOR[23:16] + * - green @ MALIDP550_DE_BGND_COLOR[15:8] + * - blue @ MALIDP550_DE_BGND_COLOR[7:0] + * + * We need to truncate the least significant 4 bits from the default + * MALIDP_BGND_COLOR_x values + */ + val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) | + (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) | + ((MALIDP_BGND_COLOR_B >> 4) & 0xff); + malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP550_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP550_VSYNCPOL; + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +{ + u32 bytes_per_col; + + /* raw RGB888 or BGR888 can't be rotated */ + if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888)) + return -EINVAL; + + switch (fmt) { + /* 8 lines at 4 bytes per pixel */ + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YUYV: + bytes_per_col = 32; + break; + /* 16 lines at 1.5 bytes per pixel */ + case DRM_FORMAT_NV12: + case DRM_FORMAT_YUV420: + bytes_per_col = 24; + break; + default: + return -EINVAL; + } + + return w * bytes_per_col; +} + +static int malidp650_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 4; + + switch (ln_size) { + case 0: + case 2: + /* reserved values */ + hwdev->max_line_size = 0; + return -EINVAL; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 3: + hwdev->max_line_size = 2560; + /* two banks of 80KB for rotation memory */ + rsize = 80; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { + [MALIDP_500] = { + .map = { + .se_base = MALIDP500_SE_BASE, + 
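The two rotmem_required() implementations encode different line-store depths: DP500 buffers 8 lines at the format's native bytes-per-pixel (its w * bpp return works out in bytes, per the comment above it), while DP550/DP650 buffer 16 lines, folded into the bytes_per_col constants. Spot-checking the arithmetic for a 1920-pixel-wide RGB565 plane:

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920;

	printf("DP500:  %u bytes\n", w * 16);	/* 8 lines * 2 B/px = 30720  */
	printf("DP550+: %u bytes\n", w * 32);	/* 16 lines * 2 B/px = 61440 */
	return 0;				/* either fits one 64 KB bank */
}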
.dc_base = MALIDP500_DC_BASE, + .out_depth_base = MALIDP500_OUTPUT_DEPTH, + .features = 0, /* no CLEARIRQ register */ + .n_layers = ARRAY_SIZE(malidp500_layers), + .layers = malidp500_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP500_DE_IRQ_AXI_ERR | + MALIDP500_DE_IRQ_VSYNC | + MALIDP500_DE_IRQ_GLOBAL, + .vsync_irq = MALIDP500_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, + .vsync_irq = 0, + }, + .dc_irq_map = { + .irq_mask = MALIDP500_DE_IRQ_CONF_VALID, + .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID, + }, + .input_formats = malidp500_de_formats, + .n_input_formats = ARRAY_SIZE(malidp500_de_formats), + }, + .query_hw = malidp500_query_hw, + .enter_config_mode = malidp500_enter_config_mode, + .leave_config_mode = malidp500_leave_config_mode, + .in_config_mode = malidp500_in_config_mode, + .set_config_valid = malidp500_set_config_valid, + .modeset = malidp500_modeset, + .rotmem_required = malidp500_rotmem_required, + }, + [MALIDP_550] = { + .map = { + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .n_layers = ARRAY_SIZE(malidp550_layers), + .layers = malidp550_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW | + MALIDP550_SE_IRQ_AXI_ERR, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .input_formats = malidp550_de_formats, + .n_input_formats = ARRAY_SIZE(malidp550_de_formats), + }, + .query_hw = malidp550_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp550_rotmem_required, + }, + [MALIDP_650] = { + .map = { + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .n_layers = ARRAY_SIZE(malidp550_layers), + .layers = malidp550_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP650_DE_IRQ_DRIFT | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW | + MALIDP550_SE_IRQ_AXI_ERR, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .input_formats = malidp550_de_formats, + .n_input_formats = ARRAY_SIZE(malidp550_de_formats), + }, + .query_hw = malidp650_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp550_rotmem_required, + }, +}; + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format) +{ + unsigned int i; + + for (i = 0; i < map->n_input_formats; i++) { + if (((map->input_formats[i].layer & layer_id) == layer_id) && + (map->input_formats[i].format == format)) + return map->input_formats[i].id; + } + + return MALIDP_INVALID_FORMAT_ID; +} + +static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + if (hwdev->map.features & 
MALIDP_REGMAP_HAS_CLEARIRQ) + malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); + else + malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); +} + +static irqreturn_t malidp_de_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev; + const struct malidp_irq_map *de; + u32 status, mask, dc_status; + irqreturn_t ret = IRQ_NONE; + + if (!drm->dev_private) + return IRQ_HANDLED; + + hwdev = malidp->dev; + de = &hwdev->map.de_irq_map; + + /* first handle the config valid IRQ */ + dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if (dc_status & hwdev->map.dc_irq_map.vsync_irq) { + /* we have a page flip event */ + atomic_set(&malidp->config_valid, 1); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); + ret = IRQ_WAKE_THREAD; + } + + status = malidp_hw_read(hwdev, MALIDP_REG_STATUS); + if (!(status & de->irq_mask)) + return ret; + + mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ); + status &= mask; + if (status & de->vsync_irq) + drm_crtc_handle_vblank(&malidp->crtc); + + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status); + + return (ret == IRQ_NONE) ? IRQ_HANDLED : ret; +} + +static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + + wake_up(&malidp->wq); + + return IRQ_HANDLED; +} + +int malidp_de_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq, + malidp_de_irq_thread_handler, + IRQF_SHARED, "malidp-de", drm); + if (ret < 0) { + DRM_ERROR("failed to install DE IRQ handler\n"); + return ret; + } + + /* first enable the DC block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->map.dc_irq_map.irq_mask); + + /* now enable the DE block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.irq_mask); + + return 0; +} + +void malidp_de_irq_fini(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.irq_mask); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->map.dc_irq_map.irq_mask); +} + +static irqreturn_t malidp_se_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + u32 status, mask; + + status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); + if (!(status & hwdev->map.se_irq_map.irq_mask)) + return IRQ_NONE; + + mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ); + status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); + status &= mask; + /* ToDo: status decoding and firing up of VSYNC and page flip events */ + + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status); + + return IRQ_HANDLED; +} + +static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg) +{ + return IRQ_HANDLED; +} + +int malidp_se_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = 
drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq, + malidp_se_irq_thread_handler, + IRQF_SHARED, "malidp-se", drm); + if (ret < 0) { + DRM_ERROR("failed to install SE IRQ handler\n"); + return ret; + } + + malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->map.se_irq_map.irq_mask); + + return 0; +} + +void malidp_se_irq_fini(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->map.se_irq_map.irq_mask); +} diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h new file mode 100644 index 000000000000..141743e9f3a6 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -0,0 +1,241 @@ +/* + * + * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP hardware manipulation routines. + */ + +#ifndef __MALIDP_HW_H__ +#define __MALIDP_HW_H__ + +#include <linux/bitops.h> +#include "malidp_regs.h" + +struct videomode; +struct clk; + +/* Mali DP IP blocks */ +enum { + MALIDP_DE_BLOCK = 0, + MALIDP_SE_BLOCK, + MALIDP_DC_BLOCK +}; + +/* Mali DP layer IDs */ +enum { + DE_VIDEO1 = BIT(0), + DE_GRAPHICS1 = BIT(1), + DE_GRAPHICS2 = BIT(2), /* used only in DP500 */ + DE_VIDEO2 = BIT(3), + DE_SMART = BIT(4), +}; + +struct malidp_input_format { + u32 format; /* DRM fourcc */ + u8 layer; /* bitmask of layers supporting it */ + u8 id; /* used internally */ +}; + +#define MALIDP_INVALID_FORMAT_ID 0xff + +/* + * hide the differences between register maps + * by using a common structure to hold the + * base register offsets + */ + +struct malidp_irq_map { + u32 irq_mask; /* mask of IRQs that can be enabled in the block */ + u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */ +}; + +struct malidp_layer { + u16 id; /* layer ID */ + u16 base; /* address offset for the register bank */ + u16 ptr; /* address offset for the pointer register */ +}; + +/* regmap features */ +#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) + +struct malidp_hw_regmap { + /* address offset of the DE register bank */ + /* is always 0x0000 */ + /* address offset of the SE registers bank */ + const u16 se_base; + /* address offset of the DC registers bank */ + const u16 dc_base; + + /* address offset for the output depth register */ + const u16 out_depth_base; + + /* bitmap with register map features */ + const u8 features; + + /* list of supported layers */ + const u8 n_layers; + const struct malidp_layer *layers; + + const struct malidp_irq_map de_irq_map; + const struct malidp_irq_map se_irq_map; + const struct malidp_irq_map dc_irq_map; + + /* list of supported input formats for each layer */ + const struct malidp_input_format *input_formats; + const u8 n_input_formats; +}; + +struct malidp_hw_device { + const struct malidp_hw_regmap map; + void __iomem *regs; + + /* APB clock */ + struct clk *pclk; + /* AXI clock */ + struct clk *aclk; + /* main clock for display core */ + struct clk *mclk; + /* pixel clock for display core */ + struct clk *pxlclk; + + /* 
+ * Validate the driver instance against the hardware bits + */ + int (*query_hw)(struct malidp_hw_device *hwdev); + + /* + * Set the hardware into config mode, ready to accept mode changes + */ + void (*enter_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Tell hardware to exit configuration mode + */ + void (*leave_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Query if hardware is in configuration mode + */ + bool (*in_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Set configuration valid flag for hardware parameters that can + * be changed outside the configuration mode. Hardware will use + * the new settings when config valid is set after the end of the + * current buffer scanout + */ + void (*set_config_valid)(struct malidp_hw_device *hwdev); + + /* + * Set a new mode in hardware. Requires the hardware to be in + * configuration mode before this function is called. + */ + void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m); + + /* + * Calculate the required rotation memory given the active area + * and the buffer format. + */ + int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); + + u8 features; + + u8 min_line_size; + u16 max_line_size; + + /* size of memory used for rotating layers, up to two banks available */ + u32 rotation_memory[2]; +}; + +/* Supported variants of the hardware */ +enum { + MALIDP_500 = 0, + MALIDP_550, + MALIDP_650, + /* keep the next entry last */ + MALIDP_MAX_DEVICES +}; + +extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES]; + +static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) +{ + return readl(hwdev->regs + reg); +} + +static inline void malidp_hw_write(struct malidp_hw_device *hwdev, + u32 value, u32 reg) +{ + writel(value, hwdev->regs + reg); +} + +static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data |= mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data &= ~mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev, + u8 block) +{ + switch (block) { + case MALIDP_SE_BLOCK: + return hwdev->map.se_base; + case MALIDP_DC_BLOCK: + return hwdev->map.dc_base; + } + + return 0; +} + +static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +int malidp_de_irq_init(struct drm_device *drm, int irq); +void malidp_de_irq_fini(struct drm_device *drm); +int malidp_se_irq_init(struct drm_device *drm, int irq); +void malidp_se_irq_fini(struct drm_device *drm); + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format); + +/* + * background color components are defined as 12bits values, + * they will be shifted right when stored on hardware that + * supports only 8bits per channel + */ +#define MALIDP_BGND_COLOR_R 0x000 +#define MALIDP_BGND_COLOR_G 0x000 +#define MALIDP_BGND_COLOR_B 0x000 + +#endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_planes.c 
b/drivers/gpu/drm/arm/malidp_planes.c new file mode 100644 index 000000000000..725098d6179a --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -0,0 +1,298 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP plane manipulation routines. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> + +#include "malidp_hw.h" +#include "malidp_drv.h" + +/* Layer specific register offsets */ +#define MALIDP_LAYER_FORMAT 0x000 +#define MALIDP_LAYER_CONTROL 0x004 +#define LAYER_ENABLE (1 << 0) +#define LAYER_ROT_OFFSET 8 +#define LAYER_H_FLIP (1 << 10) +#define LAYER_V_FLIP (1 << 11) +#define LAYER_ROT_MASK (0xf << 8) +#define MALIDP_LAYER_SIZE 0x00c +#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0) +#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) +#define MALIDP_LAYER_COMP_SIZE 0x010 +#define MALIDP_LAYER_OFFSET 0x014 +#define MALIDP_LAYER_STRIDE 0x018 + +static void malidp_de_plane_destroy(struct drm_plane *plane) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + + if (mp->base.fb) + drm_framebuffer_unreference(mp->base.fb); + + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); + devm_kfree(plane->dev->dev, mp); +} + +struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) +{ + struct malidp_plane_state *state, *m_state; + + if (!plane->state) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) { + m_state = to_malidp_plane_state(plane->state); + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + state->rotmem_size = m_state->rotmem_size; + } + + return &state->base; +} + +void malidp_destroy_plane_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane_state *m_state = to_malidp_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(m_state); +} + +static const struct drm_plane_funcs malidp_de_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = malidp_de_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = malidp_duplicate_plane_state, + .atomic_destroy_state = malidp_destroy_plane_state, +}; + +static int malidp_de_plane_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(state); + u8 format_id; + u32 src_w, src_h; + + if (!state->crtc || !state->fb) + return 0; + + format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id, + state->fb->pixel_format); + if (format_id == MALIDP_INVALID_FORMAT_ID) + return -EINVAL; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + if ((state->crtc_w > mp->hwdev->max_line_size) || + (state->crtc_h > mp->hwdev->max_line_size) || + (state->crtc_w < mp->hwdev->min_line_size) || + (state->crtc_h < mp->hwdev->min_line_size) || + (state->crtc_w != src_w) || (state->crtc_h != src_h)) + return -EINVAL; + + /* packed RGB888 / BGR888 can't be rotated or flipped */ + if (state->rotation != BIT(DRM_ROTATE_0) && + (state->fb->pixel_format == 
DRM_FORMAT_RGB888 || + state->fb->pixel_format == DRM_FORMAT_BGR888)) + return -EINVAL; + + ms->rotmem_size = 0; + if (state->rotation & MALIDP_ROTATED_MASK) { + int val; + + val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h, + state->crtc_w, + state->fb->pixel_format); + if (val < 0) + return val; + + ms->rotmem_size = val; + } + + return 0; +} + +static void malidp_de_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_gem_cma_object *obj; + struct malidp_plane *mp; + const struct malidp_hw_regmap *map; + u8 format_id; + u16 ptr; + u32 format, src_w, src_h, dest_w, dest_h, val = 0; + int num_planes, i; + + mp = to_malidp_plane(plane); + + map = &mp->hwdev->map; + format = plane->state->fb->pixel_format; + format_id = malidp_hw_get_format_id(map, mp->layer->id, format); + num_planes = drm_format_num_planes(format); + + /* convert src values from Q16 fixed point to integer */ + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + if (plane->state->rotation & MALIDP_ROTATED_MASK) { + dest_w = plane->state->crtc_h; + dest_h = plane->state->crtc_w; + } else { + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; + } + + malidp_hw_write(mp->hwdev, format_id, mp->layer->base); + + for (i = 0; i < num_planes; i++) { + /* calculate the offset for the layer's plane registers */ + ptr = mp->layer->ptr + (i << 4); + + obj = drm_fb_cma_get_gem_obj(plane->state->fb, i); + malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr); + malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4); + malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i], + mp->layer->base + MALIDP_LAYER_STRIDE); + } + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), + mp->layer->base + MALIDP_LAYER_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h), + mp->layer->base + MALIDP_LAYER_COMP_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) | + LAYER_V_VAL(plane->state->crtc_y), + mp->layer->base + MALIDP_LAYER_OFFSET); + + /* first clear the rotation bits in the register */ + malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK, + mp->layer->base + MALIDP_LAYER_CONTROL); + + /* setup the rotation and axis flip bits */ + if (plane->state->rotation & DRM_ROTATE_MASK) + val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET; + if (plane->state->rotation & BIT(DRM_REFLECT_X)) + val |= LAYER_V_FLIP; + if (plane->state->rotation & BIT(DRM_REFLECT_Y)) + val |= LAYER_H_FLIP; + + /* set the 'enable layer' bit */ + val |= LAYER_ENABLE; + + malidp_hw_setbits(mp->hwdev, val, + mp->layer->base + MALIDP_LAYER_CONTROL); +} + +static void malidp_de_plane_disable(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + + malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE, + mp->layer->base + MALIDP_LAYER_CONTROL); +} + +static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = { + .atomic_check = malidp_de_plane_check, + .atomic_update = malidp_de_plane_update, + .atomic_disable = malidp_de_plane_disable, +}; + +int malidp_de_planes_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + const struct malidp_hw_regmap *map = &malidp->dev->map; + struct malidp_plane *plane = NULL; + enum drm_plane_type plane_type; + unsigned long crtcs = 1 << drm->mode_config.num_crtc; + u32 *formats; + int ret, i, j, n; + + formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL); + if 
(!formats) { + ret = -ENOMEM; + goto cleanup; + } + + for (i = 0; i < map->n_layers; i++) { + u8 id = map->layers[i].id; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) { + ret = -ENOMEM; + goto cleanup; + } + + /* build the list of DRM supported formats based on the map */ + for (n = 0, j = 0; j < map->n_input_formats; j++) { + if ((map->input_formats[j].layer & id) == id) + formats[n++] = map->input_formats[j].format; + } + + plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + ret = drm_universal_plane_init(drm, &plane->base, crtcs, + &malidp_de_plane_funcs, formats, + n, plane_type, NULL); + if (ret < 0) + goto cleanup; + + if (!drm->mode_config.rotation_property) { + unsigned long flags = BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_90) | + BIT(DRM_ROTATE_180) | + BIT(DRM_ROTATE_270) | + BIT(DRM_REFLECT_X) | + BIT(DRM_REFLECT_Y); + drm->mode_config.rotation_property = + drm_mode_create_rotation_property(drm, flags); + } + /* SMART layer can't be rotated */ + if (drm->mode_config.rotation_property && (id != DE_SMART)) + drm_object_attach_property(&plane->base.base, + drm->mode_config.rotation_property, + BIT(DRM_ROTATE_0)); + + drm_plane_helper_add(&plane->base, + &malidp_de_plane_helper_funcs); + plane->hwdev = malidp->dev; + plane->layer = &map->layers[i]; + } + + kfree(formats); + + return 0; + +cleanup: + malidp_de_planes_destroy(drm); + kfree(formats); + + return ret; +} + +void malidp_de_planes_destroy(struct drm_device *drm) +{ + struct drm_plane *p, *pt; + + list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) { + drm_plane_cleanup(p); + kfree(p); + } +} diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h new file mode 100644 index 000000000000..73fecb38f955 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -0,0 +1,172 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 registers definition. 
+ */ + +#ifndef __MALIDP_REGS_H__ +#define __MALIDP_REGS_H__ + +/* + * abbreviations used: + * - DC - display core (general settings) + * - DE - display engine + * - SE - scaling engine + */ + +/* interrupt bit masks */ +#define MALIDP_DE_IRQ_UNDERRUN (1 << 0) + +#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4) +#define MALIDP500_DE_IRQ_VSYNC (1 << 5) +#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6) +#define MALIDP500_DE_IRQ_SATURATION (1 << 7) +#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8) +#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11) +#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19) +#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24) +#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_DE_IRQ_GLOBAL (1 << 31) +#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0) +#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4) +#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5) +#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8) +#define MALIDP500_SE_IRQ_OVERRUN (1 << 9) +#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12) +#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13) +#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_SE_IRQ_GLOBAL (1 << 31) + +#define MALIDP550_DE_IRQ_SATURATION (1 << 8) +#define MALIDP550_DE_IRQ_VSYNC (1 << 12) +#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13) +#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_SE_IRQ_EOW (1 << 0) +#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0) +#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4) +#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16) +#define MALIDP550_DC_IRQ_DE (1 << 20) +#define MALIDP550_DC_IRQ_SE (1 << 24) + +#define MALIDP650_DE_IRQ_DRIFT (1 << 4) + +/* bit masks that are common between products */ +#define MALIDP_CFG_VALID (1 << 0) +#define MALIDP_DISP_FUNC_ILACED (1 << 8) + +/* register offsets for IRQ management */ +#define MALIDP_REG_STATUS 0x00000 +#define MALIDP_REG_SETIRQ 0x00004 +#define MALIDP_REG_MASKIRQ 0x00008 +#define MALIDP_REG_CLEARIRQ 0x0000c + +/* register offsets */ +#define MALIDP_DE_CORE_ID 0x00018 +#define MALIDP_DE_DISPLAY_FUNC 0x00020 + +/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */ +#define MALIDP_DE_H_TIMINGS 0x0 +#define MALIDP_DE_V_TIMINGS 0x4 +#define MALIDP_DE_SYNC_WIDTH 0x8 +#define MALIDP_DE_HV_ACTIVE 0xc + +/* macros to set values into registers */ +#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16) +#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0) +#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0) +#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0) +#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16) + +/* register offsets and bits specific to DP500 */ +#define MALIDP500_DC_BASE 0x00000 +#define MALIDP500_DC_CONTROL 0x0000c +#define MALIDP500_DC_CONFIG_REQ (1 << 17) +#define MALIDP500_HSYNCPOL (1 << 20) +#define MALIDP500_VSYNCPOL (1 << 21) +#define MALIDP500_DC_CLEAR_MASK 0x300fff +#define MALIDP500_DE_LINE_COUNTER 0x00010 +#define MALIDP500_DE_AXI_CONTROL 0x00014 +#define MALIDP500_DE_SECURE_CTRL 0x0001c +#define MALIDP500_DE_CHROMA_KEY 0x00024 +#define MALIDP500_TIMINGS_BASE 0x00028 + +#define MALIDP500_CONFIG_3D 0x00038 +#define 
MALIDP500_BGND_COLOR 0x0003c +#define MALIDP500_OUTPUT_DEPTH 0x00044 +#define MALIDP500_YUV_RGB_COEF 0x00048 +#define MALIDP500_COLOR_ADJ_COEF 0x00078 +#define MALIDP500_COEF_TABLE_ADDR 0x000a8 +#define MALIDP500_COEF_TABLE_DATA 0x000ac +#define MALIDP500_DE_LV_BASE 0x00100 +#define MALIDP500_DE_LV_PTR_BASE 0x00124 +#define MALIDP500_DE_LG1_BASE 0x00200 +#define MALIDP500_DE_LG1_PTR_BASE 0x0021c +#define MALIDP500_DE_LG2_BASE 0x00300 +#define MALIDP500_DE_LG2_PTR_BASE 0x0031c +#define MALIDP500_SE_BASE 0x00c00 +#define MALIDP500_SE_PTR_BASE 0x00e0c +#define MALIDP500_DC_IRQ_BASE 0x00f00 +#define MALIDP500_CONFIG_VALID 0x00f00 +#define MALIDP500_CONFIG_ID 0x00fd4 + +/* register offsets and bits specific to DP550/DP650 */ +#define MALIDP550_DE_CONTROL 0x00010 +#define MALIDP550_DE_LINE_COUNTER 0x00014 +#define MALIDP550_DE_AXI_CONTROL 0x00018 +#define MALIDP550_DE_QOS 0x0001c +#define MALIDP550_TIMINGS_BASE 0x00030 +#define MALIDP550_HSYNCPOL (1 << 12) +#define MALIDP550_VSYNCPOL (1 << 28) + +#define MALIDP550_DE_DISP_SIDEBAND 0x00040 +#define MALIDP550_DE_BGND_COLOR 0x00044 +#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c +#define MALIDP550_DE_COLOR_COEF 0x00050 +#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080 +#define MALIDP550_DE_COEF_TABLE_DATA 0x00084 +#define MALIDP550_DE_LV1_BASE 0x00100 +#define MALIDP550_DE_LV1_PTR_BASE 0x00124 +#define MALIDP550_DE_LV2_BASE 0x00200 +#define MALIDP550_DE_LV2_PTR_BASE 0x00224 +#define MALIDP550_DE_LG_BASE 0x00300 +#define MALIDP550_DE_LG_PTR_BASE 0x0031c +#define MALIDP550_DE_LS_BASE 0x00400 +#define MALIDP550_DE_LS_PTR_BASE 0x0042c +#define MALIDP550_DE_PERF_BASE 0x00500 +#define MALIDP550_SE_BASE 0x08000 +#define MALIDP550_DC_BASE 0x0c000 +#define MALIDP550_DC_CONTROL 0x0c010 +#define MALIDP550_DC_CONFIG_REQ (1 << 16) +#define MALIDP550_CONFIG_VALID 0x0c014 +#define MALIDP550_CONFIG_ID 0x0ffd4 + +/* + * Starting with DP550 the register map blocks has been standardised to the + * following layout: + * + * Offset Block registers + * 0x00000 Display Engine + * 0x08000 Scaling Engine + * 0x0c000 Display Core + * 0x10000 Secure control + * + * The old DP500 IP mixes some DC with the DE registers, hence the need + * for a mapping structure. + */ + +#endif /* __MALIDP_REGS_H__ */ diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index eb773e9af313..15f3ecfb16f1 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig @@ -1,11 +1,7 @@ config DRM_ARMADA tristate "DRM support for Marvell Armada SoCs" depends on DRM && HAVE_CLK && ARM - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER help Support the "LCD" controllers found on the Marvell Armada 510 devices. There are two controllers on the device, each controller diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 3130aa8bcdd0..2f58e9e2a59c 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc, /* Handle any pending frame work. 
*/ if (work) { work->fn(dcrtc, plane, work); - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); } wake_up(&plane->frame_wait); @@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, { int ret; - ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + ret = drm_crtc_vblank_get(&dcrtc->crtc); if (ret) { DRM_ERROR("failed to acquire vblank counter\n"); return ret; @@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0; if (ret) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return ret; } @@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel( struct armada_plane_work *work = xchg(&plane->work, NULL); if (work) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return work; } @@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc, if (fwork->event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, dcrtc->num, fwork->event); + drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num); if (stat & VSYNC_IRQ) - drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_handle_vblank(&dcrtc->crtc); spin_lock(&dcrtc->irq_lock); ovl_plane = dcrtc->plane; @@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, if (interlaced ^ dcrtc->interlaced) { if (adj->flags & DRM_MODE_FLAG_INTERLACE) - drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_get(&dcrtc->crtc); else - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); dcrtc->interlaced = interlaced; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 439824a61aa5..f5ebdd681445 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = { .load = armada_drm_load, .lastclose = armada_drm_lastclose, .unload = armada_drm_unload, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = armada_drm_enable_vblank, .disable_vblank = armada_drm_disable_vblank, @@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = { .debugfs_init = armada_drm_debugfs_init, .debugfs_cleanup = armada_drm_debugfs_cleanup, #endif - .gem_free_object = armada_gem_free_object, + .gem_free_object_unlocked = armada_gem_free_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = armada_gem_prime_export, diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 88e7fc797721..cb8f0347b934 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, obj->dev_addr = DMA_ERROR_CODE; - mapping = file_inode(obj->obj.filp)->i_mapping; + mapping = obj->obj.filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); @@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, if (sg_alloc_table(sgt, count, GFP_KERNEL)) goto free_sgt; - mapping = 
file_inode(dobj->obj.filp)->i_mapping; + mapping = dobj->obj.filp->f_mapping; for_each_sg(sgt->sgl, sg, count, i) { struct page *page; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 148e8a42b2c6..1ee707ef6b8d 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, int ret; ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + BIT(DRM_ROTATE_0), 0, INT_MAX, true, false, &visible); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index 8a784c460c89..15f6ce7acb2a 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -2,11 +2,7 @@ config DRM_AST tristate "AST server chips" depends on DRM && PCI select DRM_TTM - select FB_SYS_COPYAREA - select FB_SYS_FILLRECT - select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM help Say yes for experimental AST GPU driver. Do not enable diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index fcd9c0714836..f54afd2113a9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -209,7 +209,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = ast_gem_free_object, + .gem_free_object_unlocked = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 5320f8c57884..c017a9330a18 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; - u32 bpp, depth; u32 size; struct drm_gem_object *gobj; - int ret = 0; - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); size = mode_cmd->pitches[0] * mode_cmd->height; ret = ast_gem_create(dev, size, true, &gobj); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 7bc3aa6dda8c..904beaa932d0 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev) static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); - if (ast_fb->obj) - drm_gem_object_unreference_unlocked(ast_fb->obj); + drm_gem_object_unreference_unlocked(ast_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c337922606e3..5957c3e659fe 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc) } -static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct ast_crtc *ast_crtc = to_ast_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { ast_crtc->lut_r[i] = red[i] >> 8; ast_crtc->lut_g[i] = green[i] >> 8; ast_crtc->lut_b[i] = blue[i] >> 8; } ast_crtc_load_lut(crtc); + + return 0; } diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 59f2f93b6f84..b29a41218fc9 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg * { } -static int ast_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void ast_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = { .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, .init_mem_type = ast_bo_init_mem_type, .evict_flags = ast_bo_evict_flags, - .move = ast_bo_move, + .move = NULL, .verify_access = ast_bo_verify_access, .io_mem_reserve = &ast_ttm_io_mem_reserve, .io_mem_free = &ast_ttm_io_mem_free, diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 99b4f0698a30..32bcc4bad06a 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER select DRM_PANEL help diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index bd12231ab0cd..a978381ef95b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) spin_lock_irqsave(&dev->event_lock, flags); if (crtc->event) { - drm_send_vblank_event(dev, crtc->id, crtc->event); - drm_vblank_put(dev, crtc->id); + drm_crtc_send_vblank_event(&crtc->base, crtc->event); + drm_crtc_vblank_put(&crtc->base); crtc->event = NULL; } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) void atmel_hlcdc_crtc_irq(struct drm_crtc *c) { - drm_handle_vblank(c->dev, 0); + drm_crtc_handle_vblank(c); atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 8ded7645747e..d4a3d61b7b06 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. 
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (async) queue_work(dc->wq, &commit->work); @@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev) destroy_workqueue(dc->wq); } -static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev) -{ - mutex_lock(&dev->mode_config.mutex); - drm_connector_unregister_all(dev); - mutex_unlock(&dev->mode_config.mutex); -} - static void atmel_hlcdc_dc_lastclose(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; @@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = atmel_hlcdc_dc_enable_vblank, .disable_vblank = atmel_hlcdc_dc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) if (ret) goto err_unload; - ret = drm_connector_register_all(ddev); - if (ret) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(ddev); - err_unload: atmel_hlcdc_dc_unload(ddev); @@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) { struct drm_device *ddev = platform_get_drvdata(pdev); - atmel_hlcdc_dc_connector_unplug_all(ddev); drm_dev_unregister(ddev); atmel_hlcdc_dc_unload(ddev); drm_dev_unref(ddev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 3d34fc4ca826..6119b5085501 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector, return atmel_hlcdc_dc_mode_valid(rgb->dc, mode); } - - -static struct drm_encoder * -atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector) -{ - struct atmel_hlcdc_rgb_output *rgb = - drm_connector_to_atmel_hlcdc_rgb_output(connector); - - return &rgb->encoder; -} - static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { .get_modes = atmel_hlcdc_panel_get_modes, .mode_valid = atmel_hlcdc_rgb_mode_valid, - .best_encoder = atmel_hlcdc_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index 5f8b0c2b9a44..f739763f47ce 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig @@ -2,10 +2,6 @@ config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT select DRM_TTM help Choose this option for qemu. 
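The armada and atmel-hlcdc hunks above all make the same mechanical conversion, replacing the legacy (dev, pipe) vblank calls with their drm_crtc_* counterparts. A minimal sketch of the resulting pattern, with a hypothetical my_crtc wrapper standing in for the drivers' own CRTC structs (not a name from the patch):

        /*
         * Illustrative only: a page-flip completion helper written against
         * the drm_crtc_* vblank API that the hunks above convert to.
         */
        #include <drm/drmP.h>

        struct my_crtc {
                struct drm_crtc base;
                struct drm_pending_vblank_event *event;
        };

        static void my_crtc_finish_page_flip(struct my_crtc *mc)
        {
                struct drm_device *dev = mc->base.dev;
                unsigned long flags;

                spin_lock_irqsave(&dev->event_lock, flags);
                if (mc->event) {
                        /* both helpers take the CRTC itself, not a (dev, pipe) pair */
                        drm_crtc_send_vblank_event(&mc->base, mc->event);
                        drm_crtc_vblank_put(&mc->base);
                        mc->event = NULL;
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        static void my_crtc_vblank_irq(struct my_crtc *mc)
        {
                drm_crtc_handle_vblank(&mc->base);
                my_crtc_finish_page_flip(mc);
        }

Passing the CRTC lets the helpers derive the pipe index internally, removing the dcrtc->num / crtc->id bookkeeping that the old calls required.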
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index b332b4d3b0e2..abace82de6ea 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - .gem_free_object = bochs_gem_free_object, + .gem_free_object_unlocked = bochs_gem_free_object, .dumb_create = bochs_dumb_create, .dumb_map_offset = bochs_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 6cf912c45e48..5c5638a777a1 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev, { } -static int bochs_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); -} - - static void bochs_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = { .ttm_tt_unpopulate = ttm_pool_unpopulate, .init_mem_type = bochs_bo_init_mem_type, .evict_flags = bochs_bo_evict_flags, - .move = bochs_bo_move, + .move = NULL, .verify_access = bochs_bo_verify_access, .io_mem_reserve = &bochs_ttm_io_mem_reserve, .io_mem_free = &bochs_ttm_io_mem_free, @@ -474,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb); - if (bochs_fb->obj) - drm_gem_object_unreference_unlocked(bochs_fb->obj); + + drm_gem_object_unreference_unlocked(bochs_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 8f7423f18da5..b590e678052d 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -50,6 +50,25 @@ config DRM_PARADE_PS8622 ---help--- Parade eDP-LVDS bridge chip driver. +config DRM_SII902X + tristate "Silicon Image sii902x RGB/HDMI bridge" + depends on OF + select DRM_KMS_HELPER + select REGMAP_I2C + ---help--- + Silicon Image sii902x bridge chip driver. + +config DRM_TOSHIBA_TC358767 + tristate "Toshiba TC358767 eDP bridge" + depends on OF + select DRM_KMS_HELPER + select REGMAP_I2C + select DRM_PANEL + ---help--- + Toshiba TC358767 eDP bridge chip driver. 
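The adv7511 rework in the hunks below drops the drm_encoder_slave glue and registers the chip as a drm_bridge that creates its own connector at attach time. A compressed sketch of that shape against the bridge API of this kernel generation, with hypothetical my_* names and the DSI/CEC handling trimmed out (the real driver embeds these fields in struct adv7511):

        #include <drm/drmP.h>
        #include <drm/drm_atomic_helper.h>
        #include <drm/drm_crtc_helper.h>

        struct my_bridge {
                struct drm_bridge bridge;
                struct drm_connector connector;
        };

        static int my_connector_get_modes(struct drm_connector *connector)
        {
                return 0;       /* the real driver returns EDID modes here */
        }

        static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
                .get_modes = my_connector_get_modes,
        };

        static enum drm_connector_status
        my_connector_detect(struct drm_connector *connector, bool force)
        {
                return connector_status_connected;      /* placeholder */
        }

        static struct drm_connector_funcs my_connector_funcs = {
                .dpms = drm_atomic_helper_connector_dpms,
                .fill_modes = drm_helper_probe_single_connector_modes,
                .detect = my_connector_detect,
                .destroy = drm_connector_cleanup,
                .reset = drm_atomic_helper_connector_reset,
                .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
                .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        };

        static void my_bridge_enable(struct drm_bridge *bridge)
        {
                /* power the chip up */
        }

        static void my_bridge_disable(struct drm_bridge *bridge)
        {
                /* power the chip back down */
        }

        static void my_bridge_mode_set(struct drm_bridge *bridge,
                                       struct drm_display_mode *mode,
                                       struct drm_display_mode *adj_mode)
        {
                /* program the video timings into the chip */
        }

        static int my_bridge_attach(struct drm_bridge *bridge)
        {
                struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);
                int ret;

                /* the connector can only be created once the encoder exists */
                if (!bridge->encoder)
                        return -ENODEV;

                ret = drm_connector_init(bridge->dev, &mb->connector,
                                         &my_connector_funcs,
                                         DRM_MODE_CONNECTOR_HDMIA);
                if (ret)
                        return ret;

                drm_connector_helper_add(&mb->connector, &my_connector_helper_funcs);
                return drm_mode_connector_attach_encoder(&mb->connector,
                                                         bridge->encoder);
        }

        static struct drm_bridge_funcs my_bridge_funcs = {
                .enable = my_bridge_enable,
                .disable = my_bridge_disable,
                .mode_set = my_bridge_mode_set,
                .attach = my_bridge_attach,
        };

A drm_bridge_add() call from probe then publishes the bridge so a KMS driver can find it through its of_node, which is how the converted adv7511 probe path below wires it up.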
+ source "drivers/gpu/drm/bridge/analogix/Kconfig" +source "drivers/gpu/drm/bridge/adv7511/Kconfig" + endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 96b13b30e6ab..efdb07e878f5 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o +obj-$(CONFIG_DRM_SII902X) += sii902x.o +obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ +obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig new file mode 100644 index 000000000000..d2b0499ab7d7 --- /dev/null +++ b/drivers/gpu/drm/bridge/adv7511/Kconfig @@ -0,0 +1,15 @@ +config DRM_I2C_ADV7511 + tristate "ADV7511 encoder" + depends on OF + select DRM_KMS_HELPER + select REGMAP_I2C + help + Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders. + +config DRM_I2C_ADV7533 + bool "ADV7533 encoder" + depends on DRM_I2C_ADV7511 + select DRM_MIPI_DSI + default y + help + Support for the Analog Devices ADV7533 DSI to HDMI encoder. diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile new file mode 100644 index 000000000000..9019327fff4c --- /dev/null +++ b/drivers/gpu/drm/bridge/adv7511/Makefile @@ -0,0 +1,3 @@ +adv7511-y := adv7511_drv.o +adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o +obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 38515b30cedf..161c923d6162 100644 --- a/drivers/gpu/drm/i2c/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -10,6 +10,11 @@ #define __DRM_I2C_ADV7511_H__ #include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/regmap.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_mipi_dsi.h> #define ADV7511_REG_CHIP_REVISION 0x00 #define ADV7511_REG_N0 0x01 @@ -286,4 +291,102 @@ struct adv7511_video_config { struct hdmi_avi_infoframe avi_infoframe; }; +enum adv7511_type { + ADV7511, + ADV7533, +}; + +struct adv7511 { + struct i2c_client *i2c_main; + struct i2c_client *i2c_edid; + struct i2c_client *i2c_cec; + + struct regmap *regmap; + struct regmap *regmap_cec; + enum drm_connector_status status; + bool powered; + + struct drm_display_mode curr_mode; + + unsigned int f_tmds; + + unsigned int current_edid_segment; + uint8_t edid_buf[256]; + bool edid_read; + + wait_queue_head_t wq; + struct drm_bridge bridge; + struct drm_connector connector; + + bool embedded_sync; + enum adv7511_sync_polarity vsync_polarity; + enum adv7511_sync_polarity hsync_polarity; + bool rgb; + + struct edid *edid; + + struct gpio_desc *gpio_pd; + + /* ADV7533 DSI RX related params */ + struct device_node *host_node; + struct mipi_dsi_device *dsi; + u8 num_dsi_lanes; + bool use_timing_gen; + + enum adv7511_type type; +}; + +#ifdef CONFIG_DRM_I2C_ADV7533 +void adv7533_dsi_power_on(struct adv7511 *adv); +void adv7533_dsi_power_off(struct adv7511 *adv); +void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode); +int adv7533_patch_registers(struct adv7511 *adv); +void adv7533_uninit_cec(struct adv7511 *adv); +int adv7533_init_cec(struct adv7511 *adv); +int adv7533_attach_dsi(struct adv7511 *adv); +void adv7533_detach_dsi(struct adv7511 *adv); +int adv7533_parse_dt(struct device_node *np, struct 
adv7511 *adv); +#else +static inline void adv7533_dsi_power_on(struct adv7511 *adv) +{ +} + +static inline void adv7533_dsi_power_off(struct adv7511 *adv) +{ +} + +static inline void adv7533_mode_set(struct adv7511 *adv, + struct drm_display_mode *mode) +{ +} + +static inline int adv7533_patch_registers(struct adv7511 *adv) +{ + return -ENODEV; +} + +static inline void adv7533_uninit_cec(struct adv7511 *adv) +{ +} + +static inline int adv7533_init_cec(struct adv7511 *adv) +{ + return -ENODEV; +} + +static inline int adv7533_attach_dsi(struct adv7511 *adv) +{ + return -ENODEV; +} + +static inline void adv7533_detach_dsi(struct adv7511 *adv) +{ +} + +static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) +{ + return -ENODEV; +} +#endif + #endif /* __DRM_I2C_ADV7511_H__ */ diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index a02112ba1c3d..ec8fb2ed3275 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -8,51 +8,17 @@ #include <linux/device.h> #include <linux/gpio/consumer.h> -#include <linux/i2c.h> #include <linux/module.h> -#include <linux/regmap.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_encoder_slave.h> #include "adv7511.h" -struct adv7511 { - struct i2c_client *i2c_main; - struct i2c_client *i2c_edid; - - struct regmap *regmap; - struct regmap *packet_memory_regmap; - enum drm_connector_status status; - bool powered; - - unsigned int f_tmds; - - unsigned int current_edid_segment; - uint8_t edid_buf[256]; - bool edid_read; - - wait_queue_head_t wq; - struct drm_encoder *encoder; - - bool embedded_sync; - enum adv7511_sync_polarity vsync_polarity; - enum adv7511_sync_polarity hsync_polarity; - bool rgb; - - struct edid *edid; - - struct gpio_desc *gpio_pd; -}; - -static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder) -{ - return to_encoder_slave(encoder)->slave_priv; -} - /* ADI recommended values for proper operation. 
*/ static const struct reg_sequence adv7511_fixed_registers[] = { { 0x98, 0x03 }, @@ -394,6 +360,9 @@ static void adv7511_power_on(struct adv7511 *adv7511) */ regcache_sync(adv7511->regmap); + if (adv7511->type == ADV7533) + adv7533_dsi_power_on(adv7511); + adv7511->powered = true; } @@ -405,6 +374,9 @@ static void adv7511_power_off(struct adv7511 *adv7511) ADV7511_POWER_POWER_DOWN); regcache_mark_dirty(adv7511->regmap); + if (adv7511->type == ADV7533) + adv7533_dsi_power_off(adv7511); + adv7511->powered = false; } @@ -430,7 +402,7 @@ static bool adv7511_hpd(struct adv7511 *adv7511) return false; } -static int adv7511_irq_process(struct adv7511 *adv7511) +static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) { unsigned int irq0, irq1; int ret; @@ -446,8 +418,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511) regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (irq0 & ADV7511_INT0_HPD && adv7511->encoder) - drm_helper_hpd_irq_event(adv7511->encoder->dev); + if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) + drm_helper_hpd_irq_event(adv7511->connector.dev); if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { adv7511->edid_read = true; @@ -464,7 +436,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid) struct adv7511 *adv7511 = devid; int ret; - ret = adv7511_irq_process(adv7511); + ret = adv7511_irq_process(adv7511, true); return ret < 0 ? IRQ_NONE : IRQ_HANDLED; } @@ -481,7 +453,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout) adv7511->edid_read, msecs_to_jiffies(timeout)); } else { for (; timeout > 0; timeout -= 25) { - ret = adv7511_irq_process(adv7511); + ret = adv7511_irq_process(adv7511, false); if (ret < 0) break; @@ -563,13 +535,12 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, } /* ----------------------------------------------------------------------------- - * Encoder operations + * ADV75xx helpers */ -static int adv7511_get_modes(struct drm_encoder *encoder, +static int adv7511_get_modes(struct adv7511 *adv7511, struct drm_connector *connector) { - struct adv7511 *adv7511 = encoder_to_adv7511(encoder); struct edid *edid; unsigned int count; @@ -606,21 +577,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder, return count; } -static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct adv7511 *adv7511 = encoder_to_adv7511(encoder); - - if (mode == DRM_MODE_DPMS_ON) - adv7511_power_on(adv7511); - else - adv7511_power_off(adv7511); -} - static enum drm_connector_status -adv7511_encoder_detect(struct drm_encoder *encoder, - struct drm_connector *connector) +adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) { - struct adv7511 *adv7511 = encoder_to_adv7511(encoder); enum drm_connector_status status; unsigned int val; bool hpd; @@ -644,7 +603,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder, if (status == connector_status_connected && hpd && adv7511->powered) { regcache_mark_dirty(adv7511->regmap); adv7511_power_on(adv7511); - adv7511_get_modes(encoder, connector); + adv7511_get_modes(adv7511, connector); if (adv7511->status == connector_status_connected) status = connector_status_disconnected; } else { @@ -658,8 +617,8 @@ adv7511_encoder_detect(struct drm_encoder *encoder, return status; } -static int adv7511_encoder_mode_valid(struct drm_encoder *encoder, - struct drm_display_mode *mode) +static int 
adv7511_mode_valid(struct adv7511 *adv7511, + struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; @@ -667,11 +626,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder, return MODE_OK; } -static void adv7511_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) +static void adv7511_mode_set(struct adv7511 *adv7511, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) { - struct adv7511 *adv7511 = encoder_to_adv7511(encoder); unsigned int low_refresh_rate; unsigned int hsync_polarity = 0; unsigned int vsync_polarity = 0; @@ -754,6 +712,11 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder, regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); + if (adv7511->type == ADV7533) + adv7533_mode_set(adv7511, adj_mode); + + drm_mode_copy(&adv7511->curr_mode, adj_mode); + /* * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is * supposed to give better results. @@ -762,12 +725,114 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder, adv7511->f_tmds = mode->clock; } -static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = { - .dpms = adv7511_encoder_dpms, - .mode_valid = adv7511_encoder_mode_valid, - .mode_set = adv7511_encoder_mode_set, - .detect = adv7511_encoder_detect, - .get_modes = adv7511_get_modes, +/* Connector funcs */ +static struct adv7511 *connector_to_adv7511(struct drm_connector *connector) +{ + return container_of(connector, struct adv7511, connector); +} + +static int adv7511_connector_get_modes(struct drm_connector *connector) +{ + struct adv7511 *adv = connector_to_adv7511(connector); + + return adv7511_get_modes(adv, connector); +} + +static enum drm_mode_status +adv7511_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct adv7511 *adv = connector_to_adv7511(connector); + + return adv7511_mode_valid(adv, mode); +} + +static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = { + .get_modes = adv7511_connector_get_modes, + .mode_valid = adv7511_connector_mode_valid, +}; + +static enum drm_connector_status +adv7511_connector_detect(struct drm_connector *connector, bool force) +{ + struct adv7511 *adv = connector_to_adv7511(connector); + + return adv7511_detect(adv, connector); +} + +static struct drm_connector_funcs adv7511_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = adv7511_connector_detect, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +/* Bridge funcs */ +static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge) +{ + return container_of(bridge, struct adv7511, bridge); +} + +static void adv7511_bridge_enable(struct drm_bridge *bridge) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + adv7511_power_on(adv); +} + +static void adv7511_bridge_disable(struct drm_bridge *bridge) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + adv7511_power_off(adv); +} + +static void adv7511_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + adv7511_mode_set(adv, mode, adj_mode); +} + +static int 
adv7511_bridge_attach(struct drm_bridge *bridge) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + adv->connector.polled = DRM_CONNECTOR_POLL_HPD; + + ret = drm_connector_init(bridge->dev, &adv->connector, + &adv7511_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + drm_connector_helper_add(&adv->connector, + &adv7511_connector_helper_funcs); + drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder); + + if (adv->type == ADV7533) + ret = adv7533_attach_dsi(adv); + + return ret; +} + +static struct drm_bridge_funcs adv7511_bridge_funcs = { + .enable = adv7511_bridge_enable, + .disable = adv7511_bridge_disable, + .mode_set = adv7511_bridge_mode_set, + .attach = adv7511_bridge_attach, }; /* ----------------------------------------------------------------------------- @@ -780,8 +845,6 @@ static int adv7511_parse_dt(struct device_node *np, const char *str; int ret; - memset(config, 0, sizeof(*config)); - of_property_read_u32(np, "adi,input-depth", &config->input_color_depth); if (config->input_color_depth != 8 && config->input_color_depth != 10 && config->input_color_depth != 12) @@ -881,7 +944,17 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511->powered = false; adv7511->status = connector_status_disconnected; - ret = adv7511_parse_dt(dev->of_node, &link_config); + if (dev->of_node) + adv7511->type = (enum adv7511_type)of_device_get_match_data(dev); + else + adv7511->type = id->driver_data; + + memset(&link_config, 0, sizeof(link_config)); + + if (adv7511->type == ADV7511) + ret = adv7511_parse_dt(dev->of_node, &link_config); + else + ret = adv7533_parse_dt(dev->of_node, adv7511); if (ret) return ret; @@ -907,8 +980,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) return ret; dev_dbg(dev, "Rev. 
%d\n", val); - ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers, - ARRAY_SIZE(adv7511_fixed_registers)); + if (adv7511->type == ADV7511) + ret = regmap_register_patch(adv7511->regmap, + adv7511_fixed_registers, + ARRAY_SIZE(adv7511_fixed_registers)); + else + ret = adv7533_patch_registers(adv7511); if (ret) return ret; @@ -923,6 +1000,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (!adv7511->i2c_edid) return -ENOMEM; + if (adv7511->type == ADV7533) { + ret = adv7533_init_cec(adv7511); + if (ret) + goto err_i2c_unregister_edid; + } + if (i2c->irq) { init_waitqueue_head(&adv7511->wq); @@ -931,7 +1014,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) IRQF_ONESHOT, dev_name(dev), adv7511); if (ret) - goto err_i2c_unregister_device; + goto err_unregister_cec; } /* CEC is unused for now */ @@ -942,11 +1025,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) i2c_set_clientdata(i2c, adv7511); - adv7511_set_link_config(adv7511, &link_config); + if (adv7511->type == ADV7511) + adv7511_set_link_config(adv7511, &link_config); + + adv7511->bridge.funcs = &adv7511_bridge_funcs; + adv7511->bridge.of_node = dev->of_node; + + ret = drm_bridge_add(&adv7511->bridge); + if (ret) { + dev_err(dev, "failed to add adv7511 bridge\n"); + goto err_unregister_cec; + } return 0; -err_i2c_unregister_device: +err_unregister_cec: + adv7533_uninit_cec(adv7511); +err_i2c_unregister_edid: i2c_unregister_device(adv7511->i2c_edid); return ret; @@ -956,66 +1051,71 @@ static int adv7511_remove(struct i2c_client *i2c) { struct adv7511 *adv7511 = i2c_get_clientdata(i2c); - i2c_unregister_device(adv7511->i2c_edid); - - kfree(adv7511->edid); - - return 0; -} - -static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev, - struct drm_encoder_slave *encoder) -{ + if (adv7511->type == ADV7533) { + adv7533_detach_dsi(adv7511); + adv7533_uninit_cec(adv7511); + } - struct adv7511 *adv7511 = i2c_get_clientdata(i2c); + drm_bridge_remove(&adv7511->bridge); - encoder->slave_priv = adv7511; - encoder->slave_funcs = &adv7511_encoder_funcs; + i2c_unregister_device(adv7511->i2c_edid); - adv7511->encoder = &encoder->base; + kfree(adv7511->edid); return 0; } static const struct i2c_device_id adv7511_i2c_ids[] = { - { "adv7511", 0 }, - { "adv7511w", 0 }, - { "adv7513", 0 }, + { "adv7511", ADV7511 }, + { "adv7511w", ADV7511 }, + { "adv7513", ADV7511 }, +#ifdef CONFIG_DRM_I2C_ADV7533 + { "adv7533", ADV7533 }, +#endif { } }; MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids); static const struct of_device_id adv7511_of_ids[] = { - { .compatible = "adi,adv7511", }, - { .compatible = "adi,adv7511w", }, - { .compatible = "adi,adv7513", }, + { .compatible = "adi,adv7511", .data = (void *)ADV7511 }, + { .compatible = "adi,adv7511w", .data = (void *)ADV7511 }, + { .compatible = "adi,adv7513", .data = (void *)ADV7511 }, +#ifdef CONFIG_DRM_I2C_ADV7533 + { .compatible = "adi,adv7533", .data = (void *)ADV7533 }, +#endif { } }; MODULE_DEVICE_TABLE(of, adv7511_of_ids); -static struct drm_i2c_encoder_driver adv7511_driver = { - .i2c_driver = { - .driver = { - .name = "adv7511", - .of_match_table = adv7511_of_ids, - }, - .id_table = adv7511_i2c_ids, - .probe = adv7511_probe, - .remove = adv7511_remove, - }, +static struct mipi_dsi_driver adv7533_dsi_driver = { + .driver.name = "adv7533", +}; - .encoder_init = adv7511_encoder_init, +static struct i2c_driver adv7511_driver = { + .driver = { + .name = "adv7511", + 
.of_match_table = adv7511_of_ids, + }, + .id_table = adv7511_i2c_ids, + .probe = adv7511_probe, + .remove = adv7511_remove, }; static int __init adv7511_init(void) { - return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver); + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) + mipi_dsi_driver_register(&adv7533_dsi_driver); + + return i2c_add_driver(&adv7511_driver); } module_init(adv7511_init); static void __exit adv7511_exit(void) { - drm_i2c_encoder_unregister(&adv7511_driver); + i2c_del_driver(&adv7511_driver); + + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) + mipi_dsi_driver_unregister(&adv7533_dsi_driver); } module_exit(adv7511_exit); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c new file mode 100644 index 000000000000..5eebd15899b1 --- /dev/null +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of_graph.h> + +#include "adv7511.h" + +static const struct reg_sequence adv7533_fixed_registers[] = { + { 0x16, 0x20 }, + { 0x9a, 0xe0 }, + { 0xba, 0x70 }, + { 0xde, 0x82 }, + { 0xe4, 0x40 }, + { 0xe5, 0x80 }, +}; + +static const struct reg_sequence adv7533_cec_fixed_registers[] = { + { 0x15, 0xd0 }, + { 0x17, 0xd0 }, + { 0x24, 0x20 }, + { 0x57, 0x11 }, +}; + +static const struct regmap_config adv7533_cec_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, + .cache_type = REGCACHE_RBTREE, +}; + +static void adv7511_dsi_config_timing_gen(struct adv7511 *adv) +{ + struct mipi_dsi_device *dsi = adv->dsi; + struct drm_display_mode *mode = &adv->curr_mode; + unsigned int hsw, hfp, hbp, vsw, vfp, vbp; + u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */ + + hsw = mode->hsync_end - mode->hsync_start; + hfp = mode->hsync_start - mode->hdisplay; + hbp = mode->htotal - mode->hsync_end; + vsw = mode->vsync_end - mode->vsync_start; + vfp = mode->vsync_start - mode->vdisplay; + vbp = mode->vtotal - mode->vsync_end; + + /* set pixel clock divider mode */ + regmap_write(adv->regmap_cec, 0x16, + clock_div_by_lanes[dsi->lanes - 2] << 3); + + /* horizontal porch params */ + regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4); + regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x2a, hsw >> 4); + regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x2c, hfp >> 4); + regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x2e, hbp >> 4); + regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff); + + /* vertical porch params */ + regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4); + regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x32, vsw >> 4); + regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x34, vfp >> 4); + regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff); + regmap_write(adv->regmap_cec, 0x36, vbp >> 4); + regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 
0xff); +} + +void adv7533_dsi_power_on(struct adv7511 *adv) +{ + struct mipi_dsi_device *dsi = adv->dsi; + + if (adv->use_timing_gen) + adv7511_dsi_config_timing_gen(adv); + + /* set number of dsi lanes */ + regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4); + + if (adv->use_timing_gen) { + /* reset internal timing generator */ + regmap_write(adv->regmap_cec, 0x27, 0xcb); + regmap_write(adv->regmap_cec, 0x27, 0x8b); + regmap_write(adv->regmap_cec, 0x27, 0xcb); + } else { + /* disable internal timing generator */ + regmap_write(adv->regmap_cec, 0x27, 0x0b); + } + + /* enable hdmi */ + regmap_write(adv->regmap_cec, 0x03, 0x89); + /* disable test mode */ + regmap_write(adv->regmap_cec, 0x55, 0x00); + + regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers, + ARRAY_SIZE(adv7533_cec_fixed_registers)); +} + +void adv7533_dsi_power_off(struct adv7511 *adv) +{ + /* disable hdmi */ + regmap_write(adv->regmap_cec, 0x03, 0x0b); + /* disable internal timing generator */ + regmap_write(adv->regmap_cec, 0x27, 0x0b); +} + +void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode) +{ + struct mipi_dsi_device *dsi = adv->dsi; + int lanes, ret; + + if (adv->num_dsi_lanes != 4) + return; + + if (mode->clock > 80000) + lanes = 4; + else + lanes = 3; + + if (lanes != dsi->lanes) { + mipi_dsi_detach(dsi); + dsi->lanes = lanes; + ret = mipi_dsi_attach(dsi); + if (ret) + dev_err(&dsi->dev, "failed to change host lanes\n"); + } +} + +int adv7533_patch_registers(struct adv7511 *adv) +{ + return regmap_register_patch(adv->regmap, + adv7533_fixed_registers, + ARRAY_SIZE(adv7533_fixed_registers)); +} + +void adv7533_uninit_cec(struct adv7511 *adv) +{ + i2c_unregister_device(adv->i2c_cec); +} + +static const int cec_i2c_addr = 0x78; + +int adv7533_init_cec(struct adv7511 *adv) +{ + int ret; + + adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1); + if (!adv->i2c_cec) + return -ENOMEM; + + adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec, + &adv7533_cec_regmap_config); + if (IS_ERR(adv->regmap_cec)) { + ret = PTR_ERR(adv->regmap_cec); + goto err; + } + + ret = regmap_register_patch(adv->regmap_cec, + adv7533_cec_fixed_registers, + ARRAY_SIZE(adv7533_cec_fixed_registers)); + if (ret) + goto err; + + return 0; +err: + adv7533_uninit_cec(adv); + return ret; +} + +int adv7533_attach_dsi(struct adv7511 *adv) +{ + struct device *dev = &adv->i2c_main->dev; + struct mipi_dsi_host *host; + struct mipi_dsi_device *dsi; + int ret = 0; + const struct mipi_dsi_device_info info = { .type = "adv7533", + .channel = 0, + .node = NULL, + }; + + host = of_find_mipi_dsi_host_by_node(adv->host_node); + if (!host) { + dev_err(dev, "failed to find dsi host\n"); + return -EPROBE_DEFER; + } + + dsi = mipi_dsi_device_register_full(host, &info); + if (IS_ERR(dsi)) { + dev_err(dev, "failed to create dsi device\n"); + ret = PTR_ERR(dsi); + goto err_dsi_device; + } + + adv->dsi = dsi; + + dsi->lanes = adv->num_dsi_lanes; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "failed to attach dsi to host\n"); + goto err_dsi_attach; + } + + return 0; + +err_dsi_attach: + mipi_dsi_device_unregister(dsi); +err_dsi_device: + return ret; +} + +void adv7533_detach_dsi(struct adv7511 *adv) +{ + mipi_dsi_detach(adv->dsi); + mipi_dsi_device_unregister(adv->dsi); +} + +int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) 
+{ + u32 num_lanes; + struct device_node *endpoint; + + of_property_read_u32(np, "adi,dsi-lanes", &num_lanes); + + if (num_lanes < 1 || num_lanes > 4) + return -EINVAL; + + adv->num_dsi_lanes = num_lanes; + + endpoint = of_graph_get_next_endpoint(np, NULL); + if (!endpoint) + return -ENODEV; + + adv->host_node = of_graph_get_remote_port_parent(endpoint); + if (!adv->host_node) { + of_node_put(endpoint); + return -ENODEV; + } + + of_node_put(endpoint); + of_node_put(adv->host_node); + + adv->use_timing_gen = !of_property_read_bool(np, + "adi,disable-timing-generator"); + + /* TODO: Check if these need to be parsed by DT or not */ + adv->rgb = true; + adv->embedded_sync = false; + + return 0; +} diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index d087b054c360..f9f03bcba0af 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -986,16 +986,8 @@ unlock: return num_modes; } -static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector) -{ - struct anx78xx *anx78xx = connector_to_anx78xx(connector); - - return anx78xx->bridge.encoder; -} - static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = { .get_modes = anx78xx_get_modes, - .best_encoder = anx78xx_best_encoder, }; static enum drm_connector_status anx78xx_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 7699597070a1..32715daf73cb 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector) num_modes += drm_panel_get_modes(dp->plat_data->panel); if (dp->plat_data->get_modes) - num_modes += dp->plat_data->get_modes(dp->plat_data); + num_modes += dp->plat_data->get_modes(dp->plat_data, connector); return num_modes; } @@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) switch (dp->plat_data->dev_type) { case RK3288_DP: + case RK3399_EDP: /* * As the RK3288 DisplayPort TRM indicates, the "Main link * contains 4 physical lanes of 2.7/1.62 Gbps/lane".
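[Editor's note: commentary added in editing, not part of the commit.] The analogix_dp_core.h hunk that follows converts enum dp_irq_type from sequential values to BIT() flags. The motivation is inferred from the change itself rather than stated in the patch: several hotplug conditions can be pending at the same time, and one-bit-per-cause values let callers OR causes together and test each one independently, which the old sequential values (0, 1, 2, 3) cannot express. A minimal standalone C sketch of the pattern, using hypothetical names rather than the driver's:

#include <stdio.h>

/* One bit per interrupt cause, so multiple causes can be OR'd into a
 * single word and tested independently. */
enum hp_irq {
	HP_IRQ_CABLE_IN  = 1u << 0,
	HP_IRQ_CABLE_OUT = 1u << 1,
	HP_IRQ_CHANGE    = 1u << 2,
};

int main(void)
{
	unsigned int pending = 0;

	pending |= HP_IRQ_CABLE_IN;	/* first event before the handler runs */
	pending |= HP_IRQ_CHANGE;	/* second event lands in the same word */

	if (pending & HP_IRQ_CABLE_IN)
		printf("cable-in pending\n");
	if (pending & HP_IRQ_CHANGE)
		printf("hpd-change pending\n");

	/* With sequential values, CABLE_IN (0) | CHANGE (2) == 2 would be
	 * indistinguishable from CHANGE alone. */
	return 0;
}

The DP_IRQ_TYPE_* constants in the hunk below follow exactly this scheme.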
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index f09275d40f70..b45638043ec4 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -127,10 +127,10 @@ enum analog_power_block { }; enum dp_irq_type { - DP_IRQ_TYPE_HP_CABLE_IN, - DP_IRQ_TYPE_HP_CABLE_OUT, - DP_IRQ_TYPE_HP_CHANGE, - DP_IRQ_TYPE_UNKNOWN, + DP_IRQ_TYPE_HP_CABLE_IN = BIT(0), + DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1), + DP_IRQ_TYPE_HP_CHANGE = BIT(2), + DP_IRQ_TYPE_UNKNOWN = BIT(3), }; struct video_info { diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 49205ef02be3..48030f0cf497 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp) reg = SEL_24M | TX_DVDD_BIT_1_0625V; writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); - if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) { - writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1); + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { + reg = REF_CLK_24M; + if (dp->plat_data->dev_type == RK3288_DP) + reg ^= REF_CLK_MASK; + + writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1); writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2); writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); @@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, u32 reg; u32 phy_pd_addr = ANALOGIX_DP_PHY_PD; - if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) phy_pd_addr = ANALOGIX_DP_PD; switch (block) { @@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp) analogix_dp_reset_aux(dp); /* Disable AUX transaction H/W retry */ - if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) + if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) | AUX_HW_RETRY_COUNT_SEL(3) | AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index 337912b0aeab..cdcc6c5add5e 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -163,8 +163,9 @@ #define HSYNC_POLARITY_CFG (0x1 << 0) /* ANALOGIX_DP_PLL_REG_1 */ -#define REF_CLK_24M (0x1 << 1) -#define REF_CLK_27M (0x0 << 1) +#define REF_CLK_24M (0x1 << 0) +#define REF_CLK_27M (0x0 << 0) +#define REF_CLK_MASK (0x1 << 0) /* ANALOGIX_DP_LANE_MAP */ #define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6) diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index c9d941283d30..77ab47341658 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector, return mode_status; } -static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector - *connector) -{ - struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, - connector); - - return hdmi->encoder; -} - static void dw_hdmi_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); @@ -1504,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector) } static const struct 
drm_connector_funcs dw_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = dw_hdmi_connector_detect, - .destroy = dw_hdmi_connector_destroy, - .force = dw_hdmi_connector_force, -}; - -static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = dw_hdmi_connector_detect, @@ -1525,7 +1508,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = { static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, .mode_valid = dw_hdmi_connector_mode_valid, - .best_encoder = dw_hdmi_connector_best_encoder, + .best_encoder = drm_atomic_helper_best_encoder, }; static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { @@ -1643,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) drm_connector_helper_add(&hdmi->connector, &dw_hdmi_connector_helper_funcs); - if (drm_core_check_feature(drm, DRIVER_ATOMIC)) - drm_connector_init(drm, &hdmi->connector, - &dw_hdmi_atomic_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - else - drm_connector_init(drm, &hdmi->connector, - &dw_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); + drm_connector_init(drm, &hdmi->connector, + &dw_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); drm_mode_connector_attach_encoder(&hdmi->connector, encoder); diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 7ecd59f70b8e..93f3dacf9e27 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -235,16 +235,8 @@ out: return num_modes; } -static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) -{ - struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - - return ptn_bridge->bridge.encoder; -} - static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, - .best_encoder = ptn3460_best_encoder, }; static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index be881e9fef8f..583b8ce614e3 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector) return drm_panel_get_modes(ps8622->panel); } -static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) -{ - struct ps8622_bridge *ps8622; - - ps8622 = connector_to_ps8622(connector); - - return ps8622->bridge.encoder; -} - static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { .get_modes = ps8622_get_modes, - .best_encoder = ps8622_best_encoder, }; static enum drm_connector_status ps8622_detect(struct drm_connector *connector, @@ -646,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client) { struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); - if (ps8622->bl) - backlight_device_unregister(ps8622->bl); - + backlight_device_unregister(ps8622->bl); drm_bridge_remove(&ps8622->bridge); return 0; diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c new file mode 100644 index 000000000000..9126d0306ab5 --- /dev/null +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2016 Atmel + * Bo Shen <voice.shen@atmel.com> + * + * 
Authors: Bo Shen <voice.shen@atmel.com> + * Boris Brezillon <boris.brezillon@free-electrons.com> + * Wu, Songjun <Songjun.Wu@atmel.com> + * + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +#define SII902X_TPI_VIDEO_DATA 0x0 + +#define SII902X_TPI_PIXEL_REPETITION 0x8 +#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5) +#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4) +#define SII902X_TPI_AVI_PIXEL_REP_4X 3 +#define SII902X_TPI_AVI_PIXEL_REP_2X 1 +#define SII902X_TPI_AVI_PIXEL_REP_NONE 0 +#define SII902X_TPI_CLK_RATIO_HALF (0 << 6) +#define SII902X_TPI_CLK_RATIO_1X (1 << 6) +#define SII902X_TPI_CLK_RATIO_2X (2 << 6) +#define SII902X_TPI_CLK_RATIO_4X (3 << 6) + +#define SII902X_TPI_AVI_IN_FORMAT 0x9 +#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7) +#define SII902X_TPI_AVI_INPUT_DITHER BIT(6) +#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2) +#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2) +#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0) + +#define SII902X_TPI_AVI_INFOFRAME 0x0c + +#define SII902X_SYS_CTRL_DATA 0x1a +#define SII902X_SYS_CTRL_PWR_DWN BIT(4) +#define SII902X_SYS_CTRL_AV_MUTE BIT(3) +#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2) +#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1) +#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0) +#define SII902X_SYS_CTRL_OUTPUT_HDMI 1 +#define SII902X_SYS_CTRL_OUTPUT_DVI 0 + +#define SII902X_REG_CHIPID(n) (0x1b + (n)) + +#define SII902X_PWR_STATE_CTRL 0x1e +#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0) +#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK) + +#define SII902X_INT_ENABLE 0x3c +#define SII902X_INT_STATUS 0x3d +#define SII902X_HOTPLUG_EVENT BIT(0) +#define SII902X_PLUGGED_STATUS BIT(2) + +#define SII902X_REG_TPI_RQB 0xc7 + +#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500 + +struct sii902x { + struct i2c_client *i2c; + struct regmap *regmap; + struct drm_bridge bridge; + struct drm_connector connector; + struct gpio_desc *reset_gpio; +}; + +static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge) +{ + return container_of(bridge, struct sii902x, bridge); +} + +static inline struct sii902x *connector_to_sii902x(struct drm_connector *con) +{ + return container_of(con, struct sii902x, connector); +} + +static void sii902x_reset(struct sii902x *sii902x) +{ + if (!sii902x->reset_gpio) + return; + + gpiod_set_value(sii902x->reset_gpio, 1); + + /* The datasheet says treset-min = 100us. Make it 150us to be sure. 
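+ * (Editor's note, not in the original patch: usleep_range() is used
+ * rather than udelay() so the timer subsystem may place the wakeup
+ * anywhere in the 150-200us window while still guaranteeing the
+ * 150us minimum.)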
*/ + usleep_range(150, 200); + + gpiod_set_value(sii902x->reset_gpio, 0); +} + +static enum drm_connector_status +sii902x_connector_detect(struct drm_connector *connector, bool force) +{ + struct sii902x *sii902x = connector_to_sii902x(connector); + unsigned int status; + + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + + return (status & SII902X_PLUGGED_STATUS) ? + connector_status_connected : connector_status_disconnected; +} + +static const struct drm_connector_funcs sii902x_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = sii902x_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int sii902x_get_modes(struct drm_connector *connector) +{ + struct sii902x *sii902x = connector_to_sii902x(connector); + struct regmap *regmap = sii902x->regmap; + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + unsigned long timeout; + unsigned int status; + struct edid *edid; + int num = 0; + int ret; + + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_DDC_BUS_REQ, + SII902X_SYS_CTRL_DDC_BUS_REQ); + if (ret) + return ret; + + timeout = jiffies + + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) && + time_before(jiffies, timeout)); + + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { + dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus"); + return -ETIMEDOUT; + } + + ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status); + if (ret) + return ret; + + edid = drm_get_edid(connector, sii902x->i2c->adapter); + drm_mode_connector_update_edid_property(connector, edid); + if (edid) { + num = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + ret = drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + if (ret) + return ret; + + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD, 0); + if (ret) + return ret; + + timeout = jiffies + + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD) && + time_before(jiffies, timeout)); + + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD)) { + dev_err(&sii902x->i2c->dev, "failed to release the i2c bus"); + return -ETIMEDOUT; + } + + return num; +} + +static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO: check mode */ + + return MODE_OK; +} + +static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = { + .get_modes = sii902x_get_modes, + .mode_valid = sii902x_mode_valid, +}; + +static void sii902x_bridge_disable(struct drm_bridge *bridge) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + + regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_PWR_DWN, + SII902X_SYS_CTRL_PWR_DWN); +} + +static void sii902x_bridge_enable(struct drm_bridge *bridge) +{ + struct sii902x *sii902x 
= bridge_to_sii902x(bridge); + + regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL, + SII902X_AVI_POWER_STATE_MSK, + SII902X_AVI_POWER_STATE_D(0)); + regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_PWR_DWN, 0); +} + +static void sii902x_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adj) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + struct regmap *regmap = sii902x->regmap; + u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; + struct hdmi_avi_infoframe frame; + int ret; + + buf[0] = adj->clock; + buf[1] = adj->clock >> 8; + buf[2] = adj->vrefresh; + buf[3] = 0x00; + buf[4] = adj->hdisplay; + buf[5] = adj->hdisplay >> 8; + buf[6] = adj->vdisplay; + buf[7] = adj->vdisplay >> 8; + buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE | + SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT; + buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO | + SII902X_TPI_AVI_INPUT_COLORSPACE_RGB; + + ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10); + if (ret) + return; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return; + } + + ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); + if (ret < 0) { + DRM_ERROR("failed to pack AVI infoframe: %d\n", ret); + return; + } + + /* Do not send the infoframe header, but keep the CRC field. */ + regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME, + buf + HDMI_INFOFRAME_HEADER_SIZE - 1, + HDMI_AVI_INFOFRAME_SIZE + 1); +} + +static int sii902x_bridge_attach(struct drm_bridge *bridge) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + struct drm_device *drm = bridge->dev; + int ret; + + drm_connector_helper_add(&sii902x->connector, + &sii902x_connector_helper_funcs); + + if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) { + dev_err(&sii902x->i2c->dev, + "sii902x driver is only compatible with DRM devices supporting atomic updates"); + return -ENOTSUPP; + } + + ret = drm_connector_init(drm, &sii902x->connector, + &sii902x_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) + return ret; + + if (sii902x->i2c->irq > 0) + sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD; + else + sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT; + + drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder); + + return 0; +} + +static const struct drm_bridge_funcs sii902x_bridge_funcs = { + .attach = sii902x_bridge_attach, + .mode_set = sii902x_bridge_mode_set, + .disable = sii902x_bridge_disable, + .enable = sii902x_bridge_enable, +}; + +static const struct regmap_range sii902x_volatile_ranges[] = { + { .range_min = 0, .range_max = 0xff }, +}; + +static const struct regmap_access_table sii902x_volatile_table = { + .yes_ranges = sii902x_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges), +}; + +static const struct regmap_config sii902x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &sii902x_volatile_table, + .cache_type = REGCACHE_NONE, +}; + +static irqreturn_t sii902x_interrupt(int irq, void *data) +{ + struct sii902x *sii902x = data; + unsigned int status = 0; + + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); + + if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev) + drm_helper_hpd_irq_event(sii902x->bridge.dev); + + return IRQ_HANDLED; +} + +static int sii902x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device 
*dev = &client->dev; + unsigned int status = 0; + struct sii902x *sii902x; + u8 chipid[4]; + int ret; + + sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); + if (!sii902x) + return -ENOMEM; + + sii902x->i2c = client; + sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); + if (IS_ERR(sii902x->regmap)) + return PTR_ERR(sii902x->regmap); + + sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(sii902x->reset_gpio)) { + dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", + PTR_ERR(sii902x->reset_gpio)); + return PTR_ERR(sii902x->reset_gpio); + } + + sii902x_reset(sii902x); + + ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0); + if (ret) + return ret; + + ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0), + &chipid, 4); + if (ret) { + dev_err(dev, "regmap_read failed %d\n", ret); + return ret; + } + + if (chipid[0] != 0xb0) { + dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n", + chipid[0]); + return -EINVAL; + } + + /* Clear all pending interrupts */ + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); + + if (client->irq > 0) { + regmap_write(sii902x->regmap, SII902X_INT_ENABLE, + SII902X_HOTPLUG_EVENT); + + ret = devm_request_threaded_irq(dev, client->irq, NULL, + sii902x_interrupt, + IRQF_ONESHOT, dev_name(dev), + sii902x); + if (ret) + return ret; + } + + sii902x->bridge.funcs = &sii902x_bridge_funcs; + sii902x->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&sii902x->bridge); + if (ret) { + dev_err(dev, "Failed to add drm_bridge\n"); + return ret; + } + + i2c_set_clientdata(client, sii902x); + + return 0; +} + +static int sii902x_remove(struct i2c_client *client) + +{ + struct sii902x *sii902x = i2c_get_clientdata(client); + + drm_bridge_remove(&sii902x->bridge); + + return 0; +} + +static const struct of_device_id sii902x_dt_ids[] = { + { .compatible = "sil,sii9022", }, + { } +}; +MODULE_DEVICE_TABLE(of, sii902x_dt_ids); + +static const struct i2c_device_id sii902x_i2c_ids[] = { + { "sii9022", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids); + +static struct i2c_driver sii902x_driver = { + .probe = sii902x_probe, + .remove = sii902x_remove, + .driver = { + .name = "sii902x", + .of_match_table = sii902x_dt_ids, + }, + .id_table = sii902x_i2c_ids, +}; +module_i2c_driver(sii902x_driver); + +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); +MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c new file mode 100644 index 000000000000..a09825d8c94a --- /dev/null +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -0,0 +1,1413 @@ +/* + * tc358767 eDP bridge driver + * + * Copyright (C) 2016 CogentEmbedded Inc + * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com> + * + * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> + * + * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c + * + * Copyright (C) 2012 Texas Instruments + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> + +/* Registers */ + +/* Display Parallel Interface */ +#define DPIPXLFMT 0x0440 +#define VS_POL_ACTIVE_LOW (1 << 10) +#define HS_POL_ACTIVE_LOW (1 << 9) +#define DE_POL_ACTIVE_HIGH (0 << 8) +#define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */ +#define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */ +#define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */ +#define DPI_BPP_RGB888 (0 << 0) +#define DPI_BPP_RGB666 (1 << 0) +#define DPI_BPP_RGB565 (2 << 0) + +/* Video Path */ +#define VPCTRL0 0x0450 +#define OPXLFMT_RGB666 (0 << 8) +#define OPXLFMT_RGB888 (1 << 8) +#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */ +#define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */ +#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */ +#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */ +#define HTIM01 0x0454 +#define HTIM02 0x0458 +#define VTIM01 0x045c +#define VTIM02 0x0460 +#define VFUEN0 0x0464 +#define VFUEN BIT(0) /* Video Frame Timing Upload */ + +/* System */ +#define TC_IDREG 0x0500 +#define SYSCTRL 0x0510 +#define DP0_AUDSRC_NO_INPUT (0 << 3) +#define DP0_AUDSRC_I2S_RX (1 << 3) +#define DP0_VIDSRC_NO_INPUT (0 << 0) +#define DP0_VIDSRC_DSI_RX (1 << 0) +#define DP0_VIDSRC_DPI_RX (2 << 0) +#define DP0_VIDSRC_COLOR_BAR (3 << 0) + +/* Control */ +#define DP0CTL 0x0600 +#define VID_MN_GEN BIT(6) /* Auto-generate M/N values */ +#define EF_EN BIT(5) /* Enable Enhanced Framing */ +#define VID_EN BIT(1) /* Video transmission enable */ +#define DP_EN BIT(0) /* Enable DPTX function */ + +/* Clocks */ +#define DP0_VIDMNGEN0 0x0610 +#define DP0_VIDMNGEN1 0x0614 +#define DP0_VMNGENSTATUS 0x0618 + +/* Main Channel */ +#define DP0_SECSAMPLE 0x0640 +#define DP0_VIDSYNCDELAY 0x0644 +#define DP0_TOTALVAL 0x0648 +#define DP0_STARTVAL 0x064c +#define DP0_ACTIVEVAL 0x0650 +#define DP0_SYNCVAL 0x0654 +#define DP0_MISC 0x0658 +#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ +#define BPC_6 (0 << 5) +#define BPC_8 (1 << 5) + +/* AUX channel */ +#define DP0_AUXCFG0 0x0660 +#define DP0_AUXCFG1 0x0664 +#define AUX_RX_FILTER_EN BIT(16) + +#define DP0_AUXADDR 0x0668 +#define DP0_AUXWDATA(i) (0x066c + (i) * 4) +#define DP0_AUXRDATA(i) (0x067c + (i) * 4) +#define DP0_AUXSTATUS 0x068c +#define AUX_STATUS_MASK 0xf0 +#define AUX_STATUS_SHIFT 4 +#define AUX_TIMEOUT BIT(1) +#define AUX_BUSY BIT(0) +#define DP0_AUXI2CADR 0x0698 + +/* Link Training */ +#define DP0_SRCCTRL 0x06a0 +#define DP0_SRCCTRL_SCRMBLDIS BIT(13) +#define DP0_SRCCTRL_EN810B BIT(12) +#define DP0_SRCCTRL_NOTP (0 << 8) +#define DP0_SRCCTRL_TP1 (1 << 8) +#define DP0_SRCCTRL_TP2 (2 << 8) +#define DP0_SRCCTRL_LANESKEW BIT(7) +#define DP0_SRCCTRL_SSCG BIT(3) +#define DP0_SRCCTRL_LANES_1 (0 << 2) +#define DP0_SRCCTRL_LANES_2 (1 << 2) +#define DP0_SRCCTRL_BW27 (1 << 1) +#define DP0_SRCCTRL_BW162 (0 << 1) +#define DP0_SRCCTRL_AUTOCORRECT BIT(0) +#define 
DP0_LTSTAT 0x06d0 +#define LT_LOOPDONE BIT(13) +#define LT_STATUS_MASK (0x1f << 8) +#define LT_CHANNEL1_EQ_BITS (DP_CHANNEL_EQ_BITS << 4) +#define LT_INTERLANE_ALIGN_DONE BIT(3) +#define LT_CHANNEL0_EQ_BITS (DP_CHANNEL_EQ_BITS) +#define DP0_SNKLTCHGREQ 0x06d4 +#define DP0_LTLOOPCTRL 0x06d8 +#define DP0_SNKLTCTRL 0x06e4 + +/* PHY */ +#define DP_PHY_CTRL 0x0800 +#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ +#define BGREN BIT(25) /* AUX PHY BGR Enable */ +#define PWR_SW_EN BIT(24) /* PHY Power Switch Enable */ +#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ +#define PHY_RDY BIT(16) /* PHY Main Channels Ready */ +#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ +#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ +#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ + +/* PLL */ +#define DP0_PLLCTRL 0x0900 +#define DP1_PLLCTRL 0x0904 /* not defined in DS */ +#define PXL_PLLCTRL 0x0908 +#define PLLUPDATE BIT(2) +#define PLLBYP BIT(1) +#define PLLEN BIT(0) +#define PXL_PLLPARAM 0x0914 +#define IN_SEL_REFCLK (0 << 14) +#define SYS_PLLPARAM 0x0918 +#define REF_FREQ_38M4 (0 << 8) /* 38.4 MHz */ +#define REF_FREQ_19M2 (1 << 8) /* 19.2 MHz */ +#define REF_FREQ_26M (2 << 8) /* 26 MHz */ +#define REF_FREQ_13M (3 << 8) /* 13 MHz */ +#define SYSCLK_SEL_LSCLK (0 << 4) +#define LSCLK_DIV_1 (0 << 0) +#define LSCLK_DIV_2 (1 << 0) + +/* Test & Debug */ +#define TSTCTL 0x0a00 +#define PLL_DBG 0x0a04 + +static bool tc_test_pattern; +module_param_named(test, tc_test_pattern, bool, 0644); + +struct tc_edp_link { + struct drm_dp_link base; + u8 assr; + int scrambler_dis; + int spread; + int coding8b10b; + u8 swing; + u8 preemp; +}; + +struct tc_data { + struct device *dev; + struct regmap *regmap; + struct drm_dp_aux aux; + + struct drm_bridge bridge; + struct drm_connector connector; + struct drm_panel *panel; + + /* link settings */ + struct tc_edp_link link; + + /* display edid */ + struct edid *edid; + /* current mode */ + struct drm_display_mode *mode; + + u32 rev; + u8 assr; + + struct gpio_desc *sd_gpio; + struct gpio_desc *reset_gpio; + struct clk *refclk; +}; + +static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a) +{ + return container_of(a, struct tc_data, aux); +} + +static inline struct tc_data *bridge_to_tc(struct drm_bridge *b) +{ + return container_of(b, struct tc_data, bridge); +} + +static inline struct tc_data *connector_to_tc(struct drm_connector *c) +{ + return container_of(c, struct tc_data, connector); +} + +/* Simple macros to avoid repeated error checks */ +#define tc_write(reg, var) \ + do { \ + ret = regmap_write(tc->regmap, reg, var); \ + if (ret) \ + goto err; \ + } while (0) +#define tc_read(reg, var) \ + do { \ + ret = regmap_read(tc->regmap, reg, var); \ + if (ret) \ + goto err; \ + } while (0) + +static inline int tc_poll_timeout(struct regmap *map, unsigned int addr, + unsigned int cond_mask, + unsigned int cond_value, + unsigned long sleep_us, u64 timeout_us) +{ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); + unsigned int val; + int ret; + + for (;;) { + ret = regmap_read(map, addr, &val); + if (ret) + break; + if ((val & cond_mask) == cond_value) + break; + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { + ret = regmap_read(map, addr, &val); + break; + } + if (sleep_us) + usleep_range((sleep_us >> 2) + 1, sleep_us); + } + return ret ?: (((val & cond_mask) == cond_value) ? 
0 : -ETIMEDOUT); +} + +static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms) +{ + return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0, + 1000, 1000 * timeout_ms); +} + +static int tc_aux_get_status(struct tc_data *tc, u8 *reply) +{ + int ret; + u32 value; + + ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value); + if (ret < 0) + return ret; + if (value & AUX_BUSY) { + if (value & AUX_TIMEOUT) { + dev_err(tc->dev, "i2c access timeout!\n"); + return -ETIMEDOUT; + } + return -EBUSY; + } + + *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT; + return 0; +} + +static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct tc_data *tc = aux_to_tc(aux); + size_t size = min_t(size_t, 8, msg->size); + u8 request = msg->request & ~DP_AUX_I2C_MOT; + u8 *buf = msg->buffer; + u32 tmp = 0; + int i = 0; + int ret; + + if (size == 0) + return 0; + + ret = tc_aux_wait_busy(tc, 100); + if (ret) + goto err; + + if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) { + /* Store data */ + while (i < size) { + if (request == DP_AUX_NATIVE_WRITE) + tmp = tmp | (buf[i] << (8 * (i & 0x3))); + else + tmp = (tmp << 8) | buf[i]; + i++; + if (((i % 4) == 0) || (i == size)) { + tc_write(DP0_AUXWDATA(i >> 2), tmp); + tmp = 0; + } + } + } else if (request != DP_AUX_I2C_READ && + request != DP_AUX_NATIVE_READ) { + return -EINVAL; + } + + /* Store address */ + tc_write(DP0_AUXADDR, msg->address); + /* Start transfer */ + tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request); + + ret = tc_aux_wait_busy(tc, 100); + if (ret) + goto err; + + ret = tc_aux_get_status(tc, &msg->reply); + if (ret) + goto err; + + if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) { + /* Read data */ + while (i < size) { + if ((i % 4) == 0) + tc_read(DP0_AUXRDATA(i >> 2), &tmp); + buf[i] = tmp & 0xff; + tmp = tmp >> 8; + i++; + } + } + + return size; +err: + return ret; +} + +static const char * const training_pattern1_errors[] = { + "No errors", + "Aux write error", + "Aux read error", + "Max voltage reached error", + "Loop counter expired error", + "res", "res", "res" +}; + +static const char * const training_pattern2_errors[] = { + "No errors", + "Aux write error", + "Aux read error", + "Clock recovery failed error", + "Loop counter expired error", + "res", "res", "res" +}; + +static u32 tc_srcctrl(struct tc_data *tc) +{ + /* + * No training pattern, skew lane 1 data by two LSCLK cycles with + * respect to lane 0 data, AutoCorrect Mode = 0 + */ + u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW; + + if (tc->link.scrambler_dis) + reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */ + if (tc->link.coding8b10b) + /* Enable 8/10B Encoder (TxData[19:16] not used) */ + reg |= DP0_SRCCTRL_EN810B; + if (tc->link.spread) + reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */ + if (tc->link.base.num_lanes == 2) + reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */ + if (tc->link.base.rate != 162000) + reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */ + return reg; +} + +static void tc_wait_pll_lock(struct tc_data *tc) +{ + /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */ + usleep_range(3000, 6000); +} + +static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock) +{ + int ret; + int i_pre, best_pre = 1; + int i_post, best_post = 1; + int div, best_div = 1; + int mul, best_mul = 1; + int delta, best_delta; + int ext_div[] = {1, 2, 3, 5, 7}; + int best_pixelclock = 0; + int vco_hi = 0; + + dev_dbg(tc->dev, "PLL: requested %d pixelclock, 
ref %d\n", pixelclock, + refclk); + best_delta = pixelclock; + /* Loop over all possible ext_divs, skipping invalid configurations */ + for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) { + /* + * refclk / ext_pre_div should be in the 1 to 200 MHz range. + * We don't allow any refclk > 200 MHz, only check lower bounds. + */ + if (refclk / ext_div[i_pre] < 1000000) + continue; + for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) { + for (div = 1; div <= 16; div++) { + u32 clk; + u64 tmp; + + tmp = pixelclock * ext_div[i_pre] * + ext_div[i_post] * div; + do_div(tmp, refclk); + mul = tmp; + + /* Check limits */ + if ((mul < 1) || (mul > 128)) + continue; + + clk = (refclk / ext_div[i_pre] / div) * mul; + /* + * refclk * mul / (ext_pre_div * pre_div) + * should be in the 150 to 650 MHz range + */ + if ((clk > 650000000) || (clk < 150000000)) + continue; + + clk = clk / ext_div[i_post]; + delta = clk - pixelclock; + + if (abs(delta) < abs(best_delta)) { + best_pre = i_pre; + best_post = i_post; + best_div = div; + best_mul = mul; + best_delta = delta; + best_pixelclock = clk; + } + } + } + } + if (best_pixelclock == 0) { + dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n", + pixelclock); + return -EINVAL; + } + + dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock, + best_delta); + dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk, + ext_div[best_pre], best_div, best_mul, ext_div[best_post]); + + /* if VCO >= 300 MHz */ + if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000) + vco_hi = 1; + /* see DS */ + if (best_div == 16) + best_div = 0; + if (best_mul == 128) + best_mul = 0; + + /* Power up PLL and switch to bypass */ + tc_write(PXL_PLLCTRL, PLLBYP | PLLEN); + + tc_write(PXL_PLLPARAM, + (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */ + (ext_div[best_pre] << 20) | /* External Pre-divider */ + (ext_div[best_post] << 16) | /* External Post-divider */ + IN_SEL_REFCLK | /* Use RefClk as PLL input */ + (best_div << 8) | /* Divider for PLL RefClk */ + (best_mul << 0)); /* Multiplier for PLL */ + + /* Force PLL parameter update and disable bypass */ + tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN); + + tc_wait_pll_lock(tc); + + return 0; +err: + return ret; +} + +static int tc_pxl_pll_dis(struct tc_data *tc) +{ + /* Enable PLL bypass, power down PLL */ + return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP); +} + +static int tc_stream_clock_calc(struct tc_data *tc) +{ + int ret; + /* + * If the Stream clock and Link Symbol clock are + * asynchronous with each other, the value of M changes over + * time. This way of generating link clock and stream + * clock is called Asynchronous Clock mode. The value M + * must change while the value N stays constant. The + * value of N in this Asynchronous Clock mode must be set + * to 2^15 or 32,768. 
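+ * (Editor's illustration with assumed numbers, not part of the
+ * original comment: on a 2.7 Gbps link the link symbol clock is
+ * 270 MHz, so a 148.5 MHz stream clock corresponds to
+ * M ~= 32768 * 148500 / 270000 ~= 18022, with the hardware re-tuning
+ * M over time while N stays fixed at 32768.)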
+ * + * LSCLK = 1/10 of high speed link clock + * + * f_STRMCLK = M/N * f_LSCLK + * M/N = f_STRMCLK / f_LSCLK + * + */ + tc_write(DP0_VIDMNGEN1, 32768); + + return 0; +err: + return ret; +} + +static int tc_aux_link_setup(struct tc_data *tc) +{ + unsigned long rate; + u32 value; + int ret; + + rate = clk_get_rate(tc->refclk); + switch (rate) { + case 38400000: + value = REF_FREQ_38M4; + break; + case 26000000: + value = REF_FREQ_26M; + break; + case 19200000: + value = REF_FREQ_19M2; + break; + case 13000000: + value = REF_FREQ_13M; + break; + default: + dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate); + return -EINVAL; + } + + /* Setup DP-PHY / PLL */ + value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; + tc_write(SYS_PLLPARAM, value); + + tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); + + /* + * Initially PLLs are in bypass. Force PLL parameter update, + * disable PLL bypass, enable PLL + */ + tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN); + tc_wait_pll_lock(tc); + + tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN); + tc_wait_pll_lock(tc); + + ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, + 1000); + if (ret == -ETIMEDOUT) { + dev_err(tc->dev, "Timeout waiting for PHY to become ready"); + return ret; + } else if (ret) + goto err; + + /* Setup AUX link */ + tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN | + (0x06 << 8) | /* Aux Bit Period Calculator Threshold */ + (0x3f << 0)); /* Aux Response Timeout Timer */ + + return 0; +err: + dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret); + return ret; +} + +static int tc_get_display_props(struct tc_data *tc) +{ + int ret; + /* temp buffer */ + u8 tmp[8]; + + /* Read DP Rx Link Capability */ + ret = drm_dp_link_probe(&tc->aux, &tc->link.base); + if (ret < 0) + goto err_dpcd_read; + if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) + goto err_dpcd_inval; + + ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); + if (ret < 0) + goto err_dpcd_read; + tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */ + + ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp); + if (ret < 0) + goto err_dpcd_read; + tc->link.coding8b10b = tmp[0] & BIT(0); + tc->link.scrambler_dis = 0; + /* read assr */ + ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp); + if (ret < 0) + goto err_dpcd_read; + tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; + + dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n", + tc->link.base.revision >> 4, tc->link.base.revision & 0x0f, + (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps", + tc->link.base.num_lanes, + (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? 
+ "enhanced" : "non-enhanced"); + dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b); + dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n", + tc->link.assr, tc->assr); + + return 0; + +err_dpcd_read: + dev_err(tc->dev, "failed to read DPCD: %d\n", ret); + return ret; +err_dpcd_inval: + dev_err(tc->dev, "invalid DPCD\n"); + return -EINVAL; +} + +static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) +{ + int ret; + int vid_sync_dly; + int max_tu_symbol; + + int left_margin = mode->htotal - mode->hsync_end; + int right_margin = mode->hsync_start - mode->hdisplay; + int hsync_len = mode->hsync_end - mode->hsync_start; + int upper_margin = mode->vtotal - mode->vsync_end; + int lower_margin = mode->vsync_start - mode->vdisplay; + int vsync_len = mode->vsync_end - mode->vsync_start; + + dev_dbg(tc->dev, "set mode %dx%d\n", + mode->hdisplay, mode->vdisplay); + dev_dbg(tc->dev, "H margin %d,%d sync %d\n", + left_margin, right_margin, hsync_len); + dev_dbg(tc->dev, "V margin %d,%d sync %d\n", + upper_margin, lower_margin, vsync_len); + dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); + + + /* LCD Ctl Frame Size */ + tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | + OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); + tc_write(HTIM01, (left_margin << 16) | /* H back porch */ + (hsync_len << 0)); /* Hsync */ + tc_write(HTIM02, (right_margin << 16) | /* H front porch */ + (mode->hdisplay << 0)); /* width */ + tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ + (vsync_len << 0)); /* Vsync */ + tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ + (mode->vdisplay << 0)); /* height */ + tc_write(VFUEN0, VFUEN); /* update settings */ + + /* Test pattern settings */ + tc_write(TSTCTL, + (120 << 24) | /* Red Color component value */ + (20 << 16) | /* Green Color component value */ + (99 << 8) | /* Blue Color component value */ + (1 << 4) | /* Enable I2C Filter */ + (2 << 0) | /* Color bar Mode */ + 0); + + /* DP Main Stream Attributes */ + vid_sync_dly = hsync_len + left_margin + mode->hdisplay; + tc_write(DP0_VIDSYNCDELAY, + (0x003e << 16) | /* thresh_dly */ + (vid_sync_dly << 0)); + + tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); + + tc_write(DP0_STARTVAL, + ((upper_margin + vsync_len) << 16) | + ((left_margin + hsync_len) << 0)); + + tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); + + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); + + tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | + DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); + + /* + * Recommended maximum number of symbols transferred in a transfer unit: + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, + * (output active video bandwidth in bytes)) + * Must be less than tu_size. 
+ */ + max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); + + return 0; +err: + return ret; +} + +static int tc_link_training(struct tc_data *tc, int pattern) +{ + const char * const *errors; + u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS | + DP0_SRCCTRL_AUTOCORRECT; + int timeout; + int retry; + u32 value; + int ret; + + if (pattern == DP_TRAINING_PATTERN_1) { + srcctrl |= DP0_SRCCTRL_TP1; + errors = training_pattern1_errors; + } else { + srcctrl |= DP0_SRCCTRL_TP2; + errors = training_pattern2_errors; + } + + /* Set DPCD 0x102 for Training Part 1 or 2 */ + tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern); + + tc_write(DP0_LTLOOPCTRL, + (0x0f << 28) | /* Defer Iteration Count */ + (0x0f << 24) | /* Loop Iteration Count */ + (0x0d << 0)); /* Loop Timer Delay */ + + retry = 5; + do { + /* Set DP0 Training Pattern */ + tc_write(DP0_SRCCTRL, srcctrl); + + /* Enable DP0 to start Link Training */ + tc_write(DP0CTL, DP_EN); + + /* wait */ + timeout = 1000; + do { + tc_read(DP0_LTSTAT, &value); + udelay(1); + } while ((!(value & LT_LOOPDONE)) && (--timeout)); + if (timeout == 0) { + dev_err(tc->dev, "Link training timeout!\n"); + } else { + int pattern = (value >> 11) & 0x3; + int error = (value >> 8) & 0x7; + + dev_dbg(tc->dev, + "Link training phase %d done after %d us: %s\n", + pattern, 1000 - timeout, errors[error]); + if (pattern == DP_TRAINING_PATTERN_1 && error == 0) + break; + if (pattern == DP_TRAINING_PATTERN_2) { + value &= LT_CHANNEL1_EQ_BITS | + LT_INTERLANE_ALIGN_DONE | + LT_CHANNEL0_EQ_BITS; + /* in case of two lanes */ + if ((tc->link.base.num_lanes == 2) && + (value == (LT_CHANNEL1_EQ_BITS | + LT_INTERLANE_ALIGN_DONE | + LT_CHANNEL0_EQ_BITS))) + break; + /* in case of one lane */ + if ((tc->link.base.num_lanes == 1) && + (value == (LT_INTERLANE_ALIGN_DONE | + LT_CHANNEL0_EQ_BITS))) + break; + } + } + /* restart */ + tc_write(DP0CTL, 0); + usleep_range(10, 20); + } while (--retry); + if (retry == 0) { + dev_err(tc->dev, "Failed to finish training phase %d\n", + pattern); + } + + return 0; +err: + return ret; +} + +static int tc_main_link_setup(struct tc_data *tc) +{ + struct drm_dp_aux *aux = &tc->aux; + struct device *dev = tc->dev; + unsigned int rate; + u32 dp_phy_ctrl; + int timeout; + bool aligned; + bool ready; + u32 value; + int ret; + u8 tmp[8]; + + /* display mode should be set at this point */ + if (!tc->mode) + return -EINVAL; + + /* from excel file - DP0_SrcCtrl */ + tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | + DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | + DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); + /* from excel file - DP1_SrcCtrl */ + tc_write(0x07a0, 0x00003083); + + rate = clk_get_rate(tc->refclk); + switch (rate) { + case 38400000: + value = REF_FREQ_38M4; + break; + case 26000000: + value = REF_FREQ_26M; + break; + case 19200000: + value = REF_FREQ_19M2; + break; + case 13000000: + value = REF_FREQ_13M; + break; + default: + return -EINVAL; + } + value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; + tc_write(SYS_PLLPARAM, value); + /* Setup Main Link */ + dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; + tc_write(DP_PHY_CTRL, dp_phy_ctrl); + msleep(100); + + /* PLL setup */ + tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN); + tc_wait_pll_lock(tc); + + tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN); + tc_wait_pll_lock(tc); + + /* PXL PLL setup */ + if (tc_test_pattern) { + ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk), + 1000 * tc->mode->clock); + if
(ret) + goto err; + } + + /* Reset/Enable Main Links */ + dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST; + tc_write(DP_PHY_CTRL, dp_phy_ctrl); + usleep_range(100, 200); + dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST); + tc_write(DP_PHY_CTRL, dp_phy_ctrl); + + timeout = 1000; + do { + tc_read(DP_PHY_CTRL, &value); + udelay(1); + } while ((!(value & PHY_RDY)) && (--timeout)); + + if (timeout == 0) { + dev_err(dev, "timeout waiting for phy to become ready\n"); + return -ETIMEDOUT; + } + + /* Set misc: 8 bits per color */ + ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8); + if (ret) + goto err; + + /* + * ASSR mode + * on the TC358767 side ASSR is configured through a strap pin and + * apparently cannot be changed from software + * + * check whether tc is configured for the same mode as the display + */ + if (tc->assr != tc->link.assr) { + dev_dbg(dev, "Trying to set display to ASSR: %d\n", + tc->assr); + /* try to set ASSR on display side */ + tmp[0] = tc->assr; + ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]); + if (ret < 0) + goto err_dpcd_read; + /* read back */ + ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp); + if (ret < 0) + goto err_dpcd_read; + + if (tmp[0] != tc->assr) { + dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n", + tc->assr); + /* trying with disabled scrambler */ + tc->link.scrambler_dis = 1; + } + } + + /* Setup Link & DPRx Config for Training */ + ret = drm_dp_link_configure(aux, &tc->link.base); + if (ret < 0) + goto err_dpcd_write; + + /* DOWNSPREAD_CTRL */ + tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00; + /* MAIN_LINK_CHANNEL_CODING_SET */ + tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00; + ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2); + if (ret < 0) + goto err_dpcd_write; + + ret = tc_link_training(tc, DP_TRAINING_PATTERN_1); + if (ret) + goto err; + + ret = tc_link_training(tc, DP_TRAINING_PATTERN_2); + if (ret) + goto err; + + /* Clear DPCD 0x102 */ + /* Note: cannot use the DP0_SNKLTCTRL (0x06E4) shortcut */ + tmp[0] = tc->link.scrambler_dis ?
DP_LINK_SCRAMBLING_DISABLE : 0x00; + ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]); + if (ret < 0) + goto err_dpcd_write; + + /* Clear Training Pattern, set AutoCorrect Mode = 1 */ + tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT); + + /* Wait */ + timeout = 100; + do { + udelay(1); + /* Read DPCD 0x202-0x207 */ + ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); + if (ret < 0) + goto err_dpcd_read; + ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ + DP_CHANNEL_EQ_BITS)); /* Lane0 */ + aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; + } while ((--timeout) && !(ready && aligned)); + + if (timeout == 0) { + /* Read DPCD 0x200-0x201 */ + ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); + if (ret < 0) + goto err_dpcd_read; + dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); + dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", + tmp[1]); + dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]); + dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n", + tmp[4]); + dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]); + dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", + tmp[6]); + + if (!ready) + dev_err(dev, "Lane0/1 not ready\n"); + if (!aligned) + dev_err(dev, "Lane0/1 not aligned\n"); + return -EAGAIN; + } + + ret = tc_set_video_mode(tc, tc->mode); + if (ret) + goto err; + + /* Set M/N */ + ret = tc_stream_clock_calc(tc); + if (ret) + goto err; + + return 0; +err_dpcd_read: + dev_err(tc->dev, "Failed to read DPCD: %d\n", ret); + return ret; +err_dpcd_write: + dev_err(tc->dev, "Failed to write DPCD: %d\n", ret); +err: + return ret; +} + +static int tc_main_link_stream(struct tc_data *tc, int state) +{ + int ret; + u32 value; + + dev_dbg(tc->dev, "stream: %d\n", state); + + if (state) { + value = VID_MN_GEN | DP_EN; + if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + value |= EF_EN; + tc_write(DP0CTL, value); + /* + * VID_EN assertion should be delayed by at least N * LSCLK + * cycles from the time VID_MN_GEN is enabled in order to + * generate stable values for VID_M. LSCLK is 270 MHz or + * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(), + * so a delay of at least 203 us should suffice. 
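+ * (Worked out: the worst case is the slower 162 MHz LSCLK, where
+ * 32768 cycles / 162 MHz comes to roughly 202.3 us, rounded up to
+ * 203 us.)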
+ */ + usleep_range(500, 1000); + value |= VID_EN; + tc_write(DP0CTL, value); + /* Set input interface */ + value = DP0_AUDSRC_NO_INPUT; + if (tc_test_pattern) + value |= DP0_VIDSRC_COLOR_BAR; + else + value |= DP0_VIDSRC_DPI_RX; + tc_write(SYSCTRL, value); + } else { + tc_write(DP0CTL, 0); + } + + return 0; +err: + return ret; +} + +static enum drm_connector_status +tc_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void tc_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct tc_data *tc = bridge_to_tc(bridge); + + drm_panel_prepare(tc->panel); +} + +static void tc_bridge_enable(struct drm_bridge *bridge) +{ + struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + ret = tc_main_link_setup(tc); + if (ret < 0) { + dev_err(tc->dev, "main link setup error: %d\n", ret); + return; + } + + ret = tc_main_link_stream(tc, 1); + if (ret < 0) { + dev_err(tc->dev, "main link stream start error: %d\n", ret); + return; + } + + drm_panel_enable(tc->panel); +} + +static void tc_bridge_disable(struct drm_bridge *bridge) +{ + struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + drm_panel_disable(tc->panel); + + ret = tc_main_link_stream(tc, 0); + if (ret < 0) + dev_err(tc->dev, "main link stream stop error: %d\n", ret); +} + +static void tc_bridge_post_disable(struct drm_bridge *bridge) +{ + struct tc_data *tc = bridge_to_tc(bridge); + + drm_panel_unprepare(tc->panel); +} + +static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adj) +{ + /* Fixup sync polarities, both hsync and vsync are active low */ + adj->flags = mode->flags; + adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); + adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + + return true; +} + +static int tc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* Accept any mode */ + return MODE_OK; +} + +static void tc_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adj) +{ + struct tc_data *tc = bridge_to_tc(bridge); + + tc->mode = mode; +} + +static int tc_connector_get_modes(struct drm_connector *connector) +{ + struct tc_data *tc = connector_to_tc(connector); + struct edid *edid; + unsigned int count; + + if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { + count = tc->panel->funcs->get_modes(tc->panel); + if (count > 0) + return count; + } + + edid = drm_get_edid(connector, &tc->aux.ddc); + + kfree(tc->edid); + tc->edid = edid; + if (!edid) + return 0; + + drm_mode_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + + return count; +} + +static void tc_connector_set_polling(struct tc_data *tc, + struct drm_connector *connector) +{ + /* TODO: add support for HPD */ + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; +} + +static struct drm_encoder * +tc_connector_best_encoder(struct drm_connector *connector) +{ + struct tc_data *tc = connector_to_tc(connector); + + return tc->bridge.encoder; +} + +static const struct drm_connector_helper_funcs tc_connector_helper_funcs = { + .get_modes = tc_connector_get_modes, + .mode_valid = tc_connector_mode_valid, + .best_encoder = tc_connector_best_encoder, +}; + +static void tc_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct 
drm_connector_funcs tc_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = tc_connector_detect, + .destroy = tc_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int tc_bridge_attach(struct drm_bridge *bridge) +{ + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + struct tc_data *tc = bridge_to_tc(bridge); + struct drm_device *drm = bridge->dev; + int ret; + + /* Create eDP connector */ + drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); + ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + if (ret) + return ret; + + if (tc->panel) + drm_panel_attach(tc->panel, &tc->connector); + + drm_display_info_set_bus_formats(&tc->connector.display_info, + &bus_format, 1); + drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder); + + return 0; +} + +static const struct drm_bridge_funcs tc_bridge_funcs = { + .attach = tc_bridge_attach, + .mode_set = tc_bridge_mode_set, + .pre_enable = tc_bridge_pre_enable, + .enable = tc_bridge_enable, + .disable = tc_bridge_disable, + .post_disable = tc_bridge_post_disable, + .mode_fixup = tc_bridge_mode_fixup, +}; + +static bool tc_readable_reg(struct device *dev, unsigned int reg) +{ + return reg != SYSCTRL; +} + +static const struct regmap_range tc_volatile_ranges[] = { + regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS), + regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ), + regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL), + regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL), + regmap_reg_range(VFUEN0, VFUEN0), +}; + +static const struct regmap_access_table tc_volatile_table = { + .yes_ranges = tc_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges), +}; + +static bool tc_writeable_reg(struct device *dev, unsigned int reg) +{ + return (reg != TC_IDREG) && + (reg != DP0_LTSTAT) && + (reg != DP0_SNKLTCHGREQ); +} + +static const struct regmap_config tc_regmap_config = { + .name = "tc358767", + .reg_bits = 16, + .val_bits = 32, + .reg_stride = 4, + .max_register = PLL_DBG, + .cache_type = REGCACHE_RBTREE, + .readable_reg = tc_readable_reg, + .volatile_table = &tc_volatile_table, + .writeable_reg = tc_writeable_reg, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *ep; + struct tc_data *tc; + int ret; + + tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->dev = dev; + + /* port@2 is the output port */ + ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + if (!remote) { + dev_warn(dev, "endpoint %s not connected\n", + ep->full_name); + of_node_put(ep); + return -ENODEV; + } + of_node_put(ep); + tc->panel = of_drm_find_panel(remote); + if (tc->panel) { + dev_dbg(dev, "found panel %s\n", remote->full_name); + } else { + dev_dbg(dev, "waiting for panel %s\n", + remote->full_name); + of_node_put(remote); + return -EPROBE_DEFER; + } + of_node_put(remote); + } + + /* Shut down GPIO is optional */ + tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH); + if (IS_ERR(tc->sd_gpio)) + return PTR_ERR(tc->sd_gpio); + + if (tc->sd_gpio) 
{ + gpiod_set_value_cansleep(tc->sd_gpio, 0); + usleep_range(5000, 10000); + } + + /* Reset GPIO is optional */ + tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(tc->reset_gpio)) + return PTR_ERR(tc->reset_gpio); + + if (tc->reset_gpio) { + gpiod_set_value_cansleep(tc->reset_gpio, 1); + usleep_range(5000, 10000); + } + + tc->refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(tc->refclk)) { + ret = PTR_ERR(tc->refclk); + dev_err(dev, "Failed to get refclk: %d\n", ret); + return ret; + } + + tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config); + if (IS_ERR(tc->regmap)) { + ret = PTR_ERR(tc->regmap); + dev_err(dev, "Failed to initialize regmap: %d\n", ret); + return ret; + } + + ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev); + if (ret) { + dev_err(tc->dev, "can not read device ID: %d\n", ret); + return ret; + } + + if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) { + dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev); + return -EINVAL; + } + + tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */ + + ret = tc_aux_link_setup(tc); + if (ret) + return ret; + + /* Register DP AUX channel */ + tc->aux.name = "TC358767 AUX i2c adapter"; + tc->aux.dev = tc->dev; + tc->aux.transfer = tc_aux_transfer; + ret = drm_dp_aux_register(&tc->aux); + if (ret) + return ret; + + ret = tc_get_display_props(tc); + if (ret) + goto err_unregister_aux; + + tc_connector_set_polling(tc, &tc->connector); + + tc->bridge.funcs = &tc_bridge_funcs; + tc->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&tc->bridge); + if (ret) { + dev_err(dev, "Failed to add drm_bridge: %d\n", ret); + goto err_unregister_aux; + } + + i2c_set_clientdata(client, tc); + + return 0; +err_unregister_aux: + drm_dp_aux_unregister(&tc->aux); + return ret; +} + +static int tc_remove(struct i2c_client *client) +{ + struct tc_data *tc = i2c_get_clientdata(client); + + drm_bridge_remove(&tc->bridge); + drm_dp_aux_unregister(&tc->aux); + + tc_pxl_pll_dis(tc); + + return 0; +} + +static const struct i2c_device_id tc358767_i2c_ids[] = { + { "tc358767", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids); + +static const struct of_device_id tc358767_of_ids[] = { + { .compatible = "toshiba,tc358767", }, + { } +}; +MODULE_DEVICE_TABLE(of, tc358767_of_ids); + +static struct i2c_driver tc358767_driver = { + .driver = { + .name = "tc358767", + .of_match_table = tc358767_of_ids, + }, + .id_table = tc358767_i2c_ids, + .probe = tc_probe, + .remove = tc_remove, +}; +module_i2c_driver(tc358767_driver); + +MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>"); +MODULE_DESCRIPTION("tc358767 eDP encoder driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index 9864559e5fb9..04b3c161dfae 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig @@ -1,11 +1,7 @@ config DRM_CIRRUS_QEMU tristate "Cirrus driver for QEMU emulated device" depends on DRM && PCI - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM help This is a KMS driver for emulated cirrus device in qemu. 
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index dc83f69da6f1..b05f7eae32ce 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -142,7 +142,7 @@ static struct drm_driver driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = cirrus_gem_free_object, + .gem_free_object_unlocked = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, .dumb_map_offset = cirrus_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 32d32c5b7b17..80446e2d3ab6 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -17,8 +17,8 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb); - if (cirrus_fb->obj) - drm_gem_object_unreference_unlocked(cirrus_fb->obj); + + drm_gem_object_unreference_unlocked(cirrus_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index d3d8d7bfcc57..17c915d9a03e 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); int i; - if (size != CIRRUS_LUT_SIZE) - return; - - for (i = 0; i < CIRRUS_LUT_SIZE; i++) { + for (i = 0; i < size; i++) { cirrus_crtc->lut_r[i] = red[i]; cirrus_crtc->lut_g[i] = green[i]; cirrus_crtc->lut_b[i] = blue[i]; } cirrus_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 6768b7b1af32..1cc9ee607128 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int cirrus_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void cirrus_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = { .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, .init_mem_type = cirrus_bo_init_mem_type, .evict_flags = cirrus_bo_evict_flags, - .move = cirrus_bo_move, + .move = NULL, .verify_access = cirrus_bo_verify_access, .io_mem_reserve = &cirrus_ttm_io_mem_reserve, .io_mem_free = &cirrus_ttm_io_mem_free, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 9bb99e274d23..8d2f111fa113 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -33,6 +33,20 @@ #include "drm_crtc_internal.h" +static void crtc_commit_free(struct kref *kref) +{ + struct drm_crtc_commit *commit = + container_of(kref, struct drm_crtc_commit, ref); + + kfree(commit); +} + +void drm_crtc_commit_put(struct drm_crtc_commit 
*commit) +{ + kref_put(&commit->ref, crtc_commit_free); +} +EXPORT_SYMBOL(drm_crtc_commit_put); + /** * drm_atomic_state_default_release - * release memory initialized by drm_atomic_state_init @@ -44,11 +58,8 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) { kfree(state->connectors); - kfree(state->connector_states); kfree(state->crtcs); - kfree(state->crtc_states); kfree(state->planes); - kfree(state->plane_states); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->crtcs), GFP_KERNEL); if (!state->crtcs) goto fail; - state->crtc_states = kcalloc(dev->mode_config.num_crtc, - sizeof(*state->crtc_states), GFP_KERNEL); - if (!state->crtc_states) - goto fail; state->planes = kcalloc(dev->mode_config.num_total_plane, sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; - state->plane_states = kcalloc(dev->mode_config.num_total_plane, - sizeof(*state->plane_states), GFP_KERNEL); - if (!state->plane_states) - goto fail; state->dev = dev; @@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; + struct drm_connector *connector = state->connectors[i].ptr; if (!connector) continue; connector->funcs->atomic_destroy_state(connector, - state->connector_states[i]); - state->connectors[i] = NULL; - state->connector_states[i] = NULL; + state->connectors[i].state); + state->connectors[i].ptr = NULL; + state->connectors[i].state = NULL; drm_connector_unreference(connector); } for (i = 0; i < config->num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; + struct drm_crtc *crtc = state->crtcs[i].ptr; if (!crtc) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtc_states[i]); - state->crtcs[i] = NULL; - state->crtc_states[i] = NULL; + state->crtcs[i].state); + + if (state->crtcs[i].commit) { + kfree(state->crtcs[i].commit->event); + state->crtcs[i].commit->event = NULL; + drm_crtc_commit_put(state->crtcs[i].commit); + } + + state->crtcs[i].commit = NULL; + state->crtcs[i].ptr = NULL; + state->crtcs[i].state = NULL; } for (i = 0; i < config->num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + struct drm_plane *plane = state->planes[i].ptr; if (!plane) continue; plane->funcs->atomic_destroy_state(plane, - state->plane_states[i]); - state->planes[i] = NULL; - state->plane_states[i] = NULL; + state->planes[i].state); + state->planes[i].ptr = NULL; + state->planes[i].state = NULL; } } EXPORT_SYMBOL(drm_atomic_state_default_clear); @@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtc_states[index] = crtc_state; - state->crtcs[index] = crtc; + state->crtcs[index].state = crtc_state; + state->crtcs[index].ptr = crtc; crtc_state->state = state; DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", @@ -393,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob, if (old_blob == new_blob) return; - if (old_blob) - drm_property_unreference_blob(old_blob); + drm_property_unreference_blob(old_blob); if (new_blob) drm_property_reference_blob(new_blob); *blob = new_blob; @@ -632,8 +642,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->plane_states[index] = plane_state; - 
state->planes[index] = plane; + state->planes[index].state = plane_state; + state->planes[index].ptr = plane; plane_state->state = state; DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", @@ -897,8 +907,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, index = drm_connector_index(connector); if (index >= state->num_connector) { - struct drm_connector **c; - struct drm_connector_state **cs; + struct __drm_connnectors_state *c; int alloc = max(index + 1, config->num_connector); c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); @@ -909,26 +918,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, memset(&state->connectors[state->num_connector], 0, sizeof(*state->connectors) * (alloc - state->num_connector)); - cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL); - if (!cs) - return ERR_PTR(-ENOMEM); - - state->connector_states = cs; - memset(&state->connector_states[state->num_connector], 0, - sizeof(*state->connector_states) * (alloc - state->num_connector)); state->num_connector = alloc; } - if (state->connector_states[index]) - return state->connector_states[index]; + if (state->connectors[index].state) + return state->connectors[index].state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_reference(connector); - state->connector_states[index] = connector_state; - state->connectors[index] = connector; + state->connectors[index].state = connector_state; + state->connectors[index].ptr = connector; connector_state->state = state; DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", @@ -1457,7 +1459,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); */ static struct drm_pending_vblank_event *create_vblank_event( - struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) + struct drm_device *dev, struct drm_file *file_priv, + struct fence *fence, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; int ret; @@ -1470,12 +1473,17 @@ static struct drm_pending_vblank_event *create_vblank_event( e->event.base.length = sizeof(e->event); e->event.user_data = user_data; - ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); - if (ret) { - kfree(e); - return NULL; + if (file_priv) { + ret = drm_event_reserve_init(dev, file_priv, &e->base, + &e->event.base); + if (ret) { + kfree(e); + return NULL; + } } + e->base.fence = fence; + return e; } @@ -1715,7 +1723,8 @@ retry: for_each_crtc_in_state(state, crtc, crtc_state, i) { struct drm_pending_vblank_event *e; - e = create_vblank_event(dev, file_priv, arg->user_data); + e = create_vblank_event(dev, file_priv, NULL, + arg->user_data); if (!e) { ret = -ENOMEM; goto out; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ddfa0d120e39..de7fddce3cef 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = funcs->atomic_best_encoder(connector, conn_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (new_encoder) { if (encoder_mask & (1 << drm_encoder_index(new_encoder))) { @@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = 
funcs->atomic_best_encoder(connector, connector_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; + if (!crtc_state->enable) + continue; + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) continue; @@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state) * times for the same update, e.g. when the ->atomic_check functions depend upon * the adjusted dotclock for fifo space allocation and watermark computation. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * It also sets crtc_state->planes_changed to indicate that a crtc has * updated planes. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(crtc, state->crtc_states[i]); + ret = funcs->atomic_check(crtc, crtc_state); if (ret) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name); @@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes); * ->atomic_check functions depend upon an updated adjusted_mode.clock to * e.g. properly compute watermarks. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int drm_atomic_helper_check(struct drm_device *dev, @@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); /** - * drm_atomic_helper_commit - commit validated state object - * @dev: DRM device - * @state: the driver state object - * @nonblocking: whether nonblocking behavior is requested. + * drm_atomic_helper_commit_tail - commit atomic update to hardware + * @state: new modeset state to be committed * - * This function commits a with drm_atomic_helper_check() pre-validated state - * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement nonblocking commits. + * This is the default implementation for the ->atomic_commit_tail() hook of the + * &drm_mode_config_helper_funcs vtable. * - * Note that right now this function does not support nonblocking commits, hence - * driver writers must implement their own version for now. Also note that the - * default ordering of how the various stages are called is to match the legacy - * modeset helper library closest. One peculiarity of that is that it doesn't - * mesh well with runtime PM at all. + * Note that the default ordering of how the various stages are called is to + * match the legacy modeset helper library closest. One peculiarity of that is + * that it doesn't mesh well with runtime PM at all. + * + * For drivers supporting runtime PM the recommended sequence is instead :: * * drm_atomic_helper_commit_modeset_disables(dev, state); * @@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * * drm_atomic_helper_commit_planes(dev, state, true); * - * See the kerneldoc entries for these three functions for more details. + * for committing the atomic update to hardware.
See the kerneldoc entries for + these three functions for more details. + */ +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, false); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_tail); + +static void commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config_helper_funcs *funcs; + + funcs = dev->mode_config.helper_private; + + drm_atomic_helper_wait_for_fences(dev, state); + + drm_atomic_helper_wait_for_dependencies(state); + + if (funcs && funcs->atomic_commit_tail) + funcs->atomic_commit_tail(state); + else + drm_atomic_helper_commit_tail(state); + + drm_atomic_helper_commit_cleanup_done(state); + + drm_atomic_state_free(state); +} + +static void commit_work(struct work_struct *work) +{ + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + commit_tail(state); +} + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @nonblock: whether nonblocking behavior is requested. + * + * This function commits a state object pre-validated with + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer + * reservation fails. This function implements nonblocking commits, using + * drm_atomic_helper_setup_commit() and related functions. + * + * Committing the actual hardware state is done through the + * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable, + * or its default implementation drm_atomic_helper_commit_tail(). * - * RETURNS + * RETURNS: * Zero for success or -errno. */ int drm_atomic_helper_commit(struct drm_device *dev, @@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev, { int ret; - if (nonblock) - return -EBUSY; + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, commit_work); ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * the software side now. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab @@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev, * update. Which is important since compositors need to figure out the * composition of the next frame right after having submitted the * current layout. + * + * NOTE: Commit work has multiple phases, first hardware commit, then + * cleanup. We want them to overlap, hence need system_unbound_wq to + * make sure work items don't artificially stall on each other.
*/ - drm_atomic_helper_wait_for_fences(dev, state); - - drm_atomic_helper_commit_modeset_disables(dev, state); - - drm_atomic_helper_commit_planes(dev, state, false); - - drm_atomic_helper_commit_modeset_enables(dev, state); - - drm_atomic_helper_wait_for_vblanks(dev, state); - - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + commit_tail(state); return 0; } @@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); /** * DOC: implementing nonblocking commit * - * For now the atomic helpers don't support nonblocking commit directly. If - * there is real need it could be added though, using the dma-buf fence - * infrastructure for generic synchronization with outstanding rendering. - * - * For now drivers have to implement nonblocking commit themselves, with the - * following sequence being the recommended one: + * Nonblocking atomic commits have to be implemented in the following sequence: * * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function * which commit needs to call which can fail, so we want to run it first and @@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * cancelled updates. Note that it is important to ensure that the framebuffer * cleanup is still done when cancelling. * - * For sufficient parallelism it is recommended to have a work item per crtc - * (for updates which don't touch global state) and a global one. Then we only - * need to synchronize with the crtc work items for changed crtcs and the global - * work item, which allows nice concurrent updates on disjoint sets of crtcs. + * Asynchronous workers need to have sufficient parallelism to be able to run + * different atomic commits on different CRTCs in parallel. The simplest way to + * achieve this is by running them on the &system_unbound_wq work queue. Note + * that drivers are not required to split up atomic commits and run an + * individual commit in parallel - userspace is supposed to do that if it cares. + * But it might be beneficial to do that for modesets, since those necessarily + * must be done as one global operation, and enabling or disabling a CRTC can + * take a long time. But even that is not required. * * 3. The software state is updated synchronously with * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset @@ -1232,8 +1297,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and * then cleaning up the framebuffers after the old framebuffer is no longer * being displayed. + * + * The above scheme is implemented in the atomic helper libraries in + * drm_atomic_helper_commit() using a bunch of helper functions. See + * drm_atomic_helper_setup_commit() for a starting point. */ +static int stall_checks(struct drm_crtc *crtc, bool nonblock) +{ + struct drm_crtc_commit *commit, *stall_commit = NULL; + bool completed = true; + int i; + long ret = 0; + + spin_lock(&crtc->commit_lock); + i = 0; + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + if (i == 0) { + completed = try_wait_for_completion(&commit->flip_done); + /* Userspace is not allowed to get ahead of the previous + * commit with nonblocking ones.
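+ * Completion of the previous commit's flip_done thus acts as a throttle here: at most one nonblocking commit may be pending a flip per CRTC, otherwise -EBUSY is returned.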
*/ + if (!completed && nonblock) { + spin_unlock(&crtc->commit_lock); + return -EBUSY; + } + } else if (i == 1) { + stall_commit = commit; + drm_crtc_commit_get(stall_commit); + break; + } + + i++; + } + spin_unlock(&crtc->commit_lock); + + if (!stall_commit) + return 0; + + /* We don't want to let commits get ahead of cleanup work too much, + * stalling on 2nd previous commit means triple-buffer won't ever stall. + */ + ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(stall_commit); + + return ret < 0 ? ret : 0; +} + +/** + * drm_atomic_helper_setup_commit - setup possibly nonblocking commit + * @state: new modeset state to be committed + * @nonblock: whether nonblocking behavior is requested. + * + * This function prepares @state to be used by the atomic helper's support for + * nonblocking commits. Drivers using the nonblocking commit infrastructure + * should always call this function from their ->atomic_commit hook. + * + * To be able to use this support drivers need to use a few more helper + * functions. drm_atomic_helper_wait_for_dependencies() must be called before + * actually committing the hardware state, and for nonblocking commits this call + * must be placed in the async worker. See also drm_atomic_helper_swap_state() + * and its stall parameter, for when a driver's commit hooks look at the + * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly. + * + * Completion of the hardware commit step must be signalled using + * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed + * to read or change any permanent software or hardware modeset state. The only + * exception is state protected by other means than &drm_modeset_lock locks. + * Only the free standing @state with pointers to the old state structures can + * be inspected, e.g. to clean up old buffers using + * drm_atomic_helper_cleanup_planes(). + * + * At the very end, before cleaning up @state drivers must call + * drm_atomic_helper_commit_cleanup_done(). + * + * This is all implemented in drm_atomic_helper_commit(), giving drivers a + * complete and easy-to-use default implementation of the atomic_commit() hook. + * + * The tracking of asynchronously executed and still pending commits is done + * using the core structure &drm_crtc_commit. + * + * By default there's no need to clean up resources allocated by this function + * explicitly: drm_atomic_state_default_clear() will take care of that + * automatically. + * + * Returns: + * + * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, + * -ENOMEM on allocation failures and -EINTR when a signal is pending.
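+ *
+ * As a rough illustrative sketch only (not a verbatim recipe; error
+ * handling and the driver's own hardware programming are elided), a
+ * driver-specific nonblocking ->atomic_commit built on these helpers
+ * would be shaped like this::
+ *
+ *	ret = drm_atomic_helper_setup_commit(state, nonblock);
+ *	...
+ *	drm_atomic_helper_swap_state(state, true);
+ *	...
+ *	(and in the asynchronous worker:)
+ *	drm_atomic_helper_wait_for_dependencies(state);
+ *	(commit the state to hardware and send out events)
+ *	drm_atomic_helper_commit_hw_done(state);
+ *	(wait for vblanks and clean up old framebuffers)
+ *	drm_atomic_helper_commit_cleanup_done(state);
+ *	drm_atomic_state_free(state);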
+ */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i, ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (!commit) + return -ENOMEM; + + init_completion(&commit->flip_done); + init_completion(&commit->hw_done); + init_completion(&commit->cleanup_done); + INIT_LIST_HEAD(&commit->commit_entry); + kref_init(&commit->ref); + commit->crtc = crtc; + + state->crtcs[i].commit = commit; + + ret = stall_checks(crtc, nonblock); + if (ret) + return ret; + + /* Drivers only send out events when at least either current or + * new CRTC state is active. Complete right away if everything + * stays off. */ + if (!crtc->state->active && !crtc_state->active) { + complete_all(&commit->flip_done); + continue; + } + + /* Legacy cursor updates are fully unsynced. */ + if (state->legacy_cursor_update) { + complete_all(&commit->flip_done); + continue; + } + + if (!crtc_state->event) { + commit->event = kzalloc(sizeof(*commit->event), + GFP_KERNEL); + if (!commit->event) + return -ENOMEM; + + crtc_state->event = commit->event; + } + + crtc_state->event->base.completion = &commit->flip_done; + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_helper_setup_commit); + + +static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) +{ + struct drm_crtc_commit *commit; + int i = 0; + + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + /* skip the first entry, that's the current commit */ + if (i == 1) + return commit; + i++; + } + + return NULL; +} + +/** + * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits + * @state: new modeset state to be committed + * + * This function waits for all preceding commits that touch the same CRTC as + * @state to both be committed to the hardware (as signalled by + * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled + * by calling drm_crtc_send_vblank_event on the event member of + * &drm_crtc_state). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = preceeding_commit(crtc); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; + + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + + /* Currently no support for overwriting flips, hence + * stall for previous one to execute completely. */ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(commit); + } +} +EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); + +/** + * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step + * @state: new modeset state to be committed + * + * This function is used to signal completion of the hardware commit step. After + * this step the driver is not allowed to read or change any permanent software + * or hardware modeset state.
The only exception is state protected by other + * means than &drm_modeset_lock locks. + * + * Drivers should try to postpone any expensive or delayed cleanup work after + * this function is called. + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (!commit) + continue; + + /* backend must have consumed any event by now */ + WARN_ON(crtc->state->event); + spin_lock(&crtc->commit_lock); + complete_all(&commit->hw_done); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); + +/** + * drm_atomic_helper_commit_cleanup_done - signal completion of commit + * @state: new modeset state to be committed + * + * This signals completion of the atomic update @state, including any cleanup + * work. If used, it must be called right before calling + * drm_atomic_state_free(). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (WARN_ON(!commit)) + continue; + + spin_lock(&crtc->commit_lock); + complete_all(&commit->cleanup_done); + WARN_ON(!try_wait_for_completion(&commit->hw_done)); + + /* commit_list borrows our reference, need to remove before we + * clean up our drm_atomic_state. But only after it actually + * completed, otherwise subsequent commits won't stall properly. */ + if (try_wait_for_completion(&commit->flip_done)) + goto del_commit; + + spin_unlock(&crtc->commit_lock); + + /* We must wait for the vblank event to signal our completion + * before releasing our reference, since the vblank work does + * not hold a reference of its own. 
*/ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + spin_lock(&crtc->commit_lock); +del_commit: + list_del(&commit->commit_entry); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); + /** * drm_atomic_helper_prepare_planes - prepare plane resources before commit * @dev: DRM device @@ -1249,16 +1616,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state) { - int nplanes = dev->mode_config.num_total_plane; - int ret, i; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int ret, i, j; - for (i = 0; i < nplanes; i++) { + for_each_plane_in_state(state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; funcs = plane->helper_private; @@ -1272,12 +1635,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, return 0; fail: - for (i--; i >= 0; i--) { + for_each_plane_in_state(state, plane, plane_state, j) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - if (!plane) + if (j >= i) continue; funcs = plane->helper_private; @@ -1537,8 +1898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); /** * drm_atomic_helper_swap_state - store atomic state into current sw state - * @dev: DRM device * @state: atomic state + * @stall: stall for proceeding commits * * This function stores the atomic state into the current state pointers in all * driver objects. It should be called after all failing steps have been done @@ -1559,42 +1920,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); * * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 * contains the old state. Also do any other cleanup required with that state. + * + * @stall must be set when nonblocking commits for this driver directly access + * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the + * current atomic helpers this is almost always the case, since the helpers + * don't pass the right state structures to the callbacks. 
*/ -void drm_atomic_helper_swap_state(struct drm_device *dev, - struct drm_atomic_state *state) +void drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall) { int i; + long ret; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc_commit *commit; + + if (stall) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; - for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; - - if (!connector) - continue; + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + drm_crtc_commit_put(commit); + } + } + for_each_connector_in_state(state, connector, conn_state, i) { connector->state->state = state; - swap(state->connector_states[i], connector->state); + swap(state->connectors[i].state, connector->state); connector->state->state = NULL; } - for (i = 0; i < dev->mode_config.num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { crtc->state->state = state; - swap(state->crtc_states[i], crtc->state); + swap(state->crtcs[i].state, crtc->state); crtc->state->state = NULL; - } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + if (state->crtcs[i].commit) { + spin_lock(&crtc->commit_lock); + list_add(&state->crtcs[i].commit->commit_entry, + &crtc->commit_list); + spin_unlock(&crtc->commit_lock); - if (!plane) - continue; + state->crtcs[i].commit->event = NULL; + } + } + for_each_plane_in_state(state, plane, plane_state, i) { plane->state->state = state; - swap(state->plane_states[i], plane->state); + swap(state->planes[i].state, plane->state); plane->state->state = NULL; } } @@ -2409,7 +2798,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip); * This is the main helper function provided by the atomic helper framework for * implementing the legacy DPMS connector interface. It computes the new desired * ->active state for the corresponding CRTC (if the connector is enabled) and - * updates it. + * updates it. * * Returns: * Returns 0 on success, negative errno numbers on failure. @@ -2930,16 +3319,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); * @red: red correction table * @green: green correction table * @blue: green correction table - * @start: * @size: size of the tables * * Implements support for legacy gamma correction table for drivers * that support color management through the DEGAMMA_LUT/GAMMA_LUT * properties. 
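+ *
+ * A driver that exposes those properties would typically just plug this
+ * helper into its CRTC funcs; a sketch, with "foo" as a placeholder
+ * driver name::
+ *
+ *	static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *		.gamma_set = drm_atomic_helper_legacy_gamma_set,
+ *		...
+ *	};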
*/ -void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, - uint32_t start, uint32_t size) +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size) { struct drm_device *dev = crtc->dev; struct drm_mode_config *config = &dev->mode_config; @@ -2951,7 +3339,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, state = drm_atomic_state_alloc(crtc->dev); if (!state) - return; + return -ENOMEM; blob = drm_property_create_blob(dev, sizeof(struct drm_color_lut) * size, @@ -3002,7 +3390,7 @@ retry: drm_property_unreference_blob(blob); - return; + return 0; fail: if (ret == -EDEADLK) goto backoff; @@ -3010,7 +3398,7 @@ fail: drm_atomic_state_free(state); drm_property_unreference_blob(blob); - return; + return ret; backoff: drm_atomic_state_clear(state); drm_atomic_legacy_backoff(state); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 50d0baa06db0..4153e8a193af 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -30,25 +30,36 @@ #include <drm/drmP.h> #include "drm_internal.h" +#include "drm_legacy.h" /** - * drm_getmagic - Get unique magic of a client - * @dev: DRM device to operate on - * @data: ioctl data containing the drm_auth object - * @file_priv: DRM file that performs the operation + * DOC: master and authentication * - * This looks up the unique magic of the passed client and returns it. If the - * client did not have a magic assigned, yet, a new one is registered. The magic - * is stored in the passed drm_auth object. + * struct &drm_master is used to track groups of clients with open + * primary/legacy device nodes. For every struct &drm_file which has at + * least once successfully become the device master (either through the + * SET_MASTER IOCTL, or implicitly by opening the primary device node when + * no one else was the current master at that time) there exists one &drm_master. + * This is noted in the is_master member of &drm_file. All other clients have + * just a pointer to the &drm_master they are associated with. * - * Returns: 0 on success, negative error code on failure. + * In addition only one &drm_master can be the current master for a &drm_device. + * It can be switched through the DROP_MASTER and SET_MASTER IOCTLs, or + * implicitly through closing/opening the primary device node. See also + * drm_is_current_master(). + * + * Clients can authenticate against the current master (if it matches their own) + * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters, + * this allows controlled access to the device for an entire group of mutually + * trusted clients. */ + int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_auth *auth = data; int ret = 0; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->master_mutex); if (!file_priv->magic) { ret = idr_alloc(&file_priv->master->magic_map, file_priv, 1, 0, GFP_KERNEL); @@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) file_priv->magic = ret; } auth->magic = file_priv->magic; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->master_mutex); DRM_DEBUG("%u\n", auth->magic); return ret < 0 ?
ret : 0; } -/** - * drm_authmagic - Authenticate client with a magic - * @dev: DRM device to operate on - * @data: ioctl data containing the drm_auth object - * @file_priv: DRM file that performs the operation - * - * This looks up a DRM client by the passed magic and authenticates it. - * - * Returns: 0 on success, negative error code on failure. - */ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data, DRM_DEBUG("%u\n", auth->magic); - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->master_mutex); file = idr_find(&file_priv->master->magic_map, auth->magic); if (file) { file->authenticated = 1; idr_replace(&file_priv->master->magic_map, NULL, auth->magic); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->master_mutex); return file ? 0 : -EINVAL; } + +static struct drm_master *drm_master_create(struct drm_device *dev) +{ + struct drm_master *master; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return NULL; + + kref_init(&master->refcount); + spin_lock_init(&master->lock.spinlock); + init_waitqueue_head(&master->lock.lock_queue); + idr_init(&master->magic_map); + master->dev = dev; + + return master; +} + +static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv, + bool new_master) +{ + int ret = 0; + + dev->master = drm_master_get(fpriv->master); + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, fpriv, new_master); + if (unlikely(ret != 0)) { + drm_master_put(&dev->master); + } + } + + return ret; +} + +static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) +{ + struct drm_master *old_master; + int ret; + + lockdep_assert_held_once(&dev->master_mutex); + + old_master = fpriv->master; + fpriv->master = drm_master_create(dev); + if (!fpriv->master) { + fpriv->master = old_master; + return -ENOMEM; + } + + if (dev->driver->master_create) { + ret = dev->driver->master_create(dev, fpriv->master); + if (ret) + goto out_err; + } + fpriv->is_master = 1; + fpriv->authenticated = 1; + + ret = drm_set_master(dev, fpriv, true); + if (ret) + goto out_err; + + if (old_master) + drm_master_put(&old_master); + + return 0; + +out_err: + /* drop references and restore old master on failure */ + drm_master_put(&fpriv->master); + fpriv->master = old_master; + + return ret; +} + +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = 0; + + mutex_lock(&dev->master_mutex); + if (drm_is_current_master(file_priv)) + goto out_unlock; + + if (dev->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->is_master) { + ret = drm_new_set_master(dev, file_priv); + goto out_unlock; + } + + ret = drm_set_master(dev, file_priv, false); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +static void drm_drop_master(struct drm_device *dev, + struct drm_file *fpriv) +{ + if (dev->driver->master_drop) + dev->driver->master_drop(dev, fpriv); + drm_master_put(&dev->master); +} + +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = -EINVAL; + + mutex_lock(&dev->master_mutex); + if (!drm_is_current_master(file_priv)) + goto out_unlock; + + if (!dev->master) + goto out_unlock; + + ret = 0; + drm_drop_master(dev, file_priv); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +int drm_master_open(struct drm_file 
*file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + int ret = 0; + + /* if there is no current master, make this fd the master, but do not + * create any master object for render clients */ + mutex_lock(&dev->master_mutex); + if (!dev->master) + ret = drm_new_set_master(dev, file_priv); + else + file_priv->master = drm_master_get(dev->master); + mutex_unlock(&dev->master_mutex); + + return ret; +} + +void drm_master_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_master *master = file_priv->master; + + mutex_lock(&dev->master_mutex); + if (file_priv->magic) + idr_remove(&file_priv->master->magic_map, file_priv->magic); + + if (!drm_is_current_master(file_priv)) + goto out; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + /* + * Since the master is disappearing, so is the + * possibility to lock. + */ + mutex_lock(&dev->struct_mutex); + if (master->lock.hw_lock) { + if (dev->sigdata.lock == master->lock.hw_lock) + dev->sigdata.lock = NULL; + master->lock.hw_lock = NULL; + master->lock.file_priv = NULL; + wake_up_interruptible_all(&master->lock.lock_queue); + } + mutex_unlock(&dev->struct_mutex); + } + + if (dev->master == file_priv->master) + drm_drop_master(dev, file_priv); +out: + /* drop the master reference held by the file priv */ + if (file_priv->master) + drm_master_put(&file_priv->master); + mutex_unlock(&dev->master_mutex); +} + +/** + * drm_is_current_master - checks whether @fpriv is the current master + * @fpriv: DRM file private + * + * Checks whether @fpriv is the current master on its device. This decides whether a + * client is allowed to run DRM_MASTER IOCTLs. + * + * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting + * - the current master is assumed to own the non-shareable display hardware. + */ +bool drm_is_current_master(struct drm_file *fpriv) +{ + return fpriv->is_master && fpriv->master == fpriv->minor->dev->master; +} +EXPORT_SYMBOL(drm_is_current_master); + +/** + * drm_master_get - reference a master pointer + * @master: struct &drm_master + * + * Increments the reference count of @master and returns a pointer to @master. + */ +struct drm_master *drm_master_get(struct drm_master *master) +{ + kref_get(&master->refcount); + return master; +} +EXPORT_SYMBOL(drm_master_get); + +static void drm_master_destroy(struct kref *kref) +{ + struct drm_master *master = container_of(kref, struct drm_master, refcount); + struct drm_device *dev = master->dev; + + if (dev->driver->master_destroy) + dev->driver->master_destroy(dev, master); + + drm_legacy_master_rmmaps(dev, master); + + idr_destroy(&master->magic_map); + kfree(master->unique); + kfree(master); +} + +/** + * drm_master_put - unreference and clear a master pointer + * @master: pointer to a pointer of struct &drm_master + * + * This decrements the &drm_master behind @master and sets it to NULL. + */ +void drm_master_put(struct drm_master **master) +{ + kref_put(&(*master)->refcount, drm_master_destroy); + *master = NULL; +} +EXPORT_SYMBOL(drm_master_put); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index b3654404abd0..255543086590 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -36,7 +36,7 @@ * encoder chain.
* * A bridge is always attached to a single &drm_encoder at a time, but can be - * either connected to it directly, or through an intermediate bridge: + * either connected to it directly, or through an intermediate bridge:: * * encoder ---> bridge B ---> bridge A * diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 9b34158c0f77..c3a12cd8bd0d 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, */ if (!entry->map || map->type != entry->map->type || - entry->master != dev->primary->master) + entry->master != dev->master) continue; switch (map->type) { case _DRM_SHM: @@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, map->offset = (unsigned long)map->handle; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ - if (dev->primary->master->lock.hw_lock != NULL) { + if (dev->master->lock.hw_lock != NULL) { vfree(map->handle); kfree(map); return -EBUSY; } - dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ + dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */ } break; case _DRM_AGP: { @@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, mutex_unlock(&dev->struct_mutex); if (!(map->flags & _DRM_DRIVER)) - list->master = dev->primary->master; + list->master = dev->master; *maplist = list; return 0; } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 059f7c39c582..a7916e5f8864 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) mb(); for (; addr < end; addr += size) clflushopt(addr); + clflushopt(end - 1); /* force serialisation */ mb(); return; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 0e3cc66aa8b7..f1d9f0569d7f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -39,6 +39,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_modeset_lock.h> #include <drm/drm_atomic.h> +#include <drm/drm_auth.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order) } EXPORT_SYMBOL(drm_get_subpixel_order_name); -static char printable_char(int c) -{ - return isascii(c) && isprint(c) ? c : '?'; -} - -/** - * drm_get_format_name - return a string for drm fourcc format - * @format: format to compute name of - * - * Note that the buffer used by this function is globally shared and owned by - * the function itself. - * - * FIXME: This isn't really multithreading safe. - */ -const char *drm_get_format_name(uint32_t format) -{ - static char buf[32]; - - snprintf(buf, sizeof(buf), - "%c%c%c%c %s-endian (0x%08x)", - printable_char(format & 0xff), - printable_char((format >> 8) & 0xff), - printable_char((format >> 16) & 0xff), - printable_char((format >> 24) & 0x7f), - format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", - format); - - return buf; -} -EXPORT_SYMBOL(drm_get_format_name); - /* * Internal function to assign a slot in the object idr and optionally * register the object into the idr. 
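[On the drm_cache.c change above: clflushopt is only ordered against accesses to the same cacheline, so the loop is followed by one more flush of the final line as a serialising hack ahead of the closing mb(), matching the in-line comment. A self-contained sketch of the address arithmetic, assuming a 64-byte cacheline; flush_line stands in for the real clflushopt:

#include <stdio.h>

/* Stand-in for clflushopt: record which cacheline a flush would hit. */
static void flush_line(unsigned long addr)
{
	printf("flush cacheline 0x%lx\n", addr & ~63UL);
}

int main(void)
{
	const unsigned long size = 64;	/* x86_clflush_size on most parts */
	unsigned long addr = 0x1008, length = 200;
	const unsigned long end = addr + length;	/* 0x10d0 */

	addr &= -size;			/* round down, as the kernel code does */
	for (; addr < end; addr += size)
		flush_line(addr);	/* 0x1000 0x1040 0x1080 0x10c0 */
	flush_line(end - 1);		/* 0x10c0 again: the serialising flush */
	return 0;
}
]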
@@ -426,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj) } EXPORT_SYMBOL(drm_mode_object_reference); +/** + * drm_crtc_force_disable - Forcibly turn off a CRTC + * @crtc: CRTC to turn off + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_crtc_force_disable(struct drm_crtc *crtc) +{ + struct drm_mode_set set = { + .crtc = crtc, + }; + + return drm_mode_set_config_internal(&set); +} +EXPORT_SYMBOL(drm_crtc_force_disable); + +/** + * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs + * @dev: DRM device whose CRTCs to turn off + * + * Drivers may want to call this on unload to ensure that all displays are + * unlit and the GPU is in a consistent, low power state. Takes modeset locks. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_crtc_force_disable_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + int ret = 0; + + drm_modeset_lock_all(dev); + drm_for_each_crtc(crtc, dev) + if (crtc->enabled) { + ret = drm_crtc_force_disable(crtc); + if (ret) + goto out; + } +out: + drm_modeset_unlock_all(dev); + return ret; +} +EXPORT_SYMBOL(drm_crtc_force_disable_all); + static void drm_framebuffer_free(struct kref *kref) { struct drm_framebuffer *fb = @@ -535,7 +550,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); * * Cleanup framebuffer. This function is intended to be used from the drivers * ->destroy callback. It can also be used to clean up driver private - * framebuffers embedded into a larger structure. + * framebuffers embedded into a larger structure. * * Note that this function does not remove the fb from active usuage - if it is * still used anywhere, hilarity can ensue since userspace could call getfb on @@ -574,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) struct drm_device *dev; struct drm_crtc *crtc; struct drm_plane *plane; - struct drm_mode_set set; - int ret; if (!fb) return; @@ -605,11 +618,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) drm_for_each_crtc(crtc, dev) { if (crtc->primary->fb == fb) { /* should turn off the crtc */ - memset(&set, 0, sizeof(struct drm_mode_set)); - set.crtc = crtc; - set.fb = NULL; - ret = drm_mode_set_config_internal(&set); - if (ret) + if (drm_crtc_force_disable(crtc)) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); } } @@ -639,6 +648,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev) return num; } +static int drm_crtc_register_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + int ret = 0; + + drm_for_each_crtc(crtc, dev) { + if (crtc->funcs->late_register) + ret = crtc->funcs->late_register(crtc); + if (ret) + return ret; + } + + return 0; +} + +static void drm_crtc_unregister_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, dev) { + if (crtc->funcs->early_unregister) + crtc->funcs->early_unregister(crtc); + } +} + /** * drm_crtc_init_with_planes - Initialise a new CRTC object with * specified primary and cursor planes. 
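[drm_crtc_force_disable() and drm_crtc_force_disable_all() above package the blank-the-display loop that callers previously open-coded with a zeroed drm_mode_set. A sketch of the intended call site, a driver unload path; the function name is hypothetical:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void sketch_driver_unload(struct drm_device *dev)
{
	/* lights out before GPU state goes away; takes the modeset locks */
	if (drm_crtc_force_disable_all(dev))
		DRM_ERROR("failed to disable all CRTCs on unload\n");

	/* ... then tear down fbdev emulation, irqs, driver state ... */
}
]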
@@ -669,6 +703,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; + INIT_LIST_HEAD(&crtc->commit_list); + spin_lock_init(&crtc->commit_lock); + drm_modeset_lock_init(&crtc->mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) @@ -692,7 +729,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &config->crtc_list); - config->num_crtc++; + crtc->index = config->num_crtc++; crtc->primary = primary; crtc->cursor = cursor; @@ -722,6 +759,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + /* Note that the crtc_list is considered to be static; should we + * remove the drm_crtc at runtime we would have to decrement all + * the indices on the drm_crtc after us in the crtc_list. + */ + kfree(crtc->gamma_store); crtc->gamma_store = NULL; @@ -741,29 +783,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_cleanup); -/** - * drm_crtc_index - find the index of a registered CRTC - * @crtc: CRTC to find index for - * - * Given a registered CRTC, return the index of that CRTC within a DRM - * device's list of CRTCs. - */ -unsigned int drm_crtc_index(struct drm_crtc *crtc) -{ - unsigned int index = 0; - struct drm_crtc *tmp; - - drm_for_each_crtc(tmp, crtc->dev) { - if (tmp == crtc) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_crtc_index); - /* * drm_mode_remove - remove and free a mode * @connector: connector list to modify @@ -909,11 +928,11 @@ int drm_connector_init(struct drm_device *dev, connector->dev = dev; connector->funcs = funcs; - connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL); - if (connector->connector_id < 0) { - ret = connector->connector_id; + ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL); + if (ret < 0) goto out_put; - } + connector->index = ret; + ret = 0; connector->connector_type = connector_type; connector->connector_type_id = @@ -961,7 +980,7 @@ out_put_type_id: ida_remove(connector_ida, connector->connector_type_id); out_put_id: if (ret) - ida_remove(&config->connector_ida, connector->connector_id); + ida_remove(&config->connector_ida, connector->index); out_put: if (ret) drm_mode_object_unregister(dev, &connector->base); @@ -984,6 +1003,12 @@ void drm_connector_cleanup(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; + /* The connector should have been removed from userspace long before + * it is finally destroyed. 
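[The crtc->index and connector->index assignments above replace the old list walks with a field cached at init time; this is only sound because the object lists are append-only for the device's lifetime, as the cleanup comments spell out. A sketch of the O(1) lookup this enables; the headers grew comparable static inlines, this one is purely illustrative:

#include <drm/drm_crtc.h>

static inline unsigned int sketch_crtc_index(const struct drm_crtc *crtc)
{
	/* assigned once in drm_crtc_init_with_planes(), never reused */
	return crtc->index;
}
]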
+ */ + if (WARN_ON(connector->registered)) + drm_connector_unregister(connector); + if (connector->tile_group) { drm_mode_put_tile_group(dev, connector->tile_group); connector->tile_group = NULL; @@ -999,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector) connector->connector_type_id); ida_remove(&dev->mode_config.connector_ida, - connector->connector_id); + connector->index); kfree(connector->display_info.bus_formats); drm_mode_object_unregister(dev, &connector->base); @@ -1030,19 +1055,34 @@ int drm_connector_register(struct drm_connector *connector) { int ret; + if (connector->registered) + return 0; + ret = drm_sysfs_connector_add(connector); if (ret) return ret; ret = drm_debugfs_connector_add(connector); if (ret) { - drm_sysfs_connector_remove(connector); - return ret; + goto err_sysfs; + } + + if (connector->funcs->late_register) { + ret = connector->funcs->late_register(connector); + if (ret) + goto err_debugfs; } drm_mode_object_register(connector->dev, &connector->base); + connector->registered = true; return 0; + +err_debugfs: + drm_debugfs_connector_remove(connector); +err_sysfs: + drm_sysfs_connector_remove(connector); + return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -1054,28 +1094,29 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { + if (!connector->registered) + return; + + if (connector->funcs->early_unregister) + connector->funcs->early_unregister(connector); + drm_sysfs_connector_remove(connector); drm_debugfs_connector_remove(connector); + + connector->registered = false; } EXPORT_SYMBOL(drm_connector_unregister); -/** - * drm_connector_register_all - register all connectors - * @dev: drm device - * - * This function registers all connectors in sysfs and other places so that - * userspace can start to access them. Drivers can call it after calling - * drm_dev_register() to complete the device registration, if they don't call - * drm_connector_register() on each connector individually. - * - * When a device is unplugged and should be removed from userspace access, - * call drm_connector_unregister_all(), which is the inverse of this - * function. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_connector_register_all(struct drm_device *dev) +static void drm_connector_unregister_all(struct drm_device *dev) +{ + struct drm_connector *connector; + + /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_connector_unregister(connector); +} + +static int drm_connector_register_all(struct drm_device *dev) { struct drm_connector *connector; int ret; @@ -1097,27 +1138,31 @@ err: drm_connector_unregister_all(dev); return ret; } -EXPORT_SYMBOL(drm_connector_register_all); -/** - * drm_connector_unregister_all - unregister connector userspace interfaces - * @dev: drm device - * - * This functions unregisters all connectors from sysfs and other places so - * that userspace can no longer access them. Drivers should call this as the - * first step tearing down the device instace, or when the underlying - * physical device disappeared (e.g. USB unplug), right before calling - * drm_dev_unregister(). 
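[With the connector->registered flag above, drm_connector_register() and drm_connector_unregister() become idempotent, so drivers and the core can both call them without extra bookkeeping. A sketch of an error-unwinding registration path that leans on this; the names are hypothetical:

static int sketch_register_pair(struct drm_connector *a,
				struct drm_connector *b)
{
	int ret;

	ret = drm_connector_register(a);	/* no-op if already registered */
	if (ret)
		return ret;

	ret = drm_connector_register(b);
	if (ret)
		drm_connector_unregister(a);	/* safe even if called twice */

	return ret;
}
]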
- */ -void drm_connector_unregister_all(struct drm_device *dev) +static int drm_encoder_register_all(struct drm_device *dev) { - struct drm_connector *connector; + struct drm_encoder *encoder; + int ret = 0; - /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - drm_connector_unregister(connector); + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->late_register) + ret = encoder->funcs->late_register(encoder); + if (ret) + return ret; + } + + return 0; +} + +static void drm_encoder_unregister_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->early_unregister) + encoder->funcs->early_unregister(encoder); + } } -EXPORT_SYMBOL(drm_connector_unregister_all); /** * drm_encoder_init - Init a preallocated encoder @@ -1166,7 +1211,7 @@ int drm_encoder_init(struct drm_device *dev, } list_add_tail(&encoder->head, &dev->mode_config.encoder_list); - dev->mode_config.num_encoder++; + encoder->index = dev->mode_config.num_encoder++; out_put: if (ret) @@ -1180,29 +1225,6 @@ out_unlock: EXPORT_SYMBOL(drm_encoder_init); /** - * drm_encoder_index - find the index of a registered encoder - * @encoder: encoder to find index for - * - * Given a registered encoder, return the index of that encoder within a DRM - * device's list of encoders. - */ -unsigned int drm_encoder_index(struct drm_encoder *encoder) -{ - unsigned int index = 0; - struct drm_encoder *tmp; - - drm_for_each_encoder(tmp, encoder->dev) { - if (tmp == encoder) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_encoder_index); - -/** * drm_encoder_cleanup - cleans up an initialised encoder * @encoder: encoder to cleanup * @@ -1212,6 +1234,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; + /* Note that the encoder_list is considered to be static; should we + * remove the drm_encoder at runtime we would have to decrement all + * the indices on the drm_encoder after us in the encoder_list. + */ + drm_modeset_lock_all(dev); drm_mode_object_unregister(dev, &encoder->base); kfree(encoder->name); @@ -1300,7 +1327,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, plane->type = type; list_add_tail(&plane->head, &config->plane_list); - config->num_total_plane++; + plane->index = config->num_total_plane++; if (plane->type == DRM_PLANE_TYPE_OVERLAY) config->num_overlay_plane++; @@ -1325,6 +1352,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, } EXPORT_SYMBOL(drm_universal_plane_init); +static int drm_plane_register_all(struct drm_device *dev) +{ + struct drm_plane *plane; + int ret = 0; + + drm_for_each_plane(plane, dev) { + if (plane->funcs->late_register) + ret = plane->funcs->late_register(plane); + if (ret) + return ret; + } + + return 0; +} + +static void drm_plane_unregister_all(struct drm_device *dev) +{ + struct drm_plane *plane; + + drm_for_each_plane(plane, dev) { + if (plane->funcs->early_unregister) + plane->funcs->early_unregister(plane); + } +} + /** * drm_plane_init - Initialize a legacy plane * @dev: DRM device @@ -1374,6 +1426,11 @@ void drm_plane_cleanup(struct drm_plane *plane) BUG_ON(list_empty(&plane->head)); + /* Note that the plane_list is considered to be static; should we + * remove the drm_plane at runtime we would have to decrement all + * the indices on the drm_plane after us in the plane_list. 
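[The register_all/unregister_all loops above give every modeset object an optional pair of hooks; drivers use them for userspace-visible extras (debugfs files, sysfs attributes) that must appear only once the device is public and vanish before it goes away. A sketch of a plane vtable wiring them up; all sketch_* names are hypothetical:

static int sketch_plane_late_register(struct drm_plane *plane)
{
	/* create debugfs/sysfs entries now that the minor is visible */
	return 0;
}

static void sketch_plane_early_unregister(struct drm_plane *plane)
{
	/* remove them before userspace loses access to the device */
}

static const struct drm_plane_funcs sketch_plane_funcs = {
	.late_register = sketch_plane_late_register,
	.early_unregister = sketch_plane_early_unregister,
	/* .update_plane, .disable_plane, .destroy etc. as usual */
};
]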
+ */ + list_del(&plane->head); dev->mode_config.num_total_plane--; if (plane->type == DRM_PLANE_TYPE_OVERLAY) @@ -1391,29 +1448,6 @@ void drm_plane_cleanup(struct drm_plane *plane) EXPORT_SYMBOL(drm_plane_cleanup); /** - * drm_plane_index - find the index of a registered plane - * @plane: plane to find index for - * - * Given a registered plane, return the index of that CRTC within a DRM - * device's list of planes. - */ -unsigned int drm_plane_index(struct drm_plane *plane) -{ - unsigned int index = 0; - struct drm_plane *tmp; - - drm_for_each_plane(tmp, plane->dev) { - if (tmp == plane) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_plane_index); - -/** * drm_plane_from_index - find the registered plane at an index * @dev: DRM device * @idx: index of registered plane to find for @@ -1425,13 +1459,11 @@ struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx) { struct drm_plane *plane; - unsigned int i = 0; - drm_for_each_plane(plane, dev) { - if (i == idx) + drm_for_each_plane(plane, dev) + if (idx == plane->index) return plane; - i++; - } + return NULL; } EXPORT_SYMBOL(drm_plane_from_index); @@ -1467,6 +1499,46 @@ void drm_plane_force_disable(struct drm_plane *plane) } EXPORT_SYMBOL(drm_plane_force_disable); +int drm_modeset_register_all(struct drm_device *dev) +{ + int ret; + + ret = drm_plane_register_all(dev); + if (ret) + goto err_plane; + + ret = drm_crtc_register_all(dev); + if (ret) + goto err_crtc; + + ret = drm_encoder_register_all(dev); + if (ret) + goto err_encoder; + + ret = drm_connector_register_all(dev); + if (ret) + goto err_connector; + + return 0; + +err_connector: + drm_encoder_unregister_all(dev); +err_encoder: + drm_crtc_unregister_all(dev); +err_crtc: + drm_plane_unregister_all(dev); +err_plane: + return ret; +} + +void drm_modeset_unregister_all(struct drm_device *dev) +{ + drm_connector_unregister_all(dev); + drm_encoder_unregister_all(dev); + drm_crtc_unregister_all(dev); + drm_plane_unregister_all(dev); +} + static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; @@ -2975,6 +3047,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } + fb->hot_x = req->hot_x; + fb->hot_y = req->hot_y; } else { fb = NULL; } @@ -3581,7 +3655,7 @@ int drm_mode_getfb(struct drm_device *dev, r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; if (fb->funcs->create_handle) { - if (file_priv->is_master || capable(CAP_SYS_ADMIN) || + if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || drm_is_control_client(file_priv)) { ret = fb->funcs->create_handle(fb, file_priv, &r->handle); @@ -3738,6 +3812,13 @@ void drm_fb_release(struct drm_file *priv) } } +static bool drm_property_type_valid(struct drm_property *property) +{ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE); + return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE); +} + /** * drm_property_create - create a new property type * @dev: drm device @@ -5138,6 +5219,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder); int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { + uint16_t *r_base, *g_base, *b_base; + int i; + crtc->gamma_size = gamma_size; crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, @@ -5147,6 +5231,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, return -ENOMEM; } + r_base = crtc->gamma_store; + g_base = r_base + 
gamma_size; + b_base = g_base + gamma_size; + for (i = 0; i < gamma_size; i++) { + r_base[i] = i << 8; + g_base[i] = i << 8; + b_base[i] = i << 8; + } + + return 0; } EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); @@ -5214,7 +5308,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, goto out; } - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); out: drm_modeset_unlock_all(dev); @@ -5544,264 +5638,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, } /** - * drm_fb_get_bpp_depth - get the bpp/depth values for format - * @format: pixel format (DRM_FORMAT_*) - * @depth: storage for the depth value - * @bpp: storage for the bpp value - * - * This only supports RGB formats here for compat with code that doesn't use - * pixel formats directly yet. - */ -void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, - int *bpp) -{ - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB332: - case DRM_FORMAT_BGR233: - *depth = 8; - *bpp = 8; - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - *depth = 15; - *bpp = 16; - break; - case DRM_FORMAT_RGB565: - case DRM_FORMAT_BGR565: - *depth = 16; - *bpp = 16; - break; - case DRM_FORMAT_RGB888: - case DRM_FORMAT_BGR888: - *depth = 24; - *bpp = 24; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_BGRX8888: - *depth = 24; - *bpp = 32; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_RGBX1010102: - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_BGRA1010102: - *depth = 30; - *bpp = 32; - break; - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - *depth = 32; - *bpp = 32; - break; - default: - DRM_DEBUG_KMS("unsupported pixel format %s\n", - drm_get_format_name(format)); - *depth = 0; - *bpp = 0; - break; - } -} -EXPORT_SYMBOL(drm_fb_get_bpp_depth); - -/** - * drm_format_num_planes - get the number of planes for format - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The number of planes used by the specified pixel format. - */ -int drm_format_num_planes(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 3; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_num_planes); - -/** - * drm_format_plane_cpp - determine the bytes per pixel value - * @format: pixel format (DRM_FORMAT_*) - * @plane: plane index - * - * Returns: - * The bytes per pixel value for the specified plane. 
- */ -int drm_format_plane_cpp(uint32_t format, int plane) -{ - unsigned int depth; - int bpp; - - if (plane >= drm_format_num_planes(format)) - return 0; - - switch (format) { - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - return 2; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return plane ? 2 : 1; - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 1; - default: - drm_fb_get_bpp_depth(format, &depth, &bpp); - return bpp >> 3; - } -} -EXPORT_SYMBOL(drm_format_plane_cpp); - -/** - * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The horizontal chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_horz_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); - -/** - * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The vertical chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_vert_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); - -/** - * drm_format_plane_width - width of the plane given the first plane - * @width: width of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The width of @plane, given that the width of the first plane is @width. - */ -int drm_format_plane_width(int width, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return width; - - return width / drm_format_horz_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_width); - -/** - * drm_format_plane_height - height of the plane given the first plane - * @height: height of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The height of @plane, given that the height of the first plane is @height. 
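[The format helpers deleted above are being moved out of drm_crtc.c rather than dropped; the same functions survive in drm_fourcc.c from this era on. A worked example of what they compute, matching the removed code: NV12 is 4:2:0, so plane 1 is subsampled by two in each direction and carries interleaved CbCr at two bytes per sample:

#include <stdio.h>

int main(void)
{
	int width = 1920, height = 1080;	/* plane 0: full-size Y, cpp 1 */
	int hsub = 2, vsub = 2;			/* NV12 chroma subsampling */

	printf("plane 0: %dx%d, cpp 1\n", width, height);
	printf("plane 1: %dx%d, cpp 2\n", width / hsub, height / vsub);
	return 0;
}
]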
- */ -int drm_format_plane_height(int height, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return height; - - return height / drm_format_vert_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_height); - -/** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified * @supported_rotations: Supported rotations @@ -6064,3 +5900,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, return tg; } EXPORT_SYMBOL(drm_mode_create_tile_group); + +/** + * drm_crtc_enable_color_mgmt - enable color management properties + * @crtc: DRM CRTC + * @degamma_lut_size: the size of the degamma lut (before CSC) + * @has_ctm: whether to attach ctm_property for CSC matrix + * @gamma_lut_size: the size of the gamma lut (after CSC) + * + * This function lets the driver enable the color correction + * properties on a CRTC. This includes 3 degamma, csc and gamma + * properties that userspace can set and 2 size properties to inform + * the userspace of the lut sizes. Each of the properties are + * optional. The gamma and degamma properties are only attached if + * their size is not 0 and ctm_property is only attached if has_ctm is + * true. + */ +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (degamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->degamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->degamma_lut_size_property, + degamma_lut_size); + } + + if (has_ctm) + drm_object_attach_property(&crtc->base, + config->ctm_property, 0); + + if (gamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->gamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->gamma_lut_size_property, + gamma_lut_size); + } +} +EXPORT_SYMBOL(drm_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 26feb2f8453f..604d3ef72ffa 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) */ void drm_helper_disable_unused_functions(struct drm_device *dev) { + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) + DRM_ERROR("Called for atomic driver, this is not what you want.\n"); + drm_modeset_lock_all(dev); __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); @@ -1123,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, return drm_plane_helper_commit(plane, plane_state, old_fb); } EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); - -/** - * drm_helper_crtc_enable_color_mgmt - enable color management properties - * @crtc: DRM CRTC - * @degamma_lut_size: the size of the degamma lut (before CSC) - * @gamma_lut_size: the size of the gamma lut (after CSC) - * - * This function lets the driver enable the color correction properties on a - * CRTC. This includes 3 degamma, csc and gamma properties that userspace can - * set and 2 size properties to inform the userspace of the lut sizes. 
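[drm_crtc_enable_color_mgmt() above supersedes the removed helper precisely by making each property optional. A sketch of driver usage; the LUT sizes are hypothetical hardware limits:

static void sketch_crtc_color_init(struct drm_crtc *crtc)
{
	/* full pipeline: 33-entry degamma LUT, CSC matrix, 257-entry gamma */
	drm_crtc_enable_color_mgmt(crtc, 33, true, 257);
}

static void sketch_gamma_only_init(struct drm_crtc *crtc)
{
	/* hardware with only a gamma LUT attaches nothing else */
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
}
]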
- */ -void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc, - int degamma_lut_size, - int gamma_lut_size) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *config = &dev->mode_config; - - drm_object_attach_property(&crtc->base, - config->degamma_lut_property, 0); - drm_object_attach_property(&crtc->base, - config->ctm_property, 0); - drm_object_attach_property(&crtc->base, - config->gamma_lut_property, 0); - - drm_object_attach_property(&crtc->base, - config->degamma_lut_size_property, - degamma_lut_size); - drm_object_attach_property(&crtc->base, - config->gamma_lut_size_property, - gamma_lut_size); -} -EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index a78c138282ea..47a500b90fd7 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -31,14 +31,100 @@ * and are not exported to drivers. */ + +/* drm_crtc.c */ +void drm_connector_ida_init(void); +void drm_connector_ida_destroy(void); int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type); void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object); +bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, + struct drm_mode_object **ref); +void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref); + +int drm_plane_check_pixel_format(const struct drm_plane *plane, + u32 format); +int drm_crtc_check_viewport(const struct drm_crtc *crtc, + int x, int y, + const struct drm_display_mode *mode, + const struct drm_framebuffer *fb); + +void drm_fb_release(struct drm_file *file_priv); +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); + +/* dumb buffer support IOCTLs */ +int drm_mode_create_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* framebuffer IOCTLs */ +extern int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* IOCTLs */ +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +int drm_mode_getresources(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getplane_res(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getconnector(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor_ioctl(struct 
drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getproperty_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getencoder(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_get_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); /* drm_atomic.c */ int drm_atomic_get_property(struct drm_mode_object *obj, - struct drm_property *property, uint64_t *val); + struct drm_property *property, uint64_t *val); int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_modeset_register_all(struct drm_device *dev); +void drm_modeset_unregister_all(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 3bcf8e6a85b3..fa10cef2ba37 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -46,11 +46,8 @@ static const struct drm_info_list drm_debugfs_list[] = { {"name", drm_name_info, 0}, - {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, - {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"vma", drm_vma_info, 0}, }; #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index 3334baacf43d..734f86a345f6 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void) drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev"); if (IS_ERR(drm_dp_aux_dev_class)) { - res = PTR_ERR(drm_dp_aux_dev_class); - goto out; + return PTR_ERR(drm_dp_aux_dev_class); } drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index eeaf5a7c3aa7..eae5ef963cb7 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, ret = aux->transfer(aux, &msg); - if (ret > 0) { + if (ret >= 0) { native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK; if (native_reply == DP_AUX_NATIVE_REPLY_ACK) { if (ret == size) @@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, memset(&msg, 0, sizeof(msg)); - mutex_lock(&aux->hw_mutex); - for (i = 0; i < num; i++) { msg.address = msgs[i].addr; drm_dp_i2c_msg_set_request(&msg, &msgs[i]); @@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, msg.size = 0; (void)drm_dp_i2c_do_msg(aux, &msg); - mutex_unlock(&aux->hw_mutex); - return err; } @@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = { .master_xfer = drm_dp_i2c_xfer, }; +static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c) +{ + return 
container_of(i2c, struct drm_dp_aux, ddc); +} + +static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_lock(&i2c_to_aux(i2c)->hw_mutex); +} + +static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex); +} + +static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_unlock(&i2c_to_aux(i2c)->hw_mutex); +} + /** - * drm_dp_aux_register() - initialise and register aux channel + * drm_dp_aux_init() - minimally initialise an aux channel * @aux: DisplayPort AUX channel * - * Returns 0 on success or a negative error code on failure. + * If you need to use the drm_dp_aux's i2c adapter prior to registering it + * with the outside world, call drm_dp_aux_init() first. You must still + * call drm_dp_aux_register() once the connector has been registered to + * allow userspace access to the auxiliary DP channel. */ -int drm_dp_aux_register(struct drm_dp_aux *aux) +void drm_dp_aux_init(struct drm_dp_aux *aux) { - int ret; - mutex_init(&aux->hw_mutex); aux->ddc.algo = &drm_dp_i2c_algo; aux->ddc.algo_data = aux; aux->ddc.retries = 3; + aux->ddc.lock_bus = lock_bus; + aux->ddc.trylock_bus = trylock_bus; + aux->ddc.unlock_bus = unlock_bus; +} +EXPORT_SYMBOL(drm_dp_aux_init); + +/** + * drm_dp_aux_register() - initialise and register aux channel + * @aux: DisplayPort AUX channel + * + * Automatically calls drm_dp_aux_init() if this hasn't been done yet. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_aux_register(struct drm_dp_aux *aux) +{ + int ret; + + if (!aux->ddc.algo) + drm_dp_aux_init(aux); + aux->ddc.class = I2C_CLASS_DDC; aux->ddc.owner = THIS_MODULE; aux->ddc.dev.parent = aux->dev; @@ -822,3 +860,35 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux) i2c_del_adapter(&aux->ddc); } EXPORT_SYMBOL(drm_dp_aux_unregister); + +#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x) + +/** + * drm_dp_psr_setup_time() - PSR setup in time usec + * @psr_cap: PSR capabilities from DPCD + * + * Returns: + * PSR setup time for the panel in microseconds, negative + * error code on failure. 
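[The drm_dp_aux_init()/drm_dp_aux_register() split above exists because drivers often need AUX (DPCD reads, EDID over I2C) during probe, before any connector can be registered. A sketch of the intended ordering under that assumption; struct sketch_output and sketch_aux_transfer are hypothetical:

#include <linux/errno.h>
#include <drm/drm_dp_helper.h>

struct sketch_output {
	struct drm_dp_aux aux;
};

static ssize_t sketch_aux_transfer(struct drm_dp_aux *aux,
				   struct drm_dp_aux_msg *msg)
{
	return -EIO;	/* the hardware AUX transaction would go here */
}

static int sketch_output_probe(struct sketch_output *out, struct device *dev)
{
	out->aux.dev = dev;
	out->aux.transfer = sketch_aux_transfer;
	drm_dp_aux_init(&out->aux);	/* i2c adapter usable immediately */

	/* ... read DPCD/EDID, create and register the connector ... */

	return drm_dp_aux_register(&out->aux);	/* exposes the aux char dev */
}
]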
+ */ +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) +{ + static const u16 psr_setup_time_us[] = { + PSR_SETUP_TIME(330), + PSR_SETUP_TIME(275), + PSR_SETUP_TIME(165), + PSR_SETUP_TIME(110), + PSR_SETUP_TIME(55), + PSR_SETUP_TIME(0), + }; + int i; + + i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT; + if (i >= ARRAY_SIZE(psr_setup_time_us)) + return -EINVAL; + + return psr_setup_time_us[i]; +} +EXPORT_SYMBOL(drm_dp_psr_setup_time); + +#undef PSR_SETUP_TIME diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6537908050d7..04e457117980 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) WARN_ON(!mutex_is_locked(&mgr->qlock)); /* construct a chunk from the first msg in the tx_msg queue */ - if (list_empty(&mgr->tx_msg_downq)) { - mgr->tx_down_in_progress = false; + if (list_empty(&mgr->tx_msg_downq)) return; - } - mgr->tx_down_in_progress = true; txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); ret = process_single_tx_qlock(mgr, txmsg, false); @@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; wake_up(&mgr->tx_waitq); } - if (list_empty(&mgr->tx_msg_downq)) { - mgr->tx_down_in_progress = false; - return; - } } /* called holding qlock */ @@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, { mutex_lock(&mgr->qlock); list_add_tail(&txmsg->next, &mgr->tx_msg_downq); - if (!mgr->tx_down_in_progress) + if (list_is_singular(&mgr->tx_msg_downq)) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port + * @connector: DRM connector for this port * @mgr: manager for this port * @port: unverified pointer to a port * @@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (mgr->tx_down_in_progress) + if (!list_empty(&mgr->tx_msg_downq)) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bff89226a344..be27ed36f56e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -34,8 +34,10 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/drm_core.h> +#include "drm_crtc_internal.h" #include "drm_legacy.h" #include "drm_internal.h" +#include "drm_crtc_internal.h" /* * drm_debug: Enable debug output. @@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...) 
} EXPORT_SYMBOL(drm_ut_debug_printk); -struct drm_master *drm_master_create(struct drm_minor *minor) -{ - struct drm_master *master; - - master = kzalloc(sizeof(*master), GFP_KERNEL); - if (!master) - return NULL; - - kref_init(&master->refcount); - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); - idr_init(&master->magic_map); - master->minor = minor; - - return master; -} - -struct drm_master *drm_master_get(struct drm_master *master) -{ - kref_get(&master->refcount); - return master; -} -EXPORT_SYMBOL(drm_master_get); - -static void drm_master_destroy(struct kref *kref) -{ - struct drm_master *master = container_of(kref, struct drm_master, refcount); - struct drm_device *dev = master->minor->dev; - - if (dev->driver->master_destroy) - dev->driver->master_destroy(dev, master); - - drm_legacy_master_rmmaps(dev, master); - - idr_destroy(&master->magic_map); - kfree(master->unique); - kfree(master); -} - -void drm_master_put(struct drm_master **master) -{ - kref_put(&(*master)->refcount, drm_master_destroy); - *master = NULL; -} -EXPORT_SYMBOL(drm_master_put); - -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = 0; - - mutex_lock(&dev->master_mutex); - if (file_priv->is_master) - goto out_unlock; - - if (file_priv->minor->master) { - ret = -EINVAL; - goto out_unlock; - } - - if (!file_priv->master) { - ret = -EINVAL; - goto out_unlock; - } - - if (!file_priv->allowed_master) { - ret = drm_new_set_master(dev, file_priv); - goto out_unlock; - } - - file_priv->minor->master = drm_master_get(file_priv->master); - file_priv->is_master = 1; - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, file_priv, false); - if (unlikely(ret != 0)) { - file_priv->is_master = 0; - drm_master_put(&file_priv->minor->master); - } - } - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = -EINVAL; - - mutex_lock(&dev->master_mutex); - if (!file_priv->is_master) - goto out_unlock; - - if (!file_priv->minor->master) - goto out_unlock; - - ret = 0; - if (dev->driver->master_drop) - dev->driver->master_drop(dev, file_priv, false); - drm_master_put(&file_priv->minor->master); - file_priv->is_master = 0; - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - /* * DRM Minors * A DRM device can provide several char-dev interfaces on the DRM-Major. Each @@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor) * callbacks implemented by the driver. The driver then needs to initialize all * the various subsystems for the drm device like memory management, vblank * handling, modesetting support and intial output configuration plus obviously - * initialize all the corresponding hardware bits. An important part of this is - * also calling drm_dev_set_unique() to set the userspace-visible unique name of - * this device instance. Finally when everything is up and running and ready for - * userspace the device instance can be published using drm_dev_register(). + * initialize all the corresponding hardware bits. Finally when everything is up + * and running and ready for userspace the device instance can be published + * using drm_dev_register(). * * There is also deprecated support for initalizing device instances using * bus-specific helpers and the ->load() callback. 
But due to @@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor) * dev_priv field of &drm_device. */ +static int drm_dev_set_unique(struct drm_device *dev, const char *name) +{ + kfree(dev->unique); + dev->unique = kstrdup(name, GFP_KERNEL); + + return dev->unique ? 0 : -ENOMEM; +} + /** * drm_put_dev - Unregister and release a DRM device * @dev: DRM device @@ -461,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev); void drm_unplug_dev(struct drm_device *dev) { /* for a USB device */ - drm_minor_unregister(dev, DRM_MINOR_LEGACY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); + drm_dev_unregister(dev); mutex_lock(&drm_global_mutex); @@ -549,11 +448,12 @@ static void drm_fs_inode_free(struct inode *inode) } /** - * drm_dev_alloc - Allocate new DRM device - * @driver: DRM driver to allocate device for + * drm_dev_init - Initialise new DRM device + * @dev: DRM device + * @driver: DRM driver * @parent: Parent device object * - * Allocate and initialize a new DRM device. No device registration is done. + * Initialize a new DRM device. No device registration is done. * Call drm_dev_register() to advertice the device to user space and register it * with other core subsystems. This should be done last in the device * initialization sequence to make sure userspace can't access an inconsistent @@ -564,19 +464,18 @@ static void drm_fs_inode_free(struct inode *inode) * * Note that for purely virtual devices @parent can be NULL. * + * Drivers that do not want to allocate their own device struct + * embedding struct &drm_device can call drm_dev_alloc() instead. + * * RETURNS: - * Pointer to new DRM device, or NULL if out of memory. + * 0 on success, or error code on failure. */ -struct drm_device *drm_dev_alloc(struct drm_driver *driver, - struct device *parent) +int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent) { - struct drm_device *dev; int ret; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; - kref_init(&dev->ref); dev->dev = parent; dev->driver = driver; @@ -605,8 +504,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); if (ret) goto err_minors; - - WARN_ON(driver->suspend || driver->resume); } if (drm_core_check_feature(dev, DRIVER_RENDER)) { @@ -619,7 +516,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, if (ret) goto err_minors; - if (drm_ht_create(&dev->map_hash, 12)) + ret = drm_ht_create(&dev->map_hash, 12); + if (ret) goto err_minors; drm_legacy_ctxbitmap_init(dev); @@ -632,13 +530,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, } } - if (parent) { - ret = drm_dev_set_unique(dev, dev_name(parent)); - if (ret) - goto err_setunique; - } + /* Use the parent device name as DRM device unique identifier, but fall + * back to the driver name for virtual devices like vgem. */ + ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name); + if (ret) + goto err_setunique; - return dev; + return 0; err_setunique: if (drm_core_check_feature(dev, DRIVER_GEM)) @@ -653,8 +551,49 @@ err_minors: drm_fs_inode_free(dev->anon_inode); err_free: mutex_destroy(&dev->master_mutex); - kfree(dev); - return NULL; + return ret; +} +EXPORT_SYMBOL(drm_dev_init); + +/** + * drm_dev_alloc - Allocate new DRM device + * @driver: DRM driver to allocate device for + * @parent: Parent device object + * + * Allocate and initialize a new DRM device. No device registration is done. 
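[drm_dev_init() above is what lets a driver embed struct drm_device instead of allocating it through drm_dev_alloc(). A sketch of the subclassing pattern it enables; sketch_device and sketch_driver are hypothetical, and a real driver would also free through its own release path:

#include <linux/slab.h>
#include <drm/drmP.h>

static struct drm_driver sketch_driver;	/* fops, feature flags etc. elided */

struct sketch_device {
	struct drm_device drm;	/* embedded, not pointed-to */
	void __iomem *mmio;
};

static int sketch_probe(struct device *parent)
{
	struct sketch_device *sdev;
	int ret;

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	ret = drm_dev_init(&sdev->drm, &sketch_driver, parent);
	if (ret) {
		kfree(sdev);
		return ret;
	}

	return drm_dev_register(&sdev->drm, 0);
}
]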
+ * Call drm_dev_register() to advertice the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_ref() and + * drm_dev_unref() to take and drop further ref-counts. + * + * Note that for purely virtual devices @parent can be NULL. + * + * Drivers that wish to subclass or embed struct &drm_device into their + * own struct should look at using drm_dev_init() instead. + * + * RETURNS: + * Pointer to new DRM device, or NULL if out of memory. + */ +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent) +{ + struct drm_device *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + ret = drm_dev_init(dev, driver, parent); + if (ret) { + kfree(dev); + return NULL; + } + + return dev; } EXPORT_SYMBOL(drm_dev_alloc); @@ -718,11 +657,7 @@ EXPORT_SYMBOL(drm_dev_unref); * * Register the DRM device @dev with the system, advertise device to user-space * and start normal device operation. @dev must be allocated via drm_dev_alloc() - * previously. Right after drm_dev_register() the driver should call - * drm_connector_register_all() to register all connectors in sysfs. This is - * a separate call for backward compatibility with drivers still using - * the deprecated ->load() callback, where connectors are registered from within - * the ->load() callback. + * previously. * * Never call this twice on any device! * @@ -759,6 +694,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_register_all(dev); + ret = 0; goto out_unlock; @@ -789,6 +727,9 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_unregister_all(dev); + if (dev->driver->unload) dev->driver->unload(dev); @@ -806,26 +747,6 @@ void drm_dev_unregister(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unregister); -/** - * drm_dev_set_unique - Set the unique name of a DRM device - * @dev: device of which to set the unique name - * @name: unique name - * - * Sets the unique name of a DRM device using the specified string. Drivers - * can use this at driver probe time if the unique name of the devices they - * drive is static. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_dev_set_unique(struct drm_device *dev, const char *name) -{ - kfree(dev->unique); - dev->unique = kstrdup(name, GFP_KERNEL); - - return dev->unique ? 0 : -ENOMEM; -} -EXPORT_SYMBOL(drm_dev_set_unique); - /* * DRM Core * The DRM core module initializes all global DRM objects and makes them diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 9a401aed98e0..622f788bff46 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) * by commas, search through the list looking for one that * matches the connector. * - * If there's one or more that don't't specify a connector, keep + * If there's one or more that doesn't specify a connector, keep * the last one found one as a fallback. 
*/ fwstr = kstrdup(edid_firmware, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 5075fae3c4e2..1fd6eac1400c 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -23,6 +23,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #define DEFAULT_FBDEFIO_DELAY_MS 50 @@ -52,7 +53,7 @@ struct drm_fbdev_cma { * will be set up automatically. dirty() is called by * drm_fb_helper_deferred_io() in process context (struct delayed_work). * - * Example fbdev deferred io code: + * Example fbdev deferred io code:: * * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, * struct drm_file *file_priv, @@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, * drm_fb_cma_create_with_funcs() - helper function for the * &drm_mode_config_funcs ->fb_create * callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request + * @funcs: vtable to be used for the new framebuffer object * * This can be used to set &drm_framebuffer_funcs for drivers that need the * dirty() callback. Use drm_fb_cma_create() if you don't need to change @@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs); /** * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request * * If your hardware has special alignment or pitch requirements these should be * checked before calling this function. Use drm_fb_cma_create_with_funcs() if @@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create); * This function will usually be called from the CRTC callback functions. */ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, - unsigned int plane) + unsigned int plane) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); #ifdef CONFIG_DEBUG_FS -/* - * drm_fb_cma_describe() - Helper to dump information about a single - * CMA framebuffer object - */ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) /** * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects - * in debugfs. + * in debugfs. 
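[The drm_fb_cma_helper.c error-path fix above rests on kfree(NULL) being a no-op: when two allocations are attempted back to back, the single failure branch must free both pointers, since either allocation may be the one that succeeded. The pattern in isolation, as a sketch:

#include <linux/fb.h>
#include <linux/slab.h>

static int sketch_defio_alloc(struct fb_deferred_io **out_defio,
			      struct fb_ops **out_ops)
{
	struct fb_deferred_io *fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	struct fb_ops *fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);

	if (!fbdefio || !fbops) {
		kfree(fbdefio);	/* kfree(NULL) is a no-op */
		kfree(fbops);	/* the half that leaked before this fix */
		return -ENOMEM;
	}

	*out_defio = fbdefio;
	*out_ops = fbops;
	return 0;
}
]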
+ * @m: output file + * @arg: private data for the callback */ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) { @@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); #endif +static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(info->device, vma, info->screen_base, + info->fix.smem_start, info->fix.smem_len); +} + static struct fb_ops drm_fbdev_cma_ops = { .owner = THIS_MODULE, .fb_fillrect = drm_fb_helper_sys_fillrect, @@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = { .fb_blank = drm_fb_helper_blank, .fb_pan_display = drm_fb_helper_pan_display, .fb_setcmap = drm_fb_helper_setcmap, + .fb_mmap = drm_fb_cma_mmap, }; static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, @@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi, fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); if (!fbdefio || !fbops) { kfree(fbdefio); + kfree(fbops); return -ENOMEM; } @@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma) drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper); } EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); + +/** + * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend + * @fbdev_cma: The drm_fbdev_cma struct, may be NULL + * @state: desired state, zero to resume, non-zero to suspend + * + * Calls drm_fb_helper_set_suspend, which is a wrapper around + * fb_set_suspend implemented by fbdev core. + */ +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state) +{ + if (fbdev_cma) + drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state); +} +EXPORT_SYMBOL(drm_fbdev_cma_set_suspend); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7c2eb75db60f..ce54e985d91b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); } /** @@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_warn_on_modeset_not_all_locked(dev); - if (fb_helper->atomic) + if (dev->mode_config.funcs->atomic_commit) return restore_fbdev_mode_atomic(fb_helper); drm_for_each_plane(plane, dev) { @@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) /* Sometimes user space wants everything disabled, so don't steal the * display if there's a master. 
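[The fb-helper hunks above drop the cached fb_helper->atomic flag; whether a device is atomic is now read straight off the mode_config vtable, so the answer cannot go stale. The check in isolation; the wrapper name is hypothetical:

static bool sketch_drm_is_atomic(struct drm_device *dev)
{
	/* an atomic driver must provide ->atomic_commit */
	return dev->mode_config.funcs->atomic_commit != NULL;
}
]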
*/ - if (dev->primary->master) + if (lockless_dereference(dev->master)) return false; drm_for_each_crtc(crtc, dev) { @@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev, i++; } - fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC); - return 0; out_free: drm_fb_helper_crtc_free(fb_helper); @@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - int pindex; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *palette; @@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, !fb_helper->funcs->gamma_get)) return -EINVAL; - pindex = regno; - - if (fb->bits_per_pixel == 16) { - pindex = regno << 3; - - if (fb->depth == 16 && regno > 63) - return -EINVAL; - if (fb->depth == 15 && regno > 31) - return -EINVAL; - - if (fb->depth == 16) { - u16 r, g, b; - int i; - if (regno < 32) { - for (i = 0; i < 8; i++) - fb_helper->funcs->gamma_set(crtc, red, - green, blue, pindex + i); - } + WARN_ON(fb->bits_per_pixel != 8); - fb_helper->funcs->gamma_get(crtc, &r, - &g, &b, - pindex >> 1); + fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); - for (i = 0; i < 4; i++) - fb_helper->funcs->gamma_set(crtc, r, - green, b, - (pindex >> 1) + i); - } - } - - if (fb->depth != 16) - fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } @@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; } - if (fb_helper->atomic) { + if (dev->mode_config.funcs->atomic_commit) { ret = pan_display_atomic(var, info); goto unlock; } @@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, my_score++; connector_funcs = connector->helper_private; - encoder = connector_funcs->best_encoder(connector); + + /* + * If the DRM device implements atomic hooks and ->best_encoder() is + * NULL we fallback to the default drm_atomic_helper_best_encoder() + * helper. + */ + if (fb_helper->dev->mode_config.funcs->atomic_commit && + !connector_funcs->best_encoder) + encoder = drm_atomic_helper_best_encoder(connector); + else + encoder = connector_funcs->best_encoder(connector); + if (!encoder) goto out; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 7af7f8bcb355..323c238fcac7 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -40,6 +40,7 @@ #include <linux/module.h> #include "drm_legacy.h" #include "drm_internal.h" +#include "drm_crtc_internal.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex); * specific implementations. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the - * following is an example #file_operations structure: + * following is an example #file_operations structure:: * * static const example_drm_fops = { * .owner = THIS_MODULE, @@ -168,60 +169,6 @@ static int drm_cpu_valid(void) } /* - * drm_new_set_master - Allocate a new master object and become master for the - * associated master realm. - * - * @dev: The associated device. - * @fpriv: File private identifying the client. - * - * This function must be called with dev::struct_mutex held. - * Returns negative error code on failure. Zero on success. 
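[On the drm_pick_crtcs() change above: an atomic driver with a fixed one-encoder-per-connector mapping may now leave ->best_encoder NULL, and the fb helper falls back to drm_atomic_helper_best_encoder(). A sketch of such a connector helper vtable; sketch_get_modes is hypothetical:

static int sketch_get_modes(struct drm_connector *connector)
{
	return 0;	/* a real driver would add probed modes here */
}

static const struct drm_connector_helper_funcs sketch_conn_helper_funcs = {
	.get_modes = sketch_get_modes,
	/* .best_encoder left NULL: valid for atomic drivers only */
};
]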
- */ -int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) -{ - struct drm_master *old_master; - int ret; - - lockdep_assert_held_once(&dev->master_mutex); - - /* create a new master */ - fpriv->minor->master = drm_master_create(fpriv->minor); - if (!fpriv->minor->master) - return -ENOMEM; - - /* take another reference for the copy in the local file priv */ - old_master = fpriv->master; - fpriv->master = drm_master_get(fpriv->minor->master); - - if (dev->driver->master_create) { - ret = dev->driver->master_create(dev, fpriv->master); - if (ret) - goto out_err; - } - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, fpriv, true); - if (ret) - goto out_err; - } - - fpriv->is_master = 1; - fpriv->allowed_master = 1; - fpriv->authenticated = 1; - if (old_master) - drm_master_put(&old_master); - - return 0; - -out_err: - /* drop both references and restore old master on failure */ - drm_master_put(&fpriv->minor->master); - drm_master_put(&fpriv->master); - fpriv->master = old_master; - - return ret; -} - -/* * Called whenever a process opens /dev/drm. * * \param filp file pointer. @@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) goto out_prime_destroy; } - /* if there is no current master make this fd it, but do not create - * any master object for render clients */ - mutex_lock(&dev->master_mutex); - if (drm_is_primary_client(priv) && !priv->minor->master) { - /* create a new master */ - ret = drm_new_set_master(dev, priv); + if (drm_is_primary_client(priv)) { + ret = drm_master_open(priv); if (ret) goto out_close; - } else if (drm_is_primary_client(priv)) { - /* get a reference to the master */ - priv->master = drm_master_get(priv->minor->master); } - mutex_unlock(&dev->master_mutex); mutex_lock(&dev->filelist_mutex); list_add(&priv->lhead, &dev->filelist); @@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) return 0; out_close: - mutex_unlock(&dev->master_mutex); if (dev->driver->postclose) dev->driver->postclose(dev, priv); out_prime_destroy: @@ -338,18 +276,6 @@ out_prime_destroy: return ret; } -static void drm_master_release(struct drm_device *dev, struct file *filp) -{ - struct drm_file *file_priv = filp->private_data; - - if (drm_legacy_i_have_hw_lock(dev, file_priv)) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - drm_legacy_lock_free(&file_priv->master->lock, - _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - } -} - static void drm_events_release(struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; @@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv) /* Remove unconsumed events */ list_for_each_entry_safe(e, et, &file_priv->event_list, link) { list_del(&e->link); - e->destroy(e); + kfree(e); } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp) list_del(&file_priv->lhead); mutex_unlock(&dev->filelist_mutex); - mutex_lock(&dev->struct_mutex); - if (file_priv->magic) - idr_remove(&file_priv->master->magic_map, file_priv->magic); - mutex_unlock(&dev->struct_mutex); - if (dev->driver->preclose) dev->driver->preclose(dev, file_priv); @@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp) (long)old_encode_dev(file_priv->minor->kdev->devt), dev->open_count); - /* if the master has gone away we can't do anything with the lock */ - 
if (file_priv->minor->master) - drm_master_release(dev, filp); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + drm_legacy_lock_release(dev, filp); if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) drm_legacy_reclaim_buffers(dev, file_priv); @@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp) drm_legacy_ctxbitmap_flush(dev, file_priv); - mutex_lock(&dev->master_mutex); - - if (file_priv->is_master) { - struct drm_master *master = file_priv->master; - - /* - * Since the master is disappearing, so is the - * possibility to lock. - */ - mutex_lock(&dev->struct_mutex); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - mutex_unlock(&dev->struct_mutex); - - if (file_priv->minor->master == file_priv->master) { - /* drop the reference held my the minor */ - if (dev->driver->master_drop) - dev->driver->master_drop(dev, file_priv, true); - drm_master_put(&file_priv->minor->master); - } - } - - /* drop the master reference held by the file priv */ - if (file_priv->master) - drm_master_put(&file_priv->master); - file_priv->is_master = 0; - mutex_unlock(&dev->master_mutex); + if (drm_is_primary_client(file_priv)) + drm_master_release(file_priv); if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); - if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&file_priv->prime); @@ -636,7 +525,7 @@ put_back_event: } ret += length; - e->destroy(e); + kfree(e); } } mutex_unlock(&file_priv->event_read_lock); @@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev, list_add(&p->pending_link, &file_priv->pending_event_list); p->file_priv = file_priv; - /* we *could* pass this in as arg, but everyone uses kfree: */ - p->destroy = (void (*) (struct drm_pending_event *)) kfree; - return 0; } EXPORT_SYMBOL(drm_event_reserve_init_locked); @@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev, list_del(&p->pending_link); } spin_unlock_irqrestore(&dev->event_lock, flags); - p->destroy(p); + kfree(p); } EXPORT_SYMBOL(drm_event_cancel_free); @@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) { assert_spin_locked(&dev->event_lock); + if (e->completion) { + /* ->completion might disappear as soon as it signalled. */ + complete_all(e->completion); + e->completion = NULL; + } + + if (e->fence) { + fence_signal(e->fence); + fence_put(e->fence); + } + if (!e->file_priv) { - e->destroy(e); + kfree(e); return; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c new file mode 100644 index 000000000000..0645c85d5f95 --- /dev/null +++ b/drivers/gpu/drm/drm_fourcc.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * DRM core format related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. 
The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/export.h> +#include <linux/kernel.h> + +#include <drm/drmP.h> +#include <drm/drm_fourcc.h> + +static char printable_char(int c) +{ + return isascii(c) && isprint(c) ? c : '?'; +} + +/** + * drm_get_format_name - return a string for drm fourcc format + * @format: format to compute name of + * + * Note that the buffer used by this function is globally shared and owned by + * the function itself. + * + * FIXME: This isn't really multithreading safe. + */ +const char *drm_get_format_name(uint32_t format) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "%c%c%c%c %s-endian (0x%08x)", + printable_char(format & 0xff), + printable_char((format >> 8) & 0xff), + printable_char((format >> 16) & 0xff), + printable_char((format >> 24) & 0x7f), + format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", + format); + + return buf; +} +EXPORT_SYMBOL(drm_get_format_name); + +/** + * drm_fb_get_bpp_depth - get the bpp/depth values for format + * @format: pixel format (DRM_FORMAT_*) + * @depth: storage for the depth value + * @bpp: storage for the bpp value + * + * This only supports RGB formats here for compat with code that doesn't use + * pixel formats directly yet. + */ +void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, + int *bpp) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + *depth = 8; + *bpp = 8; + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + *depth = 15; + *bpp = 16; + break; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + *depth = 16; + *bpp = 16; + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + *depth = 24; + *bpp = 24; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + *depth = 24; + *bpp = 32; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + *depth = 30; + *bpp = 32; + break; + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + *depth = 32; + *bpp = 32; + break; + default: + DRM_DEBUG_KMS("unsupported pixel format %s\n", + drm_get_format_name(format)); + *depth = 0; + *bpp = 0; + break; + } +} +EXPORT_SYMBOL(drm_fb_get_bpp_depth); + +/** + * drm_format_num_planes - get the number of planes for format + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The number of planes used by the specified pixel format. 
+ */ +int drm_format_num_planes(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_num_planes); + +/** + * drm_format_plane_cpp - determine the bytes per pixel value + * @format: pixel format (DRM_FORMAT_*) + * @plane: plane index + * + * Returns: + * The bytes per pixel value for the specified plane. + */ +int drm_format_plane_cpp(uint32_t format, int plane) +{ + unsigned int depth; + int bpp; + + if (plane >= drm_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return plane ? 2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + drm_fb_get_bpp_depth(format, &depth, &bpp); + return bpp >> 3; + } +} +EXPORT_SYMBOL(drm_format_plane_cpp); + +/** + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The horizontal chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_horz_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); + +/** + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The vertical chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_vert_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); + +/** + * drm_format_plane_width - width of the plane given the first plane + * @width: width of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. 
+ */ +int drm_format_plane_width(int width, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / drm_format_horz_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_width); + +/** + * drm_format_plane_height - height of the plane given the first plane + * @height: height of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. + */ +int drm_format_plane_height(int height, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / drm_format_vert_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_height); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 32156060b9c9..9134ae134667 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) int i, npages; /* This is the shared memory object that backs the GEM resource */ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless @@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release); * @kref: kref of the object to free * * Called after the last reference to the object has been lost. - * Must be called holding struct_ mutex + * Must be called holding &drm_device->struct_mutex. * * Frees the object */ diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 5d469b2f26f4..9ae353f4dd06 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_minor *minor = node->minor; struct drm_device *dev = minor->dev; - struct drm_master *master = minor->master; - if (!master) - return 0; - - if (master->unique) { - seq_printf(m, "%s %s %s\n", - dev->driver->name, - dev_name(dev->dev), master->unique); - } else { - seq_printf(m, "%s %s\n", - dev->driver->name, dev_name(dev->dev)); - } - return 0; -} - -/** - * Called when "/proc/dri/.../vm" is read. - * - * Prints information about all mappings in drm_device::maplist. 
- */ -int drm_vm_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_local_map *map; - struct drm_map_list *r_list; - - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; - const char *type; - int i; - - mutex_lock(&dev->struct_mutex); - seq_printf(m, "slot offset size type flags address mtrr\n\n"); - i = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - map = r_list->map; - if (!map) - continue; - if (map->type < 0 || map->type > 5) - type = "??"; - else - type = types[map->type]; - - seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ", - i, - (unsigned long long)map->offset, - map->size, type, map->flags, - (unsigned long) r_list->user_token); - if (map->mtrr < 0) - seq_printf(m, "none\n"); - else - seq_printf(m, "%4d\n", map->mtrr); - i++; - } - mutex_unlock(&dev->struct_mutex); - return 0; -} + struct drm_master *master; -/** - * Called when "/proc/dri/.../bufs" is read. - */ -int drm_bufs_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_device_dma *dma; - int i, seg_pages; - - mutex_lock(&dev->struct_mutex); - dma = dev->dma; - if (!dma) { - mutex_unlock(&dev->struct_mutex); - return 0; - } - - seq_printf(m, " o size count free segs pages kB\n\n"); - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].buf_count) { - seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order); - seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n", - i, - dma->bufs[i].buf_size, - dma->bufs[i].buf_count, - 0, - dma->bufs[i].seg_count, - seg_pages, - seg_pages * PAGE_SIZE / 1024); - } - } - seq_printf(m, "\n"); - for (i = 0; i < dma->buf_count; i++) { - if (i && !(i % 32)) - seq_printf(m, "\n"); - seq_printf(m, " %d", dma->buflist[i]->list); - } + mutex_lock(&dev->master_mutex); + master = dev->master; + if (!master) + goto out_unlock; + + seq_printf(m, "%s", dev->driver->name); + if (dev->dev) + seq_printf(m, " dev=%s", dev_name(dev->dev)); + if (master && master->unique) + seq_printf(m, " master=%s", master->unique); + if (dev->unique) + seq_printf(m, " unique=%s", dev->unique); seq_printf(m, "\n"); - mutex_unlock(&dev->struct_mutex); +out_unlock: + mutex_unlock(&dev->master_mutex); + return 0; } @@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data) task ? task->comm : "<unknown>", pid_vnr(priv->pid), priv->minor->index, - priv->is_master ? 'y' : 'n', + drm_is_current_master(priv) ? 'y' : 'n', priv->authenticated ? 
'y' : 'n', from_kuid_munged(seq_user_ns(m), priv->uid), priv->magic); @@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data) return 0; } - static int drm_gem_one_name_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 902cf6a15212..b86dc9b921a5 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex; void drm_lastclose(struct drm_device *dev); /* drm_pci.c */ -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u); int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* drm_vm.c */ -int drm_vma_info(struct seq_file *m, void *data); - /* drm_prime.c */ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr /* drm_info.c */ int drm_name_info(struct seq_file *m, void *data); -int drm_vm_info(struct seq_file *m, void *data); -int drm_bufs_info(struct seq_file *m, void *data); int drm_clients_info(struct seq_file *m, void* data); int drm_gem_name_info(struct seq_file *m, void *data); @@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_master_open(struct drm_file *file_priv); +void drm_master_release(struct drm_file *file_priv); /* drm_sysfs.c */ extern struct class *drm_class; @@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data, void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); -/* drm_drv.c */ -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -struct drm_master *drm_master_create(struct drm_minor *minor); - /* drm_debugfs.c */ #if defined(CONFIG_DEBUG_FS) int drm_debugfs_init(struct drm_minor *minor, int minor_id, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index b7a39771c152..33af4a5ddca1 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_core.h> +#include <drm/drm_auth.h> #include "drm_legacy.h" #include "drm_internal.h" #include "drm_crtc_internal.h" @@ -37,6 +38,64 @@ #include <linux/pci.h> #include <linux/export.h> +/** + * DOC: getunique and setversion story + * + * BEWARE THE DRAGONS! MIND THE TRAPDOORS! + * + * In an attempt to warn anyone else who's trying to figure out what's going + * on here, I'll try to summarize the story. First things first, let's clear up + * the names, because the kernel internals, libdrm and the ioctls are all named + * differently: + * + * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm + * through the drmGetBusid function. + * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All + * that code is nerved in the kernel with drm_invalid_op(). 
+ * - The internal set_busid kernel functions and driver callbacks are + * exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is + * nerved) allowed userspace to set the busid through the above ioctl. + * - Other ioctls and functions involved are named consistently. + * + * For anyone wondering what the difference between drm 1.1 and 1.4 is: correct + * handling of pci domains in the busid on ppc. Doing this correctly was only + * implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's + * special with drm 1.2 and 1.3. + * + * Now the actual horror story of how device lookup in drm works. By and large, + * there are two different ways: either by busid, or by device driver name. + * + * Opening by busid is fairly simple: + * + * 1. First call SET_VERSION to make sure pci domains are handled properly. As a + * side-effect this fills out the unique name in the master structure. + * 2. Call GET_UNIQUE to read out the unique name from the master structure, + * which matches the busid thanks to step 1. If it doesn't, proceed to try + * the next device node. + * + * Opening by name is slightly different: + * + * 1. Directly call VERSION to get the version and to match against the driver + * name returned by that ioctl. Note that SET_VERSION is not called, which + * means the unique name for the master node just opened is _not_ filled + * out. This despite the fact that current drm device nodes are always bound to + * one device, and can't be runtime assigned like with drm 1.0. + * 2. Match driver name. If it mismatches, proceed to the next device node. + * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by + * checking that the first byte in the string is 0). If that's not the case, + * libdrm skips it and proceeds to the next device node. Probably this is just + * copypasta from drm 1.0 times, when a set unique name meant that the driver + * was in use already, but that's just conjecture. + * + * Long story short: To keep the open by name logic working, GET_UNIQUE must + * _not_ return a unique string when SET_VERSION hasn't been called yet, + * otherwise libdrm breaks. Even though that unique string can't ever change, and + * is totally irrelevant for actually opening the device because runtime + * assignable device instances were only supported in drm 1.0, which is long dead. + * But the libdrm code in drmOpenByName somehow survived, hence this can't be + * broken. + */ + static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev, master->unique_len = 0; } -/* - * Set the bus id. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_unique structure. - * \return zero on success or a negative number on failure. - * - * Copies the bus id from userspace into drm_device::unique, and verifies that - * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated - * in interface version 1.1 and will return EBUSY when setversion has requested - * version 1.1 or greater. Also note that KMS is all version 1.1 and later and - * UMS was only ever supported on pci devices.
- */ -static int drm_setunique(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_unique *u = data; - struct drm_master *master = file_priv->master; - int ret; - - if (master->unique_len || master->unique) - return -EBUSY; - - if (!u->unique_len || u->unique_len > 1024) - return -EINVAL; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return 0; - - if (WARN_ON(!dev->pdev)) - return -EINVAL; - - ret = drm_pci_set_unique(dev, master, u); - if (ret) - goto err; - - return 0; - -err: - drm_unset_busid(dev, master); - return ret; -} - static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; @@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) return ret; } } else { - if (WARN(dev->unique == NULL, - "No drm_driver.set_busid() implementation provided by " - "%ps. Use drm_dev_set_unique() to set the unique " - "name explicitly.", dev->driver)) - return -EINVAL; - + WARN_ON(!dev->unique); master->unique = kstrdup(dev->unique, GFP_KERNEL); if (master->unique) master->unique_len = strlen(dev->unique); @@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return -EACCES; /* MASTER is only for master or control clients */ - if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && + if (unlikely((flags & DRM_MASTER) && + !drm_is_current_master(file_priv) && !drm_is_control_client(file_priv))) return -EACCES; @@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), @@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), @@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, 
DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -638,7 +648,7 @@ long drm_ioctl(struct file *filp, int retcode = -EINVAL; char stack_kdata[128]; char *kdata = NULL; - unsigned int usize, asize, drv_size; + unsigned int in_size, out_size, drv_size, ksize; bool is_driver_ioctl; dev = file_priv->minor->dev; @@ -661,9 +671,12 @@ long drm_ioctl(struct file *filp, } drv_size = _IOC_SIZE(ioctl->cmd); - usize = _IOC_SIZE(cmd); - asize = max(usize, drv_size); - cmd = ioctl->cmd; + out_size = in_size = _IOC_SIZE(cmd); + if ((cmd & ioctl->cmd & IOC_IN) == 0) + in_size = 0; + if ((cmd & ioctl->cmd & IOC_OUT) == 0) + out_size = 0; + ksize = max(max(in_size, out_size), drv_size); DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", task_pid_nr(current), @@ -683,30 +696,24 @@ long drm_ioctl(struct file *filp, if (unlikely(retcode)) goto err_i1; - if (cmd & (IOC_IN | IOC_OUT)) { - if (asize <= sizeof(stack_kdata)) { - kdata = stack_kdata; - } else { - kdata = kmalloc(asize, GFP_KERNEL); - if (!kdata) { - retcode = -ENOMEM; - goto err_i1; - } + if (ksize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kmalloc(ksize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; } - if (asize > usize) - memset(kdata + usize, 0, asize - usize); } - if (cmd & IOC_IN) { - if (copy_from_user(kdata, (void __user *)arg, - usize) != 0) { - retcode = -EFAULT; - goto err_i1; - } - } else if (cmd & IOC_OUT) { - memset(kdata, 0, usize); + if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) { + retcode = -EFAULT; + goto err_i1; } + if (ksize > in_size) + memset(kdata + in_size, 0, ksize - in_size); + /* Enforce sane locking for kms driver ioctls. Core ioctls are * too messy still. */ if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) || @@ -718,11 +725,8 @@ long drm_ioctl(struct file *filp, mutex_unlock(&drm_global_mutex); } - if (cmd & IOC_OUT) { - if (copy_to_user((void __user *)arg, kdata, - usize) != 0) - retcode = -EFAULT; - } + if (copy_to_user((void __user *)arg, kdata, out_size) != 0) + retcode = -EFAULT; err_i1: if (!ioctl) @@ -749,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl); * shouldn't be used by any drivers. * * Returns: - * True if the @nr corresponds to a DRM core ioctl numer, false otherwise. + * True if the @nr corresponds to a DRM core ioctl number, false otherwise. */ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0fac801c18fe..77f357b2c386 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -42,10 +42,6 @@ #include <linux/vgaarb.h> #include <linux/export.h> -/* Access macro for slots in vblank timestamp ringbuffer. */ -#define vblanktimestamp(dev, pipe, count) \ - ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) - /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. */ @@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, struct timeval *t_vblank, u32 last) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - u32 tslot; assert_spin_locked(&dev->vblank_time_lock); vblank->last = last; - /* All writers hold the spinlock, but readers are serialized by - * the latching of vblank->count below. 
- */ - tslot = vblank->count + vblank_count_inc; - vblanktimestamp(dev, pipe, tslot) = *t_vblank; - - /* - * vblank timestamp updates are protected on the write side with - * vblank_time_lock, but on the read side done locklessly using a - * sequence-lock on the vblank counter. Ensure correct ordering using - * memory barrriers. We need the barrier both before and also after the - * counter update to synchronize with the next timestamp write. - * The read-side barriers for this are in drm_vblank_count_and_time. - */ - smp_wmb(); + write_seqlock(&vblank->seqlock); + vblank->time = *t_vblank; vblank->count += vblank_count_inc; - smp_wmb(); + write_sequnlock(&vblank->seqlock); } -/** - * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank - * @dev: DRM device - * @pipe: index of CRTC for which to reset the timestamp - * +/* * Reset the stored timestamp for the current vblank count to correspond * to the last vblank occurred. * @@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe spin_unlock(&dev->vblank_time_lock); } -/** - * drm_update_vblank_count - update the master vblank counter - * @dev: DRM device - * @pipe: counter to update - * +/* * Call back into the driver to update the appropriate vblank counter * (specified by @pipe). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next @@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, const struct timeval *t_old; u64 diff_ns; - t_old = &vblanktimestamp(dev, pipe, vblank->count); + t_old = &vblank->time; diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); /* @@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, diff = 1; } - /* - * FIMXE: Need to replace this hack with proper seqlocks. - * - * Restrict the bump of the software vblank counter to a safe maximum - * value of +1 whenever there is the possibility that concurrent readers - * of vblank timestamps could be active at the moment, as the current - * implementation of the timestamp caching and updating is not safe - * against concurrent readers for calls to store_vblank() with a bump - * of anything but +1. A bump != 1 would very likely return corrupted - * timestamps to userspace, because the same slot in the cache could - * be concurrently written by store_vblank() and read by one of those - * readers without the read-retry logic detecting the collision. - * - * Concurrent readers can exist when we are called from the - * drm_vblank_off() or drm_vblank_on() functions and other non-vblank- - * irq callers. However, all those calls to us are happening with the - * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount - * can't increase while we are executing. Therefore a zero refcount at - * this point is safe for arbitrary counter bumps if we are called - * outside vblank irq, a non-zero count is not 100% safe. Unfortunately - * we must also accept a refcount of 1, as whenever we are called from - * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and - * we must let that one pass through in order to not lose vblank counts - * during vblank irq off - which would completely defeat the whole - * point of this routine. 
- * - * Whenever we are called from vblank irq, we have to assume concurrent - * readers exist or can show up any time during our execution, even if - * the refcount is currently zero, as vblank irqs are usually only - * enabled due to the presence of readers, and because when we are called - * from vblank irq we can't hold the vbl_lock to protect us from sudden - * bumps in vblank refcount. Therefore also restrict bumps to +1 when - * called from vblank irq. - */ - if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 || - (flags & DRM_CALLED_FROM_VBLIRQ))) { - DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u " - "refcount %u, vblirq %u\n", pipe, diff, - atomic_read(&vblank->refcount), - (flags & DRM_CALLED_FROM_VBLIRQ) != 0); - diff = 1; - } - DRM_DEBUG_VBL("updating vblank count on crtc %u:" " current=%u, diff=%u, hw=%u hw_last=%u\n", pipe, vblank->count, diff, cur_vblank, vblank->last); @@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); } +/** + * drm_accurate_vblank_count - retrieve the master vblank counter + * @crtc: which counter to retrieve + * + * This function is similar to @drm_crtc_vblank_count but this + * function interpolates to handle a race with vblank irq's. + * + * This is mostly useful for hardware that can obtain the scanout + * position, but doesn't have a frame counter. + */ +u32 drm_accurate_vblank_count(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + u32 vblank; + unsigned long flags; + + WARN(!dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); + + spin_lock_irqsave(&dev->vblank_time_lock, flags); + + drm_update_vblank_count(dev, pipe, 0); + vblank = drm_vblank_count(dev, pipe); + + spin_unlock_irqrestore(&dev->vblank_time_lock, flags); + + return vblank; +} +EXPORT_SYMBOL(drm_accurate_vblank_count); + /* * Disable vblank irq's on crtc, make sure that last vblank count * of hardware and corresponding consistent software vblank counter @@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) init_waitqueue_head(&vblank->queue); setup_timer(&vblank->disable_timer, vblank_disable_fn, (unsigned long)vblank); + seqlock_init(&vblank->seqlock); } DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); @@ -569,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev) /* * Wake up any waiters so they don't hang. This is just to paper over - * isssues for UMS drivers which aren't in full control of their + * issues for UMS drivers which aren't in full control of their * vblank/irq handling. KMS drivers must ensure that vblanks are all * disabled when uninstalling the irq handler. */ @@ -631,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data, return 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - /* UMS was only ever support on pci devices. */ + /* UMS was only ever supported on pci devices. */ if (WARN_ON(!dev->pdev)) return -EINVAL; @@ -982,31 +945,24 @@ EXPORT_SYMBOL(drm_crtc_vblank_count); * * This is the legacy version of drm_crtc_vblank_count_and_time(). 
*/ -u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, - struct timeval *vblanktime) +static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, + struct timeval *vblanktime) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - int count = DRM_TIMESTAMP_MAXRETRIES; - u32 cur_vblank; + u32 vblank_count; + unsigned int seq; if (WARN_ON(pipe >= dev->num_crtcs)) return 0; - /* - * Vblank timestamps are read lockless. To ensure consistency the vblank - * counter is rechecked and ordering is ensured using memory barriers. - * This works like a seqlock. The write-side barriers are in store_vblank. - */ do { - cur_vblank = vblank->count; - smp_rmb(); - *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); - smp_rmb(); - } while (cur_vblank != vblank->count && --count > 0); + seq = read_seqbegin(&vblank->seqlock); + vblank_count = vblank->count; + *vblanktime = vblank->time; + } while (read_seqretry(&vblank->seqlock, seq)); - return cur_vblank; + return vblank_count; } -EXPORT_SYMBOL(drm_vblank_count_and_time); /** * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value @@ -1018,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time); * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. - * - * This is the native KMS version of drm_vblank_count_and_time(). */ u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, struct timeval *vblanktime) @@ -1037,39 +991,11 @@ static void send_vblank_event(struct drm_device *dev, e->event.tv_sec = now->tv_sec; e->event.tv_usec = now->tv_usec; - drm_send_event_locked(dev, &e->base); - trace_drm_vblank_event_delivered(e->base.pid, e->pipe, e->event.sequence); -} - -/** - * drm_arm_vblank_event - arm vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index - * @e: the event to prepare to send - * - * A lot of drivers need to generate vblank events for the very next vblank - * interrupt. For example when the page flip interrupt happens when the page - * flip gets armed, but not when it actually executes within the next vblank - * period. This helper function implements exactly the required vblank arming - * behaviour. - * - * Caller must hold event lock. Caller must also hold a vblank reference for - * the event @e, which will be dropped when the next vblank arrives. - * - * This is the legacy version of drm_crtc_arm_vblank_event(). - */ -void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) -{ - assert_spin_locked(&dev->event_lock); - e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); - list_add_tail(&e->base.link, &dev->vblank_event_list); + drm_send_event_locked(dev, &e->base); } -EXPORT_SYMBOL(drm_arm_vblank_event); /** * drm_crtc_arm_vblank_event - arm vblank event after pageflip @@ -1084,32 +1010,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event); * * Caller must hold event lock. Caller must also hold a vblank reference for * the event @e, which will be dropped when the next vblank arrives. - * - * This is the native KMS version of drm_arm_vblank_event(). 
*/ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { - drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + + assert_spin_locked(&dev->event_lock); + + e->pipe = pipe; + e->event.sequence = drm_vblank_count(dev, pipe); + list_add_tail(&e->base.link, &dev->vblank_event_list); } EXPORT_SYMBOL(drm_crtc_arm_vblank_event); /** - * drm_send_vblank_event - helper to send vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index + * drm_crtc_send_vblank_event - helper to send vblank event after pageflip + * @crtc: the source CRTC of the vblank event * @e: the event to send * * Updates sequence # and timestamp on event, and sends it to userspace. * Caller must hold event lock. - * - * This is the legacy version of drm_crtc_send_vblank_event(). */ -void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) { + struct drm_device *dev = crtc->dev; + unsigned int seq, pipe = drm_crtc_index(crtc); struct timeval now; - unsigned int seq; if (dev->num_crtcs > 0) { seq = drm_vblank_count_and_time(dev, pipe, &now); @@ -1121,23 +1050,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, e->pipe = pipe; send_vblank_event(dev, e, seq, &now); } -EXPORT_SYMBOL(drm_send_vblank_event); - -/** - * drm_crtc_send_vblank_event - helper to send vblank event after pageflip - * @crtc: the source CRTC of the vblank event - * @e: the event to send - * - * Updates sequence # and timestamp on event, and sends it to userspace. - * Caller must hold event lock. - * - * This is the native KMS version of drm_send_vblank_event(). - */ -void drm_crtc_send_vblank_event(struct drm_crtc *crtc, - struct drm_pending_vblank_event *e) -{ - drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e); -} EXPORT_SYMBOL(drm_crtc_send_vblank_event); /** @@ -1193,7 +1105,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) * Returns: * Zero on success or a negative error code on failure. */ -int drm_vblank_get(struct drm_device *dev, unsigned int pipe) +static int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; @@ -1219,7 +1131,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe) return ret; } -EXPORT_SYMBOL(drm_vblank_get); /** * drm_crtc_vblank_get - get a reference count on vblank events @@ -1228,8 +1139,6 @@ EXPORT_SYMBOL(drm_vblank_get); * Acquire a reference count on vblank events to avoid having them disabled * while in use. * - * This is the native kms version of drm_vblank_get(). - * * Returns: * Zero on success or a negative error code on failure. */ @@ -1249,7 +1158,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get); * * This is the legacy version of drm_crtc_vblank_put(). 
*/ -void drm_vblank_put(struct drm_device *dev, unsigned int pipe) +static void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -1270,7 +1179,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) jiffies + ((drm_vblank_offdelay * HZ)/1000)); } } -EXPORT_SYMBOL(drm_vblank_put); /** * drm_crtc_vblank_put - give up ownership of vblank events @@ -1278,8 +1186,6 @@ EXPORT_SYMBOL(drm_vblank_put); * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. - * - * This is the native kms version of drm_vblank_put(). */ void drm_crtc_vblank_put(struct drm_crtc *crtc) { @@ -1679,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, seq = drm_vblank_count_and_time(dev, pipe, &now); - if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait->request.sequence) <= (1 << 23)) { - vblwait->request.sequence = seq + 1; - vblwait->reply.sequence = vblwait->request.sequence; - } - DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", vblwait->request.sequence, seq, pipe); @@ -1781,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } + if ((flags & _DRM_VBLANK_NEXTONMISS) && + (seq - vblwait->request.sequence) <= (1 << 23)) { + vblwait->request.sequence = seq + 1; + } + if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously @@ -1788,14 +1693,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); } - if ((flags & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait->request.sequence) <= (1<<23)) { - vblwait->request.sequence = seq + 1; - } - DRM_DEBUG("waiting on vblank count %d, crtc %u\n", vblwait->request.sequence, pipe); - vblank->last_wait = vblwait->request.sequence; DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, (((drm_vblank_count(dev, pipe) - vblwait->request.sequence) <= (1 << 23)) || diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index d3b6ee357a2b..c6f422e879dd 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -88,14 +88,10 @@ struct drm_agp_mem { struct list_head head; }; -/* - * Generic Userspace Locking-API - */ - -int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f); +/* drm_lock.c */ int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx); +void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); /* DMA support */ int drm_legacy_dma_setup(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index daa2ff12101b..48ac0ebbd663 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -41,6 +41,110 @@ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); /** + * Take the heavyweight lock. + * + * \param lock lock pointer. + * \param context locking context. + * \return one if the lock is held, or zero otherwise. + * + * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 
+ */ +static +int drm_lock_take(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock_bh(&lock_data->spinlock); + do { + old = *lock; + if (old & _DRM_LOCK_HELD) + new = old | _DRM_LOCK_CONT; + else { + new = context | _DRM_LOCK_HELD | + ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? + _DRM_LOCK_CONT : 0); + } + prev = cmpxchg(lock, old, new); + } while (prev != old); + spin_unlock_bh(&lock_data->spinlock); + + if (_DRM_LOCKING_CONTEXT(old) == context) { + if (old & _DRM_LOCK_HELD) { + if (context != DRM_KERNEL_CONTEXT) { + DRM_ERROR("%d holds heavyweight lock\n", + context); + } + return 0; + } + } + + if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { + /* Have lock */ + return 1; + } + return 0; +} + +/** + * This takes a lock forcibly and hands it to context. Should ONLY be used + * inside *_unlock to give lock to kernel before calling *_dma_schedule. + * + * \param dev DRM device. + * \param lock lock pointer. + * \param context locking context. + * \return always one. + * + * Resets the lock file pointer. + * Marks the lock as held by the given context, via the \p cmpxchg instruction. + */ +static int drm_lock_transfer(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + lock_data->file_priv = NULL; + do { + old = *lock; + new = context | _DRM_LOCK_HELD; + prev = cmpxchg(lock, old, new); + } while (prev != old); + return 1; +} + +static int drm_legacy_lock_free(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock_bh(&lock_data->spinlock); + if (lock_data->kernel_waiters != 0) { + drm_lock_transfer(lock_data, 0); + lock_data->idle_has_lock = 1; + spin_unlock_bh(&lock_data->spinlock); + return 1; + } + spin_unlock_bh(&lock_data->spinlock); + + do { + old = *lock; + new = _DRM_LOCKING_CONTEXT(old); + prev = cmpxchg(lock, old, new); + } while (prev != old); + + if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { + DRM_ERROR("%d freed heavyweight lock held by %d\n", + context, _DRM_LOCKING_CONTEXT(old)); + return 1; + } + wake_up_interruptible(&lock_data->lock_queue); + return 0; +} + +/** * Lock ioctl. * * \param inode device inode. @@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data, /* don't set the block all signals on the master process for now * really probably not the correct answer but lets us debug xkb * xserver for now */ - if (!file_priv->is_master) { + if (!drm_is_current_master(file_priv)) { dev->sigdata.context = lock->context; dev->sigdata.lock = master->lock.hw_lock; } @@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_ } /** - * Take the heavyweight lock. - * - * \param lock lock pointer. - * \param context locking context. - * \return one if the lock is held, or zero otherwise. - * - * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 
- */ -static -int drm_lock_take(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - do { - old = *lock; - if (old & _DRM_LOCK_HELD) - new = old | _DRM_LOCK_CONT; - else { - new = context | _DRM_LOCK_HELD | - ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? - _DRM_LOCK_CONT : 0); - } - prev = cmpxchg(lock, old, new); - } while (prev != old); - spin_unlock_bh(&lock_data->spinlock); - - if (_DRM_LOCKING_CONTEXT(old) == context) { - if (old & _DRM_LOCK_HELD) { - if (context != DRM_KERNEL_CONTEXT) { - DRM_ERROR("%d holds heavyweight lock\n", - context); - } - return 0; - } - } - - if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { - /* Have lock */ - return 1; - } - return 0; -} - -/** - * This takes a lock forcibly and hands it to context. Should ONLY be used - * inside *_unlock to give lock to kernel before calling *_dma_schedule. - * - * \param dev DRM device. - * \param lock lock pointer. - * \param context locking context. - * \return always one. - * - * Resets the lock file pointer. - * Marks the lock as held by the given context, via the \p cmpxchg instruction. - */ -static int drm_lock_transfer(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - lock_data->file_priv = NULL; - do { - old = *lock; - new = context | _DRM_LOCK_HELD; - prev = cmpxchg(lock, old, new); - } while (prev != old); - return 1; -} - -/** - * Free lock. - * - * \param dev DRM device. - * \param lock lock. - * \param context context. - * - * Resets the lock file pointer. - * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task - * waiting on the lock queue. - */ -int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (lock_data->kernel_waiters != 0) { - drm_lock_transfer(lock_data, 0); - lock_data->idle_has_lock = 1; - spin_unlock_bh(&lock_data->spinlock); - return 1; - } - spin_unlock_bh(&lock_data->spinlock); - - do { - old = *lock; - new = _DRM_LOCKING_CONTEXT(old); - prev = cmpxchg(lock, old, new); - } while (prev != old); - - if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { - DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); - return 1; - } - wake_up_interruptible(&lock_data->lock_queue); - return 0; -} - -/** * This function returns immediately and takes the hw lock * with the kernel context if it is free, otherwise it gets the highest priority when and if * it is eventually released. 
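The drm_lock.c hunks above and below only move drm_lock_take(), drm_lock_transfer() and drm_legacy_lock_free() within the file, but the pattern they implement is worth spelling out: the heavyweight-lock word historically lives in the SAREA, which userspace also maps, so it is updated with a cmpxchg() retry loop rather than guarded by a kernel lock alone. A condensed, illustrative sketch of the take path follows; example_lock_take() is not part of the patch, the _DRM_LOCK_* macros are the ones from include/uapi/drm/drm.h, and the real drm_lock_take() additionally accounts for user/kernel waiters:

	static int example_lock_take(volatile unsigned int *lock, unsigned int context)
	{
		unsigned int old, new, prev;

		do {
			old = *lock;
			if (old & _DRM_LOCK_HELD)
				new = old | _DRM_LOCK_CONT;	/* held: just flag contention */
			else
				new = context | _DRM_LOCK_HELD;	/* free: take it for @context */
			prev = cmpxchg(lock, old, new);
		} while (prev != old);			/* retry if someone raced with us */

		return !(old & _DRM_LOCK_HELD);		/* 1 iff it was free and is now ours */
	}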
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data) } EXPORT_SYMBOL(drm_legacy_idlelock_release); -int drm_legacy_i_have_hw_lock(struct drm_device *dev, - struct drm_file *file_priv) +static int drm_legacy_i_have_hw_lock(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_master *master = file_priv->master; return (file_priv->lock_count && master->lock.hw_lock && _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); } + +void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + + /* if the master has gone away we can't do anything with the lock */ + if (!dev->master) + return; + + if (drm_legacy_i_have_hw_lock(dev, file_priv)) { + DRM_DEBUG("File %p released, freeing lock for context %d\n", + filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); + drm_legacy_lock_free(&file_priv->master->lock, + _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); + } +} diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 87a8cb73366f..fc0ebd273ef8 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -44,7 +44,7 @@ # include <asm/agp.h> #else # ifdef __powerpc__ -# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) +# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) # else # define PAGE_AGP PAGE_KERNEL # endif diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index f5d80839a90c..af0d471ee246 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv) return 0; } +static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + int err; + + err = of_device_uevent_modalias(dev, env); + if (err != -ENODEV) + return err; + + add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX, + dsi->name); + + return 0; +} + static const struct dev_pm_ops mipi_dsi_device_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, @@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = { static struct bus_type mipi_dsi_bus_type = { .name = "mipi-dsi", .match = mipi_dsi_device_match, + .uevent = mipi_dsi_uevent, .pm = &mipi_dsi_device_pm_ops, }; @@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); /** + * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for + * the Tearing Effect output signal of the display module + * @dsi: DSI peripheral device + * @scanline: scanline to use as trigger + * + * Return: 0 on success or a negative error code on failure + */ +int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) +{ + u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, + scanline & 0xff }; + ssize_t err; + + err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline); + +/** * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image * data used by the interface * @dsi: DSI peripheral device diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04de6fd88f8c..cb39f45d6a16 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -179,12 +179,14 @@ static void 
drm_mm_insert_helper(struct drm_mm_node *hole_node, int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; - u64 end = node->start + node->size; + u64 end; u64 hole_start; u64 hole_end; BUG_ON(node == NULL); + end = node->start + node->size; + /* Find the relevant hole to add our node to */ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { if (hole_start > node->start || hole_end < end) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index e5e6f504d8cc..fc5040ae5f25 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * * This function creates the modeline based on the GTF algorithm. * Generalized Timing Formula is derived from: + * * GTF Spreadsheet by Andy Morrish (1/5/97) * available at http://www.vesa.org * @@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * I also refer to the fb_get_mode() function in * drivers/video/fbmon.c * - * Standard GTF parameters: + * Standard GTF parameters:: + * * M = 600 * C = 40 * K = 128 diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index e3a4adf03e7b..61146f5b4f56 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -30,14 +30,14 @@ * * As KMS moves toward more fine grained locking, and atomic ioctl where * userspace can indirectly control locking order, it becomes necessary - * to use ww_mutex and acquire-contexts to avoid deadlocks. But because + * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because * the locking is more distributed around the driver code, we want a bit * of extra utility/tracking out of our acquire-ctx. This is provided * by drm_modeset_lock / drm_modeset_acquire_ctx. * - * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt + * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt * - * The basic usage pattern is to: + * The basic usage pattern is to:: * * drm_modeset_acquire_init(&ctx) * retry: @@ -51,6 +51,13 @@ * ... do stuff ... * drm_modeset_drop_locks(&ctx); * drm_modeset_acquire_fini(&ctx); + * + * On top of these per-object locks using &ww_mutex there's also an overall + * dev->mode_config.lock, for protecting everything else. Mostly this means + * probe state of connectors, and preventing hotplug add/removal of connectors. + * + * Finally there's a bunch of dedicated locks to protect drm core internal + * lists and lookup data structures. */ /** diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 29d5a548d07a..b2f8f1062d5f 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) } EXPORT_SYMBOL(drm_pci_set_busid); -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u) -{ - int domain, bus, slot, func, ret; - - master->unique_len = u->unique_len; - master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL); - if (!master->unique) { - ret = -ENOMEM; - goto err; - } - - if (copy_from_user(master->unique, u->unique, master->unique_len)) { - ret = -EFAULT; - goto err; - } - - master->unique[master->unique_len] = '\0'; - - /* Return error if the busid submitted doesn't match the device's actual - * busid.
- */ - ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); - if (ret != 3) { - ret = -EINVAL; - goto err; - } - - domain = bus >> 8; - bus &= 0xff; - - if ((domain != drm_get_pci_domain(dev)) || - (bus != dev->pdev->bus->number) || - (slot != PCI_SLOT(dev->pdev->devfn)) || - (func != PCI_FUNC(dev->pdev->devfn))) { - ret = -EINVAL; - goto err; - } - return 0; -err: - return ret; -} - static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) { if ((p->busnum >> 8) != drm_get_pci_domain(dev) || @@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, { return -EINVAL; } - -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u) -{ - return -EINVAL; -} #endif EXPORT_SYMBOL(drm_pci_init); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 369d2898ff9e..16c4a7bd7465 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * @src: source coordinates in 16.16 fixed point * @dest: integer destination coordinates * @clip: integer clipping coordinates + * @rotation: plane rotation * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point * @can_position: is it legal to position the plane such that it @@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * Zero if update appears valid, error code on failure */ int drm_plane_helper_check_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_rect *src, - struct drm_rect *dest, - const struct drm_rect *clip, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled, - bool *visible) + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dest, + const struct drm_rect *clip, + unsigned int rotation, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible) { int hscale, vscale; @@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane, return -EINVAL; } + drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); + /* Check scaling */ hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); @@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane, } *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); + + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); + if (!*visible) /* * Plane isn't visible; some drivers can handle this @@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update); * * Note that we make some assumptions about hardware limitations that may not be * true for all hardware -- - * 1) Primary plane cannot be repositioned. - * 2) Primary plane cannot be scaled. - * 3) Primary plane must cover the entire CRTC. - * 4) Subpixel positioning is not supported. + * + * 1. Primary plane cannot be repositioned. + * 2. Primary plane cannot be scaled. + * 3. Primary plane must cover the entire CRTC. + * 4. Subpixel positioning is not supported. + * * Drivers for hardware that don't have these restrictions can provide their * own implementation rather than using this helper. 
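A note on the reworked drm_plane_helper_check_update() above: callers now pass the plane rotation, and the helper rotates @src into display coordinates before clipping and back afterwards. The following is a minimal caller sketch, not part of this series; the mydrv_ names are hypothetical, and only the helper's new signature comes from this patch (the drm_simple_kms_helper added later in this series uses the same pattern):

/* Sketch: an atomic_check feeding plane state through the updated helper. */
static int mydrv_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_rect src = {
                .x1 = state->src_x,
                .y1 = state->src_y,
                .x2 = state->src_x + state->src_w,
                .y2 = state->src_y + state->src_h,
        };
        struct drm_rect dest = {
                .x1 = state->crtc_x,
                .y1 = state->crtc_y,
                .x2 = state->crtc_x + state->crtc_w,
                .y2 = state->crtc_y + state->crtc_h,
        };
        struct drm_rect clip = { 0 };
        bool visible;
        int ret;

        if (!state->crtc)
                return 0;

        crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                        state->crtc);
        clip.x2 = crtc_state->adjusted_mode.hdisplay;
        clip.y2 = crtc_state->adjusted_mode.vdisplay;

        /* rotation is passed through, so src is validated post-rotation */
        ret = drm_plane_helper_check_update(plane, state->crtc, state->fb,
                                            &src, &dest, &clip,
                                            state->rotation,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            true, true, &visible);
        if (ret)
                return ret;

        /* 'visible' reports whether anything survives clipping */
        return 0;
}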
* @@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + BIT(DRM_ROTATE_0), DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, false, &visible); diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 644169e1a029..2c819ef90090 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -68,24 +68,6 @@ err_free: return ret; } -int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) -{ - int id; - - id = dev->platformdev->id; - if (id < 0) - id = 0; - - master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d", - dev->platformdev->name, id); - if (!master->unique) - return -ENOMEM; - - master->unique_len = strlen(master->unique); - return 0; -} -EXPORT_SYMBOL(drm_platform_set_busid); - /** * drm_platform_init - Register a platform device with the DRM subsystem * @driver: DRM device driver diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index aab0f3f1f42d..780589b420a4 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, get_dma_buf(dma_buf); } - /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_unreference_unlocked(obj); if (ret) @@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); + mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; - mutex_unlock(&file_priv->prime.lock); - dma_buf_put(dma_buf); return 0; @@ -615,11 +614,14 @@ fail: * to detach.. which seems ok.. 
*/ drm_gem_handle_delete(file_priv, *handle); + dma_buf_put(dma_buf); + return ret; + out_unlock: mutex_unlock(&dev->object_name_lock); out_put: - dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); + dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 0329080d7f7c..a0df377d7d1c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode, static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) { + struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode; - if (!connector->cmdline_mode.specified) + cmdline_mode = &connector->cmdline_mode; + if (!cmdline_mode->specified) return 0; + /* Only add a GTF mode if we find no matching probed modes */ + list_for_each_entry(mode, &connector->probed_modes, head) { + if (mode->hdisplay != cmdline_mode->xres || + mode->vdisplay != cmdline_mode->yres) + continue; + + if (cmdline_mode->refresh_specified) { + /* The probed mode's vrefresh isn't set until later */ + if (drm_mode_vrefresh(mode) != cmdline_mode->refresh) + continue; + } + + return 0; + } + mode = drm_mode_create_from_cmdline_mode(connector->dev, - &connector->cmdline_mode); + cmdline_mode); if (mode == NULL) return 0; diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index 4f0f3b36d537..bf70431073f6 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -41,7 +41,7 @@ static inline void *drm_vmalloc_dma(unsigned long size) { #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE); + return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL)); #else return vmalloc_32(size); #endif diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c new file mode 100644 index 000000000000..0db36d27e90b --- /dev/null +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_simple_kms_helper.h> +#include <linux/slab.h> + +/** + * DOC: overview + * + * This helper library provides helpers for drivers of simple display + * hardware. + * + * drm_simple_display_pipe_init() initializes a simple display pipeline + * which has only one full-screen scanout buffer feeding one output. The + * pipeline is represented by struct &drm_simple_display_pipe and binds + * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed + * entity. Some flexibility for code reuse is provided through a separately + * allocated &drm_connector object and supporting optional &drm_bridge + * encoder drivers.
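To make the overview concrete, here is a sketch of a driver consuming this helper; the mydrv_ names are hypothetical, while drm_simple_display_pipe_init() and struct drm_simple_display_pipe_funcs are defined by this patch:

/* Hypothetical driver: one full-screen plane-crtc-encoder pipe. */
static void mydrv_pipe_update(struct drm_simple_display_pipe *pipe,
                              struct drm_plane_state *old_state)
{
        /* scan out the new framebuffer, pipe->plane.state->fb */
}

static const struct drm_simple_display_pipe_funcs mydrv_pipe_funcs = {
        .update = mydrv_pipe_update,
};

static const uint32_t mydrv_formats[] = { DRM_FORMAT_XRGB8888 };

/* the connector must already be initialized by the driver */
static int mydrv_display_init(struct drm_device *drm,
                              struct drm_simple_display_pipe *pipe,
                              struct drm_connector *connector)
{
        return drm_simple_display_pipe_init(drm, pipe, &mydrv_pipe_funcs,
                                            mydrv_formats,
                                            ARRAY_SIZE(mydrv_formats),
                                            connector);
}

Teardown then happens through drm_mode_config_cleanup(), as the kernel-doc for drm_simple_display_pipe_init() below notes.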
+ */ + +static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->enable) + return; + + pipe->funcs->enable(pipe, crtc->state); +} + +static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->disable) + return; + + pipe->funcs->disable(pipe); +} + +static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { + .disable = drm_simple_kms_crtc_disable, + .enable = drm_simple_kms_crtc_enable, +}; + +static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_rect src = { + .x1 = plane_state->src_x, + .y1 = plane_state->src_y, + .x2 = plane_state->src_x + plane_state->src_w, + .y2 = plane_state->src_y + plane_state->src_h, + }; + struct drm_rect dest = { + .x1 = plane_state->crtc_x, + .y1 = plane_state->crtc_y, + .x2 = plane_state->crtc_x + plane_state->crtc_w, + .y2 = plane_state->crtc_y + plane_state->crtc_h, + }; + struct drm_rect clip = { 0 }; + struct drm_simple_display_pipe *pipe; + struct drm_crtc_state *crtc_state; + bool visible; + int ret; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + &pipe->crtc); + if (crtc_state->enable != !!plane_state->crtc) + return -EINVAL; /* plane must match crtc enable state */ + + if (!crtc_state->enable) + return 0; /* nothing to check when disabling or disabled */ + + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + ret = drm_plane_helper_check_update(plane, &pipe->crtc, + plane_state->fb, + &src, &dest, &clip, + plane_state->rotation, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &visible); + if (ret) + return ret; + + if (!visible) + return -EINVAL; + + if (!pipe->funcs || !pipe->funcs->check) + return 0; + + return pipe->funcs->check(pipe, plane_state, crtc_state); +} + +static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *pstate) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->update) + return; + + pipe->funcs->update(pipe, pstate); +} + +static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { + .atomic_check = drm_simple_kms_plane_atomic_check, + .atomic_update = drm_simple_kms_plane_atomic_update, +}; + +static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = 
drm_atomic_helper_plane_destroy_state, +}; + +/** + * drm_simple_display_pipe_init - Initialize a simple display pipeline + * @dev: DRM device + * @pipe: simple display pipe object to initialize + * @funcs: callbacks for the display pipe (optional) + * @formats: array of supported formats (%DRM_FORMAT_*) + * @format_count: number of elements in @formats + * @connector: connector to attach and register + * + * Sets up a display pipeline which consists of a really simple + * plane-crtc-encoder pipe coupled with the provided connector. + * Teardown of a simple display pipe is all handled automatically by the drm + * core through calling drm_mode_config_cleanup(). Drivers afterwards need to + * release the memory for the structure themselves. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + struct drm_connector *connector) +{ + struct drm_encoder *encoder = &pipe->encoder; + struct drm_plane *plane = &pipe->plane; + struct drm_crtc *crtc = &pipe->crtc; + int ret; + + pipe->connector = connector; + pipe->funcs = funcs; + + drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &drm_simple_kms_plane_funcs, + formats, format_count, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &drm_simple_kms_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = 1 << drm_crtc_index(crtc); + ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + return drm_mode_connector_attach_encoder(connector, encoder); +} +EXPORT_SYMBOL(drm_simple_display_pipe_init); + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index fa7fadce8063..32dd821b7202 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = { struct class *drm_class; -/** - * __drm_class_suspend - internal DRM class suspend routine - * @dev: Linux device to suspend - * @state: power state to enter - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its suspend hook, if present. - */ -static int __drm_class_suspend(struct device *dev, pm_message_t state) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->suspend) - return drm_dev->driver->suspend(drm_dev, state); - } - return 0; -} - -/** - * drm_class_suspend - internal DRM class suspend hook. Simply calls - * __drm_class_suspend() with the correct pm state. - * @dev: Linux device to suspend - */ -static int drm_class_suspend(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_SUSPEND); -} - -/** - * drm_class_freeze - internal DRM class freeze hook. Simply calls - * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to freeze - */ -static int drm_class_freeze(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_FREEZE); -} - -/** - * drm_class_resume - DRM class resume hook - * @dev: Linux device to resume - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its resume hook, if present. - */ -static int drm_class_resume(struct device *dev) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->resume) - return drm_dev->driver->resume(drm_dev); - } - return 0; -} - -static const struct dev_pm_ops drm_class_dev_pm_ops = { - .suspend = drm_class_suspend, - .resume = drm_class_resume, - .freeze = drm_class_freeze, -}; - static char *drm_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); @@ -131,8 +62,6 @@ int drm_sysfs_init(void) if (IS_ERR(drm_class)) return PTR_ERR(drm_class); - drm_class->pm = &drm_class_dev_pm_ops; - err = class_create_file(drm_class, &class_attr_version.attr); if (err) { class_destroy(drm_class); diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index ac9f4b3ec615..caa4e4ca616d 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) pgprot_t tmp = vm_get_page_prot(vma->vm_flags); #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - tmp |= _PAGE_NO_CACHE; + tmp = pgprot_noncached_wc(tmp); #endif return tmp; } @@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) * pages and mappings in fault() */ #if defined(__powerpc__) - pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif vma->vm_ops = &drm_vm_ops; break; @@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev) kfree(vma); } } - -int drm_vma_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_vma_entry *pt; - struct vm_area_struct *vma; - unsigned long vma_count = 0; -#if defined(__i386__) - unsigned int pgprot; -#endif - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(pt, &dev->vmalist, head) - vma_count++; - - seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n", - vma_count, high_memory, - (void *)(unsigned long)virt_to_phys(high_memory)); - - list_for_each_entry(pt, &dev->vmalist, head) { - vma = pt->vma; - if (!vma) - continue; - seq_printf(m, - "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000", - pt->pid, - (void *)vma->vm_start, (void *)vma->vm_end, - vma->vm_flags & VM_READ ? 'r' : '-', - vma->vm_flags & VM_WRITE ? 'w' : '-', - vma->vm_flags & VM_EXEC ? 'x' : '-', - vma->vm_flags & VM_MAYSHARE ? 's' : 'p', - vma->vm_flags & VM_LOCKED ? 'l' : '-', - vma->vm_flags & VM_IO ? 'i' : '-', - vma->vm_pgoff); - -#if defined(__i386__) - pgprot = pgprot_val(vma->vm_page_prot); - seq_printf(m, " %c%c%c%c%c%c%c%c%c", - pgprot & _PAGE_PRESENT ? 'p' : '-', - pgprot & _PAGE_RW ? 'w' : 'r', - pgprot & _PAGE_USER ? 'u' : 's', - pgprot & _PAGE_PWT ? 't' : 'b', - pgprot & _PAGE_PCD ? 'u' : 'c', - pgprot & _PAGE_ACCESSED ? 'a' : '-', - pgprot & _PAGE_DIRTY ? 'd' : '-', - pgprot & _PAGE_PSE ? 'm' : 'k', - pgprot & _PAGE_GLOBAL ? 
'g' : 'l'); -#endif - seq_printf(m, "\n"); - } - mutex_unlock(&dev->struct_mutex); - return 0; -} diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 2f2ecde8285b..f306c8855978 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy); * used to implement weakly referenced lookups using kref_get_unless_zero(). * * Example: + * + * :: + * * drm_vma_offset_lock_lookup(mgr); * node = drm_vma_offset_lookup_locked(mgr); * if (node) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 3d4f56df8359..ffd1b32caa8d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev) int ret; ret = etnaviv_gpu_init(g); - if (ret) { - dev_err(g->dev, "hw init failed: %d\n", ret); + if (ret) priv->gpu[i] = NULL; - } } } } @@ -496,7 +494,6 @@ static struct drm_driver etnaviv_drm_driver = { DRIVER_RENDER, .open = etnaviv_open, .preclose = etnaviv_preclose, - .set_busid = drm_platform_set_busid, .gem_free_object_unlocked = etnaviv_gem_free_object, .gem_vm_ops = &vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index df9bcbab922f..5ce3603e6eac 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) { - if (etnaviv_obj->vaddr) - vunmap(etnaviv_obj->vaddr); + vunmap(etnaviv_obj->vaddr); put_pages(etnaviv_obj); } @@ -660,7 +659,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, * why this is required _and_ expected if you're * going to pin these pages. 
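Stepping back to the drm_vma_manager kernel-doc earlier in this hunk, whose example snippet is cut off: the weakly referenced lookup it describes would look roughly like this. The struct mydrv_obj container is hypothetical; the drm_vma_offset_*() calls and kref_get_unless_zero() are the interfaces the comment names:

/* A driver object embedding a kref and an offset node; lookup only
 * succeeds while the reference count is still non-zero. */
struct mydrv_obj {
        struct kref ref;
        struct drm_vma_offset_node vma_node;
};

static struct mydrv_obj *mydrv_lookup(struct drm_vma_offset_manager *mgr,
                                      unsigned long start,
                                      unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct mydrv_obj *obj = NULL;

        drm_vma_offset_lock_lookup(mgr);
        node = drm_vma_offset_lookup_locked(mgr, start, pages);
        if (node) {
                obj = container_of(node, struct mydrv_obj, vma_node);
                if (!kref_get_unless_zero(&obj->ref))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(mgr);

        return obj;
}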
*/ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER); } @@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, return obj; fail: - if (obj) - drm_gem_object_unreference_unlocked(obj); - + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } @@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, get_task_struct(current); ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); - if (ret) { - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); - return ret; - } + if (ret) + goto unreference; ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); - +unreference: /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(&etnaviv_obj->base); - return ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index ff6aa5dfb2d7..87ef34150d46 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) return 0; } +static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) +{ + u32 pmc, ppc; + + /* enable clock gating */ + ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; + + /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ + if (gpu->identity.revision == 0x4301 || + gpu->identity.revision == 0x4302) + ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; + + gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); + + pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); + + /* Disable PA clock gating for GC400+ except for GC420 */ + if (gpu->identity.model >= chipModel_GC400 && + gpu->identity.model != chipModel_GC420) + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA; + + /* + * Disable PE clock gating on revs < 5.0.0.0 when HZ is + * present without a bug fix. + */ + if (gpu->identity.revision < 0x5000 && + gpu->identity.minor_features0 & chipMinorFeatures0_HZ && + !(gpu->identity.minor_features1 & + chipMinorFeatures1_DISABLE_PE_GATING)) + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE; + + if (gpu->identity.revision < 0x5422) + pmc |= BIT(15); /* Unknown bit */ + + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; + + gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); +} + static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) { u16 prefetch; @@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); } + /* enable module-level clock gating */ + etnaviv_gpu_enable_mlcg(gpu); + /* * Update GPU AXI cache attribute to "cacheable, no allocate". * This is necessary to prevent the iMX6 SoC locking up. @@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) bool mmuv2; ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + dev_err(gpu->dev, "Failed to enable GPU power domain\n"); return ret; + } etnaviv_hw_identify(gpu); @@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) } ret = etnaviv_hw_reset(gpu); - if (ret) + if (ret) { + dev_err(gpu->dev, "GPU reset failed\n"); goto fail; + } /* Setup IOMMU.. eventually we will (I think) do this once per context * and have separate page tables per context.
For now, to keep things @@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) } if (!iommu) { + dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n"); ret = -ENOMEM; goto fail; } gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); if (!gpu->mmu) { + dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n"); iommu_domain_free(iommu); ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index f5321e2f25ff..a69cdd526bf8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -125,7 +125,7 @@ struct etnaviv_gpu { u32 completed_fence; u32 retired_fence; wait_queue_head_t fence_event; - unsigned int fence_context; + u64 fence_context; spinlock_t fence_spinlock; /* worker for handling active-list retiring: */ diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 6a7de5f1454a..807a3d9e0dd5 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -218,6 +218,13 @@ Copyright (C) 2015 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000 #define VIVS_PM_MODULE_STATUS 0x00000108 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index d814b3048ee5..83f61c513b7e 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,10 +2,6 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select VIDEOMODE_HELPERS help Choose this option if you have a Samsung SoC EXYNOS chipset. 
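Returning briefly to the etnaviv_gem_new_userptr() cleanup above: the "drop reference from allocate - handle holds it now" comment is the standard GEM ownership hand-off, which in isolation reads as follows. mydrv_gem_alloc() is a hypothetical allocator that returns an object holding one reference:

/* Sketch of the usual GEM creation pattern: the userspace handle takes
 * over the reference returned by the allocation. */
static int mydrv_gem_create(struct drm_device *dev, struct drm_file *file,
                            size_t size, u32 *handle)
{
        struct drm_gem_object *obj;
        int ret;

        obj = mydrv_gem_alloc(dev, size);       /* holds one reference */
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);
        /* drop the allocation reference; on success the handle keeps one */
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}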
@@ -15,7 +11,7 @@ if DRM_EXYNOS config DRM_EXYNOS_IOMMU bool - depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU + depends on EXYNOS_IOMMU default y comment "CRTCs" diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 4c1fb3f8b5a6..4f0850585b8e 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) return exynos_dp_crtc_clock_enable(plat_data, false); } -static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) +static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data, + struct drm_connector *connector) { struct exynos_dp_device *dp = to_dp(plat_data); - struct drm_connector *connector = dp->connector; struct drm_display_mode *mode; int num_modes = 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 5e38e749ac17..ad6b73c7fc59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dpi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dpi *ctx = connector_to_dpi(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { .get_modes = exynos_dpi_get_modes, - .best_encoder = exynos_dpi_best_encoder, }; static int exynos_dpi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2dd820e23b0c..877d2efa28e2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", dev_name(private->dma_dev)); - /* - * create mapping to manage iommu table and set a pointer to iommu - * mapping structure to iommu_mapping of private data. - * also this iommu_mapping can be used to check if iommu is supported - * or not. - */ + /* create common IOMMU mapping for all devices attached to Exynos DRM */ ret = drm_create_iommu_mapping(dev); if (ret < 0) { DRM_ERROR("failed to create iommu mapping.\n"); @@ -267,6 +262,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, { struct exynos_drm_private *priv = dev->dev_private; struct exynos_atomic_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; int i, ret; commit = kzalloc(sizeof(*commit), GFP_KERNEL); @@ -288,10 +285,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. 
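As context for the wait_event() call below: the pending-mask test it uses is a simple check under the same spinlock that exynos_atomic_commit() takes when setting the bits. A sketch matching the fields used in this file (the function body itself is outside this hunk):

/* A commit stays pending while any of its CRTCs is still marked in
 * priv->pending; priv->lock serializes this against the commit path. */
static bool commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
        bool pending;

        spin_lock(&priv->lock);
        pending = priv->pending & crtcs;
        spin_unlock(&priv->lock);

        return pending;
}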
*/ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); @@ -299,7 +294,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, priv->pending |= commit->crtcs; spin_unlock(&priv->lock); - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); @@ -407,7 +402,6 @@ static struct drm_driver exynos_drm_driver = { .preclose = exynos_drm_preclose, .lastclose = exynos_drm_lastclose, .postclose = exynos_drm_postclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = exynos_drm_crtc_enable_vblank, .disable_vblank = exynos_drm_crtc_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cc33ec9296e7..b39d521f093d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -224,8 +224,6 @@ struct exynos_drm_private { struct drm_property *plane_zpos_property; struct device *dma_dev; - unsigned long da_start; - unsigned long da_space_size; void *mapping; unsigned int pipe; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 601ecf8006a7..e07cb1fe4860 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dsi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dsi *dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { .get_modes = exynos_dsi_get_modes, - .best_encoder = exynos_dsi_best_encoder, }; static int exynos_dsi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 67dcd6831291..fb49443bfd32 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem; struct drm_framebuffer *fb; - if (exynos_gem->kvaddr) - vunmap(exynos_gem->kvaddr); + vunmap(exynos_gem->kvaddr); /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c index 7ca09ee19656..0f373702414e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -14,13 +14,27 @@ #include <linux/dma-mapping.h> #include <linux/iommu.h> -#include <linux/kref.h> - -#include <asm/dma-iommu.h> #include "exynos_drm_drv.h" #include "exynos_drm_iommu.h" +static inline int configure_dma_max_seg_size(struct device *dev) +{ + if (!dev->dma_parms) + dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + return 0; +} + +static inline void clear_dma_max_seg_size(struct device *dev) +{ + kfree(dev->dma_parms); + dev->dma_parms = NULL; +} + /* * drm_create_iommu_mapping - create a mapping 
structure * @@ -28,38 +42,22 @@ */ int drm_create_iommu_mapping(struct drm_device *drm_dev) { - struct dma_iommu_mapping *mapping = NULL; struct exynos_drm_private *priv = drm_dev->dev_private; - if (!priv->da_start) - priv->da_start = EXYNOS_DEV_ADDR_START; - if (!priv->da_space_size) - priv->da_space_size = EXYNOS_DEV_ADDR_SIZE; - - mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, - priv->da_space_size); - - if (IS_ERR(mapping)) - return PTR_ERR(mapping); - - priv->mapping = mapping; - - return 0; + return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START, + EXYNOS_DEV_ADDR_SIZE); } /* * drm_release_iommu_mapping - release iommu mapping structure * * @drm_dev: DRM device - * - * if mapping->kref becomes 0 then all things related to iommu mapping - * will be released */ void drm_release_iommu_mapping(struct drm_device *drm_dev) { struct exynos_drm_private *priv = drm_dev->dev_private; - arm_iommu_release_mapping(priv->mapping); + __exynos_iommu_release_mapping(priv); } /* @@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, struct exynos_drm_private *priv = drm_dev->dev_private; int ret; - if (!priv->mapping) - return 0; - - subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, - sizeof(*subdrv_dev->dma_parms), - GFP_KERNEL); - if (!subdrv_dev->dma_parms) - return -ENOMEM; - - dma_set_max_seg_size(subdrv_dev, 0xffffffffu); - - if (subdrv_dev->archdata.mapping) - arm_iommu_detach_device(subdrv_dev); + if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) { + DRM_ERROR("Device %s lacks support for IOMMU\n", + dev_name(subdrv_dev)); + return -EINVAL; + } - ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); - if (ret < 0) { - DRM_DEBUG_KMS("failed iommu attach.\n"); + ret = configure_dma_max_seg_size(subdrv_dev); + if (ret) return ret; - } + + ret = __exynos_iommu_attach(priv, subdrv_dev); + if (ret) + clear_dma_max_seg_size(subdrv_dev); return 0; } @@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev, struct device *subdrv_dev) { struct exynos_drm_private *priv = drm_dev->dev_private; - struct dma_iommu_mapping *mapping = priv->mapping; - - if (!mapping || !mapping->domain) - return; - arm_iommu_detach_device(subdrv_dev); + __exynos_iommu_detach(priv, subdrv_dev); + clear_dma_max_seg_size(subdrv_dev); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h index 5ffebe02ee4d..c8de4913fdbe 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h @@ -17,6 +17,97 @@ #ifdef CONFIG_DRM_EXYNOS_IOMMU +#if defined(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> + +static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv, + unsigned long start, unsigned long size) +{ + priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start, + size); + return PTR_ERR_OR_ZERO(priv->mapping); +} + +static inline void +__exynos_iommu_release_mapping(struct exynos_drm_private *priv) +{ + arm_iommu_release_mapping(priv->mapping); +} + +static inline int __exynos_iommu_attach(struct exynos_drm_private *priv, + struct device *dev) +{ + if (dev->archdata.mapping) + arm_iommu_detach_device(dev); + + return arm_iommu_attach_device(dev, priv->mapping); +} + +static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, + struct device *dev) +{ + arm_iommu_detach_device(dev); +} + +#elif defined(CONFIG_IOMMU_DMA) +#include <linux/dma-iommu.h> + +static inline int __exynos_iommu_create_mapping(struct
exynos_drm_private *priv, + unsigned long start, unsigned long size) +{ + struct iommu_domain *domain; + int ret; + + domain = iommu_domain_alloc(priv->dma_dev->bus); + if (!domain) + return -ENOMEM; + + ret = iommu_get_dma_cookie(domain); + if (ret) + goto free_domain; + + ret = iommu_dma_init_domain(domain, start, size); + if (ret) + goto put_cookie; + + priv->mapping = domain; + return 0; + +put_cookie: + iommu_put_dma_cookie(domain); +free_domain: + iommu_domain_free(domain); + return ret; +} + +static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv) +{ + struct iommu_domain *domain = priv->mapping; + + iommu_put_dma_cookie(domain); + iommu_domain_free(domain); + priv->mapping = NULL; +} + +static inline int __exynos_iommu_attach(struct exynos_drm_private *priv, + struct device *dev) +{ + struct iommu_domain *domain = priv->mapping; + + return iommu_attach_device(domain, dev); +} + +static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, + struct device *dev) +{ + struct iommu_domain *domain = priv->mapping; + + iommu_detach_device(domain, dev); +} +#else +#error Unsupported architecture and IOMMU/DMA-mapping glue code +#endif + int drm_create_iommu_mapping(struct drm_device *drm_dev); void drm_release_iommu_mapping(struct drm_device *drm_dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 608b0afa337f..e8f6c92b2a36 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector) return drm_add_edid_modes(connector, edid); } -static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) -{ - struct vidi_context *ctx = ctx_from_connector(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { .get_modes = vidi_get_modes, - .best_encoder = vidi_best_encoder, }; static int vidi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 58de5a430508..2275efe41acd 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) -{ - struct hdmi_context *hdata = connector_to_hdmi(connector); - - return &hdata->encoder; -} - static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_get_modes, .mode_valid = hdmi_mode_valid, - .best_encoder = hdmi_best_encoder, }; static int hdmi_create_connector(struct drm_encoder *encoder) @@ -1828,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev) DRM_ERROR("Failed to find ddc node in device tree\n"); return -ENODEV; } + of_node_put(dev->of_node); out_get_ddc_adpt: hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); @@ -1846,6 +1839,7 @@ out_get_ddc_adpt: ret = -ENODEV; goto err_ddc; } + of_node_put(dev->of_node); out_get_phy_port: if (hdata->drv_data->is_apb_phy) { diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index b9c714de6e40..14a72c4c496d 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -5,12 +5,7 @@ config DRM_FSL_DCU select BACKLIGHT_LCD_SUPPORT select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER - select DRM_KMS_FB_HELPER select DRM_PANEL - select 
FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS select REGMAP_MMIO select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 89c0084c2814..3371635cd4d7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -22,20 +22,21 @@ #include "fsl_dcu_drm_drv.h" #include "fsl_dcu_drm_plane.h" -static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, +static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { -} + struct drm_pending_vblank_event *event = crtc->state->event; -static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - return 0; -} + if (event) { + crtc->state->event = NULL; -static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) @@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + drm_crtc_vblank_off(crtc); + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, DCU_MODE_DCU_MODE_MASK, DCU_MODE_DCU_MODE(DCU_MODE_OFF)); @@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); + + drm_crtc_vblank_on(crtc); } static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) @@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { - .atomic_begin = fsl_dcu_drm_crtc_atomic_begin, - .atomic_check = fsl_dcu_drm_crtc_atomic_check, .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, .disable = fsl_dcu_drm_disable_crtc, .enable = fsl_dcu_drm_crtc_enable, @@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) { struct drm_plane *primary; struct drm_crtc *crtc = &fsl_dev->crtc; - unsigned int i, j, reg_num; int ret; + fsl_dcu_drm_init_planes(fsl_dev->drm); + primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); if (!primary) return -ENOMEM; @@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); - if (!strcmp(fsl_dev->soc->name, "ls1021a")) - reg_num = LS1021A_LAYER_REG_NUM; - else - reg_num = VF610_LAYER_REG_NUM; - for (i = 0; i < fsl_dev->soc->total_layer; i++) { - for (j = 1; j <= reg_num; j++) - regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); - } - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_OFF)); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index dc723f7ead7d..7882387f9bff 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -11,6 +11,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/console.h> #include <linux/io.h> #include <linux/mfd/syscon.h> 
#include <linux/mm.h> @@ -22,6 +23,7 @@ #include <linux/regmap.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -42,10 +44,8 @@ static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .cache_type = REGCACHE_FLAT, .volatile_reg = fsl_dcu_drm_is_volatile_reg, - .max_register = 0x11fc, }; static int fsl_dcu_drm_irq_init(struct drm_device *dev) @@ -199,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = fsl_dcu_drm_enable_vblank, .disable_vblank = fsl_dcu_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -229,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) if (!fsl_dev) return 0; + disable_irq(fsl_dev->irq); drm_kms_helper_poll_disable(fsl_dev->drm); - regcache_cache_only(fsl_dev->regmap, true); - regcache_mark_dirty(fsl_dev->regmap); - clk_disable(fsl_dev->clk); - clk_unprepare(fsl_dev->clk); + + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1); + console_unlock(); + + fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm); + if (IS_ERR(fsl_dev->state)) { + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); + console_unlock(); + + drm_kms_helper_poll_enable(fsl_dev->drm); + enable_irq(fsl_dev->irq); + return PTR_ERR(fsl_dev->state); + } + + clk_disable_unprepare(fsl_dev->pix_clk); + clk_disable_unprepare(fsl_dev->clk); return 0; } @@ -246,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (!fsl_dev) return 0; - ret = clk_enable(fsl_dev->clk); + ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); - clk_unprepare(fsl_dev->clk); return ret; } - ret = clk_prepare(fsl_dev->clk); + + ret = clk_prepare_enable(fsl_dev->pix_clk); if (ret < 0) { - dev_err(dev, "failed to prepare dcu clk\n"); + dev_err(dev, "failed to enable pix clk\n"); return ret; } + fsl_dcu_drm_init_planes(fsl_dev->drm); + drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); + + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); + console_unlock(); + drm_kms_helper_poll_enable(fsl_dev->drm); - regcache_cache_only(fsl_dev->regmap, false); - regcache_sync(fsl_dev->regmap); + enable_irq(fsl_dev->irq); return 0; } @@ -274,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = { .name = "ls1021a", .total_layer = 16, .max_layer = 4, + .layer_regs = LS1021A_LAYER_REG_NUM, }; static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { .name = "vf610", .total_layer = 64, .max_layer = 6, + .layer_regs = VF610_LAYER_REG_NUM, }; static const struct of_device_id fsl_dcu_of_match[] = { diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index c275f900ff23..3b371fe7491e 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -175,6 +175,7 @@ struct fsl_dcu_soc_data { unsigned int total_layer; /*max layer number DCU supported*/ unsigned int max_layer; + unsigned int layer_regs; }; struct fsl_dcu_drm_device { @@ -193,6 +194,7 @@ struct fsl_dcu_drm_device { struct drm_encoder encoder; struct fsl_dcu_drm_connector connector; const struct fsl_dcu_soc_data *soc; + 
struct drm_atomic_state *state; }; void fsl_dcu_fbdev_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c index c564ec612b59..d9d6cc1c8e39 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c @@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) ret = fsl_dcu_drm_crtc_create(fsl_dev); if (ret) - return ret; + goto err; ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); if (ret) - goto fail_encoder; + goto err; - ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); + ret = fsl_dcu_create_outputs(fsl_dev); if (ret) - goto fail_connector; + goto err; drm_mode_config_reset(fsl_dev->drm); drm_kms_helper_poll_init(fsl_dev->drm); return 0; -fail_encoder: - fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc); -fail_connector: - fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder); + +err: + drm_mode_config_cleanup(fsl_dev->drm); return ret; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h index 7093109fbc21..5a7b88e19e44 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h @@ -25,9 +25,8 @@ to_fsl_dcu_connector(struct drm_connector *con) : NULL; } -int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, - struct drm_encoder *encoder); int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, struct drm_crtc *crtc); +int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev); #endif /* __FSL_DCU_DRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 274558b3b32b..e50467a0deb0 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = { DRM_FORMAT_YUV422, }; +void fsl_dcu_drm_init_planes(struct drm_device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + int i, j; + + for (i = 0; i < fsl_dev->soc->total_layer; i++) { + for (j = 1; j <= fsl_dev->soc->layer_regs; j++) + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); + } + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); +} + struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) { struct drm_plane *primary; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h index d657f088d859..8ee45f813ee8 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h @@ -12,6 +12,7 @@ #ifndef __FSL_DCU_DRM_PLANE_H__ #define __FSL_DCU_DRM_PLANE_H__ +void fsl_dcu_drm_init_planes(struct drm_device *dev); struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev); #endif /* __FSL_DCU_DRM_PLANE_H__ */ diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 98c998da91eb..26edcc899712 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -10,6 +10,7 @@ */ #include <linux/backlight.h> +#include <linux/of_graph.h> #include <drm/drmP.h> #include <drm/drm_atomic_helper.h> @@ -102,14 +103,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = { .reset = 
drm_atomic_helper_connector_reset, }; -static struct drm_encoder * -fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector); - - return fsl_con->encoder; -} - static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) { struct fsl_dcu_drm_connector *fsl_connector; @@ -136,17 +129,16 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, } static const struct drm_connector_helper_funcs connector_helper_funcs = { - .best_encoder = fsl_dcu_drm_connector_best_encoder, .get_modes = fsl_dcu_drm_connector_get_modes, .mode_valid = fsl_dcu_drm_connector_mode_valid, }; -int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, - struct drm_encoder *encoder) +static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, + struct drm_panel *panel) { + struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; - struct device_node *panel_node; int ret; fsl_dev->connector.encoder = encoder; @@ -170,21 +162,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, mode_config->dpms_property, DRM_MODE_DPMS_OFF); - panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0); - if (!panel_node) { - dev_err(fsl_dev->dev, "fsl,panel property not found\n"); - ret = -ENODEV; - goto err_sysfs; - } - - fsl_dev->connector.panel = of_drm_find_panel(panel_node); - if (!fsl_dev->connector.panel) { - ret = -EPROBE_DEFER; - goto err_panel; - } - of_node_put(panel_node); - - ret = drm_panel_attach(fsl_dev->connector.panel, connector); + ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); goto err_sysfs; @@ -192,11 +170,62 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, return 0; -err_panel: - of_node_put(panel_node); err_sysfs: drm_connector_unregister(connector); err_cleanup: drm_connector_cleanup(connector); return ret; } + +static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev, + const struct of_endpoint *ep) +{ + struct drm_bridge *bridge; + struct device_node *np; + + np = of_graph_get_remote_port_parent(ep->local_node); + + fsl_dev->connector.panel = of_drm_find_panel(np); + if (fsl_dev->connector.panel) { + of_node_put(np); + return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel); + } + + bridge = of_drm_find_bridge(np); + of_node_put(np); + if (!bridge) + return -ENODEV; + + fsl_dev->encoder.bridge = bridge; + bridge->encoder = &fsl_dev->encoder; + + return drm_bridge_attach(fsl_dev->drm, bridge); +} + +int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev) +{ + struct of_endpoint ep; + struct device_node *ep_node, *panel_node; + int ret; + + /* This is for backward compatibility */ + panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0); + if (panel_node) { + fsl_dev->connector.panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!fsl_dev->connector.panel) + return -EPROBE_DEFER; + return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel); + } + + ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL); + if (!ep_node) + return -ENODEV; + + ret = of_graph_parse_endpoint(ep_node, &ep); + of_node_put(ep_node); + if (ret) + return -ENODEV; + + return fsl_dcu_attach_endpoint(fsl_dev, &ep); +} diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 
bbe34f1c0505..bca09ea24632 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c @@ -92,6 +92,7 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev) goto err_node_put; } + of_node_put(np); clk_prepare_enable(tcon->ipg_clk); dev_info(dev, "Using TCON in bypass mode\n"); diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 17f928ec84ea..8906d67494fc 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -1,11 +1,7 @@ config DRM_GMA500 tristate "Intel GMA5/600 KMS Framebuffer" depends on DRM && PCI && X86 - select FB_CFB_COPYAREA - select FB_CFB_FILLRECT - select FB_CFB_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 select ACPI_VIDEO if ACPI diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 28f9d90988ff..563f193fcfac 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector) { struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if (gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 813ef23a8054..38dc89083148 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector) { struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if (gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); @@ -780,12 +779,10 @@ out: failed_find: mutex_unlock(&dev->mode_config.mutex); printk(KERN_ERR "Failed find\n"); - if (gma_encoder->ddc_bus) - psb_intel_i2c_destroy(gma_encoder->ddc_bus); + psb_intel_i2c_destroy(gma_encoder->ddc_bus); failed_ddc: printk(KERN_ERR "Failed DDC\n"); - if (gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); failed_blc_i2c: printk(KERN_ERR "Failed BLC\n"); drm_encoder_cleanup(encoder); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 7440bf90ac9c..0fcdce0817de 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) return 0; } -static int psbfb_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) -{ - return -ENOTTY; -} - static struct fb_ops psbfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, @@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = { .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, .fb_sync = psbfb_sync, - .fb_ioctl = psbfb_ioctl, }; static struct fb_ops psbfb_roll_ops = { @@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = { .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = psbfb_pan, .fb_mmap = psbfb_mmap, - .fb_ioctl = psbfb_ioctl, }; static struct fb_ops psbfb_unaccel_ops = { @@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = { .fb_copyarea = 
drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, - .fb_ioctl = psbfb_ioctl, }; /** diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index c95406e6f44d..1a1cf7a3b5ef 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) } } -void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, - u32 start, u32 size) +int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, + u32 size) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int i; - int end = (start + size > 256) ? 256 : start + size; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { gma_crtc->lut_r[i] = red[i] >> 8; gma_crtc->lut_g[i] = green[i] >> 8; gma_crtc->lut_b[i] = blue[i] >> 8; } gma_crtc_load_lut(crtc); + + return 0; } /** @@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Turn off vblank interrupts */ - drm_vblank_off(dev, pipe); + drm_crtc_vblank_off(crtc); /* Wait for vblank for the disable to take effect */ gma_wait_for_vblank(dev); diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index b2491c65f053..e72dd08b701b 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc, uint32_t width, uint32_t height); extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); extern void gma_crtc_load_lut(struct drm_crtc *crtc); -extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, u32 start, u32 size); +extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, u32 size); extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); extern void gma_crtc_prepare(struct drm_crtc *crtc); extern void gma_crtc_commit(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 82b8ce418b27..50eb944fb78a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev) iounmap(dev_priv->aux_reg); dev_priv->aux_reg = NULL; } - if (dev_priv->aux_pdev) - pci_dev_put(dev_priv->aux_pdev); - if (dev_priv->lpc_pdev) - pci_dev_put(dev_priv->lpc_pdev); + pci_dev_put(dev_priv->aux_pdev); + pci_dev_put(dev_priv->lpc_pdev); /* Destroy VBT data */ psb_intel_destroy_bios(dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 398015be87e4..7b6c84925098 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc; int i; - uint16_t *r_base, *g_base, *b_base; /* We allocate a extra array of drm_connector pointers * for fbdev after the crtc */ @@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, gma_crtc->pipe = pipe; gma_crtc->plane = pipe; - r_base = gma_crtc->base.gamma_store; - g_base = r_base + 256; - b_base = g_base + 256; for (i = 0; i < 256; i++) { gma_crtc->lut_r[i] = i; gma_crtc->lut_g[i] = i; gma_crtc->lut_b[i] = i; - r_base[i] = i << 8; - g_base[i] = i << 8; - b_base[i] = i << 8; gma_crtc->lut_adj[i] = 0; 
} diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index b1b93317d054..e55733ca46d2 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector) struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv; - if (lvds_priv->ddc_bus) - psb_intel_i2c_destroy(lvds_priv->ddc_bus); + psb_intel_i2c_destroy(lvds_priv->ddc_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); @@ -835,11 +834,9 @@ out: failed_find: mutex_unlock(&dev->mode_config.mutex); - if (lvds_priv->ddc_bus) - psb_intel_i2c_destroy(lvds_priv->ddc_bus); + psb_intel_i2c_destroy(lvds_priv->ddc_bus); failed_ddc: - if (lvds_priv->i2c_bus) - psb_intel_i2c_destroy(lvds_priv->i2c_bus); + psb_intel_i2c_destroy(lvds_priv->i2c_bus); failed_blc_i2c: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index ea0df6115f7e..499f64405dac 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -4,6 +4,7 @@ config DRM_HISI_KIRIN select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER + select HISI_KIRIN_DW_DSI help Choose this option if you have a hisilicon Kirin chipsets(hi6220). If M is selected the module will be called kirin-drm. diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index fba6372d060e..c3707d47cd89 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc) ade_set_medianoc_qos(acrtc); ade_display_enable(acrtc); ade_dump_regs(ctx->base); + drm_crtc_vblank_on(crtc); acrtc->enable = true; } @@ -498,17 +499,11 @@ static void ade_crtc_disable(struct drm_crtc *crtc) if (!acrtc->enable) return; + drm_crtc_vblank_off(crtc); ade_power_down(ctx); acrtc->enable = false; } -static int ade_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - /* do nothing */ - return 0; -} - static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct ade_crtc *acrtc = to_ade_crtc(crtc); @@ -537,6 +532,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_pending_vblank_event *event = crtc->state->event; void __iomem *base = ctx->base; /* only crtc is enabled regs take effect */ @@ -545,12 +541,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, /* flush ade registers */ writel(ADE_ENABLE, base + ADE_EN); } + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { .enable = ade_crtc_enable, .disable = ade_crtc_disable, - .atomic_check = ade_crtc_atomic_check, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, @@ -961,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx) } ctx->ade_core_clk = devm_clk_get(dev, 
"clk_ade_core"); - if (!ctx->ade_core_clk) { + if (IS_ERR(ctx->ade_core_clk)) { DRM_ERROR("failed to parse clk ADE_CORE\n"); - return -ENODEV; + return PTR_ERR(ctx->ade_core_clk); } ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg"); - if (!ctx->media_noc_clk) { + if (IS_ERR(ctx->media_noc_clk)) { DRM_ERROR("failed to parse clk CODEC_JPEG\n"); - return -ENODEV; + return PTR_ERR(ctx->media_noc_clk); } ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix"); - if (!ctx->ade_pix_clk) { + if (IS_ERR(ctx->ade_pix_clk)) { DRM_ERROR("failed to parse clk ADE_PIX\n"); - return -ENODEV; + return PTR_ERR(ctx->ade_pix_clk); } return 0; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 3f94785fbcca..1edd9bc80294 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC | DRIVER_HAVE_IRQ, .fops = &kirin_drm_fops, - .set_busid = drm_platform_set_busid, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = kirin_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, @@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev) if (ret) goto err_kms_cleanup; - /* connectors should be registered after drm device register */ - ret = drm_connector_register_all(drm_dev); - if (ret) - goto err_drm_dev_unregister; - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, drm_dev->primary->index); return 0; -err_drm_dev_unregister: - drm_dev_unregister(drm_dev); err_kms_cleanup: kirin_drm_kms_cleanup(drm_dev); err_drm_dev_unref: @@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - drm_connector_unregister_all(drm_dev); drm_dev_unregister(drm_dev); kirin_drm_kms_cleanup(drm_dev); drm_dev_unref(drm_dev); diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig index 22c7ed63a001..4d341db462a2 100644 --- a/drivers/gpu/drm/i2c/Kconfig +++ b/drivers/gpu/drm/i2c/Kconfig @@ -1,12 +1,6 @@ menu "I2C encoder or helper chips" depends on DRM && DRM_KMS_HELPER && I2C -config DRM_I2C_ADV7511 - tristate "AV7511 encoder" - select REGMAP_I2C - help - Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders. - config DRM_I2C_CH7006 tristate "Chrontel ch7006 TV encoder" default m if DRM_NOUVEAU diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile index 2c72eb584ab7..43aa33baebed 100644 --- a/drivers/gpu/drm/i2c/Makefile +++ b/drivers/gpu/drm/i2c/Makefile @@ -1,7 +1,5 @@ ccflags-y := -Iinclude/drm -obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o - ch7006-y := ch7006_drv.o ch7006_mode.o obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 0594c45f7164..e9e8ae2ec06b 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder, /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. 
*/ - if (crtc) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } + if (crtc) + drm_crtc_force_disable(crtc); } return 0; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 29a32b11953b..7769e469118f 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -57,6 +57,28 @@ config DRM_I915_USERPTR If in doubt, say "Y". +config DRM_I915_GVT + bool "Enable Intel GVT-g graphics virtualization host support" + depends on DRM_I915 + default n + help + Choose this option if you want to enable Intel GVT-g graphics + virtualization technology host support with integrated graphics. + With GVT-g, it's possible to have one integrated graphics + device shared by multiple VMs under different hypervisors. + + Note that at least one hypervisor like Xen or KVM is required for + this driver to work, and it only supports newer devices from + Broadwell onwards. For further information and a setup guide, you + can visit: http://01.org/igvt-g. + + For now this is just a stub to support the i915 modifications for + the GVT device model. It requires at least one MPT module for + Xen/KVM and the other components of the GVT device model to work. + Use it at your own risk. + + If in doubt, say "N". + menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 8f404103341d..cee87bfd10c4 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -18,6 +18,9 @@ config DRM_I915_WERROR config DRM_I915_DEBUG bool "Enable additional driver debugging" depends on DRM_I915 + select PREEMPT_COUNT + select X86_MSR # used by igt/pm_rpm + select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) default n help Choose this option to turn on extra driver debugging that may affect diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0b88ba0f3c1f..684fc1cd08fa 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror i915-y := i915_drv.o \ i915_irq.o \ i915_params.o \ + i915_pci.o \ i915_suspend.o \ i915_sysfs.o \ intel_csr.o \ + intel_device_info.o \ intel_pm.o \ intel_runtime_pm.o @@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_userptr.o \ i915_gpu_error.o \ i915_trace_points.o \ + intel_breadcrumbs.o \ intel_lrc.o \ intel_mocs.o \ intel_ringbuffer.o \ @@ -59,6 +62,7 @@ i915-y += intel_audio.o \ intel_bios.o \ intel_color.o \ intel_display.o \ + intel_dpio_phy.o \ intel_dpll_mgr.o \ intel_fbc.o \ intel_fifo_underrun.o \ @@ -81,10 +85,12 @@ i915-y += dvo_ch7017.o \ dvo_tfp410.o \ intel_crt.o \ intel_ddi.o \ + intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ intel_dsi.o \ + intel_dsi_dcs_backlight.o \ intel_dsi_panel_vbt.o \ intel_dsi_pll.o \ intel_dvo.o \ @@ -98,8 +104,10 @@ i915-y += dvo_ch7017.o \ # virtual gpu code i915-y += i915_vgpu.o -# legacy horrors -i915-y += i915_dma.o +ifeq ($(CONFIG_DRM_I915_GVT),y) +i915-y += intel_gvt.o +include $(src)/gvt/Makefile +endif obj-$(CONFIG_DRM_I915) += i915.o diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile new file mode 100644 index 000000000000..d0f21a6ad60d --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -0,0 +1,5 @@ +GVT_DIR := gvt +GVT_SOURCE := gvt.o + +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall +i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) diff --git
a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h new file mode 100644 index 000000000000..7ef412be665f --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -0,0 +1,34 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __GVT_DEBUG_H__ +#define __GVT_DEBUG_H__ + +#define gvt_dbg_core(fmt, args...) \ + DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) + +/* + * Other GVT debug stuff will be introduced in the GVT device model patches. + */ + +#endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c new file mode 100644 index 000000000000..927f4579f5b6 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -0,0 +1,145 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/types.h> +#include <xen/xen.h> + +#include "i915_drv.h" + +struct intel_gvt_host intel_gvt_host; + +static const char * const supported_hypervisors[] = { + [INTEL_GVT_HYPERVISOR_XEN] = "XEN", + [INTEL_GVT_HYPERVISOR_KVM] = "KVM", +}; + +/** + * intel_gvt_init_host - Load MPT modules and detect if we're running in host + * + * This function is called at the driver loading stage. If it fails to find a + * loadable MPT module, or detects that we are currently running inside a VM, + * GVT-g will be disabled. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_gvt_init_host(void) +{ + if (intel_gvt_host.initialized) + return 0; + + /* Xen DOM U */ + if (xen_domain() && !xen_initial_domain()) + return -ENODEV; + + /* Try to load MPT modules for hypervisors */ + if (xen_initial_domain()) { + /* In Xen dom0 */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(xengt_mpt), "xengt"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN; + } else { + /* not in Xen. Try KVMGT */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(kvmgt_mpt), "kvm"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; + } + + /* Failed to load any MPT module - bail out */ + if (!intel_gvt_host.mpt) + return -EINVAL; + + /* Try to detect if we're running in the host instead of in a VM. */ + if (!intel_gvt_hypervisor_detect_host()) + return -ENODEV; + + gvt_dbg_core("Running with hypervisor %s in host mode\n", + supported_hypervisors[intel_gvt_host.hypervisor_type]); + + intel_gvt_host.initialized = true; + return 0; +} + +static void init_device_info(struct intel_gvt *gvt) +{ + if (IS_BROADWELL(gvt->dev_priv)) + gvt->device_info.max_support_vgpus = 8; + /* This function will grow large in GVT device model patches. */ +} + +/** + * intel_gvt_clean_device - clean a GVT device + * @dev_priv: drm i915 private data + * + * This function is called at the driver unloading stage, to free the + * resources owned by a GVT device. + * + */ +void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +{ + struct intel_gvt *gvt = &dev_priv->gvt; + + if (WARN_ON(!gvt->initialized)) + return; + + /* Other de-initialization of GVT components will be introduced. */ + + gvt->initialized = false; +} + +/** + * intel_gvt_init_device - initialize a GVT device + * @dev_priv: drm i915 private data + * + * This function is called at the initialization stage, to initialize + * necessary GVT components. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_gvt_init_device(struct drm_i915_private *dev_priv) +{ + struct intel_gvt *gvt = &dev_priv->gvt; + /* + * Cannot initialize the GVT device until intel_gvt_host has been + * initialized first. + */ + if (WARN_ON(!intel_gvt_host.initialized)) + return -EINVAL; + + if (WARN_ON(gvt->initialized)) + return -EEXIST; + + gvt_dbg_core("init gvt device\n"); + + init_device_info(gvt); + /* + * Other initialization of GVT components will be introduced here. + */ + gvt_dbg_core("gvt device creation is done\n"); + gvt->initialized = true; + return 0; +} diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h new file mode 100644 index 000000000000..fb619a6e519d --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -0,0 +1,69 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_H_ +#define _GVT_H_ + +#include "debug.h" +#include "hypercall.h" + +#define GVT_MAX_VGPU 8 + +enum { + INTEL_GVT_HYPERVISOR_XEN = 0, + INTEL_GVT_HYPERVISOR_KVM, +}; + +struct intel_gvt_host { + bool initialized; + int hypervisor_type; + struct intel_gvt_mpt *mpt; +}; + +extern struct intel_gvt_host intel_gvt_host; + +/* Describe per-platform limitations. */ +struct intel_gvt_device_info { + u32 max_support_vgpus; + /* This data structure will grow bigger in GVT device model patches */ +}; + +struct intel_vgpu { + struct intel_gvt *gvt; + int id; + unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ +}; + +struct intel_gvt { + struct mutex lock; + bool initialized; + + struct drm_i915_private *dev_priv; + struct idr vgpu_idr; /* vGPU IDR pool */ + + struct intel_gvt_device_info device_info; +}; + +#include "mpt.h" + +#endif diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h new file mode 100644 index 000000000000..254df8bf1f35 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -0,0 +1,38 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_HYPERCALL_H_ +#define _GVT_HYPERCALL_H_ + +/* + * Specific GVT-g MPT modules function collections. 
Currently GVT-g supports + * both Xen and KVM by providing dedicated hypervisor-related MPT modules. + */ +struct intel_gvt_mpt { + int (*detect_host)(void); +}; + +extern struct intel_gvt_mpt xengt_mpt; +extern struct intel_gvt_mpt kvmgt_mpt; + +#endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h new file mode 100644 index 000000000000..03601e3ffa7c --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -0,0 +1,49 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_MPT_H_ +#define _GVT_MPT_H_ + +/** + * DOC: Hypervisor Service APIs for GVT-g Core Logic + * + * This is the glue layer between specific hypervisor MPT modules and GVT-g core + * logic. Each kind of hypervisor MPT module provides a collection of function + * callbacks and will be attached to the GVT host when the driver is loading. + * GVT-g core logic will call these APIs to request specific services from + * the hypervisor.
+ */ + +/** + * intel_gvt_hypervisor_detect_host - check if GVT-g is running within + * hypervisor host/privileged domain + * + * Returns: + * Zero on success, -ENODEV if current kernel is running inside a VM + */ +static inline int intel_gvt_hypervisor_detect_host(void) +{ + return intel_gvt_host.mpt->detect_host(); +} + +#endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a337f33bec5b..b0fd6a7b0603 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_RS_CONTEXT, SMI, F, 1, S ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), - CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), @@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine) /** * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer - * @ring: the ringbuffer to initialize + * @engine: the engine to initialize * * Optionally initializes fields related to batch buffer command parsing in the * struct intel_engine_cs based on whether the platform requires software @@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->dev)) + if (!IS_GEN7(engine->i915)) return 0; switch (engine->id) { case RCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_render_ring_cmds); @@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_render_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); } else { @@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); } else { @@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { @@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) /** * i915_cmd_parser_fini_ring() - clean up cmd parser related fields - * @ring: the ringbuffer to clean up + * @engine: the engine to clean up * * Releases any resources related to command parsing that may have been * initialized for the specified ring. @@ -1023,7 +1024,7 @@ unpin_src: /** * i915_needs_cmd_parser() - should a given ring use software command parsing? - * @ring: the ring in question + * @engine: the engine in question * * Only certain platforms require software batch buffer command parsing, and * only when enabled via module parameter.
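The parser version bump in the hunks below (6 to 7) is how this MI_LOAD_REGISTER_REG whitelisting is advertised to userspace: a client queries I915_PARAM_CMD_PARSER_VERSION and only emits LRR between whitelisted registers when the reported version is 7 or higher. A minimal userspace-side sketch of that query, using the existing getparam ioctl (illustrative only, not part of this patch; the helper name is made up and error handling is trimmed):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns the active cmd parser version, or 0 when the parser is
 * unsupported or inactive (the kernel side below now reports 0 itself
 * when no engine needs parsing). */
static int query_cmd_parser_version(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;
	return value; /* >= 7: LRR between whitelisted registers is allowed */
}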
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine) if (!engine->needs_cmd_parser) return false; - if (!USES_PPGTT(engine->dev)) + if (!USES_PPGTT(engine->i915)) return false; return (i915.enable_cmd_parser == 1); @@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n"); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) *oacontrol_set = (cmd[offset + 1] != 0); } @@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", + reg_addr); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { @@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations - * @ring: the ring on which the batch is to execute + * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question * @shadow_batch_obj: copy of the batch buffer in question * @batch_start_offset: byte offset in the batch at which execution starts @@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine, /** * i915_cmd_parser_get_version() - get the cmd parser version number + * @dev_priv: i915 device private * * The cmd parser maintains a simple increasing integer version number suitable * for passing to userspace clients to determine what operations are permitted. * * Return: the current version number of the cmd parser */ -int i915_cmd_parser_get_version(void) +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { + struct intel_engine_cs *engine; + bool active = false; + + /* If the command parser is not enabled, report 0 - unsupported */ + for_each_engine(engine, dev_priv) { + if (i915_needs_cmd_parser(engine)) { + active = true; + break; + } + } + if (!active) + return 0; + /* * Command parser version history * @@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void) * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 5. GPGPU dispatch compute indirect registers. * 6. TIMESTAMP register and Haswell CS GPR registers + * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers. */ - return 6; + return 7; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 103546834b60..844fea795bae 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char get_active_flag(struct drm_i915_gem_object *obj) +static char get_active_flag(struct drm_i915_gem_object *obj) { return obj->active ? '*' : ' '; } -static const char get_pin_flag(struct drm_i915_gem_object *obj) +static char get_pin_flag(struct drm_i915_gem_object *obj) { return obj->pin_display ? 
'p' : ' '; } -static const char get_tiling_flag(struct drm_i915_gem_object *obj) +static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (obj->tiling_mode) { default: @@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj) } } -static inline const char get_global_flag(struct drm_i915_gem_object *obj) +static char get_global_flag(struct drm_i915_gem_object *obj) { return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; } -static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) +static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) { return obj->mapping ? 'M' : ' '; } @@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } -static void describe_ctx(struct seq_file *m, struct intel_context *ctx) -{ - seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); - seq_putc(m, ctx->remap_slice ? 'R' : 'r'); - seq_putc(m, ' '); -} - static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -272,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; LIST_HEAD(stolen); @@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m, print_file_stats(m, "[k]batch pool", stats); } +static int per_file_ctx_stats(int id, void *ptr, void *data) +{ + struct i915_gem_context *ctx = ptr; + int n; + + for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { + if (ctx->engine[n].state) + per_file_stats(0, ctx->engine[n].state, data); + if (ctx->engine[n].ringbuf) + per_file_stats(0, ctx->engine[n].ringbuf->obj, data); + } + + return 0; +} + +static void print_context_stats(struct seq_file *m, + struct drm_i915_private *dev_priv) +{ + struct file_stats stats; + struct drm_file *file; + + memset(&stats, 0, sizeof(stats)); + + mutex_lock(&dev_priv->drm.struct_mutex); + if (dev_priv->kernel_context) + per_file_ctx_stats(0, dev_priv->kernel_context, &stats); + + list_for_each_entry(file, &dev_priv->drm.filelist, lhead) { + struct drm_i915_file_private *fpriv = file->driver_priv; + idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); + } + mutex_unlock(&dev_priv->drm.struct_mutex); + + print_file_stats(m, "[k]contexts", stats); +} + #define count_vmas(list, member) do { \ list_for_each_entry(vma, list, member) { \ size += i915_gem_obj_total_ggtt_size(vma->obj); \ @@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_putc(m, '\n'); print_batch_pool_stats(m, dev_priv); - mutex_unlock(&dev->struct_mutex); mutex_lock(&dev->filelist_mutex); + print_context_stats(m, dev_priv); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; struct task_struct *task; @@ -562,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; uintptr_t list = (uintptr_t) node->info_ent->data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; int count, ret; @@ -596,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) { 
struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; int ret; @@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) for_each_intel_crtc(dev, crtc) { const char pipe = pipe_name(crtc->pipe); const char plane = plane_name(crtc->plane); - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = crtc->unpin_work; + work = crtc->flip_work; if (work == NULL) { seq_printf(m, "No flip due on pipe %c (plane %c)\n", pipe, plane); } else { + u32 pending; u32 addr; - if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - seq_printf(m, "Flip queued on pipe %c (plane %c)\n", + pending = atomic_read(&work->pending); + if (pending) { + seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n", pipe, plane); } else { seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", @@ -631,18 +662,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) engine->name, i915_gem_request_get_seqno(work->flip_queued_req), dev_priv->next_seqno, - engine->get_seqno(engine), - i915_gem_request_completed(work->flip_queued_req, true)); + intel_engine_get_seqno(engine), + i915_gem_request_completed(work->flip_queued_req)); } else seq_printf(m, "Flip not associated with any ring\n"); seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", work->flip_queued_vblank, work->flip_ready_vblank, - drm_crtc_vblank_count(&crtc->base)); - if (work->enable_stall_check) - seq_puts(m, "Stall check enabled, "); - else - seq_puts(m, "Stall check waiting for page flip ioctl, "); + intel_crtc_get_vblank_counter(crtc)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); if (INTEL_INFO(dev)->gen >= 4) @@ -668,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; int total = 0; @@ -713,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct drm_i915_gem_request *req; int ret, any; @@ -761,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data) static void i915_ring_seqno_info(struct seq_file *m, struct intel_engine_cs *engine) { + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node *rb; + seq_printf(m, "Current sequence (%s): %x\n", - engine->name, engine->get_seqno(engine)); - seq_printf(m, "Current user interrupts (%s): %x\n", - engine->name, READ_ONCE(engine->user_interrupts)); + engine->name, intel_engine_get_seqno(engine)); + seq_printf(m, "Current user interrupts (%s): %lx\n", + engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups)); + + spin_lock(&b->lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = container_of(rb, typeof(*w), node); + + seq_printf(m, "Waiting (%s): %s [%d] on %x\n", + engine->name, w->tsk->comm, w->tsk->pid, w->seqno); + } + spin_unlock(&b->lock); } static int i915_gem_seqno_info(struct seq_file *m, 
void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int ret; @@ -794,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int ret, i, pipe; @@ -985,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1013,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; const u32 *hws; int i; @@ -1124,7 +1163,7 @@ static int i915_next_seqno_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1161,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; intel_runtime_pm_get(dev_priv); @@ -1281,6 +1320,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); + seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "Render p-state ratio: %d\n", (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); @@ -1363,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; u64 acthd[I915_NUM_ENGINES]; u32 seqno[I915_NUM_ENGINES]; @@ -1380,10 +1420,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) for_each_engine_id(engine, dev_priv, id) { acthd[id] = intel_ring_get_active_head(engine); - seqno[id] = engine->get_seqno(engine); + seqno[id] = intel_engine_get_seqno(engine); } - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); intel_runtime_pm_put(dev_priv); @@ -1400,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) engine->hangcheck.seqno, seqno[id], engine->last_submitted_seqno); - seq_printf(m, "\tuser interrupts = %x [current %x]\n", + seq_printf(m, "\twaiters? 
%d\n", + intel_engine_has_waiter(engine)); + seq_printf(m, "\tuser interrupts = %lx [current %lx]\n", engine->hangcheck.user_interrupts, - READ_ONCE(engine->user_interrupts)); + READ_ONCE(engine->breadcrumbs.irq_wakeups)); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); @@ -1432,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rgvmodectl, rstdbyctl; u16 crstandvid; int ret; @@ -1500,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore_forcewake_domain *fw_domain; spin_lock_irq(&dev_priv->uncore.lock); @@ -1518,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rpmodectl1, rcctl1, pw_status; intel_runtime_pm_get(dev_priv); @@ -1558,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; unsigned forcewake_count; int count = 0, ret; @@ -1670,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); seq_printf(m, "FB tracking busy bits: 0x%08x\n", dev_priv->fb_tracking.busy_bits); @@ -1685,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_FBC(dev)) { seq_puts(m, "FBC unsupported on this chipset\n"); @@ -1715,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) static int i915_fbc_fc_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) return -ENODEV; @@ -1728,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val) static int i915_fbc_fc_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 reg; if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) @@ -1755,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_IPS(dev)) { seq_puts(m, "not supported\n"); @@ -1785,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct 
drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool sr_enabled = false; intel_runtime_pm_get(dev_priv); @@ -1814,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long temp, chipset, gfx; int ret; @@ -1842,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; int gpu_freq, ia_freq; unsigned int max_gpu_freq, min_gpu_freq; @@ -1897,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_opregion *opregion = &dev_priv->opregion; int ret; @@ -1918,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_opregion *opregion = &dev_priv->opregion; if (opregion->vbt) @@ -1940,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return ret; #ifdef CONFIG_DRM_FBDEV_EMULATION - if (to_i915(dev)->fbdev) { - fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); - - seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fbdev_fb->base.width, - fbdev_fb->base.height, - fbdev_fb->base.depth, - fbdev_fb->base.bits_per_pixel, - fbdev_fb->base.modifier[0], - drm_framebuffer_read_refcount(&fbdev_fb->base)); - describe_obj(m, fbdev_fb->obj); - seq_putc(m, '\n'); - } + if (to_i915(dev)->fbdev) { + fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fbdev_fb->base.width, + fbdev_fb->base.height, + fbdev_fb->base.depth, + fbdev_fb->base.bits_per_pixel, + fbdev_fb->base.modifier[0], + drm_framebuffer_read_refcount(&fbdev_fb->base)); + describe_obj(m, fbdev_fb->obj); + seq_putc(m, '\n'); + } #endif mutex_lock(&dev->mode_config.fb_lock); @@ -1989,10 +2031,9 @@ static int i915_context_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - struct intel_context *ctx; - enum intel_engine_id id; + struct i915_gem_context *ctx; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2000,32 +2041,36 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (!i915.enable_execlists && - ctx->legacy_hw_ctx.rcs_state == NULL) - continue; - - seq_puts(m, "HW context "); - describe_ctx(m, ctx); - if (ctx == dev_priv->kernel_context) - seq_printf(m, "(kernel context) "); + seq_printf(m, "HW context %u ", ctx->hw_id); + if (IS_ERR(ctx->file_priv)) { + seq_puts(m, 
"(deleted) "); + } else if (ctx->file_priv) { + struct pid *pid = ctx->file_priv->file->pid; + struct task_struct *task; - if (i915.enable_execlists) { - seq_putc(m, '\n'); - for_each_engine_id(engine, dev_priv, id) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[id].ringbuf; - - seq_printf(m, "%s: ", engine->name); - if (ctx_obj) - describe_obj(m, ctx_obj); - if (ringbuf) - describe_ctx_ringbuf(m, ringbuf); - seq_putc(m, '\n'); + task = get_pid_task(pid, PIDTYPE_PID); + if (task) { + seq_printf(m, "(%s [%d]) ", + task->comm, task->pid); + put_task_struct(task); } } else { - describe_obj(m, ctx->legacy_hw_ctx.rcs_state); + seq_puts(m, "(kernel) "); + } + + seq_putc(m, ctx->remap_slice ? 'R' : 'r'); + seq_putc(m, '\n'); + + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; + + seq_printf(m, "%s: ", engine->name); + seq_putc(m, ce->initialised ? 'I' : 'i'); + if (ce->state) + describe_obj(m, ce->state); + if (ce->ringbuf) + describe_ctx_ringbuf(m, ce->ringbuf); + seq_putc(m, '\n'); } seq_putc(m, '\n'); @@ -2037,24 +2082,22 @@ static int i915_context_status(struct seq_file *m, void *unused) } static void i915_dump_lrc_obj(struct seq_file *m, - struct intel_context *ctx, + struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct page *page; uint32_t *reg_state; int j; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; unsigned long ggtt_offset = 0; + seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); + if (ctx_obj == NULL) { - seq_printf(m, "Context on %s with no gem object\n", - engine->name); + seq_puts(m, "\tNot allocated\n"); return; } - seq_printf(m, "CONTEXT: %s %u\n", engine->name, - intel_execlists_ctx_id(ctx, engine)); - if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); else @@ -2085,9 +2128,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!i915.enable_execlists) { @@ -2100,9 +2143,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) - if (ctx != dev_priv->kernel_context) - for_each_engine(engine, dev_priv) - i915_dump_lrc_obj(m, ctx, engine); + for_each_engine(engine, dev_priv) + i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2113,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; u32 status_pointer; u8 read_pointer; @@ -2173,8 +2215,8 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\t%d requests in queue\n", count); if (head_req) { - seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(head_req->ctx, engine)); + seq_printf(m, "\tHead request context: %u\n", + head_req->ctx->hw_id); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } @@ -2216,7 +2258,7 @@ static int 
i915_swizzle_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2268,7 +2310,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) static int per_file_ctx(int id, void *ptr, void *data) { - struct intel_context *ctx = ptr; + struct i915_gem_context *ctx = ptr; struct seq_file *m = data; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; @@ -2289,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data) static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; int i; @@ -2310,15 +2352,15 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(engine))); seq_printf(m, "PP_DIR_BASE: 0x%08x\n", @@ -2344,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_file *file; int ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2387,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915) int count = 0; for_each_engine(engine, i915) - count += engine->irq_refcount; + count += intel_engine_has_waiter(engine); return count; } @@ -2396,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_file *file; seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); - seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); + seq_printf(m, "GPU busy? %s [%x]\n", + yesno(dev_priv->gt.awake), dev_priv->gt.active_engines); seq_printf(m, "CPU waiting? 
%d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), @@ -2441,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const bool edram = INTEL_GEN(dev_priv) > 8; seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); @@ -2454,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data) static int i915_guc_load_status_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = node->minor->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(node->minor->dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; u32 tmp, i; @@ -2509,15 +2552,16 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", client->wq_size, client->wq_offset, client->wq_tail); + seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); for_each_engine(engine, dev_priv) { seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[engine->guc_id], + client->submissions[engine->id], engine->name); - tot += client->submissions[engine->guc_id]; + tot += client->submissions[engine->id]; } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2526,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc guc; struct i915_guc_client client = {}; struct intel_engine_cs *engine; @@ -2545,6 +2589,10 @@ static int i915_guc_info(struct seq_file *m, void *data) mutex_unlock(&dev->struct_mutex); + seq_printf(m, "Doorbell map:\n"); + seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap); + seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline); + seq_printf(m, "GuC total action count: %llu\n", guc.action_count); seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); @@ -2554,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC submissions:\n"); for_each_engine(engine, dev_priv) { seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - engine->name, guc.submissions[engine->guc_id], - guc.last_seqno[engine->guc_id]); - total += guc.submissions[engine->guc_id]; + engine->name, guc.submissions[engine->id], + guc.last_seqno[engine->id]); + total += guc.submissions[engine->id]; } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2572,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; u32 *log; int i = 0, pg; @@ -2600,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct 
drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 psrperf = 0; u32 stat[3]; enum pipe pipe; @@ -2668,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct intel_encoder *encoder; struct intel_connector *connector; struct intel_dp *intel_dp = NULL; int ret; @@ -2676,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data) drm_modeset_lock_all(dev); for_each_intel_connector(dev, connector) { + struct drm_crtc *crtc; - if (connector->base.dpms != DRM_MODE_DPMS_ON) + if (!connector->base.state->best_encoder) continue; - if (!connector->base.encoder) + crtc = connector->base.state->crtc; + if (!crtc->state->active) continue; - encoder = to_intel_encoder(connector->base.encoder); - if (encoder->type != INTEL_OUTPUT_EDP) + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(connector->base.state->best_encoder); ret = intel_dp_sink_crc(intel_dp, crc); if (ret) @@ -2708,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 power; u32 units; @@ -2734,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_RUNTIME_PM(dev_priv)) seq_puts(m, "Runtime power management not supported\n"); - seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); + seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); seq_printf(m, "IRQs disabled: %s\n", yesno(!intel_irqs_enabled(dev_priv))); #ifdef CONFIG_PM @@ -2749,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); #endif seq_printf(m, "PCI device power state: %s [%d]\n", - pci_power_name(dev_priv->dev->pdev->current_state), - dev_priv->dev->pdev->current_state); + pci_power_name(dev_priv->drm.pdev->current_state), + dev_priv->drm.pdev->current_state); return 0; } @@ -2759,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_power_domains *power_domains = &dev_priv->power_domains; int i; @@ -2794,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_csr *csr; if (!HAS_CSR(dev)) { @@ -2917,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m, seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); - if (intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) intel_panel_info(m, &intel_connector->panel); } @@ -2956,14 +3004,26 @@ static void intel_connector_info(struct seq_file 
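The reworked sink-CRC scan above decides whether a connector is usable from its committed atomic state rather than the legacy dpms field. A sketch of that test, assuming an atomic driver and that the caller already holds the modeset locks (connector_is_active_edp is illustrative):

#include <drm/drm_crtc.h>

static bool connector_is_active_edp(struct drm_connector *connector)
{
	struct drm_crtc *crtc;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
		return false;

	if (!connector->state->best_encoder)	/* not routed to an encoder */
		return false;

	crtc = connector->state->crtc;
	return crtc && crtc->state->active;	/* CRTC must be running */
}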
*m, seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); } - if (intel_encoder) { - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - intel_dp_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_HDMI) - intel_hdmi_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + + if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + return; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + intel_dp_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_LVDS: + if (intel_encoder->type == INTEL_OUTPUT_LVDS) intel_lvds_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_HDMIA: + if (intel_encoder->type == INTEL_OUTPUT_HDMI || + intel_encoder->type == INTEL_OUTPUT_UNKNOWN) + intel_hdmi_info(m, intel_connector); + break; + default: + break; } seq_printf(m, "\tmodes:\n"); @@ -2973,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m, static bool cursor_active(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 state; if (IS_845G(dev) || IS_I865G(dev)) @@ -2986,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe) static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pos; pos = I915_READ(CURPOS(pipe)); @@ -3107,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; struct drm_connector *connector; @@ -3162,13 +3222,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); enum intel_engine_id id; int j, ret; - if (!i915_semaphore_is_enabled(dev)) { + if (!i915_semaphore_is_enabled(dev_priv)) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -3235,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; drm_modeset_lock_all(dev); @@ -3265,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) struct intel_engine_cs *engine; struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_workarounds *workarounds = &dev_priv->workarounds; enum intel_engine_id id; @@ -3303,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation *ddb; 
struct skl_ddb_entry *entry; enum pipe pipe; @@ -3341,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused) static void drrs_status_per_crtc(struct seq_file *m, struct drm_device *dev, struct intel_crtc *intel_crtc) { - struct intel_encoder *intel_encoder; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_drrs *drrs = &dev_priv->drrs; int vrefresh = 0; + struct drm_connector *connector; - for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { - /* Encoder connected on this CRTC */ - switch (intel_encoder->type) { - case INTEL_OUTPUT_EDP: - seq_puts(m, "eDP:\n"); - break; - case INTEL_OUTPUT_DSI: - seq_puts(m, "DSI:\n"); - break; - case INTEL_OUTPUT_HDMI: - seq_puts(m, "HDMI:\n"); - break; - case INTEL_OUTPUT_DISPLAYPORT: - seq_puts(m, "DP:\n"); - break; - default: - seq_printf(m, "Other encoder (id=%d).\n", - intel_encoder->type); - return; - } + drm_for_each_connector(connector, dev) { + if (connector->state->crtc != &intel_crtc->base) + continue; + + seq_printf(m, "%s:\n", connector->name); } if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) @@ -3428,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused) struct intel_crtc *intel_crtc; int active_crtc_cnt = 0; + drm_modeset_lock_all(dev); for_each_intel_crtc(dev, intel_crtc) { - drm_modeset_lock(&intel_crtc->base.mutex, NULL); - if (intel_crtc->base.state->active) { active_crtc_cnt++; seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); drrs_status_per_crtc(m, dev, intel_crtc); } - - drm_modeset_unlock(&intel_crtc->base.mutex); } + drm_modeset_unlock_all(dev); if (!active_crtc_cnt) seq_puts(m, "No active crtc found\n"); @@ -3457,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_encoder *encoder; struct intel_encoder *intel_encoder; struct intel_digital_port *intel_dig_port; + struct drm_connector *connector; + drm_modeset_lock_all(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_encoder = to_intel_encoder(encoder); - if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) + drm_for_each_connector(connector, dev) { + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - intel_dig_port = enc_to_dig_port(encoder); + + intel_encoder = intel_attached_encoder(connector); + if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + intel_dig_port = enc_to_dig_port(&intel_encoder->base); if (!intel_dig_port->dp.can_mst) continue; + seq_printf(m, "MST Source Port %c\n", port_name(intel_dig_port->port)); drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); @@ -3479,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) static int i915_pipe_crc_open(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(info->dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) @@ -3503,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep) static int i915_pipe_crc_release(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev->dev_private; + struct drm_i915_private *dev_priv = 
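The DRRS status change above swaps per-CRTC locking for a single drm_modeset_lock_all() around the whole scan, trading granularity for simplicity in a debug-only path. A minimal sketch of the pattern, assuming an atomic driver so crtc->state is valid (note_active_crtc is a placeholder):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void note_active_crtc(struct drm_crtc *crtc)
{
	/* per-CRTC debug work would go here */
}

static void scan_active_crtcs(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);		/* one lock for the whole walk */
	drm_for_each_crtc(crtc, dev)
		if (crtc->state->active)
			note_active_crtc(crtc);
	drm_modeset_unlock_all(dev);
}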
to_i915(info->dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; spin_lock_irq(&pipe_crc->lock); @@ -3531,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, { struct pipe_crc_info *info = filep->private_data; struct drm_device *dev = info->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; char buf[PIPE_CRC_BUFFER_LEN]; int n_entries; @@ -3664,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) static int display_crc_ctl_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < I915_MAX_PIPES; i++) @@ -3725,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, case INTEL_OUTPUT_TVOUT: *source = INTEL_PIPE_CRC_SOURCE_TV; break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: dig_port = enc_to_dig_port(&encoder->base); switch (dig_port->port) { @@ -3758,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, enum intel_pipe_crc_source *source, uint32_t *val) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { @@ -3829,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, enum intel_pipe_crc_source *source, uint32_t *val) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { @@ -3903,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp = I915_READ(PORT_DFT2_G4X); switch (pipe) { @@ -3928,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp = I915_READ(PORT_DFT2_G4X); if (pipe == PIPE_A) @@ -3971,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); struct intel_crtc_state *pipe_config; @@ -4039,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, enum intel_pipe_crc_source source) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); @@ -4546,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) static int pri_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private 
*dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4562,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) static int spr_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4578,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) static int cur_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4669,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4685,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4701,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4743,7 +4792,7 @@ static int i915_wedged_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = i915_terminally_wedged(&dev_priv->gpu_error); @@ -4754,7 +4803,7 @@ static int i915_wedged_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * There is no safeguard against this debugfs entry colliding @@ -4769,7 +4818,7 @@ i915_wedged_set(void *data, u64 val) intel_runtime_pm_get(dev_priv); - i915_handle_error(dev, val, + i915_handle_error(dev_priv, val, "Manually setting wedged to %llu", val); intel_runtime_pm_put(dev_priv); @@ -4782,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, "%llu\n"); static int -i915_ring_stop_get(void *data, u64 *val) -{ - struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - - *val = dev_priv->gpu_error.stop_rings; - - return 0; -} - -static int -i915_ring_stop_set(void *data, u64 val) -{ - struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - dev_priv->gpu_error.stop_rings = val; - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, - i915_ring_stop_get, i915_ring_stop_set, - "0x%08llx\n"); - -static int i915_ring_missed_irq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = dev_priv->gpu_error.missed_irq_rings; return 0; @@ -4829,7 +4844,7 @@ static int i915_ring_missed_irq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; /* Lock against concurrent debugfs callers */ @@ -4850,7 +4865,7 @@ static int i915_ring_test_irq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = dev_priv->gpu_error.test_irq_rings; @@ -4861,18 +4876,11 @@ static int i915_ring_test_irq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + struct drm_i915_private *dev_priv = to_i915(dev); + val &= INTEL_INFO(dev_priv)->ring_mask; DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); - - /* Lock against concurrent debugfs callers */ - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - dev_priv->gpu_error.test_irq_rings = val; - mutex_unlock(&dev->struct_mutex); return 0; } @@ -4901,7 +4909,7 @@ static int i915_drop_caches_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; DRM_DEBUG("Dropping caches: 0x%08llx\n", val); @@ -4913,13 +4921,13 @@ i915_drop_caches_set(void *data, u64 val) return ret; if (val & DROP_ACTIVE) { - ret = i915_gpu_idle(dev); + ret = i915_gem_wait_for_idle(dev_priv); if (ret) goto unlock; } if (val & (DROP_RETIRE | DROP_ACTIVE)) - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); if (val & DROP_BOUND) i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); @@ -4941,7 +4949,7 @@ static int i915_max_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (INTEL_INFO(dev)->gen < 6) @@ -4963,7 +4971,7 @@ static int i915_max_freq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_max, hw_min; int ret; @@ -4993,7 +5001,7 @@ i915_max_freq_set(void *data, u64 val) dev_priv->rps.max_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5008,7 +5016,7 @@ static int i915_min_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (INTEL_INFO(dev)->gen < 6) @@ -5030,7 +5038,7 @@ static int i915_min_freq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_max, hw_min; int ret; @@ -5060,7 +5068,7 @@ i915_min_freq_set(void *data, u64 val) dev_priv->rps.min_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5075,7 +5083,7 @@ static int i915_cache_sharing_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 snpcr; int ret; @@ -5090,7 +5098,7 @@ i915_cache_sharing_get(void 
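The wedged, missed-irq, test-irq and drop-caches knobs in this file all follow the same debugfs idiom: a get/set pair over u64 plus a printf format, wrapped by DEFINE_SIMPLE_ATTRIBUTE. A self-contained sketch (my_knob and friends are illustrative names):

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 my_knob;

static int my_knob_get(void *data, u64 *val)
{
	*val = my_knob;
	return 0;
}

static int my_knob_set(void *data, u64 val)
{
	my_knob = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(my_knob_fops, my_knob_get, my_knob_set, "0x%08llx\n");

/* registration, typically at init time:
 *	debugfs_create_file("my_knob", S_IRUSR | S_IWUSR, root, NULL,
 *			    &my_knob_fops);
 */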
*data, u64 *val) snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -5101,7 +5109,7 @@ static int i915_cache_sharing_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 snpcr; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -5138,7 +5146,7 @@ struct sseu_dev_status { static void cherryview_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ss_max = 2; int ss; u32 sig1[ss_max], sig2[ss_max]; @@ -5170,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev, static void gen9_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int s_max = 3, ss_max = 4; int s, ss; u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; @@ -5235,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev, static void broadwell_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int s; u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); @@ -5277,6 +5285,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused) INTEL_INFO(dev)->eu_total); seq_printf(m, " Available EU Per Subslice: %u\n", INTEL_INFO(dev)->eu_per_subslice); + seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev))); + if (HAS_POOLED_EU(dev)) + seq_printf(m, " Min EU in pool: %u\n", + INTEL_INFO(dev)->min_eu_in_pool); seq_printf(m, " Has Slice Power Gating: %s\n", yesno(INTEL_INFO(dev)->has_slice_pg)); seq_printf(m, " Has Subslice Power Gating: %s\n", @@ -5310,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 6) return 0; @@ -5324,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) static int i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 6) return 0; @@ -5440,7 +5452,6 @@ static const struct i915_debugfs_files { {"i915_max_freq", &i915_max_freq_fops}, {"i915_min_freq", &i915_min_freq_fops}, {"i915_cache_sharing", &i915_cache_sharing_fops}, - {"i915_ring_stop", &i915_ring_stop_fops}, {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, {"i915_ring_test_irq", &i915_ring_test_irq_fops}, {"i915_gem_drop_caches", &i915_drop_caches_fops}, @@ -5458,7 +5469,7 @@ static const struct i915_debugfs_files { void intel_display_crc_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -5470,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev) } } -int i915_debugfs_init(struct drm_minor *minor) +int 
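i915_cache_sharing_get/_set above are a read-modify-write pair over one register field (GEN6_MBC_SNPCR_MASK/SHIFT). The generic shape of that idiom, with placeholder mask and shift values rather than the real SNPCR ones:

#define FIELD_MASK	(0x3 << 21)	/* placeholder field definition */
#define FIELD_SHIFT	21

static u32 field_get(u32 reg)
{
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
}

static u32 field_update(u32 reg, u32 val)
{
	reg &= ~FIELD_MASK;				/* clear the old value */
	reg |= (val << FIELD_SHIFT) & FIELD_MASK;	/* splice in the new one */
	return reg;
}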
i915_debugfs_register(struct drm_i915_private *dev_priv) { + struct drm_minor *minor = dev_priv->drm.primary; int ret, i; ret = i915_forcewake_create(minor->debugfs_root, minor); @@ -5497,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor) minor->debugfs_root, minor); } -void i915_debugfs_cleanup(struct drm_minor *minor) +void i915_debugfs_unregister(struct drm_i915_private *dev_priv) { + struct drm_minor *minor = dev_priv->drm.primary; int i; drm_debugfs_remove_files(i915_debugfs_list, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c deleted file mode 100644 index b3198fcd0536..000000000000 --- a/drivers/gpu/drm/i915/i915_dma.c +++ /dev/null @@ -1,1587 +0,0 @@ -/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- - */ -/* - * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_legacy.h> -#include "intel_drv.h" -#include <drm/i915_drm.h> -#include "i915_drv.h" -#include "i915_vgpu.h" -#include "i915_trace.h" -#include <linux/pci.h> -#include <linux/console.h> -#include <linux/vt.h> -#include <linux/vgaarb.h> -#include <linux/acpi.h> -#include <linux/pnp.h> -#include <linux/vga_switcheroo.h> -#include <linux/slab.h> -#include <acpi/video.h> -#include <linux/pm.h> -#include <linux/pm_runtime.h> -#include <linux/oom.h> - -static unsigned int i915_load_fail_count; - -bool __i915_inject_load_failure(const char *func, int line) -{ - if (i915_load_fail_count >= i915.inject_load_failure) - return false; - - if (++i915_load_fail_count == i915.inject_load_failure) { - DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", - i915.inject_load_failure, func, line); - return true; - } - - return false; -} - -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ - "providing the dmesg log by booting with drm.debug=0xf" - -void -__i915_printk(struct drm_i915_private *dev_priv, const char *level, - const char *fmt, ...) 
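The deleted i915_dma.c opens with the driver's load-time fault-injection hook: a module parameter selects the Nth checkpoint to fail, and a counter trips exactly once when it is reached. A stripped-down sketch of the same idea (fail_at/fail_count are illustrative names):

#include <linux/printk.h>

static unsigned int fail_at;	/* which checkpoint to fail; 0 = never */
static unsigned int fail_count;

static bool inject_failure(const char *func, int line)
{
	if (fail_count >= fail_at)
		return false;	/* disabled, or the failure already fired */

	if (++fail_count == fail_at) {
		pr_info("injecting failure at checkpoint %u [%s:%d]\n",
			fail_at, func, line);
		return true;
	}

	return false;
}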
-{ - static bool shown_bug_once; - struct device *dev = dev_priv->dev->dev; - bool is_error = level[1] <= KERN_ERR[1]; - bool is_debug = level[1] == KERN_DEBUG[1]; - struct va_format vaf; - va_list args; - - if (is_debug && !(drm_debug & DRM_UT_DRIVER)) - return; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", - __builtin_return_address(0), &vaf); - - if (is_error && !shown_bug_once) { - dev_notice(dev, "%s", FDO_BUG_MSG); - shown_bug_once = true; - } - - va_end(args); -} - -static bool i915_error_injected(struct drm_i915_private *dev_priv) -{ - return i915.inject_load_failure && - i915_load_fail_count == i915.inject_load_failure; -} - -#define i915_load_error(dev_priv, fmt, ...) \ - __i915_printk(dev_priv, \ - i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ - fmt, ##__VA_ARGS__) - -static int i915_getparam(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_getparam_t *param = data; - int value; - - switch (param->param) { - case I915_PARAM_IRQ_ACTIVE: - case I915_PARAM_ALLOW_BATCHBUFFER: - case I915_PARAM_LAST_DISPATCH: - /* Reject all old ums/dri params. */ - return -ENODEV; - case I915_PARAM_CHIPSET_ID: - value = dev->pdev->device; - break; - case I915_PARAM_REVISION: - value = dev->pdev->revision; - break; - case I915_PARAM_HAS_GEM: - value = 1; - break; - case I915_PARAM_NUM_FENCES_AVAIL: - value = dev_priv->num_fence_regs; - break; - case I915_PARAM_HAS_OVERLAY: - value = dev_priv->overlay ? 1 : 0; - break; - case I915_PARAM_HAS_PAGEFLIPPING: - value = 1; - break; - case I915_PARAM_HAS_EXECBUF2: - /* depends on GEM */ - value = 1; - break; - case I915_PARAM_HAS_BSD: - value = intel_engine_initialized(&dev_priv->engine[VCS]); - break; - case I915_PARAM_HAS_BLT: - value = intel_engine_initialized(&dev_priv->engine[BCS]); - break; - case I915_PARAM_HAS_VEBOX: - value = intel_engine_initialized(&dev_priv->engine[VECS]); - break; - case I915_PARAM_HAS_BSD2: - value = intel_engine_initialized(&dev_priv->engine[VCS2]); - break; - case I915_PARAM_HAS_RELAXED_FENCING: - value = 1; - break; - case I915_PARAM_HAS_COHERENT_RINGS: - value = 1; - break; - case I915_PARAM_HAS_EXEC_CONSTANTS: - value = INTEL_INFO(dev)->gen >= 4; - break; - case I915_PARAM_HAS_RELAXED_DELTA: - value = 1; - break; - case I915_PARAM_HAS_GEN7_SOL_RESET: - value = 1; - break; - case I915_PARAM_HAS_LLC: - value = HAS_LLC(dev); - break; - case I915_PARAM_HAS_WT: - value = HAS_WT(dev); - break; - case I915_PARAM_HAS_ALIASING_PPGTT: - value = USES_PPGTT(dev); - break; - case I915_PARAM_HAS_WAIT_TIMEOUT: - value = 1; - break; - case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev); - break; - case I915_PARAM_HAS_PRIME_VMAP_FLUSH: - value = 1; - break; - case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); - break; - case I915_PARAM_HAS_PINNED_BATCHES: - value = 1; - break; - case I915_PARAM_HAS_EXEC_NO_RELOC: - value = 1; - break; - case I915_PARAM_HAS_EXEC_HANDLE_LUT: - value = 1; - break; - case I915_PARAM_CMD_PARSER_VERSION: - value = i915_cmd_parser_get_version(); - break; - case I915_PARAM_HAS_COHERENT_PHYS_GTT: - value = 1; - break; - case I915_PARAM_MMAP_VERSION: - value = 1; - break; - case I915_PARAM_SUBSLICE_TOTAL: - value = INTEL_INFO(dev)->subslice_total; - if (!value) - return -ENODEV; - break; - case I915_PARAM_EU_TOTAL: - value = INTEL_INFO(dev)->eu_total; - if (!value) - return -ENODEV; - break; - case 
I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && - intel_has_gpu_reset(dev); - break; - case I915_PARAM_HAS_RESOURCE_STREAMER: - value = HAS_RESOURCE_STREAMER(dev); - break; - case I915_PARAM_HAS_EXEC_SOFTPIN: - value = 1; - break; - default: - DRM_DEBUG("Unknown parameter %d\n", param->param); - return -EINVAL; - } - - if (copy_to_user(param->value, &value, sizeof(int))) { - DRM_ERROR("copy_to_user failed\n"); - return -EFAULT; - } - - return 0; -} - -static int i915_get_bridge_dev(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); - if (!dev_priv->bridge_dev) { - DRM_ERROR("bridge device not found\n"); - return -1; - } - return 0; -} - -/* Allocate space for the MCH regs if needed, return nonzero on error */ -static int -intel_alloc_mchbar_resource(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; - u32 temp_lo, temp_hi = 0; - u64 mchbar_addr; - int ret; - - if (INTEL_INFO(dev)->gen >= 4) - pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); - pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); - mchbar_addr = ((u64)temp_hi << 32) | temp_lo; - - /* If ACPI doesn't have it, assume we need to allocate it ourselves */ -#ifdef CONFIG_PNP - if (mchbar_addr && - pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) - return 0; -#endif - - /* Get some space for it */ - dev_priv->mch_res.name = "i915 MCHBAR"; - dev_priv->mch_res.flags = IORESOURCE_MEM; - ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, - &dev_priv->mch_res, - MCHBAR_SIZE, MCHBAR_SIZE, - PCIBIOS_MIN_MEM, - 0, pcibios_align_resource, - dev_priv->bridge_dev); - if (ret) { - DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); - dev_priv->mch_res.start = 0; - return ret; - } - - if (INTEL_INFO(dev)->gen >= 4) - pci_write_config_dword(dev_priv->bridge_dev, reg + 4, - upper_32_bits(dev_priv->mch_res.start)); - - pci_write_config_dword(dev_priv->bridge_dev, reg, - lower_32_bits(dev_priv->mch_res.start)); - return 0; -} - -/* Setup MCHBAR if possible, return true if we should disable it again */ -static void -intel_setup_mchbar(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; - u32 temp; - bool enabled; - - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - return; - - dev_priv->mchbar_need_disable = false; - - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); - enabled = !!(temp & DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - enabled = temp & 1; - } - - /* If it's already enabled, don't have to do anything */ - if (enabled) - return; - - if (intel_alloc_mchbar_resource(dev)) - return; - - dev_priv->mchbar_need_disable = true; - - /* Space is allocated or reserved, so enable it. */ - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_write_config_dword(dev_priv->bridge_dev, DEVEN, - temp | DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); - } -} - -static void -intel_teardown_mchbar(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
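The MCHBAR code above stitches a 64-bit base address together from two 32-bit config-space reads on the host bridge, with the high dword only present on gen4+. A sketch of just that read (reg is whichever MCHBAR offset applies; read_high mirrors the gen check):

#include <linux/pci.h>

static u64 read_bridge_bar64(struct pci_dev *bridge, int reg, bool read_high)
{
	u32 lo, hi = 0;

	if (read_high)			/* upper dword exists on gen4+ */
		pci_read_config_dword(bridge, reg + 4, &hi);
	pci_read_config_dword(bridge, reg, &lo);

	return ((u64)hi << 32) | lo;
}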
MCHBAR_I965 : MCHBAR_I915; - - if (dev_priv->mchbar_need_disable) { - if (IS_I915G(dev) || IS_I915GM(dev)) { - u32 deven_val; - - pci_read_config_dword(dev_priv->bridge_dev, DEVEN, - &deven_val); - deven_val &= ~DEVEN_MCHBAR_EN; - pci_write_config_dword(dev_priv->bridge_dev, DEVEN, - deven_val); - } else { - u32 mchbar_val; - - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, - &mchbar_val); - mchbar_val &= ~1; - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, - mchbar_val); - } - } - - if (dev_priv->mch_res.start) - release_resource(&dev_priv->mch_res); -} - -/* true = enable decode, false = disable decoder */ -static unsigned int i915_vga_set_decode(void *cookie, bool state) -{ - struct drm_device *dev = cookie; - - intel_modeset_vga_set_state(dev, state); - if (state) - return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; - else - return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; -} - -static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - - if (state == VGA_SWITCHEROO_ON) { - pr_info("switched on\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - /* i915 resume handler doesn't set to D0 */ - pci_set_power_state(dev->pdev, PCI_D0); - i915_resume_switcheroo(dev); - dev->switch_power_state = DRM_SWITCH_POWER_ON; - } else { - pr_info("switched off\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(dev, pmm); - dev->switch_power_state = DRM_SWITCH_POWER_OFF; - } -} - -static bool i915_switcheroo_can_switch(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - /* - * FIXME: open_count is protected by drm_global_mutex but that would lead to - * locking inversion with the driver load path. And the access here is - * completely racy anyway. So don't bother with locking for now. - */ - return dev->open_count == 0; -} - -static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { - .set_gpu_state = i915_switcheroo_set_state, - .reprobe = NULL, - .can_switch = i915_switcheroo_can_switch, -}; - -static int i915_load_modeset_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - ret = intel_bios_init(dev_priv); - if (ret) - DRM_INFO("failed to find VBIOS tables\n"); - - /* If we have > 1 VGA cards, then we need to arbitrate access - * to the common VGA resources. - * - * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), - * then we do not take part in VGA arbitration and the - * vga_client_register() fails with -ENODEV. - */ - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); - if (ret && ret != -ENODEV) - goto out; - - intel_register_dsm_handler(); - - ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); - if (ret) - goto cleanup_vga_client; - - intel_power_domains_init_hw(dev_priv, false); - - intel_csr_ucode_init(dev_priv); - - ret = intel_irq_install(dev_priv); - if (ret) - goto cleanup_csr; - - intel_setup_gmbus(dev); - - /* Important: The output setup functions called by modeset_init need - * working irqs for e.g. gmbus and dp aux transfers. 
*/ - intel_modeset_init(dev); - - intel_guc_ucode_init(dev); - - ret = i915_gem_init(dev); - if (ret) - goto cleanup_irq; - - intel_modeset_gem_init(dev); - - if (INTEL_INFO(dev)->num_pipes == 0) - return 0; - - ret = intel_fbdev_init(dev); - if (ret) - goto cleanup_gem; - - /* Only enable hotplug handling once the fbdev is fully set up. */ - intel_hpd_init(dev_priv); - - /* - * Some ports require correctly set-up hpd registers for detection to - * work properly (leading to ghost connected connector status), e.g. VGA - * on gm45. Hence we can only set up the initial fbdev config after hpd - * irqs are fully enabled. Now we should scan for the initial config - * only once hotplug handling is enabled, but due to screwed-up locking - * around kms/fbdev init we can't protect the fdbev initial config - * scanning against hotplug events. Hence do this first and ignore the - * tiny window where we will loose hotplug notifactions. - */ - intel_fbdev_initial_config_async(dev); - - drm_kms_helper_poll_init(dev); - - return 0; - -cleanup_gem: - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); -cleanup_irq: - intel_guc_ucode_fini(dev); - drm_irq_uninstall(dev); - intel_teardown_gmbus(dev); -cleanup_csr: - intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini(dev_priv); - vga_switcheroo_unregister_client(dev->pdev); -cleanup_vga_client: - vga_client_register(dev->pdev, NULL, NULL, NULL); -out: - return ret; -} - -#if IS_ENABLED(CONFIG_FB) -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) -{ - struct apertures_struct *ap; - struct pci_dev *pdev = dev_priv->dev->pdev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool primary; - int ret; - - ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - - ap->ranges[0].base = ggtt->mappable_base; - ap->ranges[0].size = ggtt->mappable_end; - - primary = - pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - - ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); - - kfree(ap); - - return ret; -} -#else -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) -{ - return 0; -} -#endif - -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - -static void i915_dump_device_info(struct drm_i915_private *dev_priv) -{ - const struct intel_device_info *info = &dev_priv->info; - -#define PRINT_S(name) "%s" -#define SEP_EMPTY -#define PRINT_FLAG(name) info->name ? 
#name "," : "" -#define SEP_COMMA , - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" - DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), - info->gen, - dev_priv->dev->pdev->device, - dev_priv->dev->pdev->revision, - DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); -#undef PRINT_S -#undef SEP_EMPTY -#undef PRINT_FLAG -#undef SEP_COMMA -} - -static void cherryview_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - u32 fuse, eu_dis; - - info = (struct intel_device_info *)&dev_priv->info; - fuse = I915_READ(CHV_FUSE_GT); - - info->slice_total = 1; - - if (!(fuse & CHV_FGT_DISABLE_SS0)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | - CHV_FGT_EU_DIS_SS0_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - if (!(fuse & CHV_FGT_DISABLE_SS1)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | - CHV_FGT_EU_DIS_SS1_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - info->subslice_total = info->subslice_per_slice; - /* - * CHV expected to always have a uniform distribution of EU - * across subslices. - */ - info->eu_per_subslice = info->subslice_total ? - info->eu_total / info->subslice_total : - 0; - /* - * CHV supports subslice power gating on devices with more than - * one subslice, and supports EU power gating on devices with - * more than one EU pair per subslice. - */ - info->has_slice_pg = 0; - info->has_subslice_pg = (info->subslice_total > 1); - info->has_eu_pg = (info->eu_per_subslice > 2); -} - -static void gen9_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - int s_max = 3, ss_max = 4, eu_max = 8; - int s, ss; - u32 fuse2, s_enable, ss_disable, eu_disable; - u8 eu_mask = 0xff; - - info = (struct intel_device_info *)&dev_priv->info; - fuse2 = I915_READ(GEN8_FUSE2); - s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> - GEN8_F2_S_ENA_SHIFT; - ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> - GEN9_F2_SS_DIS_SHIFT; - - info->slice_total = hweight32(s_enable); - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - info->subslice_per_slice = ss_max - hweight32(ss_disable); - info->subslice_total = info->slice_total * - info->subslice_per_slice; - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < s_max; s++) { - if (!(s_enable & (0x1 << s))) - /* skip disabled slice */ - continue; - - eu_disable = I915_READ(GEN9_EU_DISABLE(s)); - for (ss = 0; ss < ss_max; ss++) { - int eu_per_ss; - - if (ss_disable & (0x1 << ss)) - /* skip disabled subslice */ - continue; - - eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & - eu_mask); - - /* - * Record which subslice(s) has(have) 7 EUs. we - * can tune the hash used to spread work among - * subslices if they are unbalanced. - */ - if (eu_per_ss == 7) - info->subslice_7eu[s] |= 1 << ss; - - info->eu_total += eu_per_ss; - } - } - - /* - * SKL is expected to always have a uniform distribution - * of EU across subslices with the exception that any one - * EU in any one subslice may be fused off for die - * recovery. BXT is expected to be perfectly uniform in EU - * distribution. - */ - info->eu_per_subslice = info->subslice_total ? 
- DIV_ROUND_UP(info->eu_total, - info->subslice_total) : 0; - /* - * SKL supports slice power gating on devices with more than - * one slice, and supports EU power gating on devices with - * more than one EU pair per subslice. BXT supports subslice - * power gating on devices with more than one subslice, and - * supports EU power gating on devices with more than one EU - * pair per subslice. - */ - info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && - (info->slice_total > 1)); - info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); - info->has_eu_pg = (info->eu_per_subslice > 2); -} - -static void broadwell_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - const int s_max = 3, ss_max = 3, eu_max = 8; - int s, ss; - u32 fuse2, eu_disable[s_max], s_enable, ss_disable; - - fuse2 = I915_READ(GEN8_FUSE2); - s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; - - eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; - eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << - (32 - GEN8_EU_DIS0_S1_SHIFT)); - eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << - (32 - GEN8_EU_DIS1_S2_SHIFT)); - - - info = (struct intel_device_info *)&dev_priv->info; - info->slice_total = hweight32(s_enable); - - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - info->subslice_per_slice = ss_max - hweight32(ss_disable); - info->subslice_total = info->slice_total * info->subslice_per_slice; - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < s_max; s++) { - if (!(s_enable & (0x1 << s))) - /* skip disabled slice */ - continue; - - for (ss = 0; ss < ss_max; ss++) { - u32 n_disabled; - - if (ss_disable & (0x1 << ss)) - /* skip disabled subslice */ - continue; - - n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); - - /* - * Record which subslices have 7 EUs. - */ - if (eu_max - n_disabled == 7) - info->subslice_7eu[s] |= 1 << ss; - - info->eu_total += eu_max - n_disabled; - } - } - - /* - * BDW is expected to always have a uniform distribution of EU across - * subslices with the exception that any one EU in any one subslice may - * be fused off for die recovery. - */ - info->eu_per_subslice = info->subslice_total ? - DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; - - /* - * BDW supports slice power gating on devices with more than - * one slice. - */ - info->has_slice_pg = (info->slice_total > 1); - info->has_subslice_pg = 0; - info->has_eu_pg = 0; -} - -/* - * Determine various intel_device_info fields at runtime. - * - * Use it when either: - * - it's judged too laborious to fill n static structures with the limit - * when a simple if statement does the job, - * - run-time checks (eg read fuse/strap registers) are needed. - * - * This function needs to be called: - * - after the MMIO has been setup as we are reading registers, - * - after the PCH has been detected, - * - before the first usage of the fields it can tweak. 
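The runtime-init function that follows decodes the SKL_DFSM pipe fuses into a disabled-pipe mask and rejects patterns that would leave a hole in the pipe numbering. A sketch of just that validity check (the local enum stands in for i915's enum pipe):

#include <linux/bitops.h>

enum { PIPE_A, PIPE_B, PIPE_C };	/* stand-in for i915's enum pipe */

static bool pipe_fuse_mask_valid(u8 disabled_mask)
{
	switch (disabled_mask) {
	case BIT(PIPE_A):
	case BIT(PIPE_B):
	case BIT(PIPE_A) | BIT(PIPE_B):
	case BIT(PIPE_A) | BIT(PIPE_C):
		return false;	/* a lower pipe is fused off below a live one */
	default:
		return true;
	}
}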
- */ -static void intel_device_info_runtime_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - enum pipe pipe; - - info = (struct intel_device_info *)&dev_priv->info; - - /* - * Skylake and Broxton currently don't expose the topmost plane as its - * use is exclusive with the legacy cursor and we only want to expose - * one of those, not both. Until we can safely expose the topmost plane - * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, - * we don't expose the topmost plane at all to prevent ABI breakage - * down the line. - */ - if (IS_BROXTON(dev)) { - info->num_sprites[PIPE_A] = 2; - info->num_sprites[PIPE_B] = 2; - info->num_sprites[PIPE_C] = 1; - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 2; - else - for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 1; - - if (i915.disable_display) { - DRM_INFO("Display disabled (module parameter)\n"); - info->num_pipes = 0; - } else if (info->num_pipes > 0 && - (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && - HAS_PCH_SPLIT(dev)) { - u32 fuse_strap = I915_READ(FUSE_STRAP); - u32 sfuse_strap = I915_READ(SFUSE_STRAP); - - /* - * SFUSE_STRAP is supposed to have a bit signalling the display - * is fused off. Unfortunately it seems that, at least in - * certain cases, fused off display means that PCH display - * reads don't land anywhere. In that case, we read 0s. - * - * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK - * should be set when taking over after the firmware. - */ - if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || - sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || - (dev_priv->pch_type == PCH_CPT && - !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { - DRM_INFO("Display fused off, disabling\n"); - info->num_pipes = 0; - } else if (fuse_strap & IVB_PIPE_C_DISABLE) { - DRM_INFO("PipeC fused off\n"); - info->num_pipes -= 1; - } - } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { - u32 dfsm = I915_READ(SKL_DFSM); - u8 disabled_mask = 0; - bool invalid; - int num_bits; - - if (dfsm & SKL_DFSM_PIPE_A_DISABLE) - disabled_mask |= BIT(PIPE_A); - if (dfsm & SKL_DFSM_PIPE_B_DISABLE) - disabled_mask |= BIT(PIPE_B); - if (dfsm & SKL_DFSM_PIPE_C_DISABLE) - disabled_mask |= BIT(PIPE_C); - - num_bits = hweight8(disabled_mask); - - switch (disabled_mask) { - case BIT(PIPE_A): - case BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_C): - invalid = true; - break; - default: - invalid = false; - } - - if (num_bits > info->num_pipes || invalid) - DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", - disabled_mask); - else - info->num_pipes -= num_bits; - } - - /* Initialize slice/subslice/EU info */ - if (IS_CHERRYVIEW(dev)) - cherryview_sseu_info_init(dev); - else if (IS_BROADWELL(dev)) - broadwell_sseu_info_init(dev); - else if (INTEL_INFO(dev)->gen >= 9) - gen9_sseu_info_init(dev); - - /* Snooping is broken on BXT A stepping. */ - info->has_snoop = !info->has_llc; - info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); - - DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); - DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); - DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); - DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); - DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); - DRM_DEBUG_DRIVER("has slice power gating: %s\n", - info->has_slice_pg ? 
"y" : "n"); - DRM_DEBUG_DRIVER("has subslice power gating: %s\n", - info->has_subslice_pg ? "y" : "n"); - DRM_DEBUG_DRIVER("has EU power gating: %s\n", - info->has_eu_pg ? "y" : "n"); -} - -static void intel_init_dpio(struct drm_i915_private *dev_priv) -{ - /* - * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), - * CHV x1 PHY (DP/HDMI D) - * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) - */ - if (IS_CHERRYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; - DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; - } else if (IS_VALLEYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; - } -} - -static int i915_workqueues_init(struct drm_i915_private *dev_priv) -{ - /* - * The i915 workqueue is primarily used for batched retirement of - * requests (and thus managing bo) once the task has been completed - * by the GPU. i915_gem_retire_requests() is called directly when we - * need high-priority retirement, such as waiting for an explicit - * bo. - * - * It is also used for periodic low-priority events, such as - * idle-timers and recording error state. - * - * All tasks on the workqueue are expected to acquire the dev mutex - * so there is no point in running more than one instance of the - * workqueue at any time. Use an ordered one. - */ - dev_priv->wq = alloc_ordered_workqueue("i915", 0); - if (dev_priv->wq == NULL) - goto out_err; - - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) - goto out_free_wq; - - dev_priv->gpu_error.hangcheck_wq = - alloc_ordered_workqueue("i915-hangcheck", 0); - if (dev_priv->gpu_error.hangcheck_wq == NULL) - goto out_free_dp_wq; - - return 0; - -out_free_dp_wq: - destroy_workqueue(dev_priv->hotplug.dp_wq); -out_free_wq: - destroy_workqueue(dev_priv->wq); -out_err: - DRM_ERROR("Failed to allocate workqueues.\n"); - - return -ENOMEM; -} - -static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) -{ - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); - destroy_workqueue(dev_priv->hotplug.dp_wq); - destroy_workqueue(dev_priv->wq); -} - -/** - * i915_driver_init_early - setup state not requiring device access - * @dev_priv: device private - * - * Initialize everything that is a "SW-only" state, that is state not - * requiring accessing the device or exposing the driver via kernel internal - * or userspace interfaces. Example steps belonging here: lock initialization, - * system memory allocation, setting up device specific attributes and - * function hooks not requiring accessing the device. 
- */ -static int i915_driver_init_early(struct drm_i915_private *dev_priv, - struct drm_device *dev, - struct intel_device_info *info) -{ - struct intel_device_info *device_info; - int ret = 0; - - if (i915_inject_load_failure()) - return -ENODEV; - - /* Setup the write-once "constant" device info */ - device_info = (struct intel_device_info *)&dev_priv->info; - memcpy(device_info, info, sizeof(dev_priv->info)); - device_info->device_id = dev->pdev->device; - - spin_lock_init(&dev_priv->irq_lock); - spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->backlight_lock); - spin_lock_init(&dev_priv->uncore.lock); - spin_lock_init(&dev_priv->mm.object_stat_lock); - spin_lock_init(&dev_priv->mmio_flip_lock); - mutex_init(&dev_priv->sb_lock); - mutex_init(&dev_priv->modeset_restore_lock); - mutex_init(&dev_priv->av_mutex); - mutex_init(&dev_priv->wm.wm_mutex); - mutex_init(&dev_priv->pps_mutex); - - ret = i915_workqueues_init(dev_priv); - if (ret < 0) - return ret; - - /* This must be called before any calls to HAS_PCH_* */ - intel_detect_pch(dev); - - intel_pm_setup(dev); - intel_init_dpio(dev_priv); - intel_power_domains_init(dev_priv); - intel_irq_init(dev_priv); - intel_init_display_hooks(dev_priv); - intel_init_clock_gating_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); - i915_gem_load_init(dev); - - intel_display_crc_init(dev); - - i915_dump_device_info(dev_priv); - - /* Not all pre-production machines fall into this category, only the - * very first ones. Almost everything should work, except for maybe - * suspend/resume. And we don't implement workarounds that affect only - * pre-production machines. */ - if (IS_HSW_EARLY_SDV(dev)) - DRM_INFO("This is an early pre-production Haswell machine. " - "It may not be fully functional.\n"); - - return 0; -} - -/** - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() - * @dev_priv: device private - */ -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) -{ - i915_gem_load_cleanup(dev_priv->dev); - i915_workqueues_cleanup(dev_priv); -} - -static int i915_mmio_setup(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int mmio_bar; - int mmio_size; - - mmio_bar = IS_GEN2(dev) ? 1 : 0; - /* - * Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. - */ - if (INTEL_INFO(dev)->gen < 5) - mmio_size = 512 * 1024; - else - mmio_size = 2 * 1024 * 1024; - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); - if (dev_priv->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - - return -EIO; - } - - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); - - return 0; -} - -static void i915_mmio_cleanup(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - intel_teardown_mchbar(dev); - pci_iounmap(dev->pdev, dev_priv->regs); -} - -/** - * i915_driver_init_mmio - setup device MMIO - * @dev_priv: device private - * - * Setup minimal device state necessary for MMIO accesses later in the - * initialization sequence. The setup here should avoid any other device-wide - * side effects or exposing the driver via kernel internal or user space - * interfaces. 
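The BAR comment in i915_mmio_setup() above is the whole reason an explicit size is passed to pci_iomap(): on gen4+ the registers share a BAR with the GTT, which must be left for ioremap_wc. A condensed sketch of that decision (helper name invented, error handling trimmed; not the literal driver code):

#include <linux/pci.h>

/* Sketch: map only the register range, never the GTT half of the BAR. */
static void __iomem *map_gpu_regs(struct pci_dev *pdev, int gen)
{
	int bar = (gen == 2) ? 1 : 0;	/* gen2 keeps registers in BAR1 */
	resource_size_t size = (gen < 5) ?
		512 * 1024 :		/* register-only BAR */
		2 * 1024 * 1024;	/* shared BAR: cap at the registers */

	return pci_iomap(pdev, bar, size);	/* NULL on failure */
}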
- */ -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - if (i915_get_bridge_dev(dev)) - return -EIO; - - ret = i915_mmio_setup(dev); - if (ret < 0) - goto put_bridge; - - intel_uncore_init(dev); - - return 0; - -put_bridge: - pci_dev_put(dev_priv->bridge_dev); - - return ret; -} - -/** - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() - * @dev_priv: device private - */ -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - intel_uncore_fini(dev); - i915_mmio_cleanup(dev); - pci_dev_put(dev_priv->bridge_dev); -} - -/** - * i915_driver_init_hw - setup state requiring device access - * @dev_priv: device private - * - * Setup state that requires accessing the device, but doesn't require - * exposing the driver via kernel internal or userspace interfaces. - */ -static int i915_driver_init_hw(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - uint32_t aperture_size; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - intel_device_info_runtime_init(dev); - - ret = i915_ggtt_init_hw(dev); - if (ret) - return ret; - - ret = i915_ggtt_enable_hw(dev); - if (ret) { - DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; - } - - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ - ret = i915_kick_out_firmware_fb(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; - } - - ret = i915_kick_out_vgacon(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; - } - - pci_set_master(dev->pdev); - - /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); - - /* 965GM sometimes incorrectly writes to hardware status page (HWS) - * using 32bit addressing, overwriting memory if HWS is located - * above 4GB. - * - * The documentation also mentions an issue with undefined - * behaviour if any general state is accessed within a page above 4GB, - * which also needs to be handled carefully. - */ - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); - - aperture_size = ggtt->mappable_end; - - ggtt->mappable = - io_mapping_create_wc(ggtt->mappable_base, - aperture_size); - if (!ggtt->mappable) { - ret = -EIO; - goto out_ggtt; - } - - ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, - aperture_size); - - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - - intel_uncore_sanitize(dev); - - intel_opregion_setup(dev); - - i915_gem_load_init_fences(dev_priv); - - /* On the 945G/GM, the chipset reports the MSI capability on the - * integrated graphics even though the support isn't actually there - * according to the published specs. It doesn't appear to function - * correctly in testing on 945G. - * This may be a side effect of MSI having been made available for PEG - * and the registers being closely associated. - * - * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. 
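The two dma_set_coherent_mask() calls earlier in this hunk encode hardware reach, not preference: the gen2 overlay cannot address above 1 GiB, and Broadwater/Crestline must keep the hardware status page below 4 GiB. A sketch of the pattern, with made-up predicates standing in for IS_GEN2()/IS_BROADWATER()/IS_CRESTLINE():

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: clamp coherent DMA to what the display/GPU block can address. */
static int clamp_dma_mask(struct pci_dev *pdev, bool is_gen2, bool hws_below_4g)
{
	if (is_gen2)		/* overlay cannot address above 1G */
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
	if (hws_below_4g)	/* keep the hardware status page under 4G */
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	return 0;
}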
- */ - if (!IS_I945G(dev) && !IS_I945GM(dev)) { - if (pci_enable_msi(dev->pdev) < 0) - DRM_DEBUG_DRIVER("can't enable MSI"); - } - - return 0; - -out_ggtt: - i915_ggtt_cleanup_hw(dev); - - return ret; -} - -/** - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() - * @dev_priv: device private - */ -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - - pm_qos_remove_request(&dev_priv->pm_qos); - arch_phys_wc_del(ggtt->mtrr); - io_mapping_free(ggtt->mappable); - i915_ggtt_cleanup_hw(dev); -} - -/** - * i915_driver_register - register the driver with the rest of the system - * @dev_priv: device private - * - * Perform any steps necessary to make the driver available via kernel - * internal or userspace interfaces. - */ -static void i915_driver_register(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - i915_gem_shrinker_init(dev_priv); - /* - * Notify a valid surface after modesetting, - * when running inside a VM. - */ - if (intel_vgpu_active(dev)) - I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); - - i915_setup_sysfs(dev); - - if (INTEL_INFO(dev_priv)->num_pipes) { - /* Must be done after probing outputs */ - intel_opregion_init(dev); - acpi_video_register(); - } - - if (IS_GEN5(dev_priv)) - intel_gpu_ips_init(dev_priv); - - i915_audio_component_init(dev_priv); -} - -/** - * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() - * @dev_priv: device private - */ -static void i915_driver_unregister(struct drm_i915_private *dev_priv) -{ - i915_audio_component_cleanup(dev_priv); - intel_gpu_ips_teardown(); - acpi_video_unregister(); - intel_opregion_fini(dev_priv->dev); - i915_teardown_sysfs(dev_priv->dev); - i915_gem_shrinker_cleanup(dev_priv); -} - -/** - * i915_driver_load - setup chip and create an initial config - * @dev: DRM device - * @flags: startup flags - * - * The driver load routine has to do several things: - * - drive output discovery via intel_modeset_init() - * - initialize the memory manager - * - allocate initial config memory - * - setup the DRM framebuffer with the allocated memory - */ -int i915_driver_load(struct drm_device *dev, unsigned long flags) -{ - struct drm_i915_private *dev_priv; - int ret = 0; - - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (dev_priv == NULL) - return -ENOMEM; - - dev->dev_private = dev_priv; - /* Must be set before calling __i915_printk */ - dev_priv->dev = dev; - - ret = i915_driver_init_early(dev_priv, dev, - (struct intel_device_info *)flags); - - if (ret < 0) - goto out_free_priv; - - intel_runtime_pm_get(dev_priv); - - ret = i915_driver_init_mmio(dev_priv); - if (ret < 0) - goto out_runtime_pm_put; - - ret = i915_driver_init_hw(dev_priv); - if (ret < 0) - goto out_cleanup_mmio; - - /* - * TODO: move the vblank init and parts of modeset init steps into one - * of the i915_driver_init_/i915_driver_register functions according - * to the role/effect of the given init step. 
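The load path here is an onion: each init phase either succeeds or unwinds exactly the phases before it, and unload peels them in reverse. A toy, compilable model of that goto-unwind discipline (stub phases only, nothing i915-specific):

#include <stdio.h>

static int init_early(void) { puts("early"); return 0; }
static int init_mmio(void)  { puts("mmio");  return 0; }
static int init_hw(void)    { puts("hw");    return -1; } /* simulate failure */
static void cleanup_mmio(void)  { puts("~mmio");  }
static void cleanup_early(void) { puts("~early"); }

static int driver_load(void)
{
	int ret;

	if ((ret = init_early()))
		return ret;
	if ((ret = init_mmio()))
		goto out_early;
	if ((ret = init_hw()))
		goto out_mmio;
	return 0;

out_mmio:
	cleanup_mmio();		/* unwind in strict reverse order */
out_early:
	cleanup_early();
	return ret;
}

int main(void) { return driver_load() ? 1 : 0; }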
- */ - if (INTEL_INFO(dev)->num_pipes) { - ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); - if (ret) - goto out_cleanup_hw; - } - - ret = i915_load_modeset_init(dev); - if (ret < 0) - goto out_cleanup_vblank; - - i915_driver_register(dev_priv); - - intel_runtime_pm_enable(dev_priv); - - intel_runtime_pm_put(dev_priv); - - return 0; - -out_cleanup_vblank: - drm_vblank_cleanup(dev); -out_cleanup_hw: - i915_driver_cleanup_hw(dev_priv); -out_cleanup_mmio: - i915_driver_cleanup_mmio(dev_priv); -out_runtime_pm_put: - intel_runtime_pm_put(dev_priv); - i915_driver_cleanup_early(dev_priv); -out_free_priv: - i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); - - kfree(dev_priv); - - return ret; -} - -int i915_driver_unload(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - intel_fbdev_fini(dev); - - ret = i915_gem_suspend(dev); - if (ret) { - DRM_ERROR("failed to idle hardware: %d\n", ret); - return ret; - } - - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - - i915_driver_unregister(dev_priv); - - drm_vblank_cleanup(dev); - - intel_modeset_cleanup(dev); - - /* - * free the memory space allocated for the child device - * config parsed from VBT - */ - if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { - kfree(dev_priv->vbt.child_dev); - dev_priv->vbt.child_dev = NULL; - dev_priv->vbt.child_dev_num = 0; - } - kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); - dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; - kfree(dev_priv->vbt.lfp_lvds_vbt_mode); - dev_priv->vbt.lfp_lvds_vbt_mode = NULL; - - vga_switcheroo_unregister_client(dev->pdev); - vga_client_register(dev->pdev, NULL, NULL, NULL); - - intel_csr_ucode_fini(dev_priv); - - /* Free error state after interrupts are fully disabled. */ - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - i915_destroy_error_state(dev); - - /* Flush any outstanding unpin_work. */ - flush_workqueue(dev_priv->wq); - - intel_guc_ucode_fini(dev); - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); - intel_fbc_cleanup_cfb(dev_priv); - - intel_power_domains_fini(dev_priv); - - i915_driver_cleanup_hw(dev_priv); - i915_driver_cleanup_mmio(dev_priv); - - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - i915_driver_cleanup_early(dev_priv); - kfree(dev_priv); - - return 0; -} - -int i915_driver_open(struct drm_device *dev, struct drm_file *file) -{ - int ret; - - ret = i915_gem_open(dev, file); - if (ret) - return ret; - - return 0; -} - -/** - * i915_driver_lastclose - clean up after all DRM clients have exited - * @dev: DRM device - * - * Take care of cleaning up after all DRM clients have exited. In the - * mode setting case, we want to restore the kernel's initial mode (just - * in case the last client left us in a bad state). - * - * Additionally, in the non-mode setting case, we'll tear down the GTT - * and DMA structures, since the kernel won't be using them, and clean - * up any GEM state.
- */ -void i915_driver_lastclose(struct drm_device *dev) -{ - intel_fbdev_restore_mode(dev); - vga_switcheroo_process_delayed_switch(); -} - -void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) -{ - mutex_lock(&dev->struct_mutex); - i915_gem_context_close(dev, file); - i915_gem_release(dev, file); - mutex_unlock(&dev->struct_mutex); -} - -void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - - kfree(file_priv); -} - -static int -i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - return -ENODEV; -} - -const struct drm_ioctl_desc i915_ioctls[] = { - DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, 
i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), - DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), -}; - -int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 85c4debf47e0..95ddd56b89f0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -27,400 +27,92 @@ * */ -#include <linux/device.h> #include <linux/acpi.h> -#include <drm/drmP.h> -#include <drm/i915_drm.h> -#include "i915_drv.h" -#include "i915_trace.h" -#include "intel_drv.h" - -#include <linux/apple-gmux.h> -#include <linux/console.h> +#include <linux/device.h> +#include <linux/oom.h> #include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/pnp.h> +#include <linux/slab.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_crtc_helper.h> - -static struct drm_driver driver; - -#define GEN_DEFAULT_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } - -#define GEN_CHV_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - CHV_PIPE_C_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - CHV_TRANSCODER_C_OFFSET, }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ - CHV_PALETTE_C_OFFSET } - -#define CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } - -#define IVB_CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } - -#define BDW_COLORS \ - .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } -#define CHV_COLORS \ - .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } - -static const struct intel_device_info intel_i830_info = { - .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_845g_info = { - .gen = 2, .num_pipes = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = 
RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i85x_info = { - .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i865g_info = { - .gen = 2, .num_pipes = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i915g_info = { - .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i915gm_info = { - .gen = 3, .is_mobile = 1, .num_pipes = 2, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i945g_info = { - .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i945gm_info = { - .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, - .has_hotplug = 1, .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i965g_info = { - .gen = 4, .is_broadwater = 1, .num_pipes = 2, - .has_hotplug = 1, - .has_overlay = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i965gm_info = { - .gen = 4, .is_crestline = 1, .num_pipes = 2, - .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, - .has_overlay = 1, - .supports_tv = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_g33_info = { - .gen = 3, .is_g33 = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_overlay = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_g45_info = { - .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, - .has_pipe_cxsr = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_gm45_info = { - .gen = 4, .is_g4x = 1, .num_pipes = 2, - .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, - .has_pipe_cxsr = 1, .has_hotplug = 1, - .supports_tv = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_pineview_info = { - .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_overlay = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; +#include <linux/vt.h> +#include <acpi/video.h> -static const struct 
intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_sandybridge_d_info = { - .gen = 6, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, - .has_llc = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_sandybridge_m_info = { - .gen = 6, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, - .has_llc = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -#define GEN7_FEATURES \ - .gen = 7, .num_pipes = 3, \ - .need_gfx_hws = 1, .has_hotplug = 1, \ - .has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ - .has_llc = 1, \ - GEN_DEFAULT_PIPEOFFSETS, \ - IVB_CURSOR_OFFSETS - -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .is_ivybridge = 1, -}; - -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .is_ivybridge = 1, - .is_mobile = 1, -}; - -static const struct intel_device_info intel_ivybridge_q_info = { - GEN7_FEATURES, - .is_ivybridge = 1, - .num_pipes = 0, /* legal, last one wins */ -}; - -#define VLV_FEATURES \ - .gen = 7, .num_pipes = 2, \ - .need_gfx_hws = 1, .has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ - .display_mmio_offset = VLV_DISPLAY_BASE, \ - GEN_DEFAULT_PIPEOFFSETS, \ - CURSOR_OFFSETS - -static const struct intel_device_info intel_valleyview_m_info = { - VLV_FEATURES, - .is_valleyview = 1, - .is_mobile = 1, -}; +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/i915_drm.h> -static const struct intel_device_info intel_valleyview_d_info = { - VLV_FEATURES, - .is_valleyview = 1, -}; +#include "i915_drv.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_drv.h" -#define HSW_FEATURES \ - GEN7_FEATURES, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ - .has_ddi = 1, \ - .has_fpga_dbg = 1 +static struct drm_driver driver; -static const struct intel_device_info intel_haswell_d_info = { - HSW_FEATURES, - .is_haswell = 1, -}; +static unsigned int i915_load_fail_count; -static const struct intel_device_info intel_haswell_m_info = { - HSW_FEATURES, - .is_haswell = 1, - .is_mobile = 1, -}; +bool __i915_inject_load_failure(const char *func, int line) +{ + if (i915_load_fail_count >= i915.inject_load_failure) + return false; -#define BDW_FEATURES \ - HSW_FEATURES, \ - BDW_COLORS + if (++i915_load_fail_count == i915.inject_load_failure) { + DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", + i915.inject_load_failure, func, line); + return true; + } -static const struct intel_device_info intel_broadwell_d_info = { - BDW_FEATURES, - .gen = 8, -}; + return false; +} -static const struct intel_device_info intel_broadwell_m_info = { - BDW_FEATURES, - .gen = 8, .is_mobile = 1, -}; +#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" +#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ + "providing the dmesg log by booting with drm.debug=0xf" -static const struct intel_device_info intel_broadwell_gt3d_info = { - BDW_FEATURES, - .gen = 8, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; +void +__i915_printk(struct 
drm_i915_private *dev_priv, const char *level, + const char *fmt, ...) +{ + static bool shown_bug_once; + struct device *dev = dev_priv->drm.dev; + bool is_error = level[1] <= KERN_ERR[1]; + bool is_debug = level[1] == KERN_DEBUG[1]; + struct va_format vaf; + va_list args; + + if (is_debug && !(drm_debug & DRM_UT_DRIVER)) + return; -static const struct intel_device_info intel_broadwell_gt3m_info = { - BDW_FEATURES, - .gen = 8, .is_mobile = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; + va_start(args, fmt); -static const struct intel_device_info intel_cherryview_info = { - .gen = 8, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .is_cherryview = 1, - .display_mmio_offset = VLV_DISPLAY_BASE, - GEN_CHV_PIPEOFFSETS, - CURSOR_OFFSETS, - CHV_COLORS, -}; + vaf.fmt = fmt; + vaf.va = &args; -static const struct intel_device_info intel_skylake_info = { - BDW_FEATURES, - .is_skylake = 1, - .gen = 9, -}; + dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", + __builtin_return_address(0), &vaf); -static const struct intel_device_info intel_skylake_gt3_info = { - BDW_FEATURES, - .is_skylake = 1, - .gen = 9, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; + if (is_error && !shown_bug_once) { + dev_notice(dev, "%s", FDO_BUG_MSG); + shown_bug_once = true; + } -static const struct intel_device_info intel_broxton_info = { - .is_preliminary = 1, - .is_broxton = 1, - .gen = 9, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .num_pipes = 3, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, - BDW_COLORS, -}; + va_end(args); +} -static const struct intel_device_info intel_kabylake_info = { - BDW_FEATURES, - .is_kabylake = 1, - .gen = 9, -}; +static bool i915_error_injected(struct drm_i915_private *dev_priv) +{ + return i915.inject_load_failure && + i915_load_fail_count == i915.inject_load_failure; +} -static const struct intel_device_info intel_kabylake_gt3_info = { - BDW_FEATURES, - .is_kabylake = 1, - .gen = 9, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; +#define i915_load_error(dev_priv, fmt, ...) \ + __i915_printk(dev_priv, \ + i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ + fmt, ##__VA_ARGS__) -/* - * Make sure any device matches here are from most specific to most - * general. For example, since the Quanta match is based on the subsystem - * and subvendor IDs, we need it to come before the more general IVB - * PCI ID matches, otherwise we'll use the wrong info struct above. 
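The ordering warning above is load-bearing: PCI ID matching returns the first hit, so an entry qualified by subsystem IDs (like the Quanta/IVB one) must precede the broad entries it overlaps. A schematic table showing the shape, with invented device and subsystem IDs:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	/* Most specific first: this row also matches device 0x1234, but
	 * only for one vendor's board, so it must precede the wildcard. */
	{ PCI_DEVICE_SUB(0x8086, 0x1234, 0xabcd, 0x0001), .driver_data = 1 },
	/* Generic fallback for every other 0x1234 board. */
	{ PCI_DEVICE(0x8086, 0x1234), .driver_data = 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);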
- */ -static const struct pci_device_id pciidlist[] = { - INTEL_I830_IDS(&intel_i830_info), - INTEL_I845G_IDS(&intel_845g_info), - INTEL_I85X_IDS(&intel_i85x_info), - INTEL_I865G_IDS(&intel_i865g_info), - INTEL_I915G_IDS(&intel_i915g_info), - INTEL_I915GM_IDS(&intel_i915gm_info), - INTEL_I945G_IDS(&intel_i945g_info), - INTEL_I945GM_IDS(&intel_i945gm_info), - INTEL_I965G_IDS(&intel_i965g_info), - INTEL_G33_IDS(&intel_g33_info), - INTEL_I965GM_IDS(&intel_i965gm_info), - INTEL_GM45_IDS(&intel_gm45_info), - INTEL_G45_IDS(&intel_g45_info), - INTEL_PINEVIEW_IDS(&intel_pineview_info), - INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), - INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), - INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_D_IDS(&intel_haswell_d_info), - INTEL_HSW_M_IDS(&intel_haswell_m_info), - INTEL_VLV_M_IDS(&intel_valleyview_m_info), - INTEL_VLV_D_IDS(&intel_valleyview_d_info), - INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), - INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), - INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), - INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), - INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), - INTEL_BXT_IDS(&intel_broxton_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), - INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), - INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - {0, 0, 0} -}; - -MODULE_DEVICE_TABLE(pci, pciidlist); static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) { @@ -450,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) return ret; } -void intel_detect_pch(struct drm_device *dev) +static void intel_detect_pch(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting @@ -519,8 +211,10 @@ void intel_detect_pch(struct drm_device *dev) } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && - pch->subsystem_vendor == 0x1af4 && - pch->subsystem_device == 0x1100)) { + pch->subsystem_vendor == + PCI_SUBVENDOR_ID_REDHAT_QUMRANET && + pch->subsystem_device == + PCI_SUBDEVICE_ID_QEMU)) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else continue; @@ -534,9 +228,9 @@ void intel_detect_pch(struct drm_device *dev) pci_dev_put(pch); } -bool i915_semaphore_is_enabled(struct drm_device *dev) +bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return false; if (i915.semaphores >= 0) @@ -546,22 +240,1172 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (i915.enable_execlists) return false; - /* Until we get further testing... 
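The semaphore policy being simplified in the next hunk is layered: an explicit module-parameter override wins, execlists rules semaphores out, then per-platform quirks apply. A runnable toy of that decision order (names and the quirk flag are illustrative, not the driver's API):

#include <stdio.h>
#include <stdbool.h>

static bool semaphores_enabled(int override, bool execlists, bool quirk)
{
	if (override >= 0)	/* module parameter: -1 means "auto" */
		return override != 0;
	if (execlists)		/* execlists mode never uses semaphores */
		return false;
	return !quirk;		/* platform denylist, e.g. SNB with IOMMU on */
}

int main(void)
{
	printf("auto, ring mode, no quirk -> %d\n",
	       semaphores_enabled(-1, false, false));
	printf("forced off by parameter   -> %d\n",
	       semaphores_enabled(0, false, false));
	return 0;
}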
*/ - if (IS_GEN8(dev)) - return false; - #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) return false; #endif return true; } +static int i915_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + drm_i915_getparam_t *param = data; + int value; + + switch (param->param) { + case I915_PARAM_IRQ_ACTIVE: + case I915_PARAM_ALLOW_BATCHBUFFER: + case I915_PARAM_LAST_DISPATCH: + /* Reject all old ums/dri params. */ + return -ENODEV; + case I915_PARAM_CHIPSET_ID: + value = dev->pdev->device; + break; + case I915_PARAM_REVISION: + value = dev->pdev->revision; + break; + case I915_PARAM_HAS_GEM: + value = 1; + break; + case I915_PARAM_NUM_FENCES_AVAIL: + value = dev_priv->num_fence_regs; + break; + case I915_PARAM_HAS_OVERLAY: + value = dev_priv->overlay ? 1 : 0; + break; + case I915_PARAM_HAS_PAGEFLIPPING: + value = 1; + break; + case I915_PARAM_HAS_EXECBUF2: + /* depends on GEM */ + value = 1; + break; + case I915_PARAM_HAS_BSD: + value = intel_engine_initialized(&dev_priv->engine[VCS]); + break; + case I915_PARAM_HAS_BLT: + value = intel_engine_initialized(&dev_priv->engine[BCS]); + break; + case I915_PARAM_HAS_VEBOX: + value = intel_engine_initialized(&dev_priv->engine[VECS]); + break; + case I915_PARAM_HAS_BSD2: + value = intel_engine_initialized(&dev_priv->engine[VCS2]); + break; + case I915_PARAM_HAS_RELAXED_FENCING: + value = 1; + break; + case I915_PARAM_HAS_COHERENT_RINGS: + value = 1; + break; + case I915_PARAM_HAS_EXEC_CONSTANTS: + value = INTEL_INFO(dev)->gen >= 4; + break; + case I915_PARAM_HAS_RELAXED_DELTA: + value = 1; + break; + case I915_PARAM_HAS_GEN7_SOL_RESET: + value = 1; + break; + case I915_PARAM_HAS_LLC: + value = HAS_LLC(dev); + break; + case I915_PARAM_HAS_WT: + value = HAS_WT(dev); + break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = USES_PPGTT(dev); + break; + case I915_PARAM_HAS_WAIT_TIMEOUT: + value = 1; + break; + case I915_PARAM_HAS_SEMAPHORES: + value = i915_semaphore_is_enabled(dev_priv); + break; + case I915_PARAM_HAS_PRIME_VMAP_FLUSH: + value = 1; + break; + case I915_PARAM_HAS_SECURE_BATCHES: + value = capable(CAP_SYS_ADMIN); + break; + case I915_PARAM_HAS_PINNED_BATCHES: + value = 1; + break; + case I915_PARAM_HAS_EXEC_NO_RELOC: + value = 1; + break; + case I915_PARAM_HAS_EXEC_HANDLE_LUT: + value = 1; + break; + case I915_PARAM_CMD_PARSER_VERSION: + value = i915_cmd_parser_get_version(dev_priv); + break; + case I915_PARAM_HAS_COHERENT_PHYS_GTT: + value = 1; + break; + case I915_PARAM_MMAP_VERSION: + value = 1; + break; + case I915_PARAM_SUBSLICE_TOTAL: + value = INTEL_INFO(dev)->subslice_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_EU_TOTAL: + value = INTEL_INFO(dev)->eu_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_HAS_GPU_RESET: + value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); + break; + case I915_PARAM_HAS_RESOURCE_STREAMER: + value = HAS_RESOURCE_STREAMER(dev); + break; + case I915_PARAM_HAS_EXEC_SOFTPIN: + value = 1; + break; + case I915_PARAM_HAS_POOLED_EU: + value = HAS_POOLED_EU(dev); + break; + case I915_PARAM_MIN_EU_IN_POOL: + value = INTEL_INFO(dev)->min_eu_in_pool; + break; + default: + DRM_DEBUG("Unknown parameter %d\n", param->param); + return -EINVAL; + } + + if (put_user(value, param->value)) + return -EFAULT; + + return 0; +} + +static int i915_get_bridge_dev(struct 
drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!dev_priv->bridge_dev) { + DRM_ERROR("bridge device not found\n"); + return -1; + } + return 0; +} + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret; + + if (INTEL_INFO(dev)->gen >= 4) + pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; +#endif + + /* Get some space for it */ + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + dev_priv->bridge_dev); + if (ret) { + DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + return ret; + } + + if (INTEL_INFO(dev)->gen >= 4) + pci_write_config_dword(dev_priv->bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(dev_priv->bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); + return 0; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static void +intel_setup_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool enabled; + + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + return; + + dev_priv->mchbar_need_disable = false; + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + return; + + if (intel_alloc_mchbar_resource(dev)) + return; + + dev_priv->mchbar_need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(dev_priv->bridge_dev, DEVEN, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); + } +} + +static void +intel_teardown_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; + + if (dev_priv->mchbar_need_disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + u32 deven_val; + + pci_read_config_dword(dev_priv->bridge_dev, DEVEN, + &deven_val); + deven_val &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(dev_priv->bridge_dev, DEVEN, + deven_val); + } else { + u32 mchbar_val; + + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, + &mchbar_val); + mchbar_val &= ~1; + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, + mchbar_val); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + +/* true = enable decode, false = disable decode */ +static unsigned int i915_vga_set_decode(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + intel_modeset_vga_set_state(dev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + + if (state == VGA_SWITCHEROO_ON) { + pr_info("switched on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume_switcheroo(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; + } else { + pr_info("switched off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend_switcheroo(dev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count == 0; +} + +static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { + .set_gpu_state = i915_switcheroo_set_state, + .reprobe = NULL, + .can_switch = i915_switcheroo_can_switch, +}; + +static void i915_gem_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + /* + * Neither the BIOS, ourselves, nor any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload. Afterwards we then clean up the + * GEM state tracking, flushing off the requests and leaving the + * system in a known idle state. + * + * Note that it is of the utmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that resetting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition.
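Given that rationale, the unload-time reset in the code that follows is deliberately tolerant: -ENODEV ("no reset method on this hardware") is the one failure that is expected and swallowed, while everything else trips the WARN. A minimal sketch of the idiom (the callback parameter is a stand-in for intel_gpu_reset()):

#include <linux/bug.h>
#include <linux/errno.h>

/* Sketch: attempt an optional hardware reset; only -ENODEV is benign. */
static void reset_on_unload(int (*try_reset)(void))
{
	int ret = try_reset();

	WARN_ON(ret && ret != -ENODEV);	/* scream about real failures only */
}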
+ */ + if (HAS_HW_CONTEXTS(dev)) { + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); + WARN_ON(reset && reset != -ENODEV); + } + + mutex_lock(&dev->struct_mutex); + i915_gem_reset(dev); + i915_gem_cleanup_engines(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + + WARN_ON(!list_empty(&to_i915(dev)->context_list)); +} + +static int i915_load_modeset_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + ret = intel_bios_init(dev_priv); + if (ret) + DRM_INFO("failed to find VBIOS tables\n"); + + /* If we have > 1 VGA cards, then we need to arbitrate access + * to the common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. + */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; + + intel_register_dsm_handler(); + + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); + if (ret) + goto cleanup_vga_client; + + /* must happen before intel_power_domains_init_hw() on VLV/CHV */ + intel_update_rawclk(dev_priv); + + intel_power_domains_init_hw(dev_priv, false); + + intel_csr_ucode_init(dev_priv); + + ret = intel_irq_install(dev_priv); + if (ret) + goto cleanup_csr; + + intel_setup_gmbus(dev); + + /* Important: The output setup functions called by modeset_init need + * working irqs for e.g. gmbus and dp aux transfers. */ + intel_modeset_init(dev); + + intel_guc_init(dev); + + ret = i915_gem_init(dev); + if (ret) + goto cleanup_irq; + + intel_modeset_gem_init(dev); + + if (INTEL_INFO(dev)->num_pipes == 0) + return 0; + + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_gem; + + /* Only enable hotplug handling once the fbdev is fully set up. 
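The vga_client_register() call earlier in i915_load_modeset_init() shows the same tolerated-errno pattern: a secondary (non-VGA-class) controller gets -ENODEV, which simply means we sit out of VGA arbitration. A sketch with a stand-in decode callback, using the four-argument signature as it appears in this hunk:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Hypothetical decode callback; the real one flips chip VGA decode state. */
static unsigned int demo_set_decode(void *cookie, bool enable)
{
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int register_vga_client(struct pci_dev *pdev, void *priv)
{
	int ret = vga_client_register(pdev, priv, NULL, demo_set_decode);

	return (ret == -ENODEV) ? 0 : ret;	/* not a VGA device: fine */
}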
*/ + intel_hpd_init(dev_priv); + + drm_kms_helper_poll_init(dev); + + return 0; + +cleanup_gem: + i915_gem_fini(dev); +cleanup_irq: + intel_guc_fini(dev); + drm_irq_uninstall(dev); + intel_teardown_gmbus(dev); +cleanup_csr: + intel_csr_ucode_fini(dev_priv); + intel_power_domains_fini(dev_priv); + vga_switcheroo_unregister_client(dev->pdev); +cleanup_vga_client: + vga_client_register(dev->pdev, NULL, NULL, NULL); +out: + return ret; +} + +#if IS_ENABLED(CONFIG_FB) +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->drm.pdev; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + bool primary; + int ret; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = ggtt->mappable_base; + ap->ranges[0].size = ggtt->mappable_end; + + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); + + return ret; +} +#else +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + return 0; +} +#endif + +#if !defined(CONFIG_VGA_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return 0; +} +#elif !defined(CONFIG_DUMMY_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return -ENODEV; +} +#else +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + DRM_INFO("Replacing VGA console driver\n"); + + console_lock(); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); + if (ret == 0) { + ret = do_unregister_con_driver(&vga_con); + + /* Ignore "already unregistered". */ + if (ret == -ENODEV) + ret = 0; + } + console_unlock(); + + return ret; +} +#endif + +static void intel_init_dpio(struct drm_i915_private *dev_priv) +{ + /* + * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), + * CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) + */ + if (IS_CHERRYVIEW(dev_priv)) { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; + DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; + } else if (IS_VALLEYVIEW(dev_priv)) { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; + } +} + +static int i915_workqueues_init(struct drm_i915_private *dev_priv) +{ + /* + * The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. 
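Because every task queued here takes the same device mutex, parallel workers buy nothing, which is why the comment insists on an ordered workqueue. A stripped-down version of the allocate-or-unwind pattern used by the function that follows (demo names, two queues instead of the driver's set):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Sketch: two single-threaded queues, unwound on partial failure. */
static int demo_wq_init(struct workqueue_struct **wq,
			struct workqueue_struct **dp_wq)
{
	*wq = alloc_ordered_workqueue("demo", 0);
	if (!*wq)
		return -ENOMEM;

	*dp_wq = alloc_ordered_workqueue("demo-dp", 0);
	if (!*dp_wq) {
		destroy_workqueue(*wq);	/* undo the first allocation */
		return -ENOMEM;
	}
	return 0;
}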
+ */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) + goto out_err; + + dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->hotplug.dp_wq == NULL) + goto out_free_wq; + + return 0; + +out_free_wq: + destroy_workqueue(dev_priv->wq); +out_err: + DRM_ERROR("Failed to allocate workqueues.\n"); + + return -ENOMEM; +} + +static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) +{ + destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->wq); +} + +/** + * i915_driver_init_early - setup state not requiring device access + * @dev_priv: device private + * + * Initialize everything that is a "SW-only" state, that is state not + * requiring accessing the device or exposing the driver via kernel internal + * or userspace interfaces. Example steps belonging here: lock initialization, + * system memory allocation, setting up device specific attributes and + * function hooks not requiring accessing the device. + */ +static int i915_driver_init_early(struct drm_i915_private *dev_priv, + const struct pci_device_id *ent) +{ + const struct intel_device_info *match_info = + (struct intel_device_info *)ent->driver_data; + struct intel_device_info *device_info; + int ret = 0; + + if (i915_inject_load_failure()) + return -ENODEV; + + /* Setup the write-once "constant" device info */ + device_info = mkwrite_device_info(dev_priv); + memcpy(device_info, match_info, sizeof(*device_info)); + device_info->device_id = dev_priv->drm.pdev->device; + + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + device_info->gen_mask = BIT(device_info->gen - 1); + + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->gpu_error.lock); + mutex_init(&dev_priv->backlight_lock); + spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); + mutex_init(&dev_priv->sb_lock); + mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->wm.wm_mutex); + mutex_init(&dev_priv->pps_mutex); + + ret = i915_workqueues_init(dev_priv); + if (ret < 0) + return ret; + + ret = intel_gvt_init(dev_priv); + if (ret < 0) + goto err_workqueues; + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(&dev_priv->drm); + + intel_pm_setup(&dev_priv->drm); + intel_init_dpio(dev_priv); + intel_power_domains_init(dev_priv); + intel_irq_init(dev_priv); + intel_init_display_hooks(dev_priv); + intel_init_clock_gating_hooks(dev_priv); + intel_init_audio_hooks(dev_priv); + i915_gem_load_init(&dev_priv->drm); + + intel_display_crc_init(&dev_priv->drm); + + intel_device_info_dump(dev_priv); + + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev_priv)) + DRM_INFO("This is an early pre-production Haswell machine. 
" + "It may not be fully functional.\n"); + + return 0; + +err_workqueues: + i915_workqueues_cleanup(dev_priv); + return ret; +} + +/** + * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() + * @dev_priv: device private + */ +static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) +{ + i915_gem_load_cleanup(&dev_priv->drm); + i915_workqueues_cleanup(dev_priv); +} + +static int i915_mmio_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_INFO(dev)->gen < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + if (dev_priv->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + + return 0; +} + +static void i915_mmio_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + intel_teardown_mchbar(dev); + pci_iounmap(dev->pdev, dev_priv->regs); +} + +/** + * i915_driver_init_mmio - setup device MMIO + * @dev_priv: device private + * + * Setup minimal device state necessary for MMIO accesses later in the + * initialization sequence. The setup here should avoid any other device-wide + * side effects or exposing the driver via kernel internal or user space + * interfaces. + */ +static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + if (i915_get_bridge_dev(dev)) + return -EIO; + + ret = i915_mmio_setup(dev); + if (ret < 0) + goto put_bridge; + + intel_uncore_init(dev_priv); + + return 0; + +put_bridge: + pci_dev_put(dev_priv->bridge_dev); + + return ret; +} + +/** + * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() + * @dev_priv: device private + */ +static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + + intel_uncore_fini(dev_priv); + i915_mmio_cleanup(dev); + pci_dev_put(dev_priv->bridge_dev); +} + +static void intel_sanitize_options(struct drm_i915_private *dev_priv) +{ + i915.enable_execlists = + intel_sanitize_enable_execlists(dev_priv, + i915.enable_execlists); + + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). + */ + i915.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); +} + +/** + * i915_driver_init_hw - setup state requiring device access + * @dev_priv: device private + * + * Setup state that requires accessing the device, but doesn't require + * exposing the driver via kernel internal or userspace interfaces. 
+ */ +static int i915_driver_init_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + uint32_t aperture_size; + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + intel_device_info_runtime_init(dev_priv); + + intel_sanitize_options(dev_priv); + + ret = i915_ggtt_init_hw(dev); + if (ret) + return ret; + + ret = i915_ggtt_enable_hw(dev); + if (ret) { + DRM_ERROR("failed to enable GGTT\n"); + goto out_ggtt; + } + + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + goto out_ggtt; + } + + ret = i915_kick_out_vgacon(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting VGA console\n"); + goto out_ggtt; + } + + pci_set_master(dev->pdev); + + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + + + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + + aperture_size = ggtt->mappable_end; + + ggtt->mappable = + io_mapping_create_wc(ggtt->mappable_base, + aperture_size); + if (!ggtt->mappable) { + ret = -EIO; + goto out_ggtt; + } + + ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, + aperture_size); + + pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + intel_uncore_sanitize(dev_priv); + + intel_opregion_setup(dev_priv); + + i915_gem_load_init_fences(dev_priv); + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there + * according to the published specs. It doesn't appear to function + * correctly in testing on 945G. + * This may be a side effect of MSI having been made available for PEG + * and the registers being closely associated. + * + * According to chipset errata, on the 965GM, MSI interrupts may + * be lost or delayed, but we use them anyways to avoid + * stuck interrupts on some machines. 
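The errata note above boils down to a denylist: MSI is enabled everywhere except on the chipsets that advertise the capability without implementing it, and failure just means falling back to legacy line interrupts. A condensed sketch (the boolean is a stand-in for the IS_I945G/IS_I945GM checks that follow):

#include <linux/pci.h>
#include <linux/printk.h>

/* Sketch: opt into MSI unless the chipset is on the errata denylist. */
static void maybe_enable_msi(struct pci_dev *pdev, bool msi_broken)
{
	if (!msi_broken && pci_enable_msi(pdev) < 0)
		pr_debug("MSI unavailable, using legacy interrupts\n");
}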
+ */ + if (!IS_I945G(dev) && !IS_I945GM(dev)) { + if (pci_enable_msi(dev->pdev) < 0) + DRM_DEBUG_DRIVER("can't enable MSI"); + } + + return 0; + +out_ggtt: + i915_ggtt_cleanup_hw(dev); + + return ret; +} + +/** + * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() + * @dev_priv: device private + */ +static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + pm_qos_remove_request(&dev_priv->pm_qos); + arch_phys_wc_del(ggtt->mtrr); + io_mapping_free(ggtt->mappable); + i915_ggtt_cleanup_hw(dev); +} + +/** + * i915_driver_register - register the driver with the rest of the system + * @dev_priv: device private + * + * Perform any steps necessary to make the driver available via kernel + * internal or userspace interfaces. + */ +static void i915_driver_register(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + + i915_gem_shrinker_init(dev_priv); + + /* + * Notify a valid surface after modesetting, + * when running inside a VM. + */ + if (intel_vgpu_active(dev_priv)) + I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); + + /* Reveal our presence to userspace */ + if (drm_dev_register(dev, 0) == 0) { + i915_debugfs_register(dev_priv); + i915_setup_sysfs(dev); + } else + DRM_ERROR("Failed to register driver for userspace access!\n"); + + if (INTEL_INFO(dev_priv)->num_pipes) { + /* Must be done after probing outputs */ + intel_opregion_register(dev_priv); + acpi_video_register(); + } + + if (IS_GEN5(dev_priv)) + intel_gpu_ips_init(dev_priv); + + i915_audio_component_init(dev_priv); + + /* + * Some ports require correctly set-up hpd registers for detection to + * work properly (leading to ghost connected connector status), e.g. VGA + * on gm45. Hence we can only set up the initial fbdev config after hpd + * irqs are fully enabled. We do it last so that the async config + * cannot run before the connectors are registered. 
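i915_driver_register() here and i915_driver_unregister() below follow the usual mirror-image convention: interfaces come up from least to most user-visible and go down in roughly reverse order (fbdev is the one asymmetry, being brought up asynchronously here and torn down earlier in the unload path). The shape of that convention as a runnable toy, with abbreviated interface names:

#include <stdio.h>

static const char *const ifaces[] = {
	"gem shrinker", "drm device node", "debugfs/sysfs",
	"opregion + acpi_video", "audio component",
};

int main(void)
{
	int n = (int)(sizeof(ifaces) / sizeof(ifaces[0]));
	int i;

	for (i = 0; i < n; i++)		/* register: least to most visible */
		printf("register   %s\n", ifaces[i]);
	for (i = n - 1; i >= 0; i--)	/* unregister: strict reverse */
		printf("unregister %s\n", ifaces[i]);
	return 0;
}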
+ */
+	intel_fbdev_initial_config_async(dev);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+	i915_audio_component_cleanup(dev_priv);
+
+	intel_gpu_ips_teardown();
+	acpi_video_unregister();
+	intel_opregion_unregister(dev_priv);
+
+	i915_teardown_sysfs(&dev_priv->drm);
+	i915_debugfs_unregister(dev_priv);
+	drm_dev_unregister(&dev_priv->drm);
+
+	i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @pdev: PCI device
+ * @ent: matching pci_device_id entry
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (i915.nuclear_pageflip)
+		driver.driver_features |= DRIVER_ATOMIC;
+
+	ret = -ENOMEM;
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv)
+		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
+	if (ret) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
+		kfree(dev_priv);
+		return ret;
+	}
+
+	dev_priv->drm.pdev = pdev;
+	dev_priv->drm.dev_private = dev_priv;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto out_free_priv;
+
+	pci_set_drvdata(pdev, &dev_priv->drm);
+
+	ret = i915_driver_init_early(dev_priv, ent);
+	if (ret < 0)
+		goto out_pci_disable;
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_driver_init_mmio(dev_priv);
+	if (ret < 0)
+		goto out_runtime_pm_put;
+
+	ret = i915_driver_init_hw(dev_priv);
+	if (ret < 0)
+		goto out_cleanup_mmio;
+
+	/*
+	 * TODO: move the vblank init and parts of modeset init steps into one
+	 * of the i915_driver_init_/i915_driver_register functions according
+	 * to the role/effect of the given init step.
+	 */
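i915_driver_load() above is the clearest instance of this patch's layered-init scheme: every i915_driver_init_*() phase has a matching cleanup, and errors unwind through the out_* labels in strict reverse order. A self-contained miniature of the pattern; the phase names are invented and init_hw() fails on purpose to exercise the unwind:

#include <stdio.h>

static int init_early(void) { puts("init early"); return 0; }
static int init_mmio(void)  { puts("init mmio");  return 0; }
static int init_hw(void)    { puts("init hw");    return -1; /* forced failure */ }

static void cleanup_mmio(void)  { puts("cleanup mmio"); }
static void cleanup_early(void) { puts("cleanup early"); }

static int driver_load(void)
{
	int ret;

	ret = init_early();
	if (ret)
		return ret;

	ret = init_mmio();
	if (ret)
		goto out_early;

	ret = init_hw();
	if (ret)
		goto out_mmio;

	return 0;

out_mmio:			/* unwind mirrors init, newest first */
	cleanup_mmio();
out_early:
	cleanup_early();
	return ret;
}

int main(void)
{
	return driver_load() ? 1 : 0;
}

The same shape keeps the unload path readable: i915_driver_unload() below is essentially the error unwind run unconditionally, top to bottom.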
+	if (INTEL_INFO(dev_priv)->num_pipes) {
+		ret = drm_vblank_init(&dev_priv->drm,
+				      INTEL_INFO(dev_priv)->num_pipes);
+		if (ret)
+			goto out_cleanup_hw;
+	}
+
+	ret = i915_load_modeset_init(&dev_priv->drm);
+	if (ret < 0)
+		goto out_cleanup_vblank;
+
+	i915_driver_register(dev_priv);
+
+	intel_runtime_pm_enable(dev_priv);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+
+out_cleanup_vblank:
+	drm_vblank_cleanup(&dev_priv->drm);
+out_cleanup_hw:
+	i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+	i915_driver_cleanup_mmio(dev_priv);
+out_runtime_pm_put:
+	intel_runtime_pm_put(dev_priv);
+	i915_driver_cleanup_early(dev_priv);
+out_pci_disable:
+	pci_disable_device(pdev);
+out_free_priv:
+	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+	drm_dev_unref(&dev_priv->drm);
+	return ret;
+}
+
+void i915_driver_unload(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	intel_fbdev_fini(dev);
+
+	if (i915_gem_suspend(dev))
+		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+	i915_driver_unregister(dev_priv);
+
+	drm_vblank_cleanup(dev);
+
+	intel_modeset_cleanup(dev);
+
+	/*
+	 * free the memory space allocated for the child device
+	 * config parsed from VBT
+	 */
+	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+		kfree(dev_priv->vbt.child_dev);
+		dev_priv->vbt.child_dev = NULL;
+		dev_priv->vbt.child_dev_num = 0;
+	}
+	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+
+	vga_switcheroo_unregister_client(dev->pdev);
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+	intel_csr_ucode_fini(dev_priv);
+
+	/* Free error state after interrupts are fully disabled. */
+	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+	i915_destroy_error_state(dev);
+
+	/* Flush any outstanding unpin_work. */
+	flush_workqueue(dev_priv->wq);
+
+	intel_guc_fini(dev);
+	i915_gem_fini(dev);
+	intel_fbc_cleanup_cfb(dev_priv);
+
+	intel_power_domains_fini(dev_priv);
+
+	i915_driver_cleanup_hw(dev_priv);
+	i915_driver_cleanup_mmio(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+	i915_driver_cleanup_early(dev_priv);
+}
+
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	int ret;
+
+	ret = i915_gem_open(dev, file);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ */ +static void i915_driver_lastclose(struct drm_device *dev) +{ + intel_fbdev_restore_mode(dev); + vga_switcheroo_process_delayed_switch(); +} + +static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) +{ + mutex_lock(&dev->struct_mutex); + i915_gem_context_close(dev, file); + i915_gem_release(dev, file); + mutex_unlock(&dev->struct_mutex); +} + +static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + kfree(file_priv); +} + static void intel_suspend_encoders(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_encoder *encoder; drm_modeset_lock_all(dev); @@ -586,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) static int i915_drm_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); pci_power_t opregion_target_state; int error; @@ -614,7 +1458,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_display_suspend(dev); @@ -632,10 +1476,10 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_state(dev); opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; - intel_opregion_notify_adapter(dev, opregion_target_state); + intel_opregion_notify_adapter(dev_priv, opregion_target_state); - intel_uncore_forcewake_reset(dev, false); - intel_opregion_fini(dev); + intel_uncore_forcewake_reset(dev_priv, false); + intel_opregion_unregister(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -653,7 +1497,7 @@ out: static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) { - struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(drm_dev); bool fw_csr; int ret; @@ -715,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) { int error; - if (!dev || !dev->dev_private) { + if (!dev) { DRM_ERROR("dev: %p\n", dev); DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; @@ -737,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) static int i915_drm_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; disable_rpm_wakeref_asserts(dev_priv); @@ -753,7 +1597,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); i915_restore_state(dev); - intel_opregion_setup(dev); + intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev); drm_mode_config_reset(dev); @@ -771,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_lock(&dev->struct_mutex); if (i915_gem_init_hw(dev)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); - atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); } mutex_unlock(&dev->struct_mutex); @@ -781,7 +1625,7 @@ static int i915_drm_resume(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_dp_mst_resume(dev); @@ -798,7 +1642,7 @@ static int i915_drm_resume(struct drm_device *dev) /* 
Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); - intel_opregion_init(dev); + intel_opregion_register(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); @@ -806,7 +1650,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); drm_kms_helper_poll_enable(dev); @@ -817,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev) static int i915_drm_resume_early(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; /* @@ -874,9 +1718,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_early_sanitize(dev, true); + intel_uncore_early_sanitize(dev_priv, true); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { if (!dev_priv->suspended_to_idle) gen9_sanitize_dc_state(dev_priv); bxt_disable_dc9(dev_priv); @@ -884,7 +1728,7 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); if (IS_BROXTON(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) @@ -927,14 +1771,14 @@ int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -int i915_reset(struct drm_device *dev) +int i915_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = &dev_priv->drm; struct i915_gpu_error *error = &dev_priv->gpu_error; unsigned reset_counter; int ret; - intel_reset_gt_powersave(dev); + intel_reset_gt_powersave(dev_priv); mutex_lock(&dev->struct_mutex); @@ -948,24 +1792,11 @@ int i915_reset(struct drm_device *dev) goto error; } - i915_gem_reset(dev); - - ret = intel_gpu_reset(dev, ALL_ENGINES); - - /* Also reset the gpu hangman. */ - if (error->stop_rings != 0) { - DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); - error->stop_rings = 0; - if (ret == -ENODEV) { - DRM_INFO("Reset not implemented, but ignoring " - "error for simulated gpu hangs\n"); - ret = 0; - } - } + pr_notice("drm/i915: Resetting chip after gpu hang\n"); - if (i915_stop_ring_allow_warn(dev_priv)) - pr_notice("drm/i915: Resetting chip after gpu hang\n"); + i915_gem_reset(dev); + ret = intel_gpu_reset(dev_priv, ALL_ENGINES); if (ret) { if (ret != -ENODEV) DRM_ERROR("Failed to reset chip: %i\n", ret); @@ -1005,7 +1836,7 @@ int i915_reset(struct drm_device *dev) * of re-init after reset. */ if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); return 0; @@ -1015,51 +1846,12 @@ error: return ret; } -static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct intel_device_info *intel_info = - (struct intel_device_info *) ent->driver_data; - - if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { - DRM_INFO("This hardware requires preliminary hardware support.\n" - "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); - return -ENODEV; - } - - /* Only bind to function 0 of the device. Early generations - * used function 1 as a placeholder for multi-head. This causes - * us confusion instead, especially on the systems where both - * functions have the same PCI-ID! 
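The i915_pci_probe() being deleted in this hunk (its body continues just below) binds only to PCI function 0, because early multi-head parts exposed a function 1 with the same PCI ID. The function number is plain bit-slicing of devfn; these are the stock kernel macro definitions, wrapped in a runnable demo:

#include <stdio.h>

/* Stock kernel definitions: devfn = slot (5 bits) << 3 | function (3 bits) */
#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int devfn;

	for (devfn = (2 << 3) | 0; devfn <= ((2 << 3) | 1); devfn++)
		printf("slot %u func %u -> %s\n",
		       PCI_SLOT(devfn), PCI_FUNC(devfn),
		       PCI_FUNC(devfn) ? "skip (placeholder function)" : "bind");
	return 0;
}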
- */ - if (PCI_FUNC(pdev->devfn)) - return -ENODEV; - - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) - return -EPROBE_DEFER; - - return drm_get_pci_dev(pdev, ent, &driver); -} - -static void -i915_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_put_dev(dev); -} - static int i915_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - if (!drm_dev || !drm_dev->dev_private) { + if (!drm_dev) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } @@ -1072,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev) static int i915_pm_suspend_late(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; /* * We have a suspend ordering issue with the snd-hda driver also @@ -1091,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev) static int i915_pm_poweroff_late(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1101,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev) static int i915_pm_resume_early(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1111,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev) static int i915_pm_resume(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1119,6 +1911,49 @@ static int i915_pm_resume(struct device *dev) return i915_drm_resume(drm_dev); } +/* freeze: before creating the hibernation_image */ +static int i915_pm_freeze(struct device *dev) +{ + return i915_pm_suspend(dev); +} + +static int i915_pm_freeze_late(struct device *dev) +{ + int ret; + + ret = i915_pm_suspend_late(dev); + if (ret) + return ret; + + ret = i915_gem_freeze_late(dev_to_i915(dev)); + if (ret) + return ret; + + return 0; +} + +/* thaw: called after creating the hibernation image, but before turning off. */ +static int i915_pm_thaw_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_thaw(struct device *dev) +{ + return i915_pm_resume(dev); +} + +/* restore: called after loading the hibernation image. */ +static int i915_pm_restore_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_restore(struct device *dev) +{ + return i915_pm_resume(dev); +} + /* * Save all Gunit registers that may be lost after a D3 and a subsequent * S0i[R123] transition. 
The list of registers needing a save/restore is @@ -1318,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) u32 val; int err; -#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); val &= ~VLV_GFX_CLK_FORCE_ON_BIT; if (force_on) @@ -1329,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) if (!force_on) return 0; - err = wait_for(COND, 20); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_SURVIVABILITY_REG, + VLV_GFX_CLK_STATUS_BIT, + VLV_GFX_CLK_STATUS_BIT, + 20); if (err) DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", I915_READ(VLV_GTLC_SURVIVABILITY_REG)); return err; -#undef COND } static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) @@ -1350,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) I915_WRITE(VLV_GTLC_WAKE_CTRL, val); POSTING_READ(VLV_GTLC_WAKE_CTRL); -#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ - allow) - err = wait_for(COND, 1); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_PW_STATUS, + VLV_GTLC_ALLOWWAKEACK, + allow, + 1); if (err) DRM_ERROR("timeout disabling GT waking\n"); + return err; -#undef COND } static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, @@ -1368,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; val = wait_for_on ? mask : 0; -#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) - if (COND) + if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) return 0; DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", @@ -1380,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, * RC6 transitioning can be delayed up to 2 msec (see * valleyview_enable_rps), use 3 msec for safety. 
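This hunk and the two that follow replace open-coded COND macros with intel_wait_for_register(), whose contract is: poll until (read(reg) & mask) == value, giving up after a timeout. A userspace approximation of that contract; read_reg() is a fake register that counts upward, whereas the real helper reads MMIO and returns -ETIMEDOUT on expiry:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Fake register: counts up, so the awaited bits eventually latch. */
static uint32_t read_reg(void)
{
	static uint32_t v;
	return v += 0x10;
}

static int wait_for_register(uint32_t mask, uint32_t value, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_reg() & mask) == value)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* the kernel returns -ETIMEDOUT */
	}
}

int main(void)
{
	/* wait for the masked bits to assert, with a 20 ms budget */
	printf("wait returned %d\n", wait_for_register(0xf0, 0xf0, 20));
	return 0;
}

Passing mask == value waits for bits to assert; passing value == 0, as vlv_allow_gt_wake() does when disabling waking, waits for them to clear.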
*/ - err = wait_for(COND, 3); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_PW_STATUS, mask, val, + 3); if (err) DRM_ERROR("timeout waiting for GT wells to go %s\n", onoff(wait_for_on)); return err; -#undef COND } static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) @@ -1443,7 +2281,7 @@ err1: static int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int err; int ret; @@ -1479,10 +2317,10 @@ static int intel_runtime_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct drm_device *dev = pci_get_drvdata(pdev); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; - if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) + if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) return -ENODEV; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1517,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device) i915_gem_release_all_mmaps(dev_priv); mutex_unlock(&dev->struct_mutex); - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); intel_runtime_pm_disable_interrupts(dev_priv); ret = 0; @@ -1543,7 +2378,7 @@ static int intel_runtime_suspend(struct device *device) return ret; } - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev_priv, false); enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); @@ -1557,14 +2392,14 @@ static int intel_runtime_suspend(struct device *device) * FIXME: We really should find a document that references the arguments * used below! */ - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { /* * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop * being detected, and the call we do at intel_runtime_resume() * won't be able to restore them. Since PCI_D3hot matches the * actual specification and appears to be working, use it. */ - intel_opregion_notify_adapter(dev, PCI_D3hot); + intel_opregion_notify_adapter(dev_priv, PCI_D3hot); } else { /* * current versions of firmware which depend on this opregion @@ -1573,11 +2408,14 @@ static int intel_runtime_suspend(struct device *device) * to distinguish it from notifications that might be sent via * the suspend path. 
*/ - intel_opregion_notify_adapter(dev, PCI_D1); + intel_opregion_notify_adapter(dev_priv, PCI_D1); } assert_forcewakes_inactive(dev_priv); + if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) + intel_hpd_poll_init(dev_priv); + DRM_DEBUG_KMS("Device suspended\n"); return 0; } @@ -1586,7 +2424,7 @@ static int intel_runtime_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct drm_device *dev = pci_get_drvdata(pdev); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1597,7 +2435,7 @@ static int intel_runtime_resume(struct device *device) WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); disable_rpm_wakeref_asserts(dev_priv); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); dev_priv->pm.suspended = false; if (intel_uncore_unclaimed_mmio(dev_priv)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); @@ -1624,7 +2462,7 @@ static int intel_runtime_resume(struct device *device) * we can do is to hope that things will still work (and disable RPM). */ i915_gem_init_swizzling(dev); - gen6_update_ring_freq(dev); + gen6_update_ring_freq(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1636,8 +2474,6 @@ static int intel_runtime_resume(struct device *device) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); - intel_enable_gt_powersave(dev); - enable_rpm_wakeref_asserts(dev_priv); if (ret) @@ -1648,7 +2484,7 @@ static int intel_runtime_resume(struct device *device) return ret; } -static const struct dev_pm_ops i915_pm_ops = { +const struct dev_pm_ops i915_pm_ops = { /* * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, * PMSG_RESUME] @@ -1673,14 +2509,14 @@ static const struct dev_pm_ops i915_pm_ops = { * @restore, @restore_early : called after rebooting and restoring the * hibernation image [PMSG_RESTORE] */ - .freeze = i915_pm_suspend, - .freeze_late = i915_pm_suspend_late, - .thaw_early = i915_pm_resume_early, - .thaw = i915_pm_resume, + .freeze = i915_pm_freeze, + .freeze_late = i915_pm_freeze_late, + .thaw_early = i915_pm_thaw_early, + .thaw = i915_pm_thaw, .poweroff = i915_pm_suspend, .poweroff_late = i915_pm_poweroff_late, - .restore_early = i915_pm_resume_early, - .restore = i915_pm_resume, + .restore_early = i915_pm_restore_early, + .restore = i915_pm_restore, /* S0ix (via runtime suspend) event handlers */ .runtime_suspend = intel_runtime_suspend, @@ -1707,6 +2543,68 @@ static const struct file_operations i915_driver_fops = { .llseek = noop_llseek, }; +static int +i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return -ENODEV; +} + +static const struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + 
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), +}; + static struct drm_driver driver = { /* Don't use MTRRs here; the Xserver 
or userspace app should * deal with them for Intel hardware. @@ -1714,18 +2612,12 @@ static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, - .load = i915_driver_load, - .unload = i915_driver_unload, .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, .set_busid = drm_pci_set_busid, -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = i915_debugfs_init, - .debugfs_cleanup = i915_debugfs_cleanup, -#endif .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, @@ -1738,6 +2630,7 @@ static struct drm_driver driver = { .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = drm_gem_dumb_destroy, .ioctls = i915_ioctls, + .num_ioctls = ARRAY_SIZE(i915_ioctls), .fops = &i915_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -1746,56 +2639,3 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; - -static struct pci_driver i915_pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, - .probe = i915_pci_probe, - .remove = i915_pci_remove, - .driver.pm = &i915_pm_ops, -}; - -static int __init i915_init(void) -{ - driver.num_ioctls = i915_max_ioctl; - - /* - * Enable KMS by default, unless explicitly overriden by - * either the i915.modeset prarameter or by the - * vga_text_mode_force boot option. - */ - - if (i915.modeset == 0) - driver.driver_features &= ~DRIVER_MODESET; - - if (vgacon_text_force() && i915.modeset == -1) - driver.driver_features &= ~DRIVER_MODESET; - - if (!(driver.driver_features & DRIVER_MODESET)) { - /* Silently fail loading to not upset userspace. */ - DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); - return 0; - } - - if (i915.nuclear_pageflip) - driver.driver_features |= DRIVER_ATOMIC; - - return drm_pci_init(&driver, &i915_pci_driver); -} - -static void __exit i915_exit(void) -{ - if (!(driver.driver_features & DRIVER_MODESET)) - return; /* Never loaded a driver. 
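The deleted i915_init() above carried the KMS enablement policy: i915.modeset is a tristate (-1 auto, 0 off, 1 on), and vga_text_mode_force only wins in the auto case. The same decision table as a runnable sketch; use_kms() is an invented name:

#include <stdio.h>
#include <stdbool.h>

/* -1 = auto, 0 = off, 1 = on, mirroring the i915.modeset parameter */
static bool use_kms(int modeset, bool vgacon_text_force)
{
	if (modeset == 0)
		return false;		/* explicitly disabled */
	if (vgacon_text_force && modeset == -1)
		return false;		/* auto defers to forced text mode */
	return true;
}

int main(void)
{
	printf("auto + forced text: %d\n", use_kms(-1, true));	/* 0 */
	printf("on   + forced text: %d\n", use_kms(1, true));	/* 1 */
	printf("off               : %d\n", use_kms(0, false));	/* 0 */
	return 0;
}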
*/ - - drm_pci_exit(&driver, &i915_pci_driver); -} - -module_init(i915_init); -module_exit(i915_exit); - -MODULE_AUTHOR("Tungsten Graphics, Inc."); -MODULE_AUTHOR("Intel Corporation"); - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc3f2e6842e7..915a3d0acff3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -47,6 +47,7 @@ #include <drm/intel-gtt.h> #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ #include <drm/drm_gem.h> +#include <drm/drm_auth.h> #include "i915_params.h" #include "i915_reg.h" @@ -61,12 +62,14 @@ #include "i915_gem_gtt.h" #include "i915_gem_render_state.h" +#include "intel_gvt.h" + /* General customization: */ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160425" +#define DRIVER_DATE "20160711" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -281,6 +284,9 @@ struct i915_hotplug { u32 short_port_mask; struct work_struct dig_port_work; + struct work_struct poll_init_work; + bool poll_enabled; + /* * if we get a HPD irq from DP and a HPD irq from non-DP * the non-DP HPD could block the workqueue on a mode config @@ -317,21 +323,36 @@ struct i915_hotplug { for_each_if ((__ports_mask) & (1 << (__port))) #define for_each_crtc(dev, crtc) \ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) #define for_each_intel_plane(dev, intel_plane) \ list_for_each_entry(intel_plane, \ - &dev->mode_config.plane_list, \ + &(dev)->mode_config.plane_list, \ base.head) +#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ + list_for_each_entry(intel_plane, \ + &(dev)->mode_config.plane_list, \ + base.head) \ + for_each_if ((plane_mask) & \ + (1 << drm_plane_index(&intel_plane->base))) + #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ &(dev)->mode_config.plane_list, \ base.head) \ for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) -#define for_each_intel_crtc(dev, intel_crtc) \ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) +#define for_each_intel_crtc(dev, intel_crtc) \ + list_for_each_entry(intel_crtc, \ + &(dev)->mode_config.crtc_list, \ + base.head) + +#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ + list_for_each_entry(intel_crtc, \ + &(dev)->mode_config.crtc_list, \ + base.head) \ + for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) #define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ @@ -340,7 +361,7 @@ struct i915_hotplug { #define for_each_intel_connector(dev, intel_connector) \ list_for_each_entry(intel_connector, \ - &dev->mode_config.connector_list, \ + &(dev)->mode_config.connector_list, \ base.head) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ @@ -462,6 +483,7 @@ struct drm_i915_error_state { struct timeval time; char error_msg[128]; + bool simulated; int iommu; u32 reset_count; u32 suspend_count; @@ -493,6 +515,7 @@ struct drm_i915_error_state { bool valid; /* Software tracked state */ bool waiting; + int num_waiters; int hangcheck_score; enum intel_ring_hangcheck_action hangcheck_action; int num_requests; @@ -538,6 +561,12 @@ struct drm_i915_error_state { u32 tail; } *requests; + struct drm_i915_error_waiter { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 seqno; + } *waiters; + struct 
{ u32 gfx_mode; union { @@ -588,6 +617,7 @@ struct drm_i915_display_funcs { struct intel_crtc_state *newstate); void (*initial_watermarks)(struct intel_crtc_state *cstate); void (*optimize_watermarks)(struct intel_crtc_state *cstate); + int (*compute_global_watermarks)(struct drm_atomic_state *state); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -612,7 +642,7 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req, uint32_t flags); - void (*hpd_irq_setup)(struct drm_device *dev); + void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -735,6 +765,7 @@ struct intel_csr { func(is_valleyview) sep \ func(is_cherryview) sep \ func(is_haswell) sep \ + func(is_broadwell) sep \ func(is_skylake) sep \ func(is_broxton) sep \ func(is_kabylake) sep \ @@ -749,7 +780,8 @@ struct intel_csr { func(has_llc) sep \ func(has_snoop) sep \ func(has_ddi) sep \ - func(has_fpga_dbg) + func(has_fpga_dbg) sep \ + func(has_pooled_eu) #define DEFINE_FLAG(name) u8 name:1 #define SEP_SEMICOLON ; @@ -757,9 +789,10 @@ struct intel_csr { struct intel_device_info { u32 display_mmio_offset; u16 device_id; - u8 num_pipes:3; + u8 num_pipes; u8 num_sprites[I915_MAX_PIPES]; u8 gen; + u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ @@ -774,6 +807,7 @@ struct intel_device_info { u8 subslice_per_slice; u8 eu_total; u8 eu_per_subslice; + u8 min_eu_in_pool; /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ u8 subslice_7eu[3]; u8 has_slice_pg:1; @@ -821,9 +855,8 @@ struct i915_ctx_hang_stats { /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_HANDLE 0 -#define CONTEXT_NO_ZEROMAP (1<<0) /** - * struct intel_context - as the name implies, represents a context. + * struct i915_gem_context - as the name implies, represents a context. * @ref: reference count. * @user_handle: userspace tracking identity for this context. * @remap_slice: l3 row remapping information. @@ -841,33 +874,40 @@ struct i915_ctx_hang_stats { * Contexts are memory images used by the hardware to store copies of their * internal state. 
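DEV_INFO_FOR_EACH_FLAG above, which this patch extends with is_broadwell and has_pooled_eu, is a classic X-macro: one flag list expanded several times with different per-item and separator macros. A minimal self-contained version of the same trick, reduced to three sample flags:

#include <stdio.h>

#define FOR_EACH_FLAG(func, sep) \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_pooled_eu)

/* expansion 1: one-bit struct members */
#define DEFINE_FLAG(name) unsigned name:1
#define SEP_SEMICOLON ;
struct device_info {
	FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
#undef DEFINE_FLAG
#undef SEP_SEMICOLON

/* expansion 2: printable names from the same list */
#define FLAG_NAME(name) #name
#define SEP_COMMA ,
static const char *const flag_names[] = {
	FOR_EACH_FLAG(FLAG_NAME, SEP_COMMA)
};

int main(void)
{
	unsigned long i;

	for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
		printf("%s\n", flag_names[i]);
	return 0;
}

Keeping the list in one place is the point: adding a flag cannot leave the struct and any debug printout out of sync.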
*/ -struct intel_context { +struct i915_gem_context { struct kref ref; - int user_handle; - uint8_t remap_slice; struct drm_i915_private *i915; - int flags; struct drm_i915_file_private *file_priv; - struct i915_ctx_hang_stats hang_stats; struct i915_hw_ppgtt *ppgtt; - /* Legacy ring buffer submission */ - struct { - struct drm_i915_gem_object *rcs_state; - bool initialized; - } legacy_hw_ctx; + struct i915_ctx_hang_stats hang_stats; - /* Execlists */ - struct { + /* Unique identifier for this context, used by the hw for tracking */ + unsigned long flags; +#define CONTEXT_NO_ZEROMAP BIT(0) +#define CONTEXT_NO_ERROR_CAPTURE BIT(1) + unsigned hw_id; + u32 user_handle; + + u32 ggtt_alignment; + + struct intel_context { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; - int pin_count; struct i915_vma *lrc_vma; - u64 lrc_desc; uint32_t *lrc_reg_state; + u64 lrc_desc; + int pin_count; + bool initialised; } engine[I915_NUM_ENGINES]; + u32 ring_size; + u32 desc_template; + struct atomic_notifier_head status_notifier; + bool execlists_force_single_submission; struct list_head link; + + u8 remap_slice; }; enum fb_op_origin { @@ -1116,6 +1156,8 @@ struct intel_gen6_power_mgmt { bool interrupts_enabled; u32 pm_iir; + u32 pm_intr_keep; + /* Frequencies are stored in potentially platform dependent multiples. * In other words, *_freq needs to be multiplied by X to be interesting. * Soft limits are those which are used for the dynamic reclocking done @@ -1283,37 +1325,11 @@ struct i915_gem_mm { struct list_head fence_list; /** - * We leave the user IRQ off as much as possible, - * but this means that requests will finish and never - * be retired once the system goes idle. Set a timer to - * fire periodically while the ring is running. When it - * fires, go retire requests. - */ - struct delayed_work retire_work; - - /** - * When we detect an idle GPU, we want to turn on - * powersaving features. So once we see that there - * are no more requests outstanding and no more - * arrive within a small period of time, we fire - * off the idle_work. - */ - struct delayed_work idle_work; - - /** * Are we in a non-interruptible section of code like * modesetting? */ bool interruptible; - /** - * Is the GPU currently considered idle, or busy executing userspace - * requests? Whilst idle, we attempt to power down the hardware and - * display clocks. In order to reduce the effect on performance, there - * is a slight delay before we do so. - */ - bool busy; - /* the indicator for dispatch video commands on two BSD rings */ unsigned int bsd_ring_dispatch_index; @@ -1350,7 +1366,6 @@ struct i915_gpu_error { /* Hang gpu twice in this window and your context gets banned */ #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) - struct workqueue_struct *hangcheck_wq; struct delayed_work hangcheck_work; /* For reset and error_state handling. */ @@ -1387,20 +1402,19 @@ struct i915_gpu_error { #define I915_WEDGED (1 << 31) /** + * Waitqueue to signal when a hang is detected. Used to for waiters + * to release the struct_mutex for the reset to procede. + */ + wait_queue_head_t wait_queue; + + /** * Waitqueue to signal when the reset has completed. Used by clients * that wait for dev_priv->mm.wedged to settle. */ wait_queue_head_t reset_queue; - /* Userspace knobs for gpu hang simulation; - * combines both a ring mask, and extra flags - */ - u32 stop_rings; -#define I915_STOP_RING_ALLOW_BAN (1 << 31) -#define I915_STOP_RING_ALLOW_WARN (1 << 30) - /* For missed irq/seqno simulation. 
*/ - unsigned int test_irq_rings; + unsigned long test_irq_rings; }; enum modeset_restore { @@ -1489,6 +1503,7 @@ struct intel_vbt_data { bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ + enum intel_backlight_type type; } backlight; /* MIPI DSI */ @@ -1581,7 +1596,7 @@ struct skl_ddb_allocation { }; struct skl_wm_values { - bool dirty[I915_MAX_PIPES]; + unsigned dirty_pipes; struct skl_ddb_allocation ddb; uint32_t wm_linetime[I915_MAX_PIPES]; uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; @@ -1698,7 +1713,7 @@ struct i915_execbuffer_params { uint64_t batch_obj_vm_offset; struct intel_engine_cs *engine; struct drm_i915_gem_object *batch_obj; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct drm_i915_gem_request *request; }; @@ -1710,7 +1725,8 @@ struct intel_wm_config { }; struct drm_i915_private { - struct drm_device *dev; + struct drm_device drm; + struct kmem_cache *objects; struct kmem_cache *vmas; struct kmem_cache *requests; @@ -1725,6 +1741,8 @@ struct drm_i915_private { struct i915_virtual_gpu vgpu; + struct intel_gvt gvt; + struct intel_guc guc; struct intel_csr csr; @@ -1748,6 +1766,7 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; + struct i915_gem_context *kernel_context; struct intel_engine_cs engine[I915_NUM_ENGINES]; struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; @@ -1803,13 +1822,17 @@ struct drm_i915_private { int num_fence_regs; /* 8 on pre-965, 16 otherwise */ unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int skl_boot_cdclk; + unsigned int skl_preferred_vco_freq; unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; unsigned int rawclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; + struct { + unsigned int vco, ref; + } cdclk_pll; + /** * wq - Driver workqueue for GEM. * @@ -1839,6 +1862,13 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; + /* The hw wants to have a stable context identifier for the lifetime + * of the context (for OA, PASID, faults, etc). This is limited + * in execlists to 21 bits. + */ + struct ida context_hw_ida; +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ + /* Kernel Modesetting */ struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; @@ -1951,9 +1981,6 @@ struct drm_i915_private { */ uint16_t skl_latency[8]; - /* Committed wm config */ - struct intel_wm_config config; - /* * The skl_wm_values structure is a bit too big for stack * allocation, so we keep the staging struct where we store @@ -1976,6 +2003,13 @@ struct drm_i915_private { * cstate->wm.need_postvbl_update. */ struct mutex wm_mutex; + + /* + * Set during HW readout of watermarks/DDB. Some platforms + * need to know when we're still using BIOS-provided values + * (which we don't fully trust). + */ + bool distrust_bios_wm; } wm; struct i915_runtime_pm pm; @@ -1988,9 +2022,35 @@ struct drm_i915_private { int (*init_engines)(struct drm_device *dev); void (*cleanup_engine)(struct intel_engine_cs *engine); void (*stop_engine)(struct intel_engine_cs *engine); - } gt; - struct intel_context *kernel_context; + /** + * Is the GPU currently considered idle, or busy executing + * userspace requests? Whilst idle, we allow runtime power + * management to power down the hardware and display clocks. + * In order to reduce the effect on performance, there + * is a slight delay before we do so. 
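The change underpinning most of this diff is that drm_i915_private now embeds its drm_device rather than pointing at it, so to_i915() just below turns into container_of() pointer arithmetic instead of a dev_private load. The mechanics in isolation, with toy struct names and the const qualifier dropped for brevity:

#include <stdio.h>
#include <stddef.h>

/* Userspace restatement of the kernel's container_of(): step back from
 * a member pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device { int registered; };

struct i915_private {			/* toy stand-in for drm_i915_private */
	int hw_state;
	struct drm_device drm;		/* embedded, not pointed-to */
};

static struct i915_private *to_i915(struct drm_device *dev)
{
	return container_of(dev, struct i915_private, drm);
}

int main(void)
{
	struct i915_private priv = { .hw_state = 42 };

	/* given only the embedded drm_device, recover the private struct */
	printf("%d\n", to_i915(&priv.drm)->hw_state);	/* prints 42 */
	return 0;
}

The conversion is zero-cost: the subtraction folds into a constant offset, and the dev_private indirection disappears.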
+ */ + unsigned int active_engines; + bool awake; + + /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to + * fire periodically while the ring is running. When it + * fires, go retire requests. + */ + struct delayed_work retire_work; + + /** + * When we detect an idle GPU, we want to turn on + * powersaving features. So once we see that there + * are no more requests outstanding and no more + * arrive within a small period of time, we fire + * off the idle_work. + */ + struct delayed_work idle_work; + } gt; /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; @@ -2005,7 +2065,7 @@ struct drm_i915_private { static inline struct drm_i915_private *to_i915(const struct drm_device *dev) { - return dev->dev_private; + return container_of(dev, struct drm_i915_private, drm); } static inline struct drm_i915_private *dev_to_i915(struct device *dev) @@ -2176,6 +2236,7 @@ struct drm_i915_gem_object { unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + unsigned int has_wc_mmap; unsigned int pin_display; struct sg_table *pages; @@ -2228,9 +2289,81 @@ struct drm_i915_gem_object { }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -void i915_gem_track_fb(struct drm_i915_gem_object *old, - struct drm_i915_gem_object *new, - unsigned frontbuffer_bits); +static inline bool +i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; +} + +/* + * Optimised SGL iterator for GEM objects + */ +static __always_inline struct sgt_iter { + struct scatterlist *sgp; + union { + unsigned long pfn; + dma_addr_t dma; + }; + unsigned int curr; + unsigned int max; +} __sgt_iter(struct scatterlist *sgl, bool dma) { + struct sgt_iter s = { .sgp = sgl }; + + if (s.sgp) { + s.max = s.curr = s.sgp->offset; + s.max += s.sgp->length; + if (dma) + s.dma = sg_dma_address(s.sgp); + else + s.pfn = page_to_pfn(sg_page(s.sgp)); + } + + return s; +} + +/** + * __sg_next - return the next scatterlist entry in a list + * @sg: The current sg entry + * + * Description: + * If the entry is the last, return NULL; otherwise, step to the next + * element in the array (@sg@+1). If that's a chain pointer, follow it; + * otherwise just return the pointer to the current element. + **/ +static inline struct scatterlist *__sg_next(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + return sg_is_last(sg) ? NULL : + likely(!sg_is_chain(++sg)) ? sg : + sg_chain_ptr(sg); +} + +/** + * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table + * @__dmap: DMA address (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_dma(__dmap, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ + ((__dmap) = (__iter).dma + (__iter).curr); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) + +/** + * for_each_sgt_page - iterate over the pages of the given sg_table + * @__pp: page pointer (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_page(__pp, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ + ((__pp) = (__iter).pfn == 0 ? 
NULL : \ + pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) /** * Request queue structure. @@ -2252,7 +2385,7 @@ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct drm_i915_private *i915; struct intel_engine_cs *engine; - unsigned reset_counter; + struct intel_signal_node signaling; /** GEM sequence number associated with the previous request, * when the HWS breadcrumb is equal to this the GPU is processing @@ -2279,6 +2412,9 @@ struct drm_i915_gem_request { /** Position in the ringbuffer of the end of the whole request */ u32 tail; + /** Preallocate space in the ringbuffer for the emitting the request */ + u32 reserved_space; + /** * Context and ring buffer related to this request * Contexts are refcounted, so when this request is associated with a @@ -2289,9 +2425,20 @@ struct drm_i915_gem_request { * i915_gem_request_free() will then decrement the refcount on the * context. */ - struct intel_context *ctx; + struct i915_gem_context *ctx; struct intel_ringbuffer *ringbuf; + /** + * Context related to the previous request. + * As the contexts are accessed by the hardware until the switch is + * completed to a new context, the hardware may still be writing + * to the context object after the breadcrumb is visible. We must + * not unpin/unbind/prune that object whilst still active and so + * we keep the previous context pinned until the following (this) + * request is retired. + */ + struct i915_gem_context *previous_context; + /** Batch buffer related to this request if any (used for error state dump only) */ struct drm_i915_gem_object *batch_obj; @@ -2328,11 +2475,13 @@ struct drm_i915_gem_request { /** Execlists no. of times this request has been sent to the ELSP */ int elsp_submitted; + /** Execlists context hardware id. */ + unsigned ctx_hw_id; }; struct drm_i915_gem_request * __must_check i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx); + struct i915_gem_context *ctx); void i915_gem_request_free(struct kref *req_ref); int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, struct drm_file *file); @@ -2360,23 +2509,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req) static inline void i915_gem_request_unreference(struct drm_i915_gem_request *req) { - WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); kref_put(&req->ref, i915_gem_request_free); } -static inline void -i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) -{ - struct drm_device *dev; - - if (!req) - return; - - dev = req->engine->dev; - if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) - mutex_unlock(&dev->struct_mutex); -} - static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, struct drm_i915_gem_request *src) { @@ -2504,9 +2639,29 @@ struct drm_i915_cmd_table { #define INTEL_INFO(p) (&__I915__(p)->info) #define INTEL_GEN(p) (INTEL_INFO(p)->gen) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) -#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define REVID_FOREVER 0xff +#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) + +#define GEN_FOREVER (0) +/* + * Returns true if Gen is in inclusive range [Start, End]. + * + * Use GEN_FOREVER for unbound start and or end. 
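The IS_GEN() machinery introduced just below works on a gen_mask in which generation N occupies bit N-1, so single-generation checks become bit tests and ranged checks collapse to one AND against a GENMASK(). The arithmetic, checked standalone with a 32-bit GENMASK variant for the sketch:

#include <stdio.h>

#define BIT(n)		(1u << (n))
/* contiguous bits [l..h] set, equivalent to the kernel's GENMASK() */
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
	unsigned int gen_mask = BIT(8);	/* a gen9 device: bit gen-1 */

	printf("IS_GEN9-style test:    %d\n", !!(gen_mask & BIT(8)));
	/* IS_GEN(p, 6, 9) lowers both bounds by one: bits 5..8 */
	printf("gen6..gen9 range test: %d\n", !!(gen_mask & GENMASK(8, 5)));
	return 0;
}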
+ */ +#define IS_GEN(p, s, e) ({ \ + unsigned int __s = (s), __e = (e); \ + BUILD_BUG_ON(!__builtin_constant_p(s)); \ + BUILD_BUG_ON(!__builtin_constant_p(e)); \ + if ((__s) != GEN_FOREVER) \ + __s = (s) - 1; \ + if ((__e) == GEN_FOREVER) \ + __e = BITS_PER_LONG - 1; \ + else \ + __e = (e) - 1; \ + !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \ +}) + /* * Return true if revision is in range [since,until] inclusive. * @@ -2539,7 +2694,7 @@ struct drm_i915_cmd_table { #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) -#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) +#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) @@ -2591,6 +2746,8 @@ struct drm_i915_cmd_table { #define SKL_REVID_D0 0x3 #define SKL_REVID_E0 0x4 #define SKL_REVID_F0 0x5 +#define SKL_REVID_G0 0x6 +#define SKL_REVID_H0 0x7 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) @@ -2616,29 +2773,34 @@ struct drm_i915_cmd_table { * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.). */ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) -#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) -#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) - -#define RENDER_RING (1<<RCS) -#define BSD_RING (1<<VCS) -#define BLT_RING (1<<BCS) -#define VEBOX_RING (1<<VECS) -#define BSD2_RING (1<<VCS2) -#define ALL_ENGINES (~0) - -#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) -#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) -#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) -#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) +#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1))) +#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2))) +#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3))) +#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4))) +#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5))) +#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6))) +#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7))) +#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8))) + +#define ENGINE_MASK(id) BIT(id) +#define RENDER_RING ENGINE_MASK(RCS) +#define BSD_RING ENGINE_MASK(VCS) +#define BLT_RING ENGINE_MASK(BCS) +#define VEBOX_RING ENGINE_MASK(VECS) +#define BSD2_RING ENGINE_MASK(VCS2) +#define ALL_ENGINES (~0) + +#define HAS_ENGINE(dev_priv, id) \ + (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) + +#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) +#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) +#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) +#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) + #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) -#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) +#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && 
\ HAS_EDRAM(dev)) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) @@ -2656,9 +2818,10 @@ struct drm_i915_cmd_table { #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) /* WaRsDisableCoarsePowerGating:skl,bxt */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ - IS_SKL_GT3(dev) || \ - IS_SKL_GT4(dev)) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts @@ -2696,12 +2859,18 @@ struct drm_i915_cmd_table { IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ IS_KABYLAKE(dev) || IS_BROXTON(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) +#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev)) -#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) -#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +/* + * For now, anything with a GuC requires uCode loading, and then supports + * command submission once loaded. But these are logically independent + * properties, so we have separate macros to test them. + */ +#define HAS_GUC(dev) (IS_GEN9(dev)) +#define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) +#define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ INTEL_INFO(dev)->gen >= 8) @@ -2710,6 +2879,8 @@ struct drm_i915_cmd_table { !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ !IS_BROXTON(dev)) +#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu) + #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 @@ -2746,13 +2917,22 @@ struct drm_i915_cmd_table { #include "i915_trace.h" -extern const struct drm_ioctl_desc i915_ioctls[]; -extern int i915_max_ioctl; +static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) +{ +#ifdef CONFIG_INTEL_IOMMU + if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) + return true; +#endif + return false; +} extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); -/* i915_dma.c */ +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt); + +/* i915_drv.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, const char *fmt, ...); @@ -2760,21 +2940,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #define i915_report_error(dev_priv, fmt, ...) 
\ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) -extern int i915_driver_load(struct drm_device *, unsigned long flags); -extern int i915_driver_unload(struct drm_device *); -extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); -extern void i915_driver_lastclose(struct drm_device * dev); -extern void i915_driver_preclose(struct drm_device *dev, - struct drm_file *file); -extern void i915_driver_postclose(struct drm_device *dev, - struct drm_file *file); #ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_device *dev); -extern int i915_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); +extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); +extern int i915_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2784,30 +2956,51 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); /* intel_hotplug.c */ -void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); +bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); +void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); /* i915_irq.c */ -void i915_queue_hangcheck(struct drm_device *dev); +static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) +{ + unsigned long delay; + + if (unlikely(!i915.enable_hangcheck)) + return; + + /* Don't continually defer the hangcheck so that it is always run at + * least once after work has been scheduled on any ring. Otherwise, + * we will ignore a hung ring if a second ring is kept busy. 
+ */ + + delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); + queue_delayed_work(system_long_wq, + &dev_priv->gpu_error.hangcheck_work, delay); +} + __printf(3, 4) -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev, +extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); +extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake); -extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_init(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -extern void intel_uncore_fini(struct drm_device *dev); -extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); +extern void intel_uncore_fini(struct drm_i915_private *dev_priv); +extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@ -2823,9 +3016,26 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -static inline bool intel_vgpu_active(struct drm_device *dev) + +int intel_wait_for_register(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms); +int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms); + +static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->gvt.initialized; +} + +static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) { - return to_i915(dev)->vgpu.active; + return dev_priv->vgpu.active; } void @@ -2882,7 +3092,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) ibx_display_interrupt_update(dev_priv, bits, 0); } - /* i915_gem.c */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -2921,7 +3130,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_init_userptr(struct drm_device *dev); +void i915_gem_init_userptr(struct drm_i915_private *dev_priv); int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, @@ -2931,11 +3140,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, void i915_gem_load_init(struct drm_device *dev); void i915_gem_load_cleanup(struct drm_device *dev); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); +int i915_gem_freeze_late(struct drm_i915_private *dev_priv); + void *i915_gem_object_alloc(struct 
drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size); struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_device *dev, const void *data, size_t size); @@ -2990,6 +3201,23 @@ static inline int __sg_page_count(struct scatterlist *sg) struct page * i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); +static inline dma_addr_t +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n) +{ + if (n < obj->get_page.last) { + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + } + + while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { + obj->get_page.last += __sg_page_count(obj->get_page.sg++); + if (unlikely(sg_is_chain(obj->get_page.sg))) + obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); + } + + return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT); +} + static inline struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) { @@ -3066,6 +3294,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); + +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits); + /** * Returns true if seq1 is later than seq2. */ @@ -3075,31 +3308,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, - bool lazy_coherency) +static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req) { - if (!lazy_coherency && req->engine->irq_seqno_barrier) - req->engine->irq_seqno_barrier(req->engine); - return i915_seqno_passed(req->engine->get_seqno(req->engine), + return i915_seqno_passed(intel_engine_get_seqno(req->engine), req->previous_seqno); } -static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, - bool lazy_coherency) +static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req) { - if (!lazy_coherency && req->engine->irq_seqno_barrier) - req->engine->irq_seqno_barrier(req->engine); - return i915_seqno_passed(req->engine->get_seqno(req->engine), + return i915_seqno_passed(intel_engine_get_seqno(req->engine), req->seqno); } -int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); +bool __i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us); +static inline bool i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us) +{ + return (i915_gem_request_started(request) && + __i915_spin_request(request, state, timeout_us)); +} + +int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); -bool i915_gem_retire_requests(struct drm_device *dev); +void i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct 
i915_gpu_error *error) @@ -3142,27 +3378,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; } -static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) -{ - return dev_priv->gpu_error.stop_rings == 0 || - dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; -} - -static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) -{ - return dev_priv->gpu_error.stop_rings == 0 || - dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; -} - void i915_gem_reset(struct drm_device *dev); bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev); -int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv); int __must_check i915_gem_suspend(struct drm_device *dev); void __i915_add_request(struct drm_i915_gem_request *req, struct drm_i915_gem_object *batch_obj, @@ -3227,8 +3450,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, bool i915_gem_obj_bound(struct drm_i915_gem_object *o, struct i915_address_space *vm); -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm); struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); @@ -3263,14 +3484,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); } -static inline unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - return i915_gem_obj_size(obj, &ggtt->base); -} +unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj); static inline int __must_check i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, @@ -3284,12 +3499,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, alignment, flags | PIN_GLOBAL); } -static inline int -i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) -{ - return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); -} - void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); static inline void @@ -3313,28 +3522,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); +void i915_gem_context_lost(struct drm_i915_private *dev_priv); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_reset(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); -int i915_gem_context_enable(struct drm_i915_gem_request *req); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); void i915_gem_context_free(struct kref *ctx_ref); struct drm_i915_gem_object * i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); -static inline void 
i915_gem_context_reference(struct intel_context *ctx) +struct i915_gem_context * +i915_gem_context_create_gvt(struct drm_device *dev); + +static inline struct i915_gem_context * +i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_gem_context *ctx; + + lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); + + ctx = idr_find(&file_priv->context_idr, id); + if (!ctx) + return ERR_PTR(-ENOENT); + + return ctx; +} + +static inline void i915_gem_context_reference(struct i915_gem_context *ctx) { kref_get(&ctx->ref); } -static inline void i915_gem_context_unreference(struct intel_context *ctx) +static inline void i915_gem_context_unreference(struct i915_gem_context *ctx) { + lockdep_assert_held(&ctx->i915->drm.struct_mutex); kref_put(&ctx->ref, i915_gem_context_free); } -static inline bool i915_gem_context_is_default(const struct intel_context *c) +static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) { return c->user_handle == DEFAULT_CONTEXT_HANDLE; } @@ -3347,6 +3572,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, @@ -3361,9 +3588,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ -static inline void i915_gem_chipset_flush(struct drm_device *dev) +static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3404,7 +3631,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj->tiling_mode != I915_TILING_NONE; @@ -3418,12 +3645,14 @@ int i915_verify_lists(struct drm_device *dev); #endif /* i915_debugfs.c */ -int i915_debugfs_init(struct drm_minor *minor); -void i915_debugfs_cleanup(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS +int i915_debugfs_register(struct drm_i915_private *dev_priv); +void i915_debugfs_unregister(struct drm_i915_private *dev_priv); int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_device *dev); #else +static inline int i915_debugfs_register(struct drm_i915_private *) {return 0;} +static inline void i915_debugfs_unregister(struct drm_i915_private *) {} static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; } static inline void intel_display_crc_init(struct drm_device *dev) {} @@ -3442,18 +3671,19 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 
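Note on the lookup helper added above: i915_gem_context_lookup() returns ERR_PTR(-ENOENT) rather than a bare NULL for an unknown id, so callers are expected to use the IS_ERR()/PTR_ERR() convention while holding struct_mutex. A minimal sketch of the intended calling pattern follows; example_ctx_op() is a hypothetical caller, not part of this patch:

static int example_ctx_op(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	/* i915_gem_context_lookup() asserts struct_mutex is held */
	ctx = i915_gem_context_lookup(file_priv, id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -ENOENT for an unknown/stale id */

	i915_gem_context_reference(ctx);
	/* ... operate on ctx under struct_mutex ... */
	i915_gem_context_unreference(ctx);
	return 0;
}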
void i915_destroy_error_state(struct drm_device *dev); -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone); const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(void); +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); bool i915_needs_cmd_parser(struct intel_engine_cs *engine); @@ -3502,31 +3732,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, /* intel_opregion.c */ #ifdef CONFIG_ACPI -extern int intel_opregion_setup(struct drm_device *dev); -extern void intel_opregion_init(struct drm_device *dev); -extern void intel_opregion_fini(struct drm_device *dev); -extern void intel_opregion_asle_intr(struct drm_device *dev); +extern int intel_opregion_setup(struct drm_i915_private *dev_priv); +extern void intel_opregion_register(struct drm_i915_private *dev_priv); +extern void intel_opregion_unregister(struct drm_i915_private *dev_priv); +extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); -extern int intel_opregion_notify_adapter(struct drm_device *dev, +extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state); -extern int intel_opregion_get_panel_type(struct drm_device *dev); +extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); #else -static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } -static inline void intel_opregion_init(struct drm_device *dev) { return; } -static inline void intel_opregion_fini(struct drm_device *dev) { return; } -static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } +static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { } +static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { } +static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) +{ +} static inline int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { return 0; } static inline int -intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) { return 0; } -static inline int intel_opregion_get_panel_type(struct drm_device *dev) +static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) { return -ENODEV; } @@ -3541,36 +3773,45 @@ static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } #endif /* CONFIG_ACPI */ +/* intel_device_info.c */ +static inline struct intel_device_info * +mkwrite_device_info(struct drm_i915_private *dev_priv) +{ + return (struct intel_device_info *)&dev_priv->info; +} + +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); +void intel_device_info_dump(struct drm_i915_private *dev_priv); + /* modesetting */ extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device 
*dev); -extern void intel_connector_unregister(struct intel_connector *); +extern int intel_connector_register(struct drm_connector *); +extern void intel_connector_unregister(struct drm_connector *); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_display_resume(struct drm_device *dev); extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); -extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void intel_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); -extern void intel_detect_pch(struct drm_device *dev); -extern int intel_enable_rc6(const struct drm_device *dev); -extern bool i915_semaphore_is_enabled(struct drm_device *dev); +extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); /* overlay */ -extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); -extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern struct intel_display_error_state * +intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); @@ -3599,6 +3840,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +/* intel_dpio_phy.c */ +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale); +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset); +void chv_phy_pre_pll_enable(struct intel_encoder *encoder); +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void chv_phy_release_cl2_override(struct intel_encoder *encoder); +void chv_phy_post_pll_disable(struct intel_encoder *encoder); + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph); +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder); +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void vlv_phy_reset_lanes(struct intel_encoder *encoder); + int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); @@ -3672,6 +3931,7 @@ __raw_write(64, q) */ #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) +#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__)) #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 
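Note on the raw accessors defined above: the *_FW variants (I915_READ_FW, I915_WRITE_FW, and the new I915_WRITE64_FW/POSTING_READ_FW) skip the automatic forcewake and unclaimed-mmio handling, so the caller must hold the forcewake reference (and typically uncore.lock) around the whole batch of accesses, using the __locked forcewake helpers declared earlier. A sketch of that usage pattern; the function name and the register written here are chosen purely for illustration:

static void example_fw_write_batch(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	/* raw accessors: no per-access forcewake or unclaimed checks */
	I915_WRITE_FW(GEN6_RP_CONTROL, 0);
	POSTING_READ_FW(GEN6_RP_CONTROL);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);
}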
/* "Broadcast RGB" property */ @@ -3735,12 +3995,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) schedule_timeout_uninterruptible(remaining_jiffies); } } - -static inline void i915_trace_irq_get(struct intel_engine_cs *engine, - struct drm_i915_gem_request *req) +static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) { - if (engine->trace_irq_req == NULL && engine->irq_get(engine)) - i915_gem_request_assign(&engine->trace_irq_req, req); + struct intel_engine_cs *engine = req->engine; + + /* Before we do the heavier coherent read of the seqno, + * check the value (hopefully) in the CPU cacheline. + */ + if (i915_gem_request_completed(req)) + return true; + + /* Ensure our read of the seqno is coherent so that we + * do not "miss an interrupt" (i.e. if this is the last + * request and the seqno write from the GPU is not visible + * by the time the interrupt fires, we will see that the + * request is incomplete and go back to sleep awaiting + * another interrupt that will never come.) + * + * Strictly, we only need to do this once after an interrupt, + * but it is easier and safer to do it every time the waiter + * is woken. + */ + if (engine->irq_seqno_barrier && + READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current && + cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { + struct task_struct *tsk; + + /* The ordering of irq_posted versus applying the barrier + * is crucial. The clearing of the current irq_posted must + * be visible before we perform the barrier operation, + * such that if a subsequent interrupt arrives, irq_posted + * is reasserted and our task rewoken (which causes us to + * do another __i915_request_irq_complete() immediately + * and reapply the barrier). Conversely, if the clear + * occurs after the barrier, then an interrupt that arrived + * whilst we waited on the barrier would not trigger a + * barrier on the next pass, and the read may not see the + * seqno update. + */ + engine->irq_seqno_barrier(engine); + + /* If we consume the irq, but we are no longer the bottom-half, + * the real bottom-half may not have serialised their own + * seqno check with the irq-barrier (i.e. may have inspected + * the seqno before we believe it coherent since they see + * irq_posted == false but we are still running). + */ + rcu_read_lock(); + tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + if (tsk && tsk != current) + /* Note that if the bottom-half is changed as we + * are sending the wake-up, the new bottom-half will + * be woken by whomever made the change. We only have + * to worry about when we steal the irq-posted for + * ourself. + */ + wake_up_process(tsk); + rcu_read_unlock(); + + if (i915_gem_request_completed(req)) + return true; + } + + /* We need to check whether any gpu reset happened in between + * the request being submitted and now. If a reset has occurred, + * the seqno will have been advance past ours and our request + * is complete. If we are in the process of handling a reset, + * the request is effectively complete as the rendering will + * be discarded, but we need to return in order to drop the + * struct_mutex. 
+ */ + if (i915_reset_in_progress(&req->i915->gpu_error)) + return true; + + return false; } #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aad26851cee3..11681501d7b1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev, static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return false; + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) return true; return obj->pin_display; } +static int +insert_mappable_node(struct drm_i915_private *i915, + struct drm_mm_node *node, u32 size) +{ + memset(node, 0, sizeof(*node)); + return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node, + size, 0, 0, 0, + i915->ggtt.mappable_end, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); +} + +static void +remove_mappable_node(struct drm_mm_node *node) +{ + drm_mm_remove_node(node); +} + /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) @@ -107,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) int i915_mutex_lock_interruptible(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = i915_gem_wait_for_error(&dev_priv->gpu_error); @@ -151,7 +172,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; struct sg_table *st; struct scatterlist *sg; @@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) @@ -218,7 +239,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) obj->dirty = 0; if (obj->dirty) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; int i; @@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, } drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); @@ -356,13 +377,13 @@ out: void *i915_gem_object_alloc(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); } void i915_gem_object_free(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); kmem_cache_free(dev_priv->objects, obj); } @@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = i915_gem_alloc_object(dev, size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ @@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file, /** * Creates a new mm 
object and returns a handle to it. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, @@ -484,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, *needs_clflush = 0; - if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) + if (WARN_ON(!i915_gem_object_has_struct_page(obj))) return -EINVAL; if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { @@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, return ret ? -EFAULT : 0; } +static inline unsigned long +slow_user_access(struct io_mapping *mapping, + uint64_t page_base, int page_offset, + char __user *user_data, + unsigned long length, bool pwrite) +{ + void __iomem *ioaddr; + void *vaddr; + uint64_t unwritten; + + ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE); + /* We can use the cpu mem copy function because this is X86. */ + vaddr = (void __force *)ioaddr + page_offset; + if (pwrite) + unwritten = __copy_from_user(vaddr, user_data, length); + else + unwritten = __copy_to_user(user_data, vaddr, length); + + io_mapping_unmap(ioaddr); + return unwritten; +} + +static int +i915_gem_gtt_pread(struct drm_device *dev, + struct drm_i915_gem_object *obj, uint64_t size, + uint64_t data_offset, uint64_t data_ptr) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_mm_node node; + char __user *user_data; + uint64_t remain; + uint64_t offset; + int ret; + + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); + if (ret) { + ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE); + if (ret) + goto out; + + ret = i915_gem_object_get_pages(obj); + if (ret) { + remove_mappable_node(&node); + goto out; + } + + i915_gem_object_pin_pages(obj); + } else { + node.start = i915_gem_obj_ggtt_offset(obj); + node.allocated = false; + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + } + + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + goto out_unpin; + + user_data = u64_to_user_ptr(data_ptr); + remain = size; + offset = data_offset; + + mutex_unlock(&dev->struct_mutex); + if (likely(!i915.prefault_disable)) { + ret = fault_in_multipages_writeable(user_data, remain); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto out_unpin; + } + } + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + u32 page_base = node.start; + unsigned page_offset = offset_in_page(offset); + unsigned page_length = PAGE_SIZE - page_offset; + page_length = remain < page_length ? remain : page_length; + if (node.allocated) { + wmb(); + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, + I915_CACHE_NONE, 0); + wmb(); + } else { + page_base += offset & PAGE_MASK; + } + /* This is a slow read/write as it tries to read from + * and write to user memory which may result in page + * faults, and so we cannot perform this under struct_mutex. 
+ */ + if (slow_user_access(ggtt->mappable, page_base, + page_offset, user_data, + page_length, false)) { + ret = -EFAULT; + break; + } + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + + mutex_lock(&dev->struct_mutex); + if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) { + /* The user has modified the object whilst we tried + * reading from it, and we now have no idea what domain + * the pages should be in. As we have just been touching + * them directly, flush everything back to the GTT + * domain. + */ + ret = i915_gem_object_set_to_gtt_domain(obj, false); + } + +out_unpin: + if (node.allocated) { + wmb(); + ggtt->base.clear_range(&ggtt->base, + node.start, node.size, + true); + i915_gem_object_unpin_pages(obj); + remove_mappable_node(&node); + } else { + i915_gem_object_ggtt_unpin(obj); + } +out: + return ret; +} + static int i915_gem_shmem_pread(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; @@ -672,6 +835,9 @@ out: /** * Reads data from the object referenced by handle. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer * * On error, the contents of *data are undefined. */ @@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - /* prime objects have no backing filp to GEM pread/pwrite - * pages from. - */ - if (!obj->base.filp) { - ret = -EINVAL; - goto out; - } - trace_i915_gem_object_pread(obj, args->offset, args->size); ret = i915_gem_shmem_pread(dev, obj, args, file); + /* pread for non shmem backed objects */ + if (ret == -EFAULT || ret == -ENODEV) + ret = i915_gem_gtt_pread(dev, obj, args->size, + args->offset, args->data_ptr); + out: drm_gem_object_unreference(&obj->base); unlock: @@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping, /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. 
+ * @dev: drm device pointer + * @obj: i915 gem object + * @args: pwrite arguments structure + * @file: drm file pointer */ static int -i915_gem_gtt_pwrite_fast(struct drm_device *dev, +i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - ssize_t remain; - loff_t offset, page_base; + struct i915_ggtt *ggtt = &i915->ggtt; + struct drm_device *dev = obj->base.dev; + struct drm_mm_node node; + uint64_t remain, offset; char __user *user_data; - int page_offset, page_length, ret; + int ret; + bool hit_slow_path = false; + + if (obj->tiling_mode != I915_TILING_NONE) + return -EFAULT; ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); - if (ret) - goto out; + if (ret) { + ret = insert_mappable_node(i915, &node, PAGE_SIZE); + if (ret) + goto out; + + ret = i915_gem_object_get_pages(obj); + if (ret) { + remove_mappable_node(&node); + goto out; + } + + i915_gem_object_pin_pages(obj); + } else { + node.start = i915_gem_obj_ggtt_offset(obj); + node.allocated = false; + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + } ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) goto out_unpin; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; + intel_fb_obj_invalidate(obj, ORIGIN_GTT); + obj->dirty = true; user_data = u64_to_user_ptr(args->data_ptr); + offset = args->offset; remain = args->size; - - offset = i915_gem_obj_ggtt_offset(obj) + args->offset; - - intel_fb_obj_invalidate(obj, ORIGIN_GTT); - - while (remain > 0) { + while (remain) { /* Operation in this page * * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = offset & PAGE_MASK; - page_offset = offset_in_page(offset); - page_length = remain; - if ((page_offset + remain) > PAGE_SIZE) - page_length = PAGE_SIZE - page_offset; - + u32 page_base = node.start; + unsigned page_offset = offset_in_page(offset); + unsigned page_length = PAGE_SIZE - page_offset; + page_length = remain < page_length ? remain : page_length; + if (node.allocated) { + wmb(); /* flush the write before we modify the GGTT */ + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, I915_CACHE_NONE, 0); + wmb(); /* flush modifications to the GGTT (insert_page) */ + } else { + page_base += offset & PAGE_MASK; + } /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. + * If the object is non-shmem backed, we retry again with the + * path that handles page fault. */ if (fast_user_write(ggtt->mappable, page_base, page_offset, user_data, page_length)) { - ret = -EFAULT; - goto out_flush; + hit_slow_path = true; + mutex_unlock(&dev->struct_mutex); + if (slow_user_access(ggtt->mappable, + page_base, + page_offset, user_data, + page_length, true)) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto out_flush; + } + + mutex_lock(&dev->struct_mutex); } remain -= page_length; @@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, } out_flush: + if (hit_slow_path) { + if (ret == 0 && + (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) { + /* The user has modified the object whilst we tried + * reading from it, and we now have no idea what domain + * the pages should be in. 
As we have just been touching + * them directly, flush everything back to the GTT + * domain. + */ + ret = i915_gem_object_set_to_gtt_domain(obj, false); + } + } + intel_fb_obj_flush(obj, false, ORIGIN_GTT); out_unpin: - i915_gem_object_ggtt_unpin(obj); + if (node.allocated) { + wmb(); + ggtt->base.clear_range(&ggtt->base, + node.start, node.size, + true); + i915_gem_object_unpin_pages(obj); + remove_mappable_node(&node); + } else { + i915_gem_object_ggtt_unpin(obj); + } out: return ret; } @@ -1006,7 +1230,7 @@ out: } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); else obj->cache_dirty = true; @@ -1016,6 +1240,9 @@ out: /** * Writes data to the object referenced by handle. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file * * On error, the contents of the buffer that were to be modified are undefined. */ @@ -1023,7 +1250,7 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_object *obj; int ret; @@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - /* prime objects have no backing filp to GEM pread/pwrite - * pages from. - */ - if (!obj->base.filp) { - ret = -EINVAL; - goto out; - } - trace_i915_gem_object_pwrite(obj, args->offset, args->size); ret = -EFAULT; @@ -1079,20 +1298,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->tiling_mode == I915_TILING_NONE && - obj->base.write_domain != I915_GEM_DOMAIN_CPU && + if (!i915_gem_object_has_struct_page(obj) || cpu_write_needs_clflush(obj)) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between * textures). Fallback to the shmem path in that case. */ } - if (ret == -EFAULT || ret == -ENOSPC) { + if (ret == -EFAULT) { if (obj->phys_handle) ret = i915_gem_phys_pwrite(obj, args, file); - else + else if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(dev, obj, args, file); + else + ret = -ENODEV; } out: @@ -1123,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible) return 0; } -static void fake_irq(unsigned long data) -{ - wake_up_process((struct task_struct *)data); -} - -static bool missed_irq(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings); -} - static unsigned long local_clock_us(unsigned *cpu) { unsigned long t; @@ -1166,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu) return this_cpu != cpu; } -static int __i915_spin_request(struct drm_i915_gem_request *req, int state) +bool __i915_spin_request(const struct drm_i915_gem_request *req, + int state, unsigned long timeout_us) { - unsigned long timeout; unsigned cpu; /* When waiting for high frequency requests, e.g. during synchronous @@ -1181,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * takes to sleep on a request, on the order of a microsecond. 
*/ - if (req->engine->irq_refcount) - return -EBUSY; - - /* Only spin if we know the GPU is processing this request */ - if (!i915_gem_request_started(req, true)) - return -EAGAIN; - - timeout = local_clock_us(&cpu) + 5; - while (!need_resched()) { - if (i915_gem_request_completed(req, true)) - return 0; + timeout_us += local_clock_us(&cpu); + do { + if (i915_gem_request_completed(req)) + return true; if (signal_pending_state(state, current)) break; - if (busywait_stop(timeout, cpu)) + if (busywait_stop(timeout_us, cpu)) break; cpu_relax_lowlatency(); - } + } while (!need_resched()); - if (i915_gem_request_completed(req, false)) - return 0; - - return -EAGAIN; + return false; } /** @@ -1213,6 +1412,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * @req: duh! * @interruptible: do an interruptible wait (normally yes) * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * @rps: RPS client * * Note: It is of utmost importance that the passed in seqno and reset_counter * values have been read by the caller in an smp safe manner. Where read-side @@ -1229,26 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req, s64 *timeout, struct intel_rps_client *rps) { - struct intel_engine_cs *engine = i915_gem_request_get_engine(req); - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const bool irq_test_in_progress = - ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - DEFINE_WAIT(wait); - unsigned long timeout_expire; + DEFINE_WAIT(reset); + struct intel_wait wait; + unsigned long timeout_remain; s64 before = 0; /* Only to silence a compiler warning. */ - int ret; + int ret = 0; - WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); + might_sleep(); if (list_empty(&req->list)) return 0; - if (i915_gem_request_completed(req, true)) + if (i915_gem_request_completed(req)) return 0; - timeout_expire = 0; + timeout_remain = MAX_SCHEDULE_TIMEOUT; if (timeout) { if (WARN_ON(*timeout < 0)) return -EINVAL; @@ -1256,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, if (*timeout == 0) return -ETIME; - timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + timeout_remain = nsecs_to_jiffies_timeout(*timeout); /* * Record current time in case interrupted by signal, or wedged. @@ -1264,75 +1460,76 @@ int __i915_wait_request(struct drm_i915_gem_request *req, before = ktime_get_raw_ns(); } - if (INTEL_INFO(dev_priv)->gen >= 6) - gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); - trace_i915_gem_request_wait_begin(req); - /* Optimistic spin for the next jiffie before touching IRQs */ - ret = __i915_spin_request(req, state); - if (ret == 0) - goto out; - - if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) { - ret = -ENODEV; - goto out; - } + /* This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. + * We can circumvent that by promoting the GPU frequency to maximum + * before we wait. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. 
window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). Not all clients even want their results + * immediately and for them we should just let the GPU select its own + * frequency to maximise efficiency. To prevent a single client from + * forcing the clocks too high for the whole system, we only allow + * each client to waitboost once in a busy period. + */ + if (INTEL_INFO(req->i915)->gen >= 6) + gen6_rps_boost(req->i915, rps, req->emitted_jiffies); - for (;;) { - struct timer_list timer; + /* Optimistic spin for the next ~jiffie before touching IRQs */ + if (i915_spin_request(req, state, 5)) + goto complete; - prepare_to_wait(&engine->irq_queue, &wait, state); + set_current_state(state); + add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); - /* We need to check whether any gpu reset happened in between - * the request being submitted and now. If a reset has occurred, - * the request is effectively complete (we either are in the - * process of or have discarded the rendering and completely - * reset the GPU. The results of the request are lost and we - * are free to continue on with the original operation. + intel_wait_init(&wait, req->seqno); + if (intel_engine_add_wait(req->engine, &wait)) + /* In order to check that we haven't missed the interrupt + * as we enabled it, we need to kick ourselves to do a + * coherent check on the seqno before we sleep. */ - if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { - ret = 0; - break; - } - - if (i915_gem_request_completed(req, false)) { - ret = 0; - break; - } + goto wakeup; + for (;;) { if (signal_pending_state(state, current)) { ret = -ERESTARTSYS; break; } - if (timeout && time_after_eq(jiffies, timeout_expire)) { + timeout_remain = io_schedule_timeout(timeout_remain); + if (timeout_remain == 0) { ret = -ETIME; break; } - timer.function = NULL; - if (timeout || missed_irq(dev_priv, engine)) { - unsigned long expire; + if (intel_wait_complete(&wait)) + break; - setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); - expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire; - mod_timer(&timer, expire); - } + set_current_state(state); - io_schedule(); +wakeup: + /* Carefully check if the request is complete, giving time + * for the seqno to be visible following the interrupt. + * We also have to check in case we are kicked by the GPU + * reset in order to drop the struct_mutex. + */ + if (__i915_request_irq_complete(req)) + break; - if (timer.function) { - del_singleshot_timer_sync(&timer); - destroy_timer_on_stack(&timer); - } + /* Only spin if we know the GPU is processing this request */ + if (i915_spin_request(req, state, 2)) + break; } - if (!irq_test_in_progress) - engine->irq_put(engine); + remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset); - finish_wait(&engine->irq_queue, &wait); - -out: + intel_engine_remove_wait(req->engine, &wait); + __set_current_state(TASK_RUNNING); +complete: trace_i915_gem_request_wait_end(req); if (timeout) { @@ -1351,6 +1548,22 @@ out: *timeout = 0; } + if (rps && req->seqno == req->engine->last_submitted_seqno) { + /* The GPU is now idle and this client has stalled. + * Since no other client has submitted a request in the + * meantime, assume that this client is the only one + * supplying work to the GPU but is unable to keep that + * work supplied because it is waiting. Since the GPU is + * then never kept fully busy, RPS autoclocking will + * keep the clocks relatively low, causing further delays. 
+ * Compensate by giving the synchronous client credit for + * a waitboost next time. + */ + spin_lock(&req->i915->rps.client_lock); + list_del_init(&rps->link); + spin_unlock(&req->i915->rps.client_lock); + } + return ret; } @@ -1413,6 +1626,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) list_del_init(&request->list); i915_gem_request_remove_from_client(request); + if (request->previous_context) { + if (i915.enable_execlists) + intel_lr_context_unpin(request->previous_context, + request->engine); + } + + i915_gem_context_unreference(request->ctx); i915_gem_request_unreference(request); } @@ -1422,7 +1642,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; - lockdep_assert_held(&engine->dev->struct_mutex); + lockdep_assert_held(&engine->i915->drm.struct_mutex); if (list_empty(&req->list)) return; @@ -1440,6 +1660,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) /** * Waits for a request to be signaled, and cleans up the * request and object lists appropriately for that event. + * @req: request to wait on */ int i915_wait_request(struct drm_i915_gem_request *req) @@ -1450,14 +1671,14 @@ i915_wait_request(struct drm_i915_gem_request *req) interruptible = dev_priv->mm.interruptible; - BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex)); ret = __i915_wait_request(req, interruptible, NULL, NULL); if (ret) return ret; /* If the GPU hung, we want to keep the requests to find the guilty. */ - if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) + if (!i915_reset_in_progress(&dev_priv->gpu_error)) __i915_gem_request_retire__upto(req); return 0; @@ -1466,6 +1687,8 @@ i915_wait_request(struct drm_i915_gem_request *req) /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. + * @obj: i915 gem object + * @readonly: waiting for read access or write */ int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, @@ -1516,7 +1739,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, else if (obj->last_write_req == req) i915_gem_object_retire__write(obj); - if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) + if (!i915_reset_in_progress(&req->i915->gpu_error)) __i915_gem_request_retire__upto(req); } @@ -1529,7 +1752,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, bool readonly) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; int ret, i, n = 0; @@ -1580,9 +1803,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file) return &fpriv->rps; } +static enum fb_op_origin +write_origin(struct drm_i915_gem_object *obj, unsigned domain) +{ + return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ? + ORIGIN_GTT : ORIGIN_CPU; +} + /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. 
+ * @dev: drm device + * @data: ioctl data blob + * @file: drm file */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, @@ -1633,9 +1866,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); if (write_domain != 0) - intel_fb_obj_invalidate(obj, - write_domain == I915_GEM_DOMAIN_GTT ? - ORIGIN_GTT : ORIGIN_CPU); + intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); unref: drm_gem_object_unreference(&obj->base); @@ -1646,6 +1877,9 @@ unlock: /** * Called when user space has done writes to this buffer + * @dev: drm device + * @data: ioctl data blob + * @file: drm file */ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, @@ -1676,8 +1910,11 @@ unlock: } /** - * Maps the contents of an object, returning the address it is mapped - * into. + * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address + * it is mapped to. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file * * While the mapping holds a reference on the contents of the object, it doesn't * imply a ref on the object itself. @@ -1736,6 +1973,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, else addr = -ENOMEM; up_write(&mm->mmap_sem); + + /* This may race, but that's ok, it only gets set */ + WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true); } drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) @@ -1982,7 +2222,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) return size; /* Previous chips need a power-of-two fence region when tiling */ - if (INTEL_INFO(dev)->gen == 3) + if (IS_GEN3(dev)) gtt_size = 1024*1024; else gtt_size = 512*1024; @@ -1995,7 +2235,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) /** * i915_gem_get_gtt_alignment - return required GTT alignment for an object - * @obj: object to check + * @dev: drm device + * @size: object size + * @tiling_mode: tiling mode + * @fenced: is fenced alignment required or not * * Return the required GTT alignment for an object, taking into account * potential fence register mapping. 
@@ -2021,7 +2264,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int ret; dev_priv->mm.shrinker_no_lock_stealing = true; @@ -2155,14 +2398,15 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) if (obj->base.filp == NULL) return; - mapping = file_inode(obj->base.filp)->i_mapping, + mapping = obj->base.filp->f_mapping, invalidate_mapping_pages(mapping, 0, (loff_t)-1); } static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int ret; BUG_ON(obj->madv == __I915_MADV_PURGED); @@ -2184,9 +2428,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -2238,12 +2480,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int page_count, i; struct address_space *mapping; struct sg_table *st; struct scatterlist *sg; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ int ret; @@ -2271,7 +2513,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * * Fail silently without starting the shrinker */ - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); gfp |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; @@ -2340,8 +2582,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) err_pages: sg_mark_end(sg); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) - put_page(sg_page_iter_page(&sg_iter)); + for_each_sgt_page(page, sgt_iter, st) + put_page(page); sg_free_table(st); kfree(st); @@ -2369,7 +2611,7 @@ err_pages: int i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); const struct drm_i915_gem_object_ops *ops = obj->ops; int ret; @@ -2395,6 +2637,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return 0; } +/* The 'mapping' part of i915_gem_object_pin_map() below */ +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) +{ + unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt = obj->pages; + struct sgt_iter sgt_iter; + struct page *page; + struct page *stack_pages[32]; + struct page **pages = stack_pages; + unsigned long i = 0; + void *addr; + + /* A single page can always be kmapped */ + if (n_pages == 1) + return kmap(sg_page(sgt->sgl)); + + if (n_pages > ARRAY_SIZE(stack_pages)) { + /* Too big for stack -- allocate temporary array instead */ + pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); + if (!pages) + return NULL; + } + + for_each_sgt_page(page, sgt_iter, sgt) + pages[i++] = page; + + /* Check that we have the expected number of pages */ + GEM_BUG_ON(i != 
n_pages); + + addr = vmap(pages, n_pages, 0, PAGE_KERNEL); + + if (pages != stack_pages) + drm_free_large(pages); + + return addr; +} + +/* get, pin, and map the pages of the object into kernel space */ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) { int ret; @@ -2407,29 +2687,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) i915_gem_object_pin_pages(obj); - if (obj->mapping == NULL) { - struct page **pages; - - pages = NULL; - if (obj->base.size == PAGE_SIZE) - obj->mapping = kmap(sg_page(obj->pages->sgl)); - else - pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT, - sizeof(*pages), - GFP_TEMPORARY); - if (pages != NULL) { - struct sg_page_iter sg_iter; - int n; - - n = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, - obj->pages->nents, 0) - pages[n++] = sg_page_iter_page(&sg_iter); - - obj->mapping = vmap(pages, n, 0, PAGE_KERNEL); - drm_free_large(pages); - } - if (obj->mapping == NULL) { + if (!obj->mapping) { + obj->mapping = i915_gem_object_map(obj); + if (!obj->mapping) { i915_gem_object_unpin_pages(obj); return ERR_PTR(-ENOMEM); } @@ -2502,9 +2762,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) } static int -i915_gem_init_seqno(struct drm_device *dev, u32 seqno) +i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; int ret; @@ -2514,7 +2773,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) if (ret) return ret; } - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); + + /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ + if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) { + while (intel_kick_waiters(dev_priv) || + intel_kick_signalers(dev_priv)) + yield(); + } /* Finally reset hw state */ for_each_engine(engine, dev_priv) @@ -2525,7 +2791,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (seqno == 0) @@ -2534,7 +2800,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) /* HWS page needs to be set less than what we * will inject to ring */ - ret = i915_gem_init_seqno(dev, seqno - 1); + ret = i915_gem_init_seqno(dev_priv, seqno - 1); if (ret) return ret; @@ -2550,13 +2816,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) } int -i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) +i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* reserve 0 for non-seqno */ if (dev_priv->next_seqno == 0) { - int ret = i915_gem_init_seqno(dev, 0); + int ret = i915_gem_init_seqno(dev_priv, 0); if (ret) return ret; @@ -2567,6 +2831,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) return 0; } +static void i915_gem_mark_busy(const struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + dev_priv->gt.active_engines |= intel_engine_flag(engine); + if (dev_priv->gt.awake) + return; + + intel_runtime_pm_get_noresume(dev_priv); + dev_priv->gt.awake = true; + + i915_update_gfx_val(dev_priv); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_busy(dev_priv); + + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.retire_work, + round_jiffies_up_relative(HZ)); +} + /* * NB: This function is not allowed to fail. 
Doing so would mean the * request is not being tracked for completion but the work itself is @@ -2577,16 +2861,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) { struct intel_engine_cs *engine; - struct drm_i915_private *dev_priv; struct intel_ringbuffer *ringbuf; u32 request_start; + u32 reserved_tail; int ret; if (WARN_ON(request == NULL)) return; engine = request->engine; - dev_priv = request->i915; ringbuf = request->ringbuf; /* @@ -2594,9 +2877,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, * should already have been reserved in the ring buffer. Let the ring * know that it is time to use that space up. */ - intel_ring_reserved_space_use(ringbuf); - request_start = intel_ring_get_tail(ringbuf); + reserved_tail = request->reserved_space; + request->reserved_space = 0; + /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2651,56 +2935,42 @@ void __i915_add_request(struct drm_i915_gem_request *request, } /* Not allowed to fail! */ WARN(ret, "emit|add_request failed: %d!\n", ret); - - i915_queue_hangcheck(engine->dev); - - queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv->dev); - /* Sanity check that the reserved size was large enough. */ - intel_ring_reserved_space_end(ringbuf); + ret = intel_ring_get_tail(ringbuf) - request_start; + if (ret < 0) + ret += ringbuf->size; + WARN_ONCE(ret > reserved_tail, + "Not enough space reserved (%d bytes) " + "for adding the request (%d bytes)\n", + reserved_tail, ret); + + i915_gem_mark_busy(engine); } -static bool i915_context_is_banned(struct drm_i915_private *dev_priv, - const struct intel_context *ctx) +static bool i915_context_is_banned(const struct i915_gem_context *ctx) { unsigned long elapsed; - elapsed = get_seconds() - ctx->hang_stats.guilty_ts; - if (ctx->hang_stats.banned) return true; + elapsed = get_seconds() - ctx->hang_stats.guilty_ts; if (ctx->hang_stats.ban_period_seconds && elapsed <= ctx->hang_stats.ban_period_seconds) { - if (!i915_gem_context_is_default(ctx)) { - DRM_DEBUG("context hanging too fast, banning!\n"); - return true; - } else if (i915_stop_ring_allow_ban(dev_priv)) { - if (i915_stop_ring_allow_warn(dev_priv)) - DRM_ERROR("gpu hanging too fast, banning!\n"); - return true; - } + DRM_DEBUG("context hanging too fast, banning!\n"); + return true; } return false; } -static void i915_set_reset_status(struct drm_i915_private *dev_priv, - struct intel_context *ctx, +static void i915_set_reset_status(struct i915_gem_context *ctx, const bool guilty) { - struct i915_ctx_hang_stats *hs; - - if (WARN_ON(!ctx)) - return; - - hs = &ctx->hang_stats; + struct i915_ctx_hang_stats *hs = &ctx->hang_stats; if (guilty) { - hs->banned = i915_context_is_banned(dev_priv, ctx); + hs->banned = i915_context_is_banned(ctx); hs->batch_active++; hs->guilty_ts = get_seconds(); } else { @@ -2712,27 +2982,15 @@ void i915_gem_request_free(struct kref *req_ref) { struct drm_i915_gem_request *req = container_of(req_ref, typeof(*req), ref); - struct intel_context *ctx = req->ctx; - - if (req->file_priv) - i915_gem_request_remove_from_client(req); - - if (ctx) { - if (i915.enable_execlists && ctx != req->i915->kernel_context) - intel_lr_context_unpin(ctx, req->engine); - - i915_gem_context_unreference(ctx); - } - kmem_cache_free(req->i915->requests, req); } static inline int __i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx, + struct i915_gem_context *ctx, struct drm_i915_gem_request **req_out) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); struct drm_i915_gem_request *req; int ret; @@ -2754,26 +3012,16 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, if (req == NULL) return -ENOMEM; - ret = i915_gem_get_seqno(engine->dev, &req->seqno); + ret = i915_gem_get_seqno(engine->i915, &req->seqno); if (ret) goto err; kref_init(&req->ref); req->i915 = dev_priv; req->engine = engine; - req->reset_counter = reset_counter; req->ctx = ctx; i915_gem_context_reference(req->ctx); - if (i915.enable_execlists) - ret = intel_logical_ring_alloc_request_extras(req); - else - ret = intel_ring_alloc_request_extras(req); - if (ret) { - i915_gem_context_unreference(req->ctx); - goto err; - } - /* * Reserve space in the ring buffer for all the commands required to * eventually emit this request. This is to guarantee that the @@ -2781,24 +3029,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, * to be redone if the request is not actually submitted straight * away, e.g. because a GPU scheduler has deferred it. */ + req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; + if (i915.enable_execlists) - ret = intel_logical_ring_reserve_space(req); + ret = intel_logical_ring_alloc_request_extras(req); else - ret = intel_ring_reserve_space(req); - if (ret) { - /* - * At this point, the request is fully allocated even if not - * fully prepared. Thus it can be cleaned up using the proper - * free code. - */ - intel_ring_reserved_space_cancel(req->ringbuf); - i915_gem_request_unreference(req); - return ret; - } + ret = intel_ring_alloc_request_extras(req); + if (ret) + goto err_ctx; *req_out = req; return 0; +err_ctx: + i915_gem_context_unreference(ctx); err: kmem_cache_free(dev_priv->requests, req); return ret; @@ -2818,13 +3062,13 @@ err: */ struct drm_i915_gem_request * i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_request *req; int err; if (ctx == NULL) - ctx = to_i915(engine->dev)->kernel_context; + ctx = engine->i915->kernel_context; err = __i915_gem_request_alloc(engine, ctx, &req); return err ? ERR_PTR(err) : req; } @@ -2834,8 +3078,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) { struct drm_i915_gem_request *request; + /* We are called by the error capture and reset at a random + * point in time. In particular, note that neither is crucially + * ordered with an interrupt. After a hang, the GPU is dead and we + * assume that no more writes can happen (we waited long enough for + * all writes that were in transaction to be flushed) - adding an + * extra delay for a recent interrupt is pointless. Hence, we do + * not need an engine->irq_seqno_barrier() before the seqno reads. 
+ */ list_for_each_entry(request, &engine->request_list, list) { - if (i915_gem_request_completed(request, false)) + if (i915_gem_request_completed(request)) continue; return request; @@ -2844,27 +3096,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) return NULL; } -static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) +static void i915_gem_reset_engine_status(struct intel_engine_cs *engine) { struct drm_i915_gem_request *request; bool ring_hung; request = i915_gem_find_active_request(engine); - if (request == NULL) return; ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; - i915_set_reset_status(dev_priv, request->ctx, ring_hung); - + i915_set_reset_status(request->ctx, ring_hung); list_for_each_entry_continue(request, &engine->request_list, list) - i915_set_reset_status(dev_priv, request->ctx, false); + i915_set_reset_status(request->ctx, false); } -static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) +static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) { struct intel_ringbuffer *buffer; @@ -2888,13 +3136,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, /* Ensure irq handler finishes or is cancelled. */ tasklet_kill(&engine->irq_tasklet); - spin_lock_bh(&engine->execlist_lock); - /* list_splice_tail_init checks for empty lists */ - list_splice_tail_init(&engine->execlist_queue, - &engine->execlist_retired_req_list); - spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); + intel_execlists_cancel_requests(engine); } /* @@ -2931,7 +3173,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, void i915_gem_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; /* @@ -2940,10 +3182,10 @@ void i915_gem_reset(struct drm_device *dev) * their reference to the objects, the inspection must be done first. */ for_each_engine(engine, dev_priv) - i915_gem_reset_engine_status(dev_priv, engine); + i915_gem_reset_engine_status(engine); for_each_engine(engine, dev_priv) - i915_gem_reset_engine_cleanup(dev_priv, engine); + i915_gem_reset_engine_cleanup(engine); i915_gem_context_reset(dev); @@ -2954,6 +3196,7 @@ void i915_gem_reset(struct drm_device *dev) /** * This function clears the request list as sequence numbers are passed. 
+ * @engine: engine to retire requests on */ void i915_gem_retire_requests_ring(struct intel_engine_cs *engine) @@ -2972,7 +3215,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) struct drm_i915_gem_request, list); - if (!i915_gem_request_completed(request, true)) + if (!i915_gem_request_completed(request)) break; i915_gem_request_retire(request); @@ -2995,58 +3238,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) i915_gem_object_retire__read(obj, engine->id); } - if (unlikely(engine->trace_irq_req && - i915_gem_request_completed(engine->trace_irq_req, true))) { - engine->irq_put(engine); - i915_gem_request_assign(&engine->trace_irq_req, NULL); - } - WARN_ON(i915_verify_lists(engine->dev)); } -bool -i915_gem_retire_requests(struct drm_device *dev) +void i915_gem_retire_requests(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - bool idle = true; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (dev_priv->gt.active_engines == 0) + return; + + GEM_BUG_ON(!dev_priv->gt.awake); for_each_engine(engine, dev_priv) { i915_gem_retire_requests_ring(engine); - idle &= list_empty(&engine->request_list); - if (i915.enable_execlists) { - spin_lock_bh(&engine->execlist_lock); - idle &= list_empty(&engine->execlist_queue); - spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); - } + if (list_empty(&engine->request_list)) + dev_priv->gt.active_engines &= ~intel_engine_flag(engine); } - if (idle) - mod_delayed_work(dev_priv->wq, - &dev_priv->mm.idle_work, + if (dev_priv->gt.active_engines == 0) + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.idle_work, msecs_to_jiffies(100)); - - return idle; } static void i915_gem_retire_work_handler(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), mm.retire_work.work); - struct drm_device *dev = dev_priv->dev; - bool idle; + container_of(work, typeof(*dev_priv), gt.retire_work.work); + struct drm_device *dev = &dev_priv->drm; /* Come back later if the device is busy... */ - idle = false; if (mutex_trylock(&dev->struct_mutex)) { - idle = i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); mutex_unlock(&dev->struct_mutex); } - if (!idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, + + /* Keep the retire handler running until we are finally idle. + * We do not need to do this test under locking as in the worst-case + * we queue the retire worker once too often. 
+ */ + if (READ_ONCE(dev_priv->gt.awake)) + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.retire_work, + round_jiffies_up_relative(HZ)); } @@ -3054,25 +3291,55 @@ static void i915_gem_idle_work_handler(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), mm.idle_work.work); - struct drm_device *dev = dev_priv->dev; + container_of(work, typeof(*dev_priv), gt.idle_work.work); + struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; + unsigned int stuck_engines; + bool rearm_hangcheck; + + if (!READ_ONCE(dev_priv->gt.awake)) + return; + + if (READ_ONCE(dev_priv->gt.active_engines)) + return; + + rearm_hangcheck = + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + + if (!mutex_trylock(&dev->struct_mutex)) { + /* Currently busy, come back later */ + mod_delayed_work(dev_priv->wq, + &dev_priv->gt.idle_work, + msecs_to_jiffies(50)); + goto out_rearm; + } + + if (dev_priv->gt.active_engines) + goto out_unlock; for_each_engine(engine, dev_priv) - if (!list_empty(&engine->request_list)) - return; + i915_gem_batch_pool_fini(&engine->batch_pool); - /* we probably should sync with hangcheck here, using cancel_work_sync. - * Also locking seems to be fubar here, engine->request_list is protected - * by dev->struct_mutex. */ + GEM_BUG_ON(!dev_priv->gt.awake); + dev_priv->gt.awake = false; + rearm_hangcheck = false; - intel_mark_idle(dev); + stuck_engines = intel_kick_waiters(dev_priv); + if (unlikely(stuck_engines)) { + DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n"); + dev_priv->gpu_error.missed_irq_rings |= stuck_engines; + } - if (mutex_trylock(&dev->struct_mutex)) { - for_each_engine(engine, dev_priv) - i915_gem_batch_pool_fini(&engine->batch_pool); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_idle(dev_priv); + intel_runtime_pm_put(dev_priv); +out_unlock: + mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->struct_mutex); +out_rearm: + if (rearm_hangcheck) { + GEM_BUG_ON(!dev_priv->gt.awake); + i915_queue_hangcheck(dev_priv); } } @@ -3080,6 +3347,7 @@ i915_gem_idle_work_handler(struct work_struct *work) * Ensures that an object will eventually get non-busy by flushing any required * write domains, emitting any outstanding lazy request and retiring any * completed requests. + * @obj: object to flush */ static int i915_gem_object_flush_active(struct drm_i915_gem_object *obj) @@ -3096,14 +3364,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) if (req == NULL) continue; - if (list_empty(&req->list)) - goto retire; - - if (i915_gem_request_completed(req, true)) { - __i915_gem_request_retire__upto(req); -retire: + if (i915_gem_request_completed(req)) i915_gem_object_retire__read(obj, i); - } } return 0; @@ -3111,7 +3373,9 @@ retire: /** * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT - * @DRM_IOCTL_ARGS: standard ioctl arguments + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer * * Returns 0 if successful, else an error is returned with the remaining time in * the timeout parameter. @@ -3185,7 +3449,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ret = __i915_wait_request(req[i], true, args->timeout_ns > 0 ?
&args->timeout_ns : NULL, to_rps_client(file)); - i915_gem_request_unreference__unlocked(req[i]); + i915_gem_request_unreference(req[i]); } return ret; @@ -3208,10 +3472,10 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (to == from) return 0; - if (i915_gem_request_completed(from_req, true)) + if (i915_gem_request_completed(from_req)) return 0; - if (!i915_semaphore_is_enabled(obj->base.dev)) { + if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { struct drm_i915_private *i915 = to_i915(obj->base.dev); ret = __i915_wait_request(from_req, i915->mm.interruptible, @@ -3345,10 +3609,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) old_write_domain); } +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->pin_count); + + if (vma->iomap == NULL) + return; + + io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + static int __i915_vma_unbind(struct i915_vma *vma, bool wait) { struct drm_i915_gem_object *obj = vma->obj; - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int ret; if (list_empty(&vma->obj_link)) @@ -3377,6 +3652,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) ret = i915_gem_object_put_fence(obj); if (ret) return ret; + + __i915_vma_iounmap(vma); } trace_i915_vma_unbind(vma); @@ -3422,26 +3699,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma) return __i915_vma_unbind(vma, false); } -int i915_gpu_idle(struct drm_device *dev) +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; int ret; - /* Flush everything onto the inactive list. */ - for_each_engine(engine, dev_priv) { - if (!i915.enable_execlists) { - struct drm_i915_gem_request *req; + lockdep_assert_held(&dev_priv->drm.struct_mutex); - req = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = i915_switch_context(req); - i915_add_request_no_flush(req); - if (ret) - return ret; - } + for_each_engine(engine, dev_priv) { + if (engine->last_context == NULL) + continue; ret = intel_engine_idle(engine); if (ret) @@ -3488,6 +3755,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, /** * Finds free space in the GTT aperture and binds the object or a view of it * there. + * @obj: object to bind + * @vm: address space to bind into + * @ggtt_view: global gtt view if applicable + * @alignment: requested alignment + * @flags: mask of PIN_* flags to use */ static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, @@ -3731,7 +4003,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; if (i915_gem_clflush_object(obj, obj->pin_display)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3745,6 +4017,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) /** * Moves a single object to the GTT read, and possibly write domain. + * @obj: object to act on + * @write: ask for write access or read only * * This function returns when the move is complete, including waiting on * flushes to occur. @@ -3816,6 +4090,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) /** * Changes the cache-level of an object across all VMA. 
+ * @obj: object to act on + * @cache_level: new cache level to set for the object * * After this function returns, the object will be in the new cache-level * across all GTT and the contents of the backing storage will be coherent, @@ -3925,11 +4201,9 @@ out: * object is now coherent at its new cache level (with respect * to the access domain). */ - if (obj->cache_dirty && - obj->base.write_domain != I915_GEM_DOMAIN_CPU && - cpu_write_needs_clflush(obj)) { + if (obj->cache_dirty && cpu_write_needs_clflush(obj)) { if (i915_gem_clflush_object(obj, true)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); } return 0; @@ -3967,7 +4241,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; enum i915_cache_level level; @@ -4097,6 +4371,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, /** * Moves a single object to the CPU read, and possibly write domain. + * @obj: object to act on + * @write: requesting write or read-only access * * This function returns when the move is complete, including waiting on * flushes to occur. @@ -4159,7 +4435,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) static int i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_gem_request *request, *target = NULL; @@ -4195,10 +4471,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return 0; ret = __i915_wait_request(target, true, NULL, NULL); - if (ret == 0) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - - i915_gem_request_unreference__unlocked(target); + i915_gem_request_unreference(target); return ret; } @@ -4256,7 +4529,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, uint32_t alignment, uint64_t flags) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; unsigned bound; int ret; @@ -4420,7 +4693,7 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_madvise *args = data; struct drm_i915_gem_object *obj; int ret; @@ -4490,7 +4763,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->fence_reg = I915_FENCE_REG_NONE; obj->madv = I915_MADV_WILLNEED; - i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } static const struct drm_i915_gem_object_ops i915_gem_object_ops = { @@ -4499,21 +4772,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .put_pages = i915_gem_object_put_pages_gtt, }; -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size) { struct drm_i915_gem_object *obj; struct address_space *mapping; gfp_t mask; + int 
ret; obj = i915_gem_object_alloc(dev); if (obj == NULL) - return NULL; + return ERR_PTR(-ENOMEM); - if (drm_gem_object_init(dev, &obj->base, size) != 0) { - i915_gem_object_free(obj); - return NULL; - } + ret = drm_gem_object_init(dev, &obj->base, size); + if (ret) + goto fail; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { @@ -4522,7 +4795,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, mask |= __GFP_DMA32; } - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); i915_gem_object_init(obj, &i915_gem_object_ops); @@ -4550,6 +4823,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, trace_i915_gem_object_create(obj); return obj; + +fail: + i915_gem_object_free(obj); + + return ERR_PTR(ret); } static bool discard_backing_storage(struct drm_i915_gem_object *obj) @@ -4580,7 +4858,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_vma *vma, *next; intel_runtime_pm_get(dev_priv); @@ -4655,16 +4933,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; - BUG_ON(!view); + GEM_BUG_ON(!view); list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma; return NULL; } @@ -4688,7 +4962,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma) static void i915_gem_stop_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) @@ -4698,27 +4972,28 @@ i915_gem_stop_engines(struct drm_device *dev) int i915_gem_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; mutex_lock(&dev->struct_mutex); - ret = i915_gpu_idle(dev); + ret = i915_gem_wait_for_idle(dev_priv); if (ret) goto err; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); i915_gem_stop_engines(dev); + i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&dev_priv->mm.retire_work); - flush_delayed_work(&dev_priv->mm.idle_work); + cancel_delayed_work_sync(&dev_priv->gt.retire_work); + flush_delayed_work(&dev_priv->gt.idle_work); /* Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state.
*/ - WARN_ON(dev_priv->mm.busy); + WARN_ON(dev_priv->gt.awake); return 0; @@ -4727,40 +5002,9 @@ err: return ret; } -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; - int i, ret; - - if (!HAS_L3_DPF(dev) || !remap_info) - return 0; - - ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3); - if (ret) - return ret; - - /* - * Note: We do not worry about the concurrent register cacheline hang - * here because no other code should access these registers other than - * at initialization time. - */ - for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); - intel_ring_emit(engine, remap_info[i]); - } - - intel_ring_advance(engine); - - return ret; -} - void i915_gem_init_swizzling(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 5 || dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) @@ -4785,7 +5029,7 @@ void i915_gem_init_swizzling(struct drm_device *dev) static void init_unused_ring(struct drm_device *dev, u32 base) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RING_CTL(base), 0); I915_WRITE(RING_HEAD(base), 0); @@ -4812,7 +5056,7 @@ static void init_unused_rings(struct drm_device *dev) int i915_gem_init_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = intel_init_render_ring_buffer(dev); @@ -4860,9 +5104,9 @@ cleanup_render_ring: int i915_gem_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - int ret, j; + int ret; /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4914,59 +5158,10 @@ i915_gem_init_hw(struct drm_device *dev) intel_mocs_init_l3cc_table(dev); /* We can't enable contexts until all firmware is loaded */ - if (HAS_GUC_UCODE(dev)) { - ret = intel_guc_ucode_load(dev); - if (ret) { - DRM_ERROR("Failed to initialize GuC, error %d\n", ret); - ret = -EIO; - goto out; - } - } - - /* - * Increment the next seqno by 0x100 so we have a visible break - * on re-initialisation - */ - ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); + ret = intel_guc_setup(dev); if (ret) goto out; - /* Now it is safe to go back round and do everything else: */ - for_each_engine(engine, dev_priv) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - break; - } - - if (engine->id == RCS) { - for (j = 0; j < NUM_L3_SLICES(dev); j++) { - ret = i915_gem_l3_remap(req, j); - if (ret) - goto err_request; - } - } - - ret = i915_ppgtt_init_ring(req); - if (ret) - goto err_request; - - ret = i915_gem_context_enable(req); - if (ret) - goto err_request; - -err_request: - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("Failed to enable %s, error=%d\n", - engine->name, ret); - i915_gem_cleanup_engines(dev); - break; - } - } - out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; @@ -4974,12 +5169,9 @@ out: int 
i915_gem_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; - i915.enable_execlists = intel_sanitize_enable_execlists(dev, - i915.enable_execlists); - mutex_lock(&dev->struct_mutex); if (!i915.enable_execlists) { @@ -5002,10 +5194,7 @@ int i915_gem_init(struct drm_device *dev) */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = i915_gem_init_userptr(dev); - if (ret) - goto out_unlock; - + i915_gem_init_userptr(dev_priv); i915_gem_init_ggtt(dev); ret = i915_gem_context_init(dev); @@ -5037,19 +5226,11 @@ out_unlock: void i915_gem_cleanup_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) dev_priv->gt.cleanup_engine(engine); - - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. - */ - intel_gpu_reset(dev, ALL_ENGINES); } static void @@ -5062,7 +5243,7 @@ init_engine_lists(struct intel_engine_cs *engine) void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) @@ -5073,7 +5254,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) else dev_priv->num_fence_regs = 8; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) dev_priv->num_fence_regs = I915_READ(vgtif_reg(avail_rs.fence_num)); @@ -5086,7 +5267,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) void i915_gem_load_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; dev_priv->objects = @@ -5114,22 +5295,15 @@ i915_gem_load_init(struct drm_device *dev) init_engine_lists(&dev_priv->engine[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); - INIT_DELAYED_WORK(&dev_priv->mm.retire_work, + INIT_DELAYED_WORK(&dev_priv->gt.retire_work, i915_gem_retire_work_handler); - INIT_DELAYED_WORK(&dev_priv->mm.idle_work, + INIT_DELAYED_WORK(&dev_priv->gt.idle_work, i915_gem_idle_work_handler); + init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - /* - * Set initial sequence number for requests. - * Using this number allows the wraparound to happen early, - * catching any obvious problems. - */ - dev_priv->next_seqno = ((u32)~0 - 0x1100); - dev_priv->last_seqno = ((u32)~0 - 0x1101); - INIT_LIST_HEAD(&dev_priv->mm.fence_list); init_waitqueue_head(&dev_priv->pending_flip_queue); @@ -5148,6 +5322,34 @@ void i915_gem_load_cleanup(struct drm_device *dev) kmem_cache_destroy(dev_priv->objects); } +int i915_gem_freeze_late(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + /* Called just before we write the hibernation image. + * + * We need to update the domain tracking to reflect that the CPU + * will be accessing all the pages to create and restore from the + * hibernation, and so upon restoration those pages will be in the + * CPU domain. 
+ * + * To make sure the hibernation image contains the latest state, + * we update that state just before writing out the image. + */ + + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + return 0; +} + void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -5187,7 +5389,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return -ENOMEM; file->driver_priv = file_priv; - file_priv->dev_priv = dev->dev_private; + file_priv->dev_priv = to_i915(dev); file_priv->file = file; INIT_LIST_HEAD(&file_priv->rps.link); @@ -5233,7 +5435,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(o->base.dev); struct i915_vma *vma; WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); @@ -5254,13 +5456,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma->node.start; WARN(1, "global vma for this object not found. 
(view=%u)\n", view->type); @@ -5286,12 +5485,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5310,23 +5507,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) return false; } -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm) +unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - - BUG_ON(list_empty(&o->vma_list)); + GEM_BUG_ON(list_empty(&o->vma_list)); list_for_each_entry(vma, &o->vma_list, obj_link) { if (vma->is_ggtt && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) + vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) return vma->node.size; } + return 0; } @@ -5347,7 +5539,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) struct page *page; /* Only default objects have per-page dirty tracking */ - if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) + if (WARN_ON(!i915_gem_object_has_struct_page(obj))) return NULL; page = i915_gem_object_get_page(obj, n); @@ -5365,8 +5557,8 @@ i915_gem_object_create_from_data(struct drm_device *dev, size_t bytes; int ret; - obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); - if (IS_ERR_OR_NULL(obj)) + obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); + if (IS_ERR(obj)) return obj; ret = i915_gem_object_set_to_cpu_domain(obj, true); diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 7bf2f3f2968e..3752d5daa4b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, if (obj == NULL) { int ret; - obj = i915_gem_alloc_object(pool->dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + obj = i915_gem_object_create(pool->dev, size); + if (IS_ERR(obj)) + return obj; ret = i915_gem_object_get_pages(obj); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e5acc3916f75..3c97f0e7a003 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -90,6 +90,8 @@ #include "i915_drv.h" #include "i915_trace.h" +#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 + /* This is a HW constraint. The value below is the largest known requirement * I've seen in a spec to date, and that was a workaround for a non-shipping * part. It should be safe to decrease this, but it's more future proof as is. 
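With i915_gem_alloc_object() renamed to i915_gem_object_create() and converted to return ERR_PTR() codes instead of NULL, callers switch from NULL checks to the IS_ERR()/PTR_ERR() idiom, as the batch-pool hunk above shows. A minimal sketch of the new calling convention (hypothetical caller, not from the patch):

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj); /* propagates e.g. -ENOMEM from the allocator */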
@@ -97,28 +99,27 @@ #define GEN6_CONTEXT_ALIGN (64<<10) #define GEN7_CONTEXT_ALIGN 4096 -static size_t get_context_alignment(struct drm_device *dev) +static size_t get_context_alignment(struct drm_i915_private *dev_priv) { - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) return GEN6_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN; } -static int get_context_size(struct drm_device *dev) +static int get_context_size(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; u32 reg; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_GEN(dev_priv)) { case 6: reg = I915_READ(CXT_SIZE); ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) ret = HSW_CXT_TOTAL_SIZE; else ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; @@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev) return ret; } -static void i915_gem_context_clean(struct intel_context *ctx) +static void i915_gem_context_clean(struct i915_gem_context *ctx) { struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct i915_vma *vma, *next; @@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx) void i915_gem_context_free(struct kref *ctx_ref) { - struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + int i; + lockdep_assert_held(&ctx->i915->drm.struct_mutex); trace_i915_context_free(ctx); - if (i915.enable_execlists) - intel_lr_context_free(ctx); - /* * This context is going away and we need to remove all VMAs still * around. This is to handle imported shared objects for which @@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref) i915_ppgtt_put(ctx->ppgtt); - if (ctx->legacy_hw_ctx.rcs_state) - drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct intel_context *ce = &ctx->engine[i]; + + if (!ce->state) + continue; + + WARN_ON(ce->pin_count); + if (ce->ringbuf) + intel_ringbuffer_free(ce->ringbuf); + + drm_gem_object_unreference(&ce->state->base); + } + list_del(&ctx->link); + + ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); kfree(ctx); } @@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) struct drm_i915_gem_object *obj; int ret; - obj = i915_gem_alloc_object(dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + lockdep_assert_held(&dev->struct_mutex); + + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return obj; /* * Try to make the context utilize L3 as well as LLC. @@ -209,22 +224,52 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) return obj; } -static struct intel_context * +static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) +{ + int ret; + + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) { + /* Contexts are only released when no longer active. + * Flush any pending retires to hopefully release some + * stale contexts and try again. 
+ */ + i915_gem_retire_requests(dev_priv); + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static struct i915_gem_context * __create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_gem_context *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); + ret = assign_hw_id(dev_priv, &ctx->hw_id); + if (ret) { + kfree(ctx); + return ERR_PTR(ret); + } + kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->context_list); ctx->i915 = dev_priv; + ctx->ggtt_alignment = get_context_alignment(dev_priv); + if (dev_priv->hw_context_size) { struct drm_i915_gem_object *obj = i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); @@ -232,7 +277,7 @@ __create_hw_context(struct drm_device *dev, ret = PTR_ERR(obj); goto err_out; } - ctx->legacy_hw_ctx.rcs_state = obj; + ctx->engine[RCS].state = obj; } /* Default context will never have a file_priv */ @@ -249,9 +294,13 @@ __create_hw_context(struct drm_device *dev, /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. */ - ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; + ctx->remap_slice = ALL_L3_SLICES(dev_priv); ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; + ctx->ring_size = 4 * PAGE_SIZE; + ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << + GEN8_CTX_ADDRESSING_MODE_SHIFT; + ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); return ctx; @@ -265,44 +314,27 @@ err_out: * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ -static struct intel_context * +static struct i915_gem_context * i915_gem_create_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { - const bool is_global_default_ctx = file_priv == NULL; - struct intel_context *ctx; - int ret = 0; + struct i915_gem_context *ctx; - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev->struct_mutex); ctx = __create_hw_context(dev, file_priv); if (IS_ERR(ctx)) return ctx; - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) { - /* We may need to do things with the shrinker which - * require us to immediately switch back to the default - * context. This can cause a problem as pinning the - * default context also requires GTT space which may not - * be available. To avoid this we always pin the default - * context. 
- */ - ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, - get_context_alignment(dev), 0); - if (ret) { - DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); - goto err_destroy; - } - } - if (USES_FULL_PPGTT(dev)) { struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); - if (IS_ERR_OR_NULL(ppgtt)) { + if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); - ret = PTR_ERR(ppgtt); - goto err_unpin; + idr_remove(&file_priv->context_idr, ctx->user_handle); + i915_gem_context_unreference(ctx); + return ERR_CAST(ppgtt); } ctx->ppgtt = ppgtt; @@ -311,76 +343,102 @@ i915_gem_create_context(struct drm_device *dev, trace_i915_context_create(ctx); return ctx; +} -err_unpin: - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); -err_destroy: - idr_remove(&file_priv->context_idr, ctx->user_handle); - i915_gem_context_unreference(ctx); - return ERR_PTR(ret); +/** + * i915_gem_context_create_gvt - create a GVT GEM context + * @dev: drm device * + * + * This function is used to create a GVT specific GEM context. + * + * Returns: + * pointer to i915_gem_context on success, error pointer if failed + * + */ +struct i915_gem_context * +i915_gem_context_create_gvt(struct drm_device *dev) +{ + struct i915_gem_context *ctx; + int ret; + + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return ERR_PTR(-ENODEV); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ERR_PTR(ret); + + ctx = i915_gem_create_context(dev, NULL); + if (IS_ERR(ctx)) + goto out; + + ctx->execlists_force_single_submission = true; + ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ +out: + mutex_unlock(&dev->struct_mutex); + return ctx; } -static void i915_gem_context_unpin(struct intel_context *ctx, +static void i915_gem_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { if (i915.enable_execlists) { intel_lr_context_unpin(ctx, engine); } else { - if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); + struct intel_context *ce = &ctx->engine[engine->id]; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + i915_gem_context_unreference(ctx); } } void i915_gem_context_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - int i; + struct drm_i915_private *dev_priv = to_i915(dev); + + lockdep_assert_held(&dev->struct_mutex); if (i915.enable_execlists) { - struct intel_context *ctx; + struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->context_list, link) intel_lr_context_reset(dev_priv, ctx); } - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; - - if (engine->last_context) { - i915_gem_context_unpin(engine->last_context, engine); - engine->last_context = NULL; - } - } - - /* Force the GPU state to be reinitialised on enabling */ - dev_priv->kernel_context->legacy_hw_ctx.initialized = false; + i915_gem_context_lost(dev_priv); } int i915_gem_context_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_gem_context *ctx; /* Init should only be called once per module load. Eventually the * restriction on the context_disabled check can be loosened. 
*/ if (WARN_ON(dev_priv->kernel_context)) return 0; - if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { + if (intel_vgpu_active(dev_priv) && + HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (!i915.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } } + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&dev_priv->context_hw_ida); + if (i915.enable_execlists) { /* NB: intentionally left blank. We will allocate our own * backing objects as we need them, thank you very much */ dev_priv->hw_context_size = 0; - } else if (HAS_HW_CONTEXTS(dev)) { - dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); + } else if (HAS_HW_CONTEXTS(dev_priv)) { + dev_priv->hw_context_size = + round_up(get_context_size(dev_priv), 4096); if (dev_priv->hw_context_size > (1<<20)) { DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", dev_priv->hw_context_size); @@ -403,67 +461,60 @@ int i915_gem_context_init(struct drm_device *dev) return 0; } -void i915_gem_context_fini(struct drm_device *dev) +void i915_gem_context_lost(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *dctx = dev_priv->kernel_context; - int i; - - if (dctx->legacy_hw_ctx.rcs_state) { - /* The only known way to stop the gpu from accessing the hw context is - * to reset it. Do this as the very last operation to avoid confusing - * other code, leading to spurious errors. */ - intel_gpu_reset(dev, ALL_ENGINES); + struct intel_engine_cs *engine; - /* When default context is created and switched to, base object refcount - * will be 2 (+1 from object creation and +1 from do_switch()). - * i915_gem_context_fini() will be called after gpu_idle() has switched - * to default context. So we need to unreference the base object once - * to offset the do_switch part, so that i915_gem_context_unreference() - * can then free the base object correctly. 
*/ - WARN_ON(!dev_priv->engine[RCS].last_context); - - i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - } - - for (i = I915_NUM_ENGINES; --i >= 0;) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; + lockdep_assert_held(&dev_priv->drm.struct_mutex); + for_each_engine(engine, dev_priv) { if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL; } } - i915_gem_context_unreference(dctx); - dev_priv->kernel_context = NULL; -} + /* Force the GPU state to be restored on enabling */ + if (!i915.enable_execlists) { + struct i915_gem_context *ctx; -int i915_gem_context_enable(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - int ret; + list_for_each_entry(ctx, &dev_priv->context_list, link) { + if (!i915_gem_context_is_default(ctx)) + continue; - if (i915.enable_execlists) { - if (engine->init_context == NULL) - return 0; + for_each_engine(engine, dev_priv) + ctx->engine[engine->id].initialised = false; - ret = engine->init_context(req); - } else - ret = i915_switch_context(req); + ctx->remap_slice = ALL_L3_SLICES(dev_priv); + } - if (ret) { - DRM_ERROR("ring init context: %d\n", ret); - return ret; + for_each_engine(engine, dev_priv) { + struct intel_context *kce = + &dev_priv->kernel_context->engine[engine->id]; + + kce->initialised = true; + } } +} - return 0; +void i915_gem_context_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_gem_context *dctx = dev_priv->kernel_context; + + lockdep_assert_held(&dev->struct_mutex); + + i915_gem_context_unreference(dctx); + dev_priv->kernel_context = NULL; + + ida_destroy(&dev_priv->context_hw_ida); } static int context_idr_cleanup(int id, void *p, void *data) { - struct intel_context *ctx = p; + struct i915_gem_context *ctx = p; + ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_unreference(ctx); return 0; } @@ -471,7 +522,7 @@ static int context_idr_cleanup(int id, void *p, void *data) int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; idr_init(&file_priv->context_idr); @@ -491,31 +542,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + lockdep_assert_held(&dev->struct_mutex); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); } -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) -{ - struct intel_context *ctx; - - ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id); - if (!ctx) - return ERR_PTR(-ENOENT); - - return ctx; -} - static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(engine->dev) ? - hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : + i915_semaphore_is_enabled(dev_priv) ? + hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : 0; int len, ret; @@ -524,21 +566,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. 
*/ - if (IS_GEN6(engine->dev)) { + if (IS_GEN6(dev_priv)) { ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } /* These flags are for resource streamer on HSW+ */ - if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) + if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_INFO(engine->dev)->gen < 8) + else if (INTEL_GEN(dev_priv) < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); len = 4; - if (INTEL_INFO(engine->dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) len += 2 + (num_rings ? 4*num_rings + 6 : 0); ret = intel_ring_begin(req, len); @@ -546,14 +588,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -568,7 +610,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_SET_CONTEXT); intel_ring_emit(engine, - i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | + i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP @@ -576,14 +618,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) */ intel_ring_emit(engine, MI_NOOP); - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -609,45 +651,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; } -static inline bool skip_rcs_switch(struct intel_engine_cs *engine, - struct intel_context *to) +static int remap_l3(struct drm_i915_gem_request *req, int slice) +{ + u32 *remap_info = req->i915->l3_parity.remap_info[slice]; + struct intel_engine_cs *engine = req->engine; + int i, ret; + + if (!remap_info) + return 0; + + ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2); + if (ret) + return ret; + + /* + * Note: We do not worry about the concurrent register cacheline hang + * here because no other code should access these registers other than + * at initialization time. 
+ */ + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); + for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { + intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); + intel_ring_emit(engine, remap_info[i]); + } + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); + + return 0; +} + +static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { if (to->remap_slice) return false; - if (!to->legacy_hw_ctx.initialized) + if (!to->engine[RCS].initialised) return false; - if (to->ppgtt && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; return to == engine->last_context; } static bool -needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) +needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { - if (!to->ppgtt) + if (!ppgtt) return false; + /* Always load the ppgtt on first use */ + if (!engine->last_context) + return true; + + /* Same context without new entries, skip */ if (engine->last_context == to && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; if (engine->id != RCS) return true; - if (INTEL_INFO(engine->dev)->gen < 8) + if (INTEL_GEN(engine->i915) < 8) return true; return false; } static bool -needs_pd_load_post(struct intel_context *to, u32 hw_flags) +needs_pd_load_post(struct i915_hw_ppgtt *ppgtt, + struct i915_gem_context *to, + u32 hw_flags) { - if (!to->ppgtt) + if (!ppgtt) return false; if (!IS_GEN8(to->i915)) @@ -661,18 +741,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags) static int do_rcs_switch(struct drm_i915_gem_request *req) { - struct intel_context *to = req->ctx; + struct i915_gem_context *to = req->ctx; struct intel_engine_cs *engine = req->engine; - struct intel_context *from; + struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; + struct i915_gem_context *from; u32 hw_flags; int ret, i; - if (skip_rcs_switch(engine, to)) + if (skip_rcs_switch(ppgtt, engine, to)) return 0; /* Trying to pin first makes error handling easier. */ - ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, - get_context_alignment(engine->dev), + ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, + to->ggtt_alignment, 0); if (ret) return ret; @@ -694,37 +775,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * * XXX: We need a real interface to do this instead of trickery. */ - ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); + ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); if (ret) goto unpin_out; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting * a context."*/ trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) goto unpin_out; } - if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) + if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) /* NB: If we inhibit the restore, the context is not allowed to * die because future work may end up depending on valid address * space. 
This means we must enforce that a page table load * occur when this occurs. */ hw_flags = MI_RESTORE_INHIBIT; - else if (to->ppgtt && - intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings) + else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings) hw_flags = MI_FORCE_RESTORE; else hw_flags = 0; - /* We should never emit switch_mm more than once */ - WARN_ON(needs_pd_load_pre(engine, to) && - needs_pd_load_post(to, hw_flags)); - if (to != from || (hw_flags & MI_FORCE_RESTORE)) { ret = mi_set_context(req, hw_flags); if (ret) @@ -738,8 +814,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); + from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -747,10 +823,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. */ - from->legacy_hw_ctx.rcs_state->dirty = 1; + from->engine[RCS].state->dirty = 1; /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(from->engine[RCS].state); i915_gem_context_unreference(from); } i915_gem_context_reference(to); @@ -759,9 +835,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) /* GEN8 does *not* require an explicit reload if the PDPs have been * setup, and we do not wish to move them. */ - if (needs_pd_load_post(to, hw_flags)) { + if (needs_pd_load_post(ppgtt, to, hw_flags)) { trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); /* The hardware context switch is emitted, but we haven't * actually changed the state - so it's probably safe to bail * here. 
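The L3 slice-remap loop just below clears one pending slice per pass with an open-coded (1 << i) test. For illustration only, the same walk expressed with the kernel's generic for_each_set_bit() helper; this is an alternative idiom, not what the patch uses:

#include <linux/bitops.h>

unsigned long pending = to->remap_slice;
int slice;

/* Sketch: visit only the set bits instead of scanning all MAX_L3_SLICES. */
for_each_set_bit(slice, &pending, MAX_L3_SLICES) {
	ret = remap_l3(req, slice);
	if (ret)
		return ret;

	to->remap_slice &= ~BIT(slice);
}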
Still, let the user know something dangerous has @@ -771,33 +847,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) return ret; } - if (to->ppgtt) - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + if (ppgtt) + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); for (i = 0; i < MAX_L3_SLICES; i++) { if (!(to->remap_slice & (1<<i))) continue; - ret = i915_gem_l3_remap(req, i); + ret = remap_l3(req, i); if (ret) return ret; to->remap_slice &= ~(1<<i); } - if (!to->legacy_hw_ctx.initialized) { + if (!to->engine[RCS].initialised) { if (engine->init_context) { ret = engine->init_context(req); if (ret) return ret; } - to->legacy_hw_ctx.initialized = true; + to->engine[RCS].initialised = true; } return 0; unpin_out: - i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(to->engine[RCS].state); return ret; } @@ -817,25 +893,24 @@ unpin_out: int i915_switch_context(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = req->i915; WARN_ON(i915.enable_execlists); - WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + lockdep_assert_held(&req->i915->drm.struct_mutex); - if (engine->id != RCS || - req->ctx->legacy_hw_ctx.rcs_state == NULL) { - struct intel_context *to = req->ctx; + if (!req->ctx->engine[engine->id].state) { + struct i915_gem_context *to = req->ctx; + struct i915_hw_ppgtt *ppgtt = + to->ppgtt ?: req->i915->mm.aliasing_ppgtt; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { int ret; trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) return ret; - /* Doing a PD load always reloads the page dirs */ - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } if (to != engine->last_context) { @@ -861,7 +936,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!contexts_enabled(dev)) @@ -890,7 +965,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (args->pad != 0) @@ -903,13 +978,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); } - idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); + idr_remove(&file_priv->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); @@ -922,14 +997,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -951,6 +1026,9 @@ int 
i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, else args->value = to_i915(dev)->ggtt.base.total; break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE); + break; default: ret = -EINVAL; break; @@ -965,14 +1043,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -996,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0; } break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + if (args->size) { + ret = -EINVAL; + } else { + if (args->value) + ctx->flags |= CONTEXT_NO_ERROR_CAPTURE; + else + ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE; + } + break; default: ret = -EINVAL; break; @@ -1004,3 +1092,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, return ret; } + +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_reset_stats *args = data; + struct i915_ctx_hang_stats *hs; + struct i915_gem_context *ctx; + int ret; + + if (args->flags || args->pad) + return -EINVAL; + + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) { + mutex_unlock(&dev->struct_mutex); + return PTR_ERR(ctx); + } + hs = &ctx->hang_stats; + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = hs->batch_active; + args->batch_pending = hs->batch_pending; + + mutex_unlock(&dev->struct_mutex); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h new file mode 100644 index 000000000000..91315557e421 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h @@ -0,0 +1,45 @@ +/* + * Copyright 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
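For the new context parameter handled above, a hedged userspace sketch of opting a context out of error capture through the setparam ioctl; the structure and ioctl come from include/uapi/drm/i915_drm.h, while the fd and context id are placeholders:

#include <sys/ioctl.h>
#include <i915_drm.h>

/* Sketch: set CONTEXT_NO_ERROR_CAPTURE on one GEM context.  args->size
 * must be zero for this parameter, so only param/value are filled in. */
static int example_disable_error_capture(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
		.value = 1,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}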
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _I915_GEM_DMABUF_H_ +#define _I915_GEM_DMABUF_H_ + +#include <linux/dma-buf.h> + +static inline struct reservation_object * +i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj) +{ + struct dma_buf *dma_buf; + + if (obj->base.dma_buf) + dma_buf = obj->base.dma_buf; + else if (obj->base.import_attach) + dma_buf = obj->base.import_attach->dmabuf; + else + return NULL; + + return dma_buf->resv; +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ea1f8d1bd228..3c1280ec7ff6 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -33,6 +33,37 @@ #include "intel_drv.h" #include "i915_trace.h" +static int switch_to_pinned_context(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + + if (i915.enable_execlists) + return 0; + + for_each_engine(engine, dev_priv) { + struct drm_i915_gem_request *req; + int ret; + + if (engine->last_context == NULL) + continue; + + if (engine->last_context == dev_priv->kernel_context) + continue; + + req = i915_gem_request_alloc(engine, dev_priv->kernel_context); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = i915_switch_context(req); + i915_add_request_no_flush(req); + if (ret) + return ret; + } + + return 0; +} + + static bool mark_free(struct i915_vma *vma, struct list_head *unwind) { @@ -150,11 +181,19 @@ none: /* Only idle the GPU and repeat the search once */ if (pass++ == 0) { - ret = i915_gpu_idle(dev); + struct drm_i915_private *dev_priv = to_i915(dev); + + if (i915_is_ggtt(vm)) { + ret = switch_to_pinned_context(dev_priv); + if (ret) + return ret; + } + + ret = i915_gem_wait_for_idle(dev_priv); if (ret) return ret; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); goto search_again; } @@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) trace_i915_gem_evict_vm(vm); if (do_idle) { - ret = i915_gpu_idle(vm->dev); + struct drm_i915_private *dev_priv = to_i915(vm->dev); + + if (i915_is_ggtt(vm)) { + ret = switch_to_pinned_context(dev_priv); + if (ret) + return ret; + } + + ret = i915_gem_wait_for_idle(dev_priv); if (ret) return ret; - i915_gem_retire_requests(vm->dev); + i915_gem_retire_requests(dev_priv); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 33df74d98269..1978633e7549 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma) static int i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head *vmas, - struct intel_context *ctx, + struct i915_gem_context *ctx, bool *need_relocs) { struct drm_i915_gem_object *obj; @@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct i915_address_space *vm; struct list_head ordered_vmas; struct list_head pinned_vmas; - bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; + bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; int retry; i915_gem_retire_requests_ring(engine); @@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct 
intel_engine_cs *engine, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_relocation_entry *reloc; struct i915_address_space *vm; @@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (flush_chipset) - i915_gem_chipset_flush(req->engine->dev); + i915_gem_chipset_flush(req->engine->i915); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev, return 0; } -static struct intel_context * +static struct i915_gem_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *engine, const u32 ctx_id) { - struct intel_context *ctx = NULL; + struct i915_gem_context *ctx = NULL; struct i915_ctx_hang_stats *hs; if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); - ctx = i915_gem_context_get(file->driver_priv, ctx_id); + ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); if (IS_ERR(ctx)) return ctx; @@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EIO); } - if (i915.enable_execlists && !ctx->engine[engine->id].state) { - int ret = intel_lr_context_deferred_alloc(ctx, engine); - if (ret) { - DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); - return ERR_PTR(ret); - } - } - return ctx; } @@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { i915_gem_request_assign(&obj->last_fenced_req, req); if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, &dev_priv->mm.fence_list); } @@ -1150,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret, i; if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { @@ -1233,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, { struct drm_device *dev = params->dev; struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 exec_start, exec_len; int instp_mode; u32 instp_mask; @@ -1336,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file) /* Check whether the file_priv has already selected one ring. */ if ((int)file_priv->bsd_ring < 0) { /* If not, use the ping-pong mechanism to select one. 
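For the BSD ring selection that follows, a minimal sketch of the ping-pong policy: a file's first execbuf takes the current index and flips it, alternating new clients between the two VCS engines. The state structure here is a hypothetical stand-in for dev_priv->mm:

struct example_mm {
	int bsd_ring_dispatch_index;	/* ping-pongs between 0 and 1 */
};

/* Sketch: round-robin of two; each caller takes the current index and
 * leaves the other one for the next caller. */
static int example_pick_bsd_ring(struct example_mm *mm)
{
	int ring = mm->bsd_ring_dispatch_index;

	mm->bsd_ring_dispatch_index ^= 1;
	return ring;
}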
*/ - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; dev_priv->mm.bsd_ring_dispatch_index ^= 1; - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } return file_priv->bsd_ring; @@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct i915_address_space *vm; struct i915_execbuffer_params params_master; /* XXX: will be removed later */ struct i915_execbuffer_params *params = &params_master; @@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dispatch_flags = 0; if (args->flags & I915_EXEC_SECURE) { - if (!file->is_master || !capable(CAP_SYS_ADMIN)) + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) return -EPERM; dispatch_flags |= I915_DISPATCH_SECURE; @@ -1485,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dispatch_flags |= I915_DISPATCH_RS; } + /* Take a local wakeref for preparing to dispatch the execbuf as + * we expect to access the hardware fairly frequently in the + * process. Upon first dispatch, we acquire another prolonged + * wakeref that we hold until the GPU has been idle for at least + * 100ms. + */ intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); @@ -1561,7 +1559,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, batch_obj, args->batch_start_offset, args->batch_len, - file->is_master); + drm_is_current_master(file)); if (IS_ERR(parsed_batch_obj)) { ret = PTR_ERR(parsed_batch_obj); goto err; diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index a2b938ec01a7..251d7a95af89 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -58,7 +58,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; @@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, static void i915_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (obj) { @@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, static void i830_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t val; if (obj) { @@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) static void i915_gem_write_fence(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Ensure that all CPU reads are completed before installing a fence * and all writes before removing the fence.
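The dev->dev_private conversions running through this fence hunk (and the rest of the series) rely on struct drm_device now being embedded in drm_i915_private as the 'drm' member; a sketch of the accessor pattern under that assumption:

#include <linux/kernel.h>

/* Sketch: recover the private structure from the embedded drm_device by
 * container_of() instead of chasing the old dev->dev_private pointer. */
static inline struct drm_i915_private *example_to_i915(struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}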
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int reg = fence_number(dev_priv, fence); i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) int i915_gem_object_put_fence(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_fence_reg *fence; int ret; @@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) static struct drm_i915_fence_reg * i915_find_fence_reg(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_fence_reg *reg, *avail; int i; @@ -367,7 +367,7 @@ int i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool enable = obj->tiling_mode != I915_TILING_NONE; struct drm_i915_fence_reg *reg; int ret; @@ -433,7 +433,7 @@ bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); WARN_ON(!ggtt_vma || @@ -457,7 +457,7 @@ void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); dev_priv->fence_regs[obj->fence_reg].pin_count--; } @@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) */ void i915_gem_restore_fences(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { @@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_detect_bit_6_swizzle(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page) void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int i; if (obj->bit_17 == NULL) return; i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); + for_each_sgt_page(page, sgt_iter, obj->pages) { char new_bit_17 = page_to_phys(page) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { @@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int page_count = obj->base.size >> PAGE_SHIFT; int i; @@ -790,8 +791,9 @@ 
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) } i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) + + for_each_sgt_page(page, sgt_iter, obj->pages) { + if (page_to_phys(page) & (1 << 17)) __set_bit(i, obj->bit_17); else __clear_bit(i, obj->bit_17); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 92acdff9dad3..10f1e32767e6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -93,6 +93,13 @@ * */ +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct i915_address_space *vm) +{ + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, base); +} + static int i915_get_ggtt_vma_pages(struct i915_vma *vma); @@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { .type = I915_GGTT_VIEW_ROTATED, }; -static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; bool has_full_48bit_ppgtt; - has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; - has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; - has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; + has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; + has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; + has_full_48bit_ppgtt = + IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) has_full_ppgtt = false; /* emulation is too hard */ + if (!has_aliasing_ppgtt) + return 0; + /* * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. */ - if (INTEL_INFO(dev)->gen < 9 && - (enable_ppgtt == 0 || !has_aliasing_ppgtt)) + if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; if (enable_ppgtt == 1) @@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); return 0; } #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } - if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) + if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) return has_full_48bit_ppgtt ? 3 : 2; else return has_aliasing_ppgtt ? 
1 : 0; @@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev, static int gen8_init_scratch(struct i915_address_space *vm) { struct drm_device *dev = vm->dev; + int ret; vm->scratch_page = alloc_scratch_page(dev); if (IS_ERR(vm->scratch_page)) @@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch_pt = alloc_pt(dev); if (IS_ERR(vm->scratch_pt)) { - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pt); + ret = PTR_ERR(vm->scratch_pt); + goto free_scratch_page; } vm->scratch_pd = alloc_pd(dev); if (IS_ERR(vm->scratch_pd)) { - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pd); + ret = PTR_ERR(vm->scratch_pd); + goto free_pt; } if (USES_FULL_48BIT_PPGTT(dev)) { vm->scratch_pdp = alloc_pdp(dev); if (IS_ERR(vm->scratch_pdp)) { - free_pd(dev, vm->scratch_pd); - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pdp); + ret = PTR_ERR(vm->scratch_pdp); + goto free_pd; } } @@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; + +free_pd: + free_pd(dev, vm->scratch_pd); +free_pt: + free_pt(dev, vm->scratch_pt); +free_scratch_page: + free_scratch_page(dev, vm->scratch_page); + + return ret; } static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) @@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(vm->dev)) + if (intel_vgpu_active(to_i915(vm->dev))) gen8_ppgtt_notify_vgt(ppgtt, false); if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) @@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 0, 0, GEN8_PML4E_SHIFT); - if (intel_vgpu_active(ppgtt->base.dev)) { + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { ret = gen8_preallocate_top_level_pdps(ppgtt); if (ret) goto free_scratch; } } - if (intel_vgpu_active(ppgtt->base.dev)) + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) gen8_ppgtt_notify_vgt(ppgtt, true); return 0; @@ -1552,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) struct i915_page_table *unused; gen6_pte_t scratch_pte; uint32_t pd_entry; - uint32_t pte, pde, temp; + uint32_t pte, pde; uint32_t start = ppgtt->base.start, length = ppgtt->base.total; scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, true, 0); - gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) { u32 expected; gen6_pte_t *pt_vaddr; const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); @@ -1622,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv, { struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_page_table *pt; - uint32_t pde, temp; + uint32_t pde; - gen6_for_each_pde(pt, pd, start, length, temp, pde) + gen6_for_each_pde(pt, pd, start, length, pde) gen6_write_pde(pd, pde, pt); /* Make sure write is complete before other code can use this page @@ -1665,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, return 0; } -static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), 
get_pd_offset(ppgtt)); - return 0; -} - static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { @@ -1713,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - + struct drm_i915_private *dev_priv = req->i915; I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - - POSTING_READ(RING_PP_DIR_DCLV(engine)); - return 0; } static void gen8_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) { @@ -1739,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev) static void gen7_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; uint32_t ecochk, ecobits; @@ -1764,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev) static void gen6_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t ecochk, gab_ctl, ecobits; ecobits = I915_READ(GAC_ECO_BITS); @@ -1821,20 +1823,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - gen6_pte_t *pt_vaddr; unsigned first_entry = start >> PAGE_SHIFT; unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES; - struct sg_page_iter sg_iter; + gen6_pte_t *pt_vaddr = NULL; + struct sgt_iter sgt_iter; + dma_addr_t addr; - pt_vaddr = NULL; - for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + for_each_sgt_dma(addr, sgt_iter, pages) { if (pt_vaddr == NULL) pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); pt_vaddr[act_pte] = - vm->pte_encode(sg_page_iter_dma_address(&sg_iter), - cache_level, true, flags); + vm->pte_encode(addr, cache_level, true, flags); if (++act_pte == GEN6_PTES) { kunmap_px(ppgtt, pt_vaddr); @@ -1843,6 +1844,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, act_pte = 0; } } + if (pt_vaddr) kunmap_px(ppgtt, pt_vaddr); } @@ -1857,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; uint32_t start, length, start_save, length_save; - uint32_t pde, temp; + uint32_t pde; int ret; if (WARN_ON(start_in + length_in > ppgtt->base.total)) @@ -1873,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, * need allocation. The second stage marks use ptes within the page * tables. 
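The two-stage allocation walk above advances one page directory entry at a time; a sketch of the per-iteration span computation that gen6_for_each_pde() performs, assuming the existing GEN6_PDE_SHIFT constant (each PDE covers 1 << GEN6_PDE_SHIFT bytes of GPU address space):

/* Sketch: bytes consumed by one gen6_for_each_pde() step, clamped to the
 * next PDE boundary or the remaining length, whichever comes first. */
static u32 example_pde_step(u32 start, u32 length)
{
	u32 span = ALIGN(start + 1, 1 << GEN6_PDE_SHIFT) - start;

	return min(span, length);
}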
*/ - gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { if (pt != vm->scratch_pt) { WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); continue; @@ -1898,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, start = start_save; length = length_save; - gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); bitmap_zero(tmp_bitmap, GEN6_PTES); @@ -1967,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm) static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_page_directory *pd = &ppgtt->pd; + struct drm_device *dev = vm->dev; struct i915_page_table *pt; uint32_t pde; drm_mm_remove_node(&ppgtt->node); - gen6_for_all_pdes(pt, ppgtt, pde) { + gen6_for_all_pdes(pt, pd, pde) if (pt != vm->scratch_pt) - free_pt(ppgtt->base.dev, pt); - } + free_pt(dev, pt); gen6_free_scratch(vm); } @@ -2041,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, uint64_t start, uint64_t length) { struct i915_page_table *unused; - uint32_t pde, temp; + uint32_t pde; - gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) + gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; } @@ -2055,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) int ret; ppgtt->base.pte_encode = ggtt->base.pte_encode; - if (IS_GEN6(dev)) { + if (intel_vgpu_active(dev_priv) || IS_GEN6(dev)) ppgtt->switch_mm = gen6_mm_switch; - } else if (IS_HASWELL(dev)) { + else if (IS_HASWELL(dev)) ppgtt->switch_mm = hsw_mm_switch; - } else if (IS_GEN7(dev)) { + else if (IS_GEN7(dev)) ppgtt->switch_mm = gen7_mm_switch; - } else + else BUG(); - if (intel_vgpu_active(dev)) - ppgtt->switch_mm = vgpu_mm_switch; - ret = gen6_ppgtt_alloc(ppgtt); if (ret) return ret; @@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm, struct drm_i915_private *dev_priv) { drm_mm_init(&vm->mm, vm->start, vm->total); - vm->dev = dev_priv->dev; + vm->dev = &dev_priv->drm; INIT_LIST_HEAD(&vm->active_list); INIT_LIST_HEAD(&vm->inactive_list); list_add_tail(&vm->global_link, &dev_priv->vm_list); @@ -2123,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm, static void gtt_write_workarounds(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* This function is for gtt related workarounds. 
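The insert_entries conversion above moves from the scatterlist page iterator to the sgt_iter helpers introduced by this series; a sketch of the consuming idiom, with a hypothetical per-page callback standing in for the PTE write:

struct sgt_iter sgt_iter;
dma_addr_t addr;

/* Sketch: for_each_sgt_dma() yields one page-sized DMA address per pass,
 * replacing the for_each_sg_page() + sg_page_iter_dma_address() pairing. */
for_each_sgt_dma(addr, sgt_iter, pages)
	example_write_pte(addr);	/* hypothetical consumer */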
This function is * called on driver load and after a GPU reset, so you can place @@ -2140,9 +2140,9 @@ static void gtt_write_workarounds(struct drm_device *dev) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); } -int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; ret = __hw_ppgtt_init(dev, ppgtt); @@ -2179,20 +2179,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev) return 0; } -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req) -{ - struct drm_i915_private *dev_priv = req->i915; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - - if (i915.enable_execlists) - return 0; - - if (!ppgtt) - return 0; - - return ppgtt->switch_mm(ppgtt, req); -} - struct i915_hw_ppgtt * i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) { @@ -2257,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv) if (unlikely(ggtt->do_idle_maps)) { dev_priv->mm.interruptible = false; - if (i915_gpu_idle(dev_priv->dev)) { - DRM_ERROR("Couldn't idle GPU\n"); + if (i915_gem_wait_for_idle(dev_priv)) { + DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); } @@ -2275,12 +2261,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } -void i915_check_and_clear_faults(struct drm_device *dev) +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; for_each_engine(engine, dev_priv) { @@ -2324,7 +2309,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, true); @@ -2352,29 +2337,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) #endif } +static void gen8_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level level, + u32 unused) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + + (offset >> PAGE_SHIFT); + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + gen8_set_pte(pte, gen8_pte_encode(addr, level, true)); + + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, enum i915_cache_level level, u32 unused) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen8_pte_t __iomem *gtt_entries = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; /* shut up gcc */ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen8_pte_t __iomem *gtt_entries; + gen8_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_dma_address(sg_iter.sg) + - (sg_iter.sg_pgoffset << PAGE_SHIFT); - gen8_set_pte(&gtt_entries[i], - gen8_pte_encode(addr, level, true)); - i++; + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = gen8_pte_encode(addr, level, true); + gen8_set_pte(&gtt_entries[i++], gtt_entry); } /* @@ -2385,8 +2390,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * hardware should work, we must keep this posting read for paranoia. */ if (i != 0) - WARN_ON(readq(&gtt_entries[i-1]) - != gen8_pte_encode(addr, level, true)); + WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -2424,6 +2428,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm, stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); } +static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level level, + u32 flags) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + + (offset >> PAGE_SHIFT); + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + iowrite32(vm->pte_encode(addr, level, true, flags), pte); + + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + /* * Binds an object into the global gtt with the specified cache level. The object * will be accessible to the GPU via commands whose operands reference offsets @@ -2436,21 +2462,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen6_pte_t __iomem *gtt_entries = - (gen6_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen6_pte_t __iomem *gtt_entries; + gen6_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); - i++; + gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = vm->pte_encode(addr, level, true, flags); + iowrite32(gtt_entry, &gtt_entries[i++]); } /* XXX: This serves as a posting read to make sure that the PTE has @@ -2459,10 +2485,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * of NUMA access patterns. Therefore, even with the way we assume * hardware should work, we must keep this posting read for paranoia. */ - if (i != 0) { - unsigned long gtt = readl(&gtt_entries[i-1]); - WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); - } + if (i != 0) + WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important.
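A sketch of the posting-read pattern both insert_entries paths above now share: remember the last PTE value actually written and compare it with a readback. The read forces the preceding writes out of the write-combining buffer, and the WARN_ON catches a PTE write that never landed:

gen8_pte_t last = 0;

for_each_sgt_dma(addr, sgt_iter, st) {
	last = gen8_pte_encode(addr, level, true);
	gen8_set_pte(&gtt_entries[i++], last);
}

/* The posting read doubles as a lost-write check. */
if (i != 0)
	WARN_ON(readq(&gtt_entries[i - 1]) != last);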
We * want to flush the TLBs only after we're certain all the PTE updates @@ -2474,13 +2498,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } +static void nop_clear_range(struct i915_address_space *vm, + uint64_t start, + uint64_t length, + bool use_scratch) +{ +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, uint64_t start, uint64_t length, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen8_pte_t scratch_pte, __iomem *gtt_base = @@ -2512,7 +2543,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen6_pte_t scratch_pte, __iomem *gtt_base = @@ -2538,12 +2569,30 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } +static void i915_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level cache_level, + u32 unused) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + unsigned int flags = (cache_level == I915_CACHE_NONE) ? + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, enum i915_cache_level cache_level, u32 unused) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; int rpm_atomic_seq; @@ -2561,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, uint64_t length, bool unused) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; int rpm_atomic_seq; @@ -2642,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { struct drm_device *dev = vma->vm->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = vma->obj; const uint64_t size = min_t(uint64_t, obj->base.size, @@ -2668,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma) void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool interruptible; interruptible = do_idling(dev_priv); @@ -2727,11 +2776,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, i915_address_space_init(&ggtt->base, dev_priv); ggtt->base.total += PAGE_SIZE; - if (intel_vgpu_active(dev)) { - ret = intel_vgt_balloon(dev); - if (ret) - return ret; - } + ret = intel_vgt_balloon(dev_priv); + if (ret) + return ret; if (!HAS_LLC(dev)) ggtt->base.mm.color_adjust = i915_gtt_color_adjust; @@ -2831,8 +2878,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) i915_gem_cleanup_stolen(dev); if (drm_mm_initialized(&ggtt->base.mm)) { - if (intel_vgpu_active(dev)) - intel_vgt_deballoon(); + intel_vgt_deballoon(dev_priv); drm_mm_takedown(&ggtt->base.mm); list_del(&ggtt->base.global_link); @@ -3069,13 +3115,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ret = ggtt_probe_common(dev, ggtt->size); - ggtt->base.clear_range = gen8_ggtt_clear_range; - if (IS_CHERRYVIEW(dev_priv)) - ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; - else - ggtt->base.insert_entries = gen8_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->base.insert_page = gen8_ggtt_insert_page; + ggtt->base.clear_range = nop_clear_range; + if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) + ggtt->base.clear_range = gen8_ggtt_clear_range; + + ggtt->base.insert_entries = gen8_ggtt_insert_entries; + if (IS_CHERRYVIEW(dev_priv)) + ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; return ret; } @@ -3108,6 +3157,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ret = ggtt_probe_common(dev, ggtt->size); ggtt->base.clear_range = gen6_ggtt_clear_range; + ggtt->base.insert_page = gen6_ggtt_insert_page; ggtt->base.insert_entries = gen6_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; @@ -3129,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) struct drm_i915_private *dev_priv = to_i915(dev); int ret; - ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); + ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); if (!ret) { DRM_ERROR("failed to set up gmch\n"); return -EIO; @@ -3138,7 +3188,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, &ggtt->mappable_base, &ggtt->mappable_end); - ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); + ggtt->do_idle_maps = 
needs_idle_maps(&dev_priv->drm); + ggtt->base.insert_page = i915_ggtt_insert_page; ggtt->base.insert_entries = i915_ggtt_insert_entries; ggtt->base.clear_range = i915_ggtt_clear_range; ggtt->base.bind_vma = ggtt_bind_vma; @@ -3219,14 +3270,6 @@ int i915_ggtt_init_hw(struct drm_device *dev) if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); #endif - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). - */ - i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; @@ -3250,9 +3293,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; struct i915_vma *vma; - bool flush; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); /* First fill our portion of the GTT with scratch pages */ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, @@ -3260,19 +3302,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) /* Cache flush objects bound into GGTT and rebind them. */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - flush = false; list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->vm != &ggtt->base) continue; WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); - - flush = true; } - if (flush) - i915_gem_clflush_object(obj, obj->pin_display); + if (obj->pin_display) + WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); } if (INTEL_INFO(dev)->gen >= 8) { @@ -3398,9 +3437,11 @@ static struct sg_table * intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { + const size_t n_pages = obj->base.size / PAGE_SIZE; unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; unsigned int size_pages_uv; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + dma_addr_t dma_addr; unsigned long i; dma_addr_t *page_addr_list; struct sg_table *st; @@ -3409,7 +3450,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, int ret = -ENOMEM; /* Allocate a temporary list of source pages for random access. */ - page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, + page_addr_list = drm_malloc_gfp(n_pages, sizeof(dma_addr_t), GFP_TEMPORARY); if (!page_addr_list) @@ -3432,11 +3473,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, /* Populate source page list from the object. 
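With insert_page wired up for all three probe paths above, every address space now offers a single-PTE update alongside the bulk sg-table path; an illustrative (not source-derived) sketch of a caller choosing between them:

/* Sketch: one page versus a whole sg_table; the hook signatures are as
 * declared in i915_address_space later in this diff. */
if (single_page)	/* hypothetical condition */
	vm->insert_page(vm, addr, offset, I915_CACHE_NONE, 0);
else
	vm->insert_entries(vm, st, offset, I915_CACHE_NONE, 0);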
*/ i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); - i++; - } + for_each_sgt_dma(dma_addr, sgt_iter, obj->pages) + page_addr_list[i++] = dma_addr; + GEM_BUG_ON(i != n_pages); st->nents = 0; sg = st->sgl; @@ -3634,3 +3674,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, return obj->base.size; } } + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (WARN_ON(!vma->obj->map_and_fenceable)) + return ERR_PTR(-ENODEV); + + GEM_BUG_ON(!vma->is_ggtt); + GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); + + ptr = vma->iomap; + if (ptr == NULL) { + ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, + vma->node.start, + vma->node.size); + if (ptr == NULL) + return ERR_PTR(-ENOMEM); + + vma->iomap = ptr; + } + + vma->pin_count++; + return ptr; +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0008543d55f6..aa5f31d1c2ed 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -34,6 +34,8 @@ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ +#include <linux/io-mapping.h> + struct drm_i915_file_private; typedef uint32_t gen6_pte_t; @@ -175,6 +177,7 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + void __iomem *iomap; /** Flags and address space this VMA is bound to */ #define GLOBAL_BIND (1<<0) @@ -316,6 +319,11 @@ struct i915_address_space { uint64_t start, uint64_t length, bool use_scratch); + void (*insert_page)(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level cache_level, + u32 flags); void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, uint64_t start, @@ -382,27 +390,27 @@ struct i915_hw_ppgtt { void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; -/* For each pde iterates over every pde between from start until start + length. - * If start, and start+length are not perfectly divisible, the macro will round - * down, and up as needed. The macro modifies pde, start, and length. Dev is - * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, - * and length = 2G effectively iterates over every PDE in the system. - * - * XXX: temp is not actually needed, but it saves doing the ALIGN operation. +/* + * gen6_for_each_pde() iterates over every pde from start until start+length. + * If start and start+length are not perfectly divisible, the macro will round + * down and up as needed. Start=0 and length=2G effectively iterates over + * every PDE in the system. The macro modifies ALL its parameters except 'pd', + * so each of the other parameters should preferably be a simple variable, or + * at most an lvalue with no side-effects! */ -#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ - for (iter = gen6_pde_index(start); \ - length > 0 && iter < I915_PDES ? 
\ - (pt = (pd)->page_table[iter]), 1 : 0; \ - iter++, \ - temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ - temp = min_t(unsigned, temp, length), \ - start += temp, length -= temp) - -#define gen6_for_all_pdes(pt, ppgtt, iter) \ - for (iter = 0; \ - pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \ - iter++) +#define gen6_for_each_pde(pt, pd, start, length, iter) \ + for (iter = gen6_pde_index(start); \ + length > 0 && iter < I915_PDES && \ + (pt = (pd)->page_table[iter], true); \ + ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) + +#define gen6_for_all_pdes(pt, pd, iter) \ + for (iter = 0; \ + iter < I915_PDES && \ + (pt = (pd)->page_table[iter], true); \ + ++iter) static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) { @@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev); -int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); int i915_ppgtt_init_hw(struct drm_device *dev); -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv); @@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) kref_put(&ppgtt->ref, i915_ppgtt_release); } -void i915_check_and_clear_faults(struct drm_device *dev); +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); @@ -560,4 +566,36 @@ size_t i915_ggtt_view_size(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); +/** + * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture + * @vma: VMA to iomap + * + * The passed in VMA has to be pinned in the global GTT mappable region. + * An extra pinning of the VMA is acquired for the return iomapping, + * the caller must call i915_vma_unpin_iomap to relinquish the pinning + * after the iomapping is no longer required. + * + * Callers must hold the struct_mutex. + * + * Returns a valid iomapped pointer or ERR_PTR. + */ +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); + +/** + * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap + * @vma: VMA to unpin + * + * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). + * + * Callers must hold the struct_mutex. This function is only valid to be + * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). 
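Rounding out the kernel-doc above, a short usage sketch of the new iomap pair; struct_mutex is assumed held as documented, and the offset/data/len payload is hypothetical:

void __iomem *ptr;

ptr = i915_vma_pin_iomap(vma);
if (IS_ERR(ptr))
	return PTR_ERR(ptr);

/* __iomem memory wants the io accessors, hence memcpy_toio(). */
memcpy_toio(ptr + offset, data, len);

i915_vma_unpin_iomap(vma);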
+ */ +static inline void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + GEM_BUG_ON(vma->pin_count == 0); + GEM_BUG_ON(vma->iomap == NULL); + vma->pin_count--; +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 71611bf21fca..f75bbd67a13a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -29,7 +29,7 @@ #include "intel_renderstate.h" static const struct intel_renderstate_rodata * -render_state_get_rodata(struct drm_device *dev, const int gen) +render_state_get_rodata(const int gen) { switch (gen) { case 6: @@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_init(struct render_state *so, struct drm_device *dev) +static int render_state_init(struct render_state *so, + struct drm_i915_private *dev_priv) { int ret; - so->gen = INTEL_INFO(dev)->gen; - so->rodata = render_state_get_rodata(dev, so->gen); + so->gen = INTEL_GEN(dev_priv); + so->rodata = render_state_get_rodata(so->gen); if (so->rodata == NULL) return 0; if (so->rodata->batch_items * 4 > 4096) return -EINVAL; - so->obj = i915_gem_alloc_object(dev, 4096); - if (so->obj == NULL) - return -ENOMEM; + so->obj = i915_gem_object_create(&dev_priv->drm, 4096); + if (IS_ERR(so->obj)) + return PTR_ERR(so->obj); ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); if (ret) @@ -93,6 +94,7 @@ free_gem: static int render_state_setup(struct render_state *so) { + struct drm_device *dev = so->obj->base.dev; const struct intel_renderstate_rodata *rodata = so->rodata; unsigned int i = 0, reloc_index = 0; struct page *page; @@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so) so->aux_batch_offset = i * sizeof(u32); + if (HAS_POOLED_EU(dev)) { + /* + * We always program 3x6 pool config but depending upon which + * subslice is disabled HW drops down to appropriate config + * shown below. 
+ * + * In the below table 2x6 config always refers to + * fused-down version, native 2x6 is not available and can + * be ignored + * + * SNo subslices config eu pool configuration + * ----------------------------------------------------------- + * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) + * 2 ss0 disabled (2x6) - 0x00777000 (3+9) + * 3 ss1 disabled (2x6) - 0x00770000 (6+6) + * 4 ss2 disabled (2x6) - 0x00007000 (9+3) + */ + u32 eu_pool_config = 0x00777000; + + OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); + OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); + OUT_BATCH(d, i, eu_pool_config); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + } + OUT_BATCH(d, i, MI_BATCH_BUFFER_END); so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; @@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine, if (WARN_ON(engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, engine->dev); + ret = render_state_init(so, engine->i915); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 66571466e9a8..6f10b421487b 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long count = 0; trace_i915_gem_shrink(dev_priv, target, flags); - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); + + /* + * Unbinding of objects will require HW access; Let us not wake the + * device just to recover a little memory. If absolutely necessary, + * we will force the wake during oom-notifier. + */ + if ((flags & I915_SHRINK_BOUND) && + !intel_runtime_pm_get_if_in_use(dev_priv)) + flags &= ~I915_SHRINK_BOUND; /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, list_splice(&still_in_list, phase->list); } - i915_gem_retire_requests(dev_priv->dev); + if (flags & I915_SHRINK_BOUND) + intel_runtime_pm_put(dev_priv); + + i915_gem_retire_requests(dev_priv); return count; } @@ -245,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_i915_gem_object *obj; unsigned long count; bool unlock; @@ -253,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) if (!i915_gem_shrinker_lock(dev, &unlock)) return 0; + i915_gem_retire_requests(dev_priv); + count = 0; list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (can_release_pages(obj)) @@ -274,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; unsigned long freed; bool unlock; @@ -309,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, { unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; - while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) { + while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) { schedule_timeout_killable(1); if (fatal_signal_pending(current)) return false; @@ -330,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv, { 
dev_priv->mm.interruptible = slu->was_interruptible; if (slu->unlock) - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } static int @@ -345,7 +359,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; + intel_runtime_pm_get(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv); + intel_runtime_pm_put(dev_priv); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -386,17 +402,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct shrinker_lock_uninterruptible slu; - unsigned long freed_pages; + struct i915_vma *vma, *next; + unsigned long freed_pages = 0; + int ret; if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; - freed_pages = i915_gem_shrink(dev_priv, -1UL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE | - I915_SHRINK_VMAPS); + /* Force everything onto the inactive lists */ + ret = i915_gem_wait_for_idle(dev_priv); + if (ret) + goto out; + + intel_runtime_pm_get(dev_priv); + freed_pages += i915_gem_shrink(dev_priv, -1UL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE | + I915_SHRINK_VMAPS); + intel_runtime_pm_put(dev_priv); + + /* We also want to clear any cached iomaps as they wrap vmap */ + list_for_each_entry_safe(vma, next, + &dev_priv->ggtt.base.inactive_list, vm_link) { + unsigned long count = vma->node.size >> PAGE_SHIFT; + if (vma->iomap && i915_vma_unbind(vma) == 0) + freed_pages += count; + } +out: i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); *(unsigned long *)ptr += freed_pages; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 44004e3f09e4..66be299a1486 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -111,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 3) { u32 bsm; - pci_read_config_dword(dev->pdev, BSM, &bsm); + pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm); - base = bsm & BSM_MASK; + base = bsm & INTEL_BSM_MASK; } else if (IS_I865G(dev)) { u16 toud = 0; @@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) void i915_gem_cleanup_stolen(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!drm_mm_initialized(&dev_priv->mm.stolen)) return; @@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (obj->stolen) { i915_gem_stolen_remove_node(dev_priv, obj->stolen); @@ -601,7 +601,7 @@ cleanup: struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_device *dev, u32 size) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b9bdb34032cd..8030199731db 
100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (IS_GEN3(obj->base.dev)) { if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { @@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; int ret = 0; @@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ if (obj->map_and_fenceable && !i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_gem_object_ggtt_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); if (ret == 0) { if (obj->pages && @@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 32d9726e38b1..2314c88323e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) static void i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; BUG_ON(obj->userptr.work != NULL); __i915_gem_userptr_set_active(obj, false); @@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_gtt_finish_object(obj); - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return 0; } -int -i915_gem_init_userptr(struct drm_device *dev) +void i915_gem_init_userptr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); mutex_init(&dev_priv->mm_lock); hash_init(dev_priv->mm_structs); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 89725c9efc25..9d73d2216adc 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { struct drm_device *dev = error_priv->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_object *obj; int i, j, offset, elt; @@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->ring); i++) @@ -463,6 +463,18 @@ int i915_error_state_to_str(struct 
drm_i915_error_state_buf *m, } } + if (error->ring[i].num_waiters) { + err_printf(m, "%s --- %d waiters\n", + dev_priv->engine[i].name, + error->ring[i].num_waiters); + for (j = 0; j < error->ring[i].num_waiters; j++) { + err_printf(m, " seqno 0x%08x for %s [%d]\n", + error->ring[i].waiters[j].seqno, + error->ring[i].waiters[j].comm, + error->ring[i].waiters[j].pid); + } + } + if ((obj = error->ring[i].ringbuffer)) { err_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->engine[i].name, @@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, hws_page[elt+1], hws_page[elt+2], hws_page[elt+3]); - offset += 16; + offset += 16; } } @@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref) i915_error_object_free(error->ring[i].ringbuffer); i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].ctx); - kfree(error->ring[i].requests); i915_error_object_free(error->ring[i].wa_ctx); + kfree(error->ring[i].requests); + kfree(error->ring[i].waiters); } i915_error_object_free(error->semaphore_obj); @@ -824,19 +837,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, return error_code; } -static void i915_gem_record_fences(struct drm_device *dev, +static void i915_gem_record_fences(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ(FENCE_REG(i)); - } else if (IS_GEN5(dev) || IS_GEN4(dev)) { + } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); } @@ -851,7 +863,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, struct intel_engine_cs *to; enum intel_engine_id id; - if (!i915_semaphore_is_enabled(dev_priv->dev)) + if (!i915_semaphore_is_enabled(dev_priv)) return; if (!error->semaphore_obj) @@ -893,31 +905,71 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, } } -static void i915_record_ring_state(struct drm_device *dev, +static void engine_record_waiters(struct intel_engine_cs *engine, + struct drm_i915_error_ring *ering) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct drm_i915_error_waiter *waiter; + struct rb_node *rb; + int count; + + ering->num_waiters = 0; + ering->waiters = NULL; + + spin_lock(&b->lock); + count = 0; + for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb)) + count++; + spin_unlock(&b->lock); + + waiter = NULL; + if (count) + waiter = kmalloc_array(count, + sizeof(struct drm_i915_error_waiter), + GFP_ATOMIC); + if (!waiter) + return; + + ering->waiters = waiter; + + spin_lock(&b->lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = container_of(rb, typeof(*w), node); + + strcpy(waiter->comm, w->tsk->comm); + waiter->pid = w->tsk->pid; + waiter->seqno = w->seqno; + waiter++; + + if (++ering->num_waiters == count) + break; + } + spin_unlock(&b->lock); +} + +static void i915_record_ring_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { - struct 
drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) gen8_record_semaphore_state(dev_priv, error, engine, ering); else gen6_record_semaphore_state(dev_priv, engine, ering); } - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } @@ -929,20 +981,20 @@ static void i915_record_ring_state(struct drm_device *dev, ering->instdone = I915_READ(GEN2_INSTDONE); } - ering->waiting = waitqueue_active(&engine->irq_queue); + ering->waiting = intel_engine_has_waiter(engine); ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); ering->acthd = intel_ring_get_active_head(engine); - ering->seqno = engine->get_seqno(engine); + ering->seqno = intel_engine_get_seqno(engine); ering->last_seqno = engine->last_submitted_seqno; ering->start = I915_READ_START(engine); ering->head = I915_READ_HEAD(engine); ering->tail = I915_READ_TAIL(engine); ering->ctl = I915_READ_CTL(engine); - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { default: case RCS: @@ -958,7 +1010,7 @@ static void i915_record_ring_state(struct drm_device *dev, mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(engine->i915)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -971,18 +1023,18 @@ static void i915_record_ring_state(struct drm_device *dev, ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_action = engine->hangcheck.action; - if (USES_PPGTT(dev)) { + if (USES_PPGTT(dev_priv)) { int i; ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(engine)); - else if (IS_GEN7(dev)) + else if (IS_GEN7(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(engine)); - else if (INTEL_INFO(dev)->gen >= 8) + else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -998,7 +1050,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_gem_object *obj; /* Currently render ring is the only HW context user */ @@ -1016,34 +1068,33 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, } } -static void i915_gem_record_rings(struct drm_device *dev, +static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv 
= to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *request; int i, count; for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = &dev_priv->engine[i]; - struct intel_ringbuffer *rbuf; error->ring[i].pid = -1; - if (engine->dev == NULL) + if (!intel_engine_initialized(engine)) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, engine, &error->ring[i]); + i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); + engine_record_waiters(engine, &error->ring[i]); request = i915_gem_find_active_request(engine); if (request) { struct i915_address_space *vm; + struct intel_ringbuffer *rb; - vm = request->ctx && request->ctx->ppgtt ? - &request->ctx->ppgtt->base : - &ggtt->base; + vm = request->ctx->ppgtt ? + &request->ctx->ppgtt->base : &ggtt->base; /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten @@ -1070,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_device *dev, } rcu_read_unlock(); } - } - if (i915.enable_execlists) { - /* TODO: This is only a small fix to keep basic error - * capture working, but we need to add more information - * for it to be useful (e.g. dump the context being - * executed). - */ - if (request) - rbuf = request->ctx->engine[engine->id].ringbuf; - else - rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf; - } else - rbuf = engine->buffer; - - error->ring[i].cpu_ring_head = rbuf->head; - error->ring[i].cpu_ring_tail = rbuf->tail; + error->simulated |= + request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE; - error->ring[i].ringbuffer = - i915_error_ggtt_object_create(dev_priv, rbuf->obj); + rb = request->ringbuf; + error->ring[i].cpu_ring_head = rb->head; + error->ring[i].cpu_ring_tail = rb->tail; + error->ring[i].ringbuffer = + i915_error_ggtt_object_create(dev_priv, + rb->obj); + } error->ring[i].hws_page = i915_error_ggtt_object_create(dev_priv, @@ -1234,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i; /* General organization @@ -1301,15 +1343,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); - i915_get_extra_instdone(dev, error->extra_instdone); + i915_get_extra_instdone(dev_priv, error->extra_instdone); } -static void i915_error_capture_msg(struct drm_device *dev, +static void i915_error_capture_msg(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, u32 engine_mask, const char *error_msg) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 ecode; int ring_id = -1, len; @@ -1317,7 +1358,7 @@ static void i915_error_capture_msg(struct drm_device *dev, len = scnprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%d:0x%08x", - INTEL_INFO(dev)->gen, ring_id, ecode); + INTEL_GEN(dev_priv), ring_id, ecode); if (ring_id != -1 && error->ring[ring_id].pid != -1) len += scnprintf(error->error_msg + len, @@ -1352,14 +1393,17 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, * out a structure which becomes available in debugfs for user level tools * to pick up. 
*/ -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg) { static bool warned; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; + if (READ_ONCE(dev_priv->gpu_error.first_error)) + return; + /* Account for pipe specific data like PIPE*STAT */ error = kzalloc(sizeof(*error), GFP_ATOMIC); if (!error) { @@ -1372,23 +1416,25 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); + i915_gem_record_fences(dev_priv, error); + i915_gem_record_rings(dev_priv, error); do_gettimeofday(&error->time); - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); + error->overlay = intel_overlay_capture_error_state(dev_priv); + error->display = intel_display_capture_error_state(dev_priv); - i915_error_capture_msg(dev, error, engine_mask, error_msg); + i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - if (dev_priv->gpu_error.first_error == NULL) { - dev_priv->gpu_error.first_error = error; - error = NULL; + if (!error->simulated) { + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + if (!dev_priv->gpu_error.first_error) { + dev_priv->gpu_error.first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); } - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); if (error) { i915_error_state_free(&error->ref); @@ -1400,7 +1446,8 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", + dev_priv->drm.primary->index); warned = true; } } @@ -1408,7 +1455,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); spin_lock_irq(&dev_priv->gpu_error.lock); error_priv->error = dev_priv->gpu_error.first_error; @@ -1426,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv) void i915_destroy_error_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_error_state *error; spin_lock_irq(&dev_priv->gpu_error.lock); @@ -1450,17 +1497,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } /* NB: please notice the memset */ -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, + uint32_t *instdone) { - struct drm_i915_private *dev_priv = dev->dev_private; memset(instdone, 0, 
sizeof(*instdone) * I915_NUM_INSTDONE_REG); - if (IS_GEN2(dev) || IS_GEN3(dev)) + if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) instdone[0] = I915_READ(GEN2_INSTDONE); - else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { + else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN4_INSTDONE1); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index 80786d9f9ad3..cf5a65be4fe0 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -67,11 +67,11 @@ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) +/* Defines WOPCM space available to GuC firmware */ #define GUC_WOPCM_SIZE _MMIO(0xc050) -#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ - /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ -#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) +#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ +#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ #define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index d40c13fb6643..2112e029db6a 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); - /* No HOST2GUC command should take longer than 10ms */ - ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10); + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No HOST2GUC command should ever take longer than 10ms. 
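The wait_for_us()/wait_for() pair used in the replacement code are the driver's condition-polling helpers: the first busy-spins with a microsecond-scale deadline, the second sleeps between samples. They are macros, but the two-phase policy adopted here amounts to roughly the following sketch (simplified, not the actual macro expansion; cond() stands in for the host2guc_action_response() test):

	/* Two-phase poll: spin briefly for fast completions, then sleep-wait. */
	static bool poll_two_phase(bool (*cond)(void))
	{
		ktime_t until = ktime_add_us(ktime_get_raw(), 10);
		unsigned long timeout;

		/* Phase 1: ~10us busy-spin catches the common fast case */
		while (ktime_before(ktime_get_raw(), until))
			if (cond())
				return true;

		/* Phase 2: sleeping poll for the remaining 10ms budget */
		timeout = jiffies + msecs_to_jiffies(10);
		do {
			if (cond())
				return true;
			usleep_range(100, 200);
		} while (time_before(jiffies, timeout));

		return cond();
	}

The split keeps latency minimal for fast actions without burning a CPU for up to 10ms when the GuC is slow to respond.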
+ */ + ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10); + if (ret) + ret = wait_for(host2guc_action_response(dev_priv, &status), 10); if (status != GUC2HOST_STATUS_SUCCESS) { /* * Either the GuC explicitly returned an error (which @@ -153,13 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, struct i915_guc_client *client) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_device *dev = dev_priv->dev; u32 data[2]; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev) || - NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -175,94 +179,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, * client object which contains the page being used for the doorbell */ -static void guc_init_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int guc_update_doorbell_id(struct intel_guc *guc, + struct i915_guc_client *client, + u16 new_id) { + struct sg_table *sg = guc->ctx_pool_obj->pages; + void *doorbell_bitmap = guc->doorbell_bitmap; struct guc_doorbell_info *doorbell; + struct guc_context_desc desc; + size_t len; doorbell = client->client_base + client->doorbell_offset; - doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = 0; -} - -static int guc_ring_doorbell(struct i915_guc_client *gc) -{ - struct guc_process_desc *desc; - union guc_doorbell_qw db_cmp, db_exc, db_ret; - union guc_doorbell_qw *db; - int attempt = 2, ret = -EAGAIN; - - desc = gc->client_base + gc->proc_desc_offset; - - /* Update the tail so it is visible to GuC */ - desc->tail = gc->wq_tail; - - /* current cookie */ - db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = gc->cookie; - - /* cookie to be updated */ - db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = gc->cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - - /* pointer of current doorbell cacheline */ - db = gc->client_base + gc->doorbell_offset; - - while (attempt--) { - /* lets ring the doorbell */ - db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, - db_cmp.value_qw, db_exc.value_qw); - - /* if the exchange was successfully executed */ - if (db_ret.value_qw == db_cmp.value_qw) { - /* db was successfully rung */ - gc->cookie = db_exc.cookie; - ret = 0; - break; - } + if (client->doorbell_id != GUC_INVALID_DOORBELL_ID && + test_bit(client->doorbell_id, doorbell_bitmap)) { + /* Deactivate the old doorbell */ + doorbell->db_status = GUC_DOORBELL_DISABLED; + (void)host2guc_release_doorbell(guc, client); + __clear_bit(client->doorbell_id, doorbell_bitmap); + } - /* XXX: doorbell was lost and need to acquire it again */ - if (db_ret.db_status == GUC_DOORBELL_DISABLED) - break; + /* Update the GuC's idea of the doorbell ID */ + len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), + sizeof(desc) * client->ctx_index); + if (len != sizeof(desc)) + return -EFAULT; + desc.db_id = new_id; + len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), + sizeof(desc) * client->ctx_index); + if (len != sizeof(desc)) + return -EFAULT; - DRM_ERROR("Cookie mismatch. 
Expected %d, returned %d\n", - db_cmp.cookie, db_ret.cookie); + client->doorbell_id = new_id; + if (new_id == GUC_INVALID_DOORBELL_ID) + return 0; - /* update the cookie to newly read cookie from GuC */ - db_cmp.cookie = db_ret.cookie; - db_exc.cookie = db_ret.cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - } + /* Activate the new doorbell */ + __set_bit(new_id, doorbell_bitmap); + doorbell->cookie = 0; + doorbell->db_status = GUC_DOORBELL_ENABLED; + return host2guc_allocate_doorbell(guc, client); +} - return ret; +static int guc_init_doorbell(struct intel_guc *guc, + struct i915_guc_client *client, + uint16_t db_id) +{ + return guc_update_doorbell_id(guc, client, db_id); } static void guc_disable_doorbell(struct intel_guc *guc, struct i915_guc_client *client) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct guc_doorbell_info *doorbell; - i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); - int value; - - doorbell = client->client_base + client->doorbell_offset; + (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID); - doorbell->db_status = GUC_DOORBELL_DISABLED; + /* XXX: wait for any interrupts */ + /* XXX: wait for workqueue to drain */ +} - I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); +static uint16_t +select_doorbell_register(struct intel_guc *guc, uint32_t priority) +{ + /* + * The bitmap tracks which doorbell registers are currently in use. + * It is split into two halves; the first half is used for normal + * priority contexts, the second half for high-priority ones. + * Note that logically higher priorities are numerically less than + * normal ones, so the test below means "is it high-priority?" + */ + const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); + const uint16_t half = GUC_MAX_DOORBELLS / 2; + const uint16_t start = hi_pri ? half : 0; + const uint16_t end = start + half; + uint16_t id; - value = I915_READ(drbreg); - WARN_ON((value & GEN8_DRB_VALID) != 0); + id = find_next_zero_bit(guc->doorbell_bitmap, end, start); + if (id == end) + id = GUC_INVALID_DOORBELL_ID; - I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); - I915_WRITE(drbreg, 0); + DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", + hi_pri ? "high" : "normal", id); - /* XXX: wait for any interrupts */ - /* XXX: wait for workqueue to drain */ + return id; } /* @@ -289,37 +287,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) return offset; } -static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) -{ - /* - * The bitmap is split into two halves; the first half is used for - * normal priority contexts, the second half for high-priority ones. - * Note that logically higher priorities are numerically less than - * normal ones, so the test below means "is it high-priority?" - */ - const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); - const uint16_t half = GUC_MAX_DOORBELLS / 2; - const uint16_t start = hi_pri ? half : 0; - const uint16_t end = start + half; - uint16_t id; - - id = find_next_zero_bit(guc->doorbell_bitmap, end, start); - if (id == end) - id = GUC_INVALID_DOORBELL_ID; - else - bitmap_set(guc->doorbell_bitmap, id, 1); - - DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", - hi_pri ? "high" : "normal", id); - - return id; -} - -static void release_doorbell(struct intel_guc *guc, uint16_t id) -{ - bitmap_clear(guc->doorbell_bitmap, id, 1); -} - /* * Initialise the process descriptor shared with the GuC firmware. 
*/ @@ -361,10 +328,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc, struct drm_i915_gem_object *client_obj = client->client_obj; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; - struct intel_context *ctx = client->owner; + struct i915_gem_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; - enum intel_engine_id id; u32 gfx_addr; memset(&desc, 0, sizeof(desc)); @@ -374,10 +340,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; struct drm_i915_gem_object *obj; - uint64_t ctx_desc; /* TODO: We have a design issue to be solved here. Only when we * receive the first batch, we know which engine is used by the @@ -386,20 +352,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc, * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - obj = ctx->engine[id].state; - if (!obj) + if (!ce->state) break; /* XXX: continue? */ - ctx_desc = intel_lr_context_descriptor(ctx, engine); - lrc->context_desc = (u32)ctx_desc; + lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - gfx_addr = i915_gem_obj_ggtt_offset(obj); + gfx_addr = i915_gem_obj_ggtt_offset(ce->state); lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ctx->engine[id].ringbuf->obj; + obj = ce->ringbuf->obj; gfx_addr = i915_gem_obj_ggtt_offset(obj); lrc->ring_begin = gfx_addr; @@ -427,7 +391,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.wq_size = client->wq_size; /* - * XXX: Take LRCs from an existing intel_context if this is not an + * XXX: Take LRCs from an existing context if this is not an * IsKMDCreatedContext client */ desc.desc_private = (uintptr_t)client; @@ -451,47 +415,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, sizeof(desc) * client->ctx_index); } -int i915_guc_wq_check_space(struct i915_guc_client *gc) +/** + * i915_guc_wq_check_space() - check that the GuC can accept a request + * @request: request associated with the commands + * + * Return: 0 if space is available + * -EAGAIN if space is not currently available + * + * This function must be called (and must return 0) before a request + * is submitted to the GuC via i915_guc_submit() below. Once a result + * of 0 has been returned, it remains valid until (but only until) + * the next call to submit(). + * + * This precheck allows the caller to determine in advance that space + * will be available for the next submission before committing resources + * to it, and helps avoid late failures with complicated recovery paths. 
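Concretely, the protocol described above turns the caller's submission path into a reserve-then-commit sequence. A hedged sketch of such a caller (rq is a hypothetical, fully-constructed request):

	int ret;

	/* Reserve: may fail with -EAGAIN while the GuC workqueue is full */
	ret = i915_guc_wq_check_space(rq);
	if (ret)
		return ret;	/* back off and retry the submission later */

	/* ... commit ringbuffer contents and other resources to rq ... */

	/* Commit: space is guaranteed, only a doorbell failure remains */
	ret = i915_guc_submit(rq);

Failing early at the reserve step spares the driver from having to unwind a request that has already been emitted to the ring.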
+ */ +int i915_guc_wq_check_space(struct drm_i915_gem_request *request) { + const size_t wqi_size = sizeof(struct guc_wq_item); + struct i915_guc_client *gc = request->i915->guc.execbuf_client; struct guc_process_desc *desc; - u32 size = sizeof(struct guc_wq_item); - int ret = -ETIMEDOUT, timeout_counter = 200; + u32 freespace; - if (!gc) - return 0; + GEM_BUG_ON(gc == NULL); desc = gc->client_base + gc->proc_desc_offset; - while (timeout_counter-- > 0) { - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { - ret = 0; - break; - } + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + if (likely(freespace >= wqi_size)) + return 0; - if (timeout_counter) - usleep_range(1000, 2000); - }; + gc->no_wq_space += 1; - return ret; + return -EAGAIN; } -static int guc_add_workqueue_item(struct i915_guc_client *gc, - struct drm_i915_gem_request *rq) +static void guc_add_workqueue_item(struct i915_guc_client *gc, + struct drm_i915_gem_request *rq) { + /* wqi_len is in DWords, and does not include the one-word header */ + const size_t wqi_size = sizeof(struct guc_wq_item); + const u32 wqi_len = wqi_size/sizeof(u32) - 1; struct guc_process_desc *desc; struct guc_wq_item *wqi; void *base; - u32 tail, wq_len, wq_off, space; + u32 freespace, tail, wq_off, wq_page; desc = gc->client_base + gc->proc_desc_offset; - space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); - if (WARN_ON(space < sizeof(struct guc_wq_item))) - return -ENOSPC; /* shouldn't happen */ - /* postincrement WQ tail for next time */ - wq_off = gc->wq_tail; - gc->wq_tail += sizeof(struct guc_wq_item); - gc->wq_tail &= gc->wq_size - 1; + /* Free space is guaranteed, see i915_guc_wq_check_space() above */ + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + GEM_BUG_ON(freespace < wqi_size); + + /* The GuC firmware wants the tail index in QWords, not bytes */ + tail = rq->tail; + GEM_BUG_ON(tail & 7); + tail >>= 3; + GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -500,19 +481,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, * XXX: if not the case, we need save data to a temp wqi and copy it to * workqueue buffer dw by dw. 
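For orientation, the 4-DWord work-queue item assembled by this function has the following shape (a restatement with the new field usage annotated; the authoritative definition is struct guc_wq_item in intel_guc_fwif.h):

	struct guc_wq_item {
		u32 header;		/* type, length, target engine, flush flags */
		u32 context_desc;	/* low 32 bits of the LRC descriptor */
		u32 ring_tail;		/* ring tail in qwords, per GuC convention */
		u32 fence_id;		/* now carries rq->seqno rather than 0 */
	} __packed;

Sixteen bytes per item in a two-page, item-aligned buffer also guarantees that an item never straddles a page (or a cacheline), which the BUILD_BUG_ON below encodes.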
*/ - WARN_ON(sizeof(struct guc_wq_item) != 16); - WARN_ON(wq_off & 3); + BUILD_BUG_ON(wqi_size != 16); + + /* postincrement WQ tail for next time */ + wq_off = gc->wq_tail; + gc->wq_tail += wqi_size; + gc->wq_tail &= gc->wq_size - 1; + GEM_BUG_ON(wq_off & (wqi_size - 1)); - /* wq starts from the page after doorbell / process_desc */ - base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, - (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT)); + /* WQ starts from the page after doorbell / process_desc */ + wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT; wq_off &= PAGE_SIZE - 1; + base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page)); wqi = (struct guc_wq_item *)((char *)base + wq_off); - /* len does not include the header */ - wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; + /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wq_len << WQ_LEN_SHIFT) | + (wqi_len << WQ_LEN_SHIFT) | (rq->engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; @@ -520,48 +505,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->engine); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->ringbuf->tail >> 3; wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; - wqi->fence_id = 0; /*XXX: what fence to be here */ + wqi->fence_id = rq->seqno; kunmap_atomic(base); +} - return 0; +static int guc_ring_doorbell(struct i915_guc_client *gc) +{ + struct guc_process_desc *desc; + union guc_doorbell_qw db_cmp, db_exc, db_ret; + union guc_doorbell_qw *db; + int attempt = 2, ret = -EAGAIN; + + desc = gc->client_base + gc->proc_desc_offset; + + /* Update the tail so it is visible to GuC */ + desc->tail = gc->wq_tail; + + /* current cookie */ + db_cmp.db_status = GUC_DOORBELL_ENABLED; + db_cmp.cookie = gc->cookie; + + /* cookie to be updated */ + db_exc.db_status = GUC_DOORBELL_ENABLED; + db_exc.cookie = gc->cookie + 1; + if (db_exc.cookie == 0) + db_exc.cookie = 1; + + /* pointer of current doorbell cacheline */ + db = gc->client_base + gc->doorbell_offset; + + while (attempt--) { + /* lets ring the doorbell */ + db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, + db_cmp.value_qw, db_exc.value_qw); + + /* if the exchange was successfully executed */ + if (db_ret.value_qw == db_cmp.value_qw) { + /* db was successfully rung */ + gc->cookie = db_exc.cookie; + ret = 0; + break; + } + + /* XXX: doorbell was lost and need to acquire it again */ + if (db_ret.db_status == GUC_DOORBELL_DISABLED) + break; + + DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", + db_cmp.cookie, db_ret.cookie); + + /* update the cookie to newly read cookie from GuC */ + db_cmp.cookie = db_ret.cookie; + db_exc.cookie = db_ret.cookie + 1; + if (db_exc.cookie == 0) + db_exc.cookie = 1; + } + + return ret; } /** * i915_guc_submit() - Submit commands through GuC - * @client: the guc client where commands will go through * @rq: request associated with the commands * - * Return: 0 if succeed + * Return: 0 on success, otherwise an errno. + * (Note: nonzero really shouldn't happen!) + * + * The caller must have already called i915_guc_wq_check_space() above + * with a result of 0 (success) since the last request submission. This + * guarantees that there is space in the work queue for the new request, + * so enqueuing the item cannot fail. + * + * Bad Things Will Happen if the caller violates this protocol e.g. 
calls + * submit() when check() says there's no space, or calls submit() multiple + * times with no intervening check(). + * + * The only error here arises if the doorbell hardware isn't functioning + * as expected, which really shouln't happen. */ -int i915_guc_submit(struct i915_guc_client *client, - struct drm_i915_gem_request *rq) +int i915_guc_submit(struct drm_i915_gem_request *rq) { - struct intel_guc *guc = client->guc; - unsigned int engine_id = rq->engine->guc_id; - int q_ret, b_ret; + unsigned int engine_id = rq->engine->id; + struct intel_guc *guc = &rq->i915->guc; + struct i915_guc_client *client = guc->execbuf_client; + int b_ret; - q_ret = guc_add_workqueue_item(client, rq); - if (q_ret == 0) - b_ret = guc_ring_doorbell(client); + guc_add_workqueue_item(client, rq); + b_ret = guc_ring_doorbell(client); client->submissions[engine_id] += 1; - if (q_ret) { - client->q_fail += 1; - client->retcode = q_ret; - } else if (b_ret) { + client->retcode = b_ret; + if (b_ret) client->b_fail += 1; - client->retcode = q_ret = b_ret; - } else { - client->retcode = 0; - } + guc->submissions[engine_id] += 1; guc->last_seqno[engine_id] = rq->seqno; - return q_ret; + return b_ret; } /* @@ -572,7 +614,7 @@ int i915_guc_submit(struct i915_guc_client *client, /** * gem_allocate_guc_obj() - Allocate gem object for GuC usage - * @dev: drm device + * @dev_priv: driver private data structure * @size: size of object * * This is a wrapper to create a gem obj. In order to use it inside GuC, the @@ -581,14 +623,13 @@ int i915_guc_submit(struct i915_guc_client *client, * * Return: A drm_i915_gem_object if successful, otherwise NULL. */ -static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, - u32 size) +static struct drm_i915_gem_object * +gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; - obj = i915_gem_alloc_object(dev, size); - if (!obj) + obj = i915_gem_object_create(&dev_priv->drm, size); + if (IS_ERR(obj)) return NULL; if (i915_gem_object_get_pages(obj)) { @@ -623,10 +664,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj) drm_gem_object_unreference(&obj->base); } -static void guc_client_free(struct drm_device *dev, - struct i915_guc_client *client) +static void +guc_client_free(struct drm_i915_private *dev_priv, + struct i915_guc_client *client) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; if (!client) @@ -639,17 +680,10 @@ static void guc_client_free(struct drm_device *dev, if (client->client_base) { /* - * If we got as far as setting up a doorbell, make sure - * we shut it down before unmapping & deallocating the - * memory. So first disable the doorbell, then tell the - * GuC that we've finished with it, finally deallocate - * it in our bitmap + * If we got as far as setting up a doorbell, make sure we + * shut it down before unmapping & deallocating the memory. */ - if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { - guc_disable_doorbell(guc, client); - host2guc_release_doorbell(guc, client); - release_doorbell(guc, client->doorbell_id); - } + guc_disable_doorbell(guc, client); kunmap(kmap_to_page(client->client_base)); } @@ -664,9 +698,51 @@ static void guc_client_free(struct drm_device *dev, kfree(client); } +/* + * Borrow the first client to set up & tear down every doorbell + * in turn, to ensure that all doorbell h/w is (re)initialised. 
+ */ +static void guc_init_doorbell_hw(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct i915_guc_client *client = guc->execbuf_client; + uint16_t db_id, i; + int err; + + db_id = client->doorbell_id; + + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { + i915_reg_t drbreg = GEN8_DRBREGL(i); + u32 value = I915_READ(drbreg); + + err = guc_update_doorbell_id(guc, client, i); + + /* Report update failure or unexpectedly active doorbell */ + if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED))) + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n", + i, drbreg.reg, value, err); + } + + /* Restore to original value */ + err = guc_update_doorbell_id(guc, client, db_id); + if (err) + DRM_ERROR("Failed to restore doorbell to %d, err %d\n", + db_id, err); + + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { + i915_reg_t drbreg = GEN8_DRBREGL(i); + u32 value = I915_READ(drbreg); + + if (i != db_id && (value & GUC_DOORBELL_ENABLED)) + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n", + i, drbreg.reg, value); + + } +} + /** * guc_client_alloc() - Allocate an i915_guc_client - * @dev: drm device + * @dev_priv: driver private data structure * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, @@ -676,14 +752,15 @@ static void guc_client_free(struct drm_device *dev, * * Return: An i915_guc_client object if success, else NULL. */ -static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, - uint32_t priority, - struct intel_context *ctx) +static struct i915_guc_client * +guc_client_alloc(struct drm_i915_private *dev_priv, + uint32_t priority, + struct i915_gem_context *ctx) { struct i915_guc_client *client; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; struct drm_i915_gem_object *obj; + uint16_t db_id; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) @@ -702,7 +779,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, } /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); + obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE); if (!obj) goto err; @@ -712,6 +789,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, client->wq_offset = GUC_DB_SIZE; client->wq_size = GUC_WQ_SIZE; + db_id = select_doorbell_register(guc, client->priority); + if (db_id == GUC_INVALID_DOORBELL_ID) + /* XXX: evict a doorbell instead? */ + goto err; + client->doorbell_offset = select_doorbell_cacheline(guc); /* @@ -724,29 +806,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, else client->proc_desc_offset = (GUC_DB_SIZE / 2); - client->doorbell_id = assign_doorbell(guc, client->priority); - if (client->doorbell_id == GUC_INVALID_DOORBELL_ID) - /* XXX: evict a doorbell instead */ - goto err; - guc_init_proc_desc(guc, client); guc_init_ctx_desc(guc, client); - guc_init_doorbell(guc, client); - - /* XXX: Any cache flushes needed? General domain mgmt calls? 
*/ - - if (host2guc_allocate_doorbell(guc, client)) + if (guc_init_doorbell(guc, client, db_id)) goto err; - DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", - priority, client, client->ctx_index, client->doorbell_id); + DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n", + priority, client, client->ctx_index); + DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", + client->doorbell_id, client->doorbell_offset); return client; err: DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); - guc_client_free(dev, client); + guc_client_free(dev_priv, client); return NULL; } @@ -771,7 +846,7 @@ static void guc_create_log(struct intel_guc *guc) obj = guc->log_obj; if (!obj) { - obj = gem_allocate_guc_obj(dev_priv->dev, size); + obj = gem_allocate_guc_obj(dev_priv, size); if (!obj) { /* logging will be off */ i915.guc_log_level = -1; @@ -831,7 +906,7 @@ static void guc_create_ads(struct intel_guc *guc) obj = guc->ads_obj; if (!obj) { - obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); + obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size)); if (!obj) return; @@ -885,66 +960,65 @@ static void guc_create_ads(struct intel_guc *guc) * Set up the memory resources to be shared with the GuC. At this point, * we require just one object that can be mapped through the GGTT. */ -int i915_guc_submission_init(struct drm_device *dev) +int i915_guc_submission_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; const size_t ctxsize = sizeof(struct guc_context_desc); const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; const size_t gemsize = round_up(poolsize, PAGE_SIZE); struct intel_guc *guc = &dev_priv->guc; + /* Wipe bitmap & delete client in case of reinitialisation */ + bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); + i915_guc_submission_disable(dev_priv); + if (!i915.enable_guc_submission) return 0; /* not enabled */ if (guc->ctx_pool_obj) return 0; /* already allocated */ - guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); + guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize); if (!guc->ctx_pool_obj) return -ENOMEM; ida_init(&guc->ctx_ids); - guc_create_log(guc); - guc_create_ads(guc); return 0; } -int i915_guc_submission_enable(struct drm_device *dev) +int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx = dev_priv->kernel_context; struct i915_guc_client *client; /* client for execbuf submission */ - client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); + client = guc_client_alloc(dev_priv, + GUC_CTX_PRIORITY_KMD_NORMAL, + dev_priv->kernel_context); if (!client) { DRM_ERROR("Failed to create execbuf guc_client\n"); return -ENOMEM; } guc->execbuf_client = client; - host2guc_sample_forcewake(guc, client); + guc_init_doorbell_hw(guc); return 0; } -void i915_guc_submission_disable(struct drm_device *dev) +void i915_guc_submission_disable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - guc_client_free(dev, guc->execbuf_client); + guc_client_free(dev_priv, guc->execbuf_client); guc->execbuf_client = NULL; } -void i915_guc_submission_fini(struct drm_device *dev) +void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; 
gem_release_guc_obj(dev_priv->guc.ads_obj); @@ -965,12 +1039,12 @@ void i915_guc_submission_fini(struct drm_device *dev) */ int intel_guc_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; @@ -991,12 +1065,12 @@ int intel_guc_suspend(struct drm_device *dev) */ int intel_guc_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index aab47f7bb61b..1c2aec392412 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, dev_priv->gt_irq_mask &= ~interrupt_mask; dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); } void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, mask); + POSTING_READ_FW(GTIMR); } void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) @@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) __gen6_disable_pm_irq(dev_priv, mask); } -void gen6_reset_rps_interrupts(struct drm_device *dev) +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; i915_reg_t reg = gen6_pm_iir(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -349,14 +348,11 @@ void gen6_reset_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } -void gen6_enable_rps_interrupts(struct drm_device *dev) +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); - - WARN_ON(dev_priv->rps.pm_iir); - WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); + WARN_ON_ONCE(dev_priv->rps.pm_iir); + WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); dev_priv->rps.interrupts_enabled = true; I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | dev_priv->pm_rps_events); @@ -367,32 +363,13 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) { - /* - * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. - * - * TODO: verify if this can be reproduced on VLV,CHV. 
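The per-generation special cases deleted here move into a single precomputed mask, dev_priv->rps.pm_intr_keep: the PM interrupt bits that must never be masked off. Built once with the same rules the removed code applied on every call, its setup would look roughly like this (a sketch; the actual assignment lives in the interrupt-init path, outside this diff):

	dev_priv->rps.pm_intr_keep = 0;

	/* SNB,IVB can (and VLV,CHV may) hard hang if this bit is masked */
	if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	/* gen8+ must keep the redirect-to-non-display bit unmasked */
	if (INTEL_GEN(dev_priv) >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

With the mask precomputed, sanitizing a requested mask reduces to the single expression in the replacement line.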
- */ - if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) - mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; - - if (INTEL_INFO(dev_priv)->gen >= 8) - mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; - - return mask; + return (mask & ~dev_priv->rps.pm_intr_keep); } -void gen6_disable_rps_interrupts(struct drm_device *dev) +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.interrupts_enabled = false; - spin_unlock_irq(&dev_priv->irq_lock); - - cancel_work_sync(&dev_priv->rps.work); - - spin_lock_irq(&dev_priv->irq_lock); I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); @@ -401,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) ~dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); + synchronize_irq(dev_priv->drm.irq); - synchronize_irq(dev->irq); + /* Now that we will not be generating any more work, flush any + * outstanding tasks. As we are called on the RPS idle path, + * we will reset the GPU to minimum frequencies, so the current + * state of the worker can be discarded. + */ + cancel_work_sync(&dev_priv->rps.work); + gen6_reset_rps_interrupts(dev_priv); } /** @@ -582,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, status_mask); else enable_mask = status_mask << 16; @@ -596,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, status_mask); else enable_mask = status_mask << 16; @@ -605,19 +589,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion - * @dev: drm device + * @dev_priv: i915 device private */ -static void i915_enable_asle_pipestat(struct drm_device *dev) +static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) + if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); @@ -685,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) */ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; struct intel_crtc *intel_crtc = @@ -732,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -741,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static int
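/*
 * Editor's note: the reordering in gen6_disable_rps_interrupts() above
 * follows the usual teardown recipe for an interrupt-driven worker:
 * mask the interrupt source, wait for any handler already in flight,
 * then flush the work item so nothing can re-arm it. A hedged sketch
 * of that shape (simplified; not the driver's exact code):
 */
static void example_disable_irq_worker(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;	/* 1. refuse new events */
	/* ... write the IMR/IER masks here ... */
	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev_priv->drm.irq);	/* 2. wait out running handlers */
	cancel_work_sync(&dev_priv->rps.work);	/* 3. flush the bottom half */
}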
__intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct drm_display_mode *mode = &crtc->base.hwmode; enum pipe pipe = crtc->pipe; int position, vtotal; @@ -750,7 +732,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; @@ -767,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) * problem. We may need to extend this to include other platforms, * but so far testing only shows the problem on HSW. */ - if (HAS_DDI(dev) && !position) { + if (HAS_DDI(dev_priv) && !position) { int i, temp; for (i = 0; i < 100; i++) { @@ -793,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int position; @@ -835,7 +817,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, if (stime) *stime = ktime_get(); - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ @@ -897,7 +879,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, else position += vtotal - vbl_end; - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { *vpos = position; *hpos = 0; } else { @@ -914,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int intel_get_crtc_scanline(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); unsigned long irqflags; int position; @@ -955,9 +937,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, &crtc->hwmode); } -static void ironlake_rps_change_irq_handler(struct drm_device *dev) +static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; @@ -986,7 +967,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) new_delay = dev_priv->ips.min_delay; } - if (ironlake_set_drps(dev, new_delay)) + if (ironlake_set_drps(dev_priv, new_delay)) dev_priv->ips.cur_delay = new_delay; spin_unlock(&mchdev_lock); @@ -996,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) static void notify_ring(struct intel_engine_cs *engine) { - if (!intel_engine_initialized(engine)) - return; - - trace_i915_gem_request_notify(engine); - engine->user_interrupts++; - - wake_up_all(&engine->irq_queue); + smp_store_mb(engine->breadcrumbs.irq_posted, true); + if (intel_engine_wakeup(engine)) { + trace_i915_gem_request_notify(engine); + engine->breadcrumbs.irq_wakeups++; + } } static void vlv_c0_read(struct drm_i915_private 
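/*
 * Editor's note: the new notify_ring() above relies on the classic
 * flag-then-wake ordering: smp_store_mb() publishes the flag with a
 * full barrier before the wakeup is attempted, so a waiter that races
 * with the wakeup still observes the flag on its next check.
 * Illustrative sketch with assumed (hypothetical) names:
 */
struct example_waiter {
	bool posted;
	wait_queue_head_t wait_queue;
};

static void example_post_and_wake(struct example_waiter *w)
{
	smp_store_mb(w->posted, true);	/* store the flag, then full barrier */
	wake_up_all(&w->wait_queue);	/* only then wake any sleeper */
}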
*dev_priv, @@ -1083,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) - if (engine->irq_refcount) + if (intel_engine_has_waiter(engine)) return true; return false; @@ -1104,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work) return; } - /* - * The RPS work is synced during runtime suspend, we don't require a - * wakeref. TODO: instead of disabling the asserts make sure that we - * always hold an RPM reference while the work is running. - */ - DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); - pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ @@ -1123,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work) WARN_ON(pm_iir & ~dev_priv->pm_rps_events); if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) - goto out; + return; mutex_lock(&dev_priv->rps.hw_lock); @@ -1175,11 +1147,9 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay += adj; new_delay = clamp_t(int, new_delay, min, max); - intel_set_rps(dev_priv->dev, new_delay); + intel_set_rps(dev_priv, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); -out: - ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } @@ -1205,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work) * In order to prevent a get/put style interface, acquire struct mutex * any time we access those registers. */ - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); /* If we've screwed up tracking, just let the interrupt fire again */ if (WARN_ON(!dev_priv->l3_parity.which_slice)) @@ -1241,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work) parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); parity_event[5] = NULL; - kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, + kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, KOBJ_CHANGE, parity_event); DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", @@ -1261,7 +1231,7 @@ out: gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); spin_unlock_irq(&dev_priv->irq_lock); - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, @@ -1287,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + if (gt_iir & GT_RENDER_USER_INTERRUPT) notify_ring(&dev_priv->engine[RCS]); if (gt_iir & ILK_BSD_USER_INTERRUPT) notify_ring(&dev_priv->engine[VCS]); @@ -1297,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { - - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + if (gt_iir & GT_RENDER_USER_INTERRUPT) notify_ring(&dev_priv->engine[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) notify_ring(&dev_priv->engine[VCS]); @@ -1506,27 +1473,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, } -static void gmbus_irq_handler(struct drm_device *dev) +static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } -static void dp_aux_irq_handler(struct drm_device *dev) +static void 
dp_aux_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } #if defined(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_pipe_crc_entry *entry; int head, tail; @@ -1550,7 +1513,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, entry = &pipe_crc->entries[head]; - entry->frame = dev->driver->get_vblank_counter(dev, pipe); + entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, + pipe); entry->crc[0] = crc0; entry->crc[1] = crc1; entry->crc[2] = crc2; @@ -1566,27 +1530,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, } #else static inline void -display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) {} #endif -static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 0, 0, 0, 0); } -static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), I915_READ(PIPE_CRC_RES_2_IVB(pipe)), I915_READ(PIPE_CRC_RES_3_IVB(pipe)), @@ -1594,22 +1557,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) I915_READ(PIPE_CRC_RES_5_IVB(pipe))); } -static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t res1, res2; - if (INTEL_INFO(dev)->gen >= 3) + if (INTEL_GEN(dev_priv) >= 3) res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); else res1 = 0; - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); else res2 = 0; - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_RED(pipe)), I915_READ(PIPE_CRC_RES_GREEN(pipe)), I915_READ(PIPE_CRC_RES_BLUE(pipe)), @@ -1626,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); if (dev_priv->rps.interrupts_enabled) { dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - queue_work(dev_priv->wq, &dev_priv->rps.work); + schedule_work(&dev_priv->rps.work); } spin_unlock(&dev_priv->irq_lock); } @@ -1643,18 +1606,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) } } -static bool intel_pipe_handle_vblank(struct 
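/*
 * Editor's note: display_pipe_crc_irq_handler() above appends each CRC
 * interrupt to a fixed-size ring indexed by head/tail and wakes the
 * debugfs reader. A hedged sketch of the producer side (types, sizes
 * and names are illustrative, not from this patch):
 */
struct example_crc_ring {
	struct { u32 frame; u32 crc[5]; } entries[128];	/* power of two */
	int head, tail;
	wait_queue_head_t wq;
};

static void example_crc_record(struct example_crc_ring *ring,
			       u32 frame, const u32 crc[5])
{
	int head = ring->head;

	ring->entries[head].frame = frame;
	memcpy(ring->entries[head].crc, crc, sizeof(ring->entries[head].crc));
	ring->head = (head + 1) & (ARRAY_SIZE(ring->entries) - 1);	/* wrap */
	wake_up_interruptible(&ring->wq);	/* unblock the debugfs reader */
}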
drm_device *dev, enum pipe pipe) +static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) { - if (!drm_handle_vblank(dev, pipe)) - return false; + bool ret; - return true; + ret = drm_handle_vblank(&dev_priv->drm, pipe); + if (ret) + intel_finish_page_flip_mmio(dev_priv, pipe); + + return ret; } -static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, - u32 pipe_stats[I915_MAX_PIPES]) +static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; spin_lock(&dev_priv->irq_lock); @@ -1710,31 +1676,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, spin_unlock(&dev_priv->irq_lock); } -static void valleyview_pipestat_irq_handler(struct drm_device *dev, +static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); - if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip(dev, pipe); - } + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) + intel_finish_page_flip_cs(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) @@ -1747,12 +1710,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) return hotplug_status; } -static void i9xx_hpd_irq_handler(struct drm_device *dev, +static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) { u32 pin_mask = 0, long_mask = 0; - if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; if (hotplug_trigger) { @@ -1760,11 +1724,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, hotplug_trigger, hpd_status_g4x, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); } else { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; @@ -1772,7 +1736,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, hpd_status_i915, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } } } @@ -1780,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, static irqreturn_t valleyview_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); irqreturn_t ret = IRQ_NONE; if 
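/*
 * Editor's note: the valleyview pipestat path above is split into an
 * _ack() that reads and clears the status registers under irq_lock and
 * a _handler() that acts on the cached copy afterwards. Simplified,
 * hedged sketch of the ack half (the real code also folds the enable
 * bits into the write-back):
 */
static void example_pipestat_ack(struct drm_i915_private *dev_priv,
				 u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		pipe_stats[pipe] = I915_READ(PIPESTAT(pipe));
		if (pipe_stats[pipe])
			I915_WRITE(PIPESTAT(pipe), pipe_stats[pipe]); /* clear */
	}
	spin_unlock(&dev_priv->irq_lock);
}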
(!intel_irqs_enabled(dev_priv)) @@ -1831,7 +1795,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1850,9 +1814,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gen6_rps_irq_handler(dev_priv, pm_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1863,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) static irqreturn_t cherryview_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -1911,7 +1875,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1927,9 +1891,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) gen8_gt_irq_handler(dev_priv, gt_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1937,10 +1901,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) return ret; } -static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* @@ -1966,16 +1930,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, pch_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -1985,10 +1948,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_HDCP_MASK) DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); @@ -2018,9 +1981,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); } -static void ivb_err_int_handler(struct drm_device *dev) +static void ivb_err_int_handler(struct 
drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 err_int = I915_READ(GEN7_ERR_INT); enum pipe pipe; @@ -2032,19 +1994,18 @@ static void ivb_err_int_handler(struct drm_device *dev) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev)) - ivb_pipe_crc_irq_handler(dev, pipe); + if (IS_IVYBRIDGE(dev_priv)) + ivb_pipe_crc_irq_handler(dev_priv, pipe); else - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); } } I915_WRITE(GEN7_ERR_INT, err_int); } -static void cpt_serr_int_handler(struct drm_device *dev) +static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 serr_int = I915_READ(SERR_INT); if (serr_int & SERR_INT_POISON) @@ -2062,13 +2023,12 @@ static void cpt_serr_int_handler(struct drm_device *dev) I915_WRITE(SERR_INT, serr_int); } -static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2078,10 +2038,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK_CPT) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); @@ -2096,12 +2056,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev); + cpt_serr_int_handler(dev_priv); } -static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -2130,16 +2089,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pin_mask) - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } -static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); @@ -2149,97 +2108,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, ilk_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if 
(hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (de_iir & DE_POISON) DRM_ERROR("Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { u32 pch_iir = I915_READ(SDEIIR); - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); + if (HAS_PCH_CPT(dev_priv)) + cpt_irq_handler(dev_priv, pch_iir); else - ibx_irq_handler(dev, pch_iir); + ibx_irq_handler(dev_priv, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); } - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev); + if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev_priv); } -static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev); + ivb_err_int_handler(dev_priv); if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); for_each_pipe(dev_priv, pipe) { if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = I915_READ(SDEIIR); - cpt_irq_handler(dev, pch_iir); + cpt_irq_handler(dev_priv, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -2257,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -2277,7 +2232,7 @@ static irqreturn_t 
ironlake_irq_handler(int irq, void *arg) * able to process them after we restore SDEIER (as soon as we restore * it, we'll get an interrupt if SDEIIR still has something to process * due to its back queue). */ - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { sde_ier = I915_READ(SDEIER); I915_WRITE(SDEIER, 0); POSTING_READ(SDEIER); @@ -2289,7 +2244,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (gt_iir) { I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) snb_gt_irq_handler(dev_priv, gt_iir); else ilk_gt_irq_handler(dev_priv, gt_iir); @@ -2299,13 +2254,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (de_iir) { I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 7) - ivb_display_irq_handler(dev, de_iir); + if (INTEL_GEN(dev_priv) >= 7) + ivb_display_irq_handler(dev_priv, de_iir); else - ilk_display_irq_handler(dev, de_iir); + ilk_display_irq_handler(dev_priv, de_iir); } - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { I915_WRITE(GEN6_PMIIR, pm_iir); @@ -2316,7 +2271,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { I915_WRITE(SDEIER, sde_ier); POSTING_READ(SDEIER); } @@ -2327,10 +2282,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) return ret; } -static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); @@ -2340,13 +2295,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, bxt_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { - struct drm_device *dev = dev_priv->dev; irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; @@ -2357,7 +2311,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; if (iir & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); else DRM_ERROR("Unexpected DE Misc interrupt\n"); } @@ -2381,26 +2335,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) GEN9_AUX_CHANNEL_D; if (iir & tmp_mask) { - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); found = true; } if (IS_BROXTON(dev_priv)) { tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; if (tmp_mask) { - bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); + bxt_hpd_irq_handler(dev_priv, tmp_mask, + hpd_bxt); found = true; } } else if (IS_BROADWELL(dev_priv)) { tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; if (tmp_mask) { - ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); + ilk_hpd_irq_handler(dev_priv, + tmp_mask, hpd_bdw); found = true; } } - if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { - gmbus_irq_handler(dev); + if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { + gmbus_irq_handler(dev_priv); found = true; } @@ -2427,8 +2383,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 
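/*
 * Editor's note: every gen8 display hunk above follows the same
 * read-IIR / write-back-to-ack / then-handle discipline, so an event
 * that latches again while being handled raises a fresh interrupt
 * rather than being lost. Condensed sketch of the idiom, reusing one
 * of the registers from the hunk above:
 */
static irqreturn_t example_ack_then_handle(struct drm_i915_private *dev_priv)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir = I915_READ(GEN8_DE_MISC_IIR);

	if (iir) {
		I915_WRITE(GEN8_DE_MISC_IIR, iir);	/* ack before handling */
		ret = IRQ_HANDLED;
		/* ... decode and dispatch the cached iir bits ... */
	}
	return ret;
}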
if (iir & GEN8_PIPE_VBLANK && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); flip_done = iir; if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2436,13 +2392,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) else flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; - if (flip_done) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (flip_done) + intel_finish_page_flip_cs(dev_priv, pipe); if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); @@ -2459,7 +2413,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) fault_errors); } - if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && + if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && master_ctl & GEN8_DE_PCH_IRQ) { /* * FIXME(BDW): Assume for now that the new interrupt handling @@ -2472,9 +2426,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) - spt_irq_handler(dev, iir); + spt_irq_handler(dev_priv, iir); else - cpt_irq_handler(dev, iir); + cpt_irq_handler(dev_priv, iir); } else { /* * Like on previous PCH there seems to be something @@ -2490,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 master_ctl; u32 gt_iir[4] = {}; irqreturn_t ret; @@ -2521,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return ret; } -static void i915_error_wake_up(struct drm_i915_private *dev_priv, - bool reset_completed) +static void i915_error_wake_up(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *engine; - /* * Notify all waiters for GPU completion events that reset state has * been changed, and that they need to restart their wait after @@ -2534,36 +2485,28 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, */ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ - for_each_engine(engine, dev_priv) - wake_up_all(&engine->irq_queue); + wake_up_all(&dev_priv->gpu_error.wait_queue); /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ wake_up_all(&dev_priv->pending_flip_queue); - - /* - * Signal tasks blocked in i915_gem_wait_for_error that the pending - * reset state is cleared. - */ - if (reset_completed) - wake_up_all(&dev_priv->gpu_error.reset_queue); } /** * i915_reset_and_wakeup - do process context error handling work - * @dev: drm device + * @dev_priv: i915 device private * * Fire an error uevent so userspace can see that a hang or error * was detected. 
*/ -static void i915_reset_and_wakeup(struct drm_device *dev) +static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int ret; - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); /* * Note that there's only one work item which does gpu resets, so we @@ -2577,8 +2520,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ if (i915_reset_in_progress(&dev_priv->gpu_error)) { DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, - reset_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* * In most cases it's guaranteed that we get here with an RPM @@ -2589,7 +2531,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ intel_runtime_pm_get(dev_priv); - intel_prepare_reset(dev); + intel_prepare_reset(dev_priv); /* * All state reset _must_ be completed before we update the @@ -2597,27 +2539,26 @@ static void i915_reset_and_wakeup(struct drm_device *dev) * pending state and not properly drop locks, resulting in * deadlocks with the reset work. */ - ret = i915_reset(dev); + ret = i915_reset(dev_priv); - intel_finish_reset(dev); + intel_finish_reset(dev_priv); intel_runtime_pm_put(dev_priv); if (ret == 0) - kobject_uevent_env(&dev->primary->kdev->kobj, + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); /* * Note: The wake_up also serves as a memory barrier so that * waiters see the update value of the reset counter atomic_t. */ - i915_error_wake_up(dev_priv, true); + wake_up_all(&dev_priv->gpu_error.reset_queue); } } -static void i915_report_and_clear_eir(struct drm_device *dev) +static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); int pipe, i; @@ -2627,9 +2568,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err("render error detected, EIR: 0x%08x\n", eir); - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); @@ -2651,7 +2592,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); @@ -2673,7 +2614,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); for (i = 0; i < ARRAY_SIZE(instdone); i++) pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); @@ -2709,18 +2650,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev) /** * i915_handle_error - handle a gpu error - * @dev: drm device + * @dev_priv: i915 device private * @engine_mask: mask representing engines that are hung * Do some basic checking of register state at error time and * dump it to the syslog. 
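/*
 * Editor's note: i915_reset_and_wakeup() above emits three uevents --
 * ERROR=1 when a hang is detected, RESET=1 before the reset, ERROR=0
 * once it completes -- so userspace can bracket the whole recovery.
 * Condensed sketch of that sequence (hypothetical helper; error
 * handling elided):
 */
static void example_reset_uevents(struct kobject *kobj, bool reset_ok)
{
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
	/* ... perform the actual GPU reset in between ... */
	if (reset_ok)
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}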
Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). + * @fmt: Error message format string */ -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...) { - struct drm_i915_private *dev_priv = dev->dev_private; va_list args; char error_msg[80]; @@ -2728,8 +2670,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); - i915_capture_error_state(dev, engine_mask, error_msg); - i915_report_and_clear_eir(dev); + i915_capture_error_state(dev_priv, engine_mask, error_msg); + i915_report_and_clear_eir(dev_priv); if (engine_mask) { atomic_or(I915_RESET_IN_PROGRESS_FLAG, @@ -2748,10 +2690,10 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, * ensure that the waiters see the updated value of the reset * counter atomic_t. */ - i915_error_wake_up(dev_priv, false); + i915_error_wake_up(dev_priv); } - i915_reset_and_wakeup(dev); + i915_reset_and_wakeup(dev_priv); } /* Called from drm generic code, passed 'crtc' which @@ -2759,7 +2701,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, */ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2776,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -2790,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2803,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2818,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) */ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2830,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -2842,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2853,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2869,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno) } static bool -ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) +ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) { - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(engine->i915) >= 8) { return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; @@ -2884,10 +2826,10 @@ static struct intel_engine_cs * semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, u64 offset) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; - if (INTEL_INFO(dev_priv)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { for_each_engine(signaller, dev_priv) { if (engine == signaller) continue; @@ -2916,7 +2858,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, static struct intel_engine_cs * semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2942,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) + if (!ipehr_is_semaphore_wait(engine, ipehr)) return NULL; /* @@ -2954,7 +2896,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) * ringbuffer itself. */ head = I915_READ_HEAD(engine) & HEAD_ADDR; - backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; + backwards = (INTEL_GEN(dev_priv) >= 8) ? 
5 : 4; for (i = backwards; i; --i) { /* @@ -2976,7 +2918,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { offset = ioread32(engine->buffer->virtual_start + head + 12); offset <<= 32; offset = ioread32(engine->buffer->virtual_start + head + 8); @@ -2986,7 +2928,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) static int semaphore_passed(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; u32 seqno; @@ -3000,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine) if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) return -1; - if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) + if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) return 1; /* cursory check for an unkickable deadlock */ @@ -3028,7 +2970,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) if (engine->id != RCS) return true; - i915_get_extra_instdone(engine->dev, instdone); + i915_get_extra_instdone(engine->i915, instdone); /* There might be unstable subunit states even when * actual head is not moving. Filter out the unstable ones by @@ -3069,8 +3011,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_ring_hangcheck_action ring_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; enum intel_ring_hangcheck_action ha; u32 tmp; @@ -3078,7 +3019,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != HANGCHECK_HUNG) return ha; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -3088,19 +3029,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; } - if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(engine)) { default: return HANGCHECK_HUNG; case 1: - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); @@ -3113,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } -static unsigned kick_waiters(struct intel_engine_cs *engine) +static unsigned long kick_waiters(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = to_i915(engine->dev); - unsigned user_interrupts = READ_ONCE(engine->user_interrupts); + struct drm_i915_private *i915 = engine->i915; + unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); - if (engine->hangcheck.user_interrupts == user_interrupts && + if (engine->hangcheck.user_interrupts == irq_count && !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { - if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) + if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) DRM_ERROR("Hangcheck timer elapsed... 
%s idle\n", engine->name); - else - DRM_INFO("Fake missed irq on %s\n", - engine->name); - wake_up_all(&engine->irq_queue); + + intel_engine_enable_fake_irq(engine); } - return user_interrupts; + return irq_count; } /* * This is called when the chip hasn't reported back with completed @@ -3144,11 +3083,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); - struct drm_device *dev = dev_priv->dev; struct intel_engine_cs *engine; - enum intel_engine_id id; - int busy_count = 0, rings_hung = 0; - bool stuck[I915_NUM_ENGINES] = { 0 }; + unsigned int hung = 0, stuck = 0; + int busy_count = 0; #define BUSY 1 #define KICK 5 #define HUNG 20 @@ -3157,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915.enable_hangcheck) return; - /* - * The hangcheck work is synced during runtime suspend, we don't - * require a wakeref. TODO: instead of disabling the asserts make - * sure that we hold a reference when this work is running. - */ - DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + if (!READ_ONCE(dev_priv->gt.awake)) + return; /* As enabling the GPU requires fairly extensive mmio access, * periodically arm the mmio checker to see if we are triggering @@ -3170,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv) { + bool busy = intel_engine_has_waiter(engine); u64 acthd; u32 seqno; unsigned user_interrupts; - bool busy = true; semaphore_clear_deadlocks(dev_priv); @@ -3189,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) engine->irq_seqno_barrier(engine); acthd = intel_ring_get_active_head(engine); - seqno = engine->get_seqno(engine); + seqno = intel_engine_get_seqno(engine); /* Reset stuck interrupts between batch advances */ user_interrupts = 0; @@ -3197,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (engine->hangcheck.seqno == seqno) { if (ring_idle(engine, seqno)) { engine->hangcheck.action = HANGCHECK_IDLE; - if (waitqueue_active(&engine->irq_queue)) { + if (busy) { /* Safeguard against driver failure */ user_interrupts = kick_waiters(engine); engine->hangcheck.score += BUSY; - } else - busy = false; + } } else { /* We always increment the hangcheck score * if the ring is busy and still processing @@ -3234,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work) break; case HANGCHECK_HUNG: engine->hangcheck.score += HUNG; - stuck[id] = true; break; } } + + if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { + hung |= intel_engine_flag(engine); + if (engine->hangcheck.action != HANGCHECK_HUNG) + stuck |= intel_engine_flag(engine); + } } else { engine->hangcheck.action = HANGCHECK_ACTIVE; @@ -3262,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work) busy_count += busy; } - for_each_engine_id(engine, dev_priv, id) { - if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { - DRM_INFO("%s on %s\n", - stuck[id] ? "stuck" : "no progress", - engine->name); - rings_hung |= intel_engine_flag(engine); - } - } + if (hung) { + char msg[80]; + int len; - if (rings_hung) { - i915_handle_error(dev, rings_hung, "Engine(s) hung"); - goto out; + /* If some rings hung but others were still busy, only + * blame the hanging rings in the synopsis. 
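/*
 * Editor's note: the hangcheck synopsis assembled just below uses
 * scnprintf(), which returns the number of bytes actually written and
 * never overruns the buffer, so repeated appends stay bounded; the
 * trailing ", " is then chopped off. Minimal sketch of the idiom
 * (hypothetical helper; assumes count >= 1 and a buffer larger than
 * the prefix):
 */
static void example_join_names(char *msg, size_t size,
			       const char *names[], int count)
{
	int len = scnprintf(msg, size, "Hang on ");
	int i;

	for (i = 0; i < count; i++)
		len += scnprintf(msg + len, size - len, "%s, ", names[i]);
	msg[len - 2] = '\0';	/* drop the final ", ", as the patch does */
}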
+ */ + if (stuck != hung) + hung &= ~stuck; + len = scnprintf(msg, sizeof(msg), + "%s on ", stuck == hung ? "No progress" : "Hang"); + for_each_engine_masked(engine, dev_priv, hung) + len += scnprintf(msg + len, sizeof(msg) - len, + "%s, ", engine->name); + msg[len-2] = '\0'; + + return i915_handle_error(dev_priv, hung, msg); } + /* Reset timer in case GPU hangs without another request being added */ if (busy_count) - /* Reset timer case chip hangs without another request - * being added */ - i915_queue_hangcheck(dev); - -out: - ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); -} - -void i915_queue_hangcheck(struct drm_device *dev) -{ - struct i915_gpu_error *e = &to_i915(dev)->gpu_error; - - if (!i915.enable_hangcheck) - return; - - /* Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. - */ - - queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, - round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); + i915_queue_hangcheck(dev_priv); } static void ibx_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_NOP(dev)) return; @@ -3324,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev) */ static void ibx_irq_pre_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_NOP(dev)) return; @@ -3336,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev) static void gen5_gt_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); GEN5_IRQ_RESET(GT); if (INTEL_INFO(dev)->gen >= 6) @@ -3396,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) */ static void ironlake_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(HWSTAM, 0xffffffff); @@ -3411,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev) static void valleyview_irq_preinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); @@ -3434,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) static void gen8_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; I915_WRITE(GEN8_MASTER_IRQ, 0); @@ -3480,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); } static void cherryview_irq_preinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); @@ -3500,31 +3422,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } -static u32 intel_hpd_enabled_irqs(struct drm_device *dev, +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, const u32 hpd[HPD_NUM_PINS]) { - struct 
drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(dev, encoder) + for_each_intel_encoder(&dev_priv->drm, encoder) if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } -static void ibx_hpd_irq_setup(struct drm_device *dev) +static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (HAS_PCH_IBX(dev)) { + if (HAS_PCH_IBX(dev_priv)) { hotplug_irqs = SDE_HOTPLUG_MASK; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); } ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3543,18 +3463,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) * When CPU and PCH are on the same package, port A * HPD must be enabled in both north and south. */ - if (HAS_PCH_LPT_LP(dev)) + if (HAS_PCH_LPT_LP(dev_priv)) hotplug |= PORTA_HOTPLUG_ENABLE; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } -static void spt_hpd_irq_setup(struct drm_device *dev) +static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; hotplug_irqs = SDE_HOTPLUG_MASK_SPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3569,24 +3488,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev) I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); } -static void ilk_hpd_irq_setup(struct drm_device *dev) +static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { hotplug_irqs = DE_DP_A_HOTPLUG_IVB; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } else { hotplug_irqs = DE_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } @@ -3601,15 +3519,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev) hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); - ibx_hpd_irq_setup(dev); + ibx_hpd_irq_setup(dev_priv); } -static void bxt_hpd_irq_setup(struct drm_device *dev) +static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; bdw_update_port_irq(dev_priv, hotplug_irqs, 
enabled_irqs); @@ -3642,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev) static void ibx_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; if (HAS_PCH_NOP(dev)) @@ -3659,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev) static void gen5_gt_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pm_irqs, gt_irqs; pm_irqs = gt_irqs = 0; @@ -3673,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) gt_irqs |= GT_RENDER_USER_INTERRUPT; if (IS_GEN5(dev)) { - gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | - ILK_BSD_USER_INTERRUPT; + gt_irqs |= ILK_BSD_USER_INTERRUPT; } else { gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; } @@ -3696,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 display_mask, extra_mask; if (INTEL_INFO(dev)->gen >= 7) { @@ -3775,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) static int valleyview_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen5_gt_irq_postinstall(dev); @@ -3827,6 +3743,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) uint32_t de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; + u32 de_misc_masked = GEN8_DE_MISC_GSE; enum pipe pipe; if (INTEL_INFO(dev_priv)->gen >= 9) { @@ -3862,11 +3779,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_pipe_enables); GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); + GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); } static int gen8_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_SPLIT(dev)) ibx_irq_pre_postinstall(dev); @@ -3885,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) static int cherryview_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen8_gt_irq_postinstall(dev_priv); @@ -3902,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev) static void gen8_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3912,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3932,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev) static void cherryview_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3952,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev) static void ironlake_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3962,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev) static void i8xx_irq_preinstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; for_each_pipe(dev_priv, pipe) @@ -3974,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4006,13 +3924,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i8xx_handle_vblank(struct drm_device *dev, +static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4027,19 +3944,18 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u16 iir, new_iir; u32 pipe_stats[2]; int pipe; @@ -4089,15 +4005,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, plane, pipe, iir)) + i8xx_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4116,7 +4032,7 @@ out: static void i8xx_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; for_each_pipe(dev_priv, pipe) { @@ -4131,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev) static void i915_irq_preinstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (I915_HAS_HOTPLUG(dev)) { @@ -4149,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev) static int i915_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 enable_mask; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4182,7 +4098,7 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_WRITE(IER, enable_mask); POSTING_READ(IER); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); /* Interrupt 
setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -4197,13 +4113,12 @@ static int i915_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i915_handle_vblank(struct drm_device *dev, +static bool i915_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4218,19 +4133,18 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4273,11 +4187,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) break; /* Consume port. Then clear IIR or we'll miss events */ - if (I915_HAS_HOTPLUG(dev) && + if (I915_HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4288,18 +4202,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, plane, pipe, iir)) + i915_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4307,7 +4221,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. 
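The i8xx_handle_vblank()/i915_handle_vblank() pair in the hunks above now take dev_priv directly, but the flip-completion test itself is unchanged: a vblank only finishes a page flip if the flip-pending bit was set in the latched IIR snapshot and has since cleared from the live ISR; otherwise the check is re-armed. A minimal userspace sketch of that double-check, with register names and bit layout as stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the flip-completion test: 'iir' is the latched
 * interrupt snapshot, 'isr' the live status register. */
static bool handle_vblank(unsigned int iir, unsigned int isr,
                          unsigned int flip_pending)
{
	if (!(iir & flip_pending))	/* no flip was queued at IRQ time */
		return false;
	if (isr & flip_pending)		/* still pending in hardware: not done;
					 * the driver re-checks via
					 * intel_check_page_flip() instead */
		return false;
	return true;			/* flip completed; finish it */
}

int main(void)
{
	unsigned int fp = 1u << 2;

	printf("%d %d %d\n",
	       handle_vblank(fp, 0, fp),	/* 1: completed */
	       handle_vblank(fp, fp, fp),	/* 0: still pending */
	       handle_vblank(0, 0, fp));	/* 0: nothing queued */
	return 0;
}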
If another bit got @@ -4335,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) static void i915_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (I915_HAS_HOTPLUG(dev)) { @@ -4357,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev) static void i965_irq_preinstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); @@ -4373,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev) static int i965_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 enable_mask; u32 error_mask; @@ -4391,7 +4305,7 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask |= I915_USER_INTERRUPT; - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; /* Interrupt setup is already guaranteed to be single-threaded, this is @@ -4406,7 +4320,7 @@ static int i965_irq_postinstall(struct drm_device *dev) * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. */ - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { error_mask = ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV | @@ -4424,26 +4338,25 @@ static int i965_irq_postinstall(struct drm_device *dev) i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); return 0; } -static void i915_hpd_irq_setup(struct drm_device *dev) +static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en; assert_spin_locked(&dev_priv->irq_lock); /* Note HDMI and DP share hotplug bits */ /* enable bits are the same for all generations */ - hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); + hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); /* Programming the CRT detection parameters tends to generate a spurious hotplug event about three seconds later. So just do it once. 
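That MSI caveat is why the legacy handlers loop on IIR instead of servicing it once: MSI only fires on a 0 -> nonzero transition, so the handler must keep reading and acking until IIR reads back zero or an edge could be lost forever. A toy model of the loop, with the hardware register faked as a global:

#include <stdio.h>

static unsigned int hw_iir = 0x3;	/* pretend two events are latched */

static unsigned int read_iir(void) { return hw_iir; }
static void ack_iir(unsigned int bits) { hw_iir &= ~bits; }	/* W1C in real HW */

int main(void)
{
	unsigned int iir, passes = 0;

	while ((iir = read_iir()) != 0) {
		ack_iir(iir);
		/* ... dispatch handlers for the bits in 'iir' ... */
		passes++;
	}
	printf("serviced in %u pass(es)\n", passes);
	return 0;
}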
*/ - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; @@ -4458,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; int ret = IRQ_NONE, pipe; @@ -4510,7 +4423,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4523,24 +4436,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, pipe, pipe, iir)) + i915_handle_vblank(dev_priv, pipe, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -4567,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) static void i965_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (!dev_priv) @@ -4597,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev) */ void intel_irq_init(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; intel_hpd_init_work(dev_priv); @@ -4611,6 +4524,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv) else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + dev_priv->rps.pm_intr_keep = 0; + + /* + * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. 
+ */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; + INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); @@ -4674,12 +4601,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (INTEL_INFO(dev_priv)->gen == 2) { + if (IS_GEN2(dev_priv)) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev_priv)->gen == 3) { + } else if (IS_GEN3(dev_priv)) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; @@ -4717,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ dev_priv->pm.irqs_enabled = true; - return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); + return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); } /** @@ -4729,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - drm_irq_uninstall(dev_priv->dev); + drm_irq_uninstall(&dev_priv->drm); intel_hpd_cancel_work(dev_priv); dev_priv->pm.irqs_enabled = false; } @@ -4743,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { - dev_priv->dev->driver->irq_uninstall(dev_priv->dev); + dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); dev_priv->pm.irqs_enabled = false; - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); } /** @@ -4758,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) { dev_priv->pm.irqs_enabled = true; - dev_priv->dev->driver->irq_preinstall(dev_priv->dev); - dev_priv->dev->driver->irq_postinstall(dev_priv->dev); + dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); + dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 1779f02e6df8..b6e404c91eed 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = { .verbose_state_checks = 1, .nuclear_pageflip = 0, .edp_vswing = 0, - .enable_guc_submission = false, + .enable_guc_loading = 0, + .enable_guc_submission = 0, .guc_log_level = -1, .enable_dp_mst = true, .inject_load_failure = 0, + .enable_dpcd_backlight = false, + .enable_gvt = false, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing, "(0=use value from vbt [default], 1=low power swing(200mV)," "2=default swing(400mV))"); -module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); -MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); +module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); +MODULE_PARM_DESC(enable_guc_loading, + "Enable GuC firmware loading " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + 
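The pm_intr_keep setup in the hunk above encodes two rules: on gen6/7 parts other than Haswell the UP_EI_EXPIRED bit stays permanently unmasked, because SNB/IVB can (and VLV/CHV may) hard hang on a looping batchbuffer when it is masked, and gen8+ always keeps the redirect-to-non-display bit set. A compact sketch of the same decision, with illustrative bit values standing in for the i915_reg.h definitions:

#include <stdio.h>
#include <stdint.h>

#define PM_UP_EI_EXPIRED	(1u << 2)	/* illustrative position */
#define PM_REDIRECT_TO_NON_DISP	(1u << 31)

/* Compute which RPS interrupt bits must never be masked, mirroring the
 * dev_priv->rps.pm_intr_keep setup in intel_irq_init(). */
static uint32_t pm_intr_keep(int gen, int is_haswell)
{
	uint32_t keep = 0;

	if (gen <= 7 && !is_haswell)
		keep |= PM_UP_EI_EXPIRED;
	if (gen >= 8)
		keep |= PM_REDIRECT_TO_NON_DISP;
	return keep;
}

int main(void)
{
	printf("gen6: %#x, gen9: %#x\n",
	       (unsigned)pm_intr_keep(6, 0), (unsigned)pm_intr_keep(9, 0));
	return 0;
}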
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); +MODULE_PARM_DESC(enable_guc_submission, + "Enable GuC submission " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); module_param_named(guc_log_level, i915.guc_log_level, int, 0400); MODULE_PARM_DESC(guc_log_level, @@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst, module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); MODULE_PARM_DESC(inject_load_failure, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); +module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); +MODULE_PARM_DESC(enable_dpcd_backlight, + "Enable support for DPCD backlight control (default:false)"); + +module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); +MODULE_PARM_DESC(enable_gvt, + "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 02bc27804291..0ad020b4a925 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -45,6 +45,8 @@ struct i915_params { int enable_ips; int invert_brightness; int enable_cmd_parser; + int enable_guc_loading; + int enable_guc_submission; int guc_log_level; int use_mmio_flip; int mmio_debug; @@ -57,10 +59,11 @@ struct i915_params { bool load_detect_test; bool reset; bool disable_display; - bool enable_guc_submission; bool verbose_state_checks; bool nuclear_pageflip; bool enable_dp_mst; + bool enable_dpcd_backlight; + bool enable_gvt; }; extern struct i915_params i915 __read_mostly; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c new file mode 100644 index 000000000000..949c01686a66 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -0,0 +1,503 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include <linux/console.h> +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> + +#include "i915_drv.h" + +#define GEN_DEFAULT_PIPEOFFSETS \ + .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ + PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ + .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ + TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ + .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } + +#define GEN_CHV_PIPEOFFSETS \ + .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ + CHV_PIPE_C_OFFSET }, \ + .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ + CHV_TRANSCODER_C_OFFSET, }, \ + .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ + CHV_PALETTE_C_OFFSET } + +#define CURSOR_OFFSETS \ + .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } + +#define IVB_CURSOR_OFFSETS \ + .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } + +#define BDW_COLORS \ + .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } +#define CHV_COLORS \ + .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } + +static const struct intel_device_info intel_i830_info = { + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_845g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i85x_info = { + .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i865g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i915g_info = { + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i915gm_info = { + .gen = 3, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i945g_info = { + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i945gm_info = { + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, + .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i965g_info = { + .gen = 4, .is_broadwater = 1, .num_pipes = 2, + .has_hotplug = 1, + .has_overlay = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i965gm_info = { + .gen = 4, .is_crestline 
= 1, .num_pipes = 2, + .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, + .has_overlay = 1, + .supports_tv = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_g33_info = { + .gen = 3, .is_g33 = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_g45_info = { + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_gm45_info = { + .gen = 4, .is_g4x = 1, .num_pipes = 2, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .supports_tv = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_pineview_info = { + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_ironlake_d_info = { + .gen = 5, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_ironlake_m_info = { + .gen = 5, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_sandybridge_d_info = { + .gen = 6, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_sandybridge_m_info = { + .gen = 6, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +#define GEN7_FEATURES \ + .gen = 7, .num_pipes = 3, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .has_fbc = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_llc = 1, \ + GEN_DEFAULT_PIPEOFFSETS, \ + IVB_CURSOR_OFFSETS + +static const struct intel_device_info intel_ivybridge_d_info = { + GEN7_FEATURES, + .is_ivybridge = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .is_mobile = 1, +}; + +static const struct intel_device_info intel_ivybridge_q_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .num_pipes = 0, /* legal, last one wins */ +}; + +#define VLV_FEATURES \ + .gen = 7, .num_pipes = 2, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .display_mmio_offset = VLV_DISPLAY_BASE, \ + GEN_DEFAULT_PIPEOFFSETS, \ + CURSOR_OFFSETS + +static const struct intel_device_info intel_valleyview_m_info = { + VLV_FEATURES, + .is_valleyview = 1, + .is_mobile = 1, +}; + +static const struct intel_device_info intel_valleyview_d_info = { + VLV_FEATURES, + .is_valleyview = 1, +}; + +#define HSW_FEATURES \ + GEN7_FEATURES, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .has_ddi = 1, \ + .has_fpga_dbg = 1 + +static const struct intel_device_info 
intel_haswell_d_info = { + HSW_FEATURES, + .is_haswell = 1, +}; + +static const struct intel_device_info intel_haswell_m_info = { + HSW_FEATURES, + .is_haswell = 1, + .is_mobile = 1, +}; + +#define BDW_FEATURES \ + HSW_FEATURES, \ + BDW_COLORS + +static const struct intel_device_info intel_broadwell_d_info = { + BDW_FEATURES, + .gen = 8, + .is_broadwell = 1, +}; + +static const struct intel_device_info intel_broadwell_m_info = { + BDW_FEATURES, + .gen = 8, .is_mobile = 1, + .is_broadwell = 1, +}; + +static const struct intel_device_info intel_broadwell_gt3d_info = { + BDW_FEATURES, + .gen = 8, + .is_broadwell = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_broadwell_gt3m_info = { + BDW_FEATURES, + .gen = 8, .is_mobile = 1, + .is_broadwell = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_cherryview_info = { + .gen = 8, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .is_cherryview = 1, + .display_mmio_offset = VLV_DISPLAY_BASE, + GEN_CHV_PIPEOFFSETS, + CURSOR_OFFSETS, + CHV_COLORS, +}; + +static const struct intel_device_info intel_skylake_info = { + BDW_FEATURES, + .is_skylake = 1, + .gen = 9, +}; + +static const struct intel_device_info intel_skylake_gt3_info = { + BDW_FEATURES, + .is_skylake = 1, + .gen = 9, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_broxton_info = { + .is_broxton = 1, + .gen = 9, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .num_pipes = 3, + .has_ddi = 1, + .has_fpga_dbg = 1, + .has_fbc = 1, + .has_pooled_eu = 0, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, + BDW_COLORS, +}; + +static const struct intel_device_info intel_kabylake_info = { + BDW_FEATURES, + .is_kabylake = 1, + .gen = 9, +}; + +static const struct intel_device_info intel_kabylake_gt3_info = { + BDW_FEATURES, + .is_kabylake = 1, + .gen = 9, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +/* + * Make sure any device matches here are from most specific to most + * general. For example, since the Quanta match is based on the subsystem + * and subvendor IDs, we need it to come before the more general IVB + * PCI ID matches, otherwise we'll use the wrong info struct above. 
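The ordering rule in that comment exists because PCI ID table matching is first hit wins: a subsystem-qualified entry like the Quanta one must precede the generic IVB entries or it would never be selected. A userspace sketch of the first-match semantic (all IDs here are illustrative, not real PCI IDs):

#include <stdio.h>

struct id_entry {
	unsigned int device;	/* device ID; 0 terminates the table */
	unsigned int subvendor;	/* 0 = match any */
	const char  *info;
};

/* Specific (subvendor-qualified) entry first, generic entry second. */
static const struct id_entry ids[] = {
	{ 0x0166, 0x152d, "ivb_q_info" },
	{ 0x0166, 0,      "ivb_m_info" },
	{ 0, 0, NULL }
};

static const char *match(unsigned int dev, unsigned int subven)
{
	const struct id_entry *e;

	for (e = ids; e->info; e++)
		if (e->device == dev &&
		    (!e->subvendor || e->subvendor == subven))
			return e->info;
	return "no match";
}

int main(void)
{
	printf("%s\n", match(0x0166, 0x152d));	/* ivb_q_info */
	printf("%s\n", match(0x0166, 0x8086));	/* ivb_m_info */
	return 0;
}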
+ */ +static const struct pci_device_id pciidlist[] = { + INTEL_I830_IDS(&intel_i830_info), + INTEL_I845G_IDS(&intel_845g_info), + INTEL_I85X_IDS(&intel_i85x_info), + INTEL_I865G_IDS(&intel_i865g_info), + INTEL_I915G_IDS(&intel_i915g_info), + INTEL_I915GM_IDS(&intel_i915gm_info), + INTEL_I945G_IDS(&intel_i945g_info), + INTEL_I945GM_IDS(&intel_i945gm_info), + INTEL_I965G_IDS(&intel_i965g_info), + INTEL_G33_IDS(&intel_g33_info), + INTEL_I965GM_IDS(&intel_i965gm_info), + INTEL_GM45_IDS(&intel_gm45_info), + INTEL_G45_IDS(&intel_g45_info), + INTEL_PINEVIEW_IDS(&intel_pineview_info), + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), + INTEL_HSW_D_IDS(&intel_haswell_d_info), + INTEL_HSW_M_IDS(&intel_haswell_m_info), + INTEL_VLV_M_IDS(&intel_valleyview_m_info), + INTEL_VLV_D_IDS(&intel_valleyview_d_info), + INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), + INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), + INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), + INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), + INTEL_CHV_IDS(&intel_cherryview_info), + INTEL_SKL_GT1_IDS(&intel_skylake_info), + INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_BXT_IDS(&intel_broxton_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), + INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), + {0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +extern int i915_driver_load(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct intel_device_info *intel_info = + (struct intel_device_info *) ent->driver_data; + + if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { + DRM_INFO("This hardware requires preliminary hardware support.\n" + "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); + return -ENODEV; + } + + /* Only bind to function 0 of the device. Early generations + * used function 1 as a placeholder for multi-head. This causes + * us confusion instead, especially on the systems where both + * functions have the same PCI-ID! + */ + if (PCI_FUNC(pdev->devfn)) + return -ENODEV; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (vga_switcheroo_client_probe_defer(pdev)) + return -EPROBE_DEFER; + + return i915_driver_load(pdev, ent); +} + +extern void i915_driver_unload(struct drm_device *dev); + +static void i915_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + i915_driver_unload(dev); + drm_dev_unref(dev); +} + +extern const struct dev_pm_ops i915_pm_ops; + +static struct pci_driver i915_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = i915_pci_probe, + .remove = i915_pci_remove, + .driver.pm = &i915_pm_ops, +}; + +static int __init i915_init(void) +{ + bool use_kms = true; + + /* + * Enable KMS by default, unless explicitly overridden by + * either the i915.modeset parameter or by the + * vga_text_mode_force boot option.
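i915_pci_probe() above refuses to bind to anything but PCI function 0, since early multi-head parts exposed a placeholder device at function 1 with the same ID. The devfn field it inspects packs slot and function the way the kernel's PCI_SLOT()/PCI_FUNC() macros assume; a standalone illustration:

#include <stdio.h>

/* devfn packs the slot number in the upper five bits and the function
 * number in the lower three, matching PCI_SLOT()/PCI_FUNC(). */
#define SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = (2 << 3) | 1;	/* slot 2, function 1 */

	printf("slot %u func %u\n", SLOT(devfn), FUNC(devfn));
	/* a probe like i915_pci_probe() would reject this device,
	 * because FUNC(devfn) != 0 */
	return 0;
}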
+ */ + + if (i915.modeset == 0) + use_kms = false; + + if (vgacon_text_force() && i915.modeset == -1) + use_kms = false; + + if (!use_kms) { + /* Silently fail loading to not upset userspace. */ + DRM_DEBUG_DRIVER("KMS disabled.\n"); + return 0; + } + + return pci_register_driver(&i915_pci_driver); +} + +static void __exit i915_exit(void) +{ + if (!i915_pci_driver.driver.owner) + return; + + pci_unregister_driver(&i915_pci_driver); +} + +module_init(i915_init); +module_exit(i915_exit); + +MODULE_AUTHOR("Tungsten Graphics, Inc."); +MODULE_AUTHOR("Intel Corporation"); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h new file mode 100644 index 000000000000..c0cb2974caac --- /dev/null +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -0,0 +1,113 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _I915_PVINFO_H_ +#define _I915_PVINFO_H_ + +/* The MMIO offset of the shared info between guest and host emulator */ +#define VGT_PVINFO_PAGE 0x78000 +#define VGT_PVINFO_SIZE 0x1000 + +/* + * The following structure pages are defined in GEN MMIO space + * for virtualization. (One page for now) + */ +#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ +#define VGT_VERSION_MAJOR 1 +#define VGT_VERSION_MINOR 0 + +#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) +#define INTEL_VGT_IF_VERSION \ + INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) + +/* + * notifications from guest to vgpu device model + */ +enum vgt_g2v_type { + VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, + VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, + VGT_G2V_EXECLIST_CONTEXT_CREATE, + VGT_G2V_EXECLIST_CONTEXT_DESTROY, + VGT_G2V_MAX, +}; + +struct vgt_if { + u64 magic; /* VGT_MAGIC */ + uint16_t version_major; + uint16_t version_minor; + u32 vgt_id; /* ID of vGT instance */ + u32 rsv1[12]; /* pad to offset 0x40 */ + /* + * Data structure to describe the balooning info of resources. + * Each VM can only have one portion of continuous area for now. 
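The new i915_pvinfo.h above carries the guest/host handshake constants; INTEL_VGT_IF_VERSION_ENCODE() simply packs the major version into the high 16 bits and the minor into the low 16. A standalone illustration of the packing and unpacking:

#include <stdio.h>
#include <stdint.h>

/* Same packing as INTEL_VGT_IF_VERSION_ENCODE(). */
#define VERSION_ENCODE(major, minor) (((major) << 16) | (minor))
#define VERSION_MAJOR(v) ((v) >> 16)
#define VERSION_MINOR(v) ((v) & 0xffff)

int main(void)
{
	uint32_t v = VERSION_ENCODE(1, 0);

	printf("encoded %#x -> %u.%u\n", (unsigned)v,
	       (unsigned)VERSION_MAJOR(v), (unsigned)VERSION_MINOR(v));
	/* a guest would refuse to enable vGPU mode on a major mismatch */
	return 0;
}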
+ * (May support scattered resource in future) + * (starting from offset 0x40) + */ + struct { + /* Aperture register balooning */ + struct { + u32 base; + u32 size; + } mappable_gmadr; /* aperture */ + /* GMADR register balooning */ + struct { + u32 base; + u32 size; + } nonmappable_gmadr; /* non aperture */ + /* allowed fence registers */ + u32 fence_num; + u32 rsv2[3]; + } avail_rs; /* available/assigned resource */ + u32 rsv3[0x200 - 24]; /* pad to half page */ + /* + * The bottom half page is for response from Gfx driver to hypervisor. + */ + u32 rsv4; + u32 display_ready; /* ready for display owner switch */ + + u32 rsv5[4]; + + u32 g2v_notify; + u32 rsv6[7]; + + struct { + u32 lo; + u32 hi; + } pdp[4]; + + u32 execlist_context_descriptor_lo; + u32 execlist_context_descriptor_hi; + + u32 rsv7[0x200 - 24]; /* pad to one page */ +} __packed; + +#define vgtif_reg(x) \ + _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))) + +/* vGPU display status to be used by the host side */ +#define VGT_DRV_DISPLAY_NOT_READY 0 +#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ + +#endif /* _I915_PVINFO_H_ */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3fcf7dd5b6ca..ce14fe09d962 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -445,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) +#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) +#define GEN9_MEDIA_POOL_ENABLE (1 << 31) #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) @@ -716,6 +718,9 @@ enum skl_disp_power_wells { /* Not actual bit groups. Used as IDs for lookup_power_well() */ SKL_DISP_PW_ALWAYS_ON, SKL_DISP_PW_DC_OFF, + + BXT_DPIO_CMN_A, + BXT_DPIO_CMN_BC, }; #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) @@ -889,7 +894,7 @@ enum skl_disp_power_wells { * PLLs can be routed to any transcoder A/B/C. * * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is - * digital port D (CHV) or port A (BXT). + * digital port D (CHV) or port A (BXT). 
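vgtif_reg() above now derives each register offset from the structure layout with offsetof(), replacing the old cast-through-NULL trick in i915_vgpu.h, and i915_check_vgpu() BUILD_BUG_ONs the structure size against VGT_PVINFO_SIZE. An abbreviated stand-in shows the same pattern with a compile-time size check; the field layout here is deliberately simplified:

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define PVINFO_PAGE 0x78000u
#define PVINFO_SIZE 0x1000u

/* Abbreviated stand-in for struct vgt_if: only the layout idea matters. */
struct pv_page {
	uint64_t magic;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t vgt_id;
	uint32_t rsv[1020];	/* pad the structure out to 4 KiB */
};

/* Equivalent of BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE). */
_Static_assert(sizeof(struct pv_page) == PVINFO_SIZE, "PVINFO page must be 4 KiB");

/* offsetof()-based register macro, like the new vgtif_reg(). */
#define pv_reg(field) (PVINFO_PAGE + offsetof(struct pv_page, field))

int main(void)
{
	printf("vgt_id register at %#lx\n", (unsigned long)pv_reg(vgt_id));
	/* prints 0x7800c: page base plus the 12-byte field offset */
	return 0;
}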
:: * * * Dual channel PHY (VLV/CHV/BXT) @@ -1276,6 +1281,15 @@ enum skl_disp_power_wells { #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) +#define _BXT_PHY_CTL_DDI_A 0x64C00 +#define _BXT_PHY_CTL_DDI_B 0x64C10 +#define _BXT_PHY_CTL_DDI_C 0x64C20 +#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10) +#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9) +#define BXT_PHY_LANE_ENABLED (1 << 8) +#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ + _BXT_PHY_CTL_DDI_B) + #define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI 0x64C90 #define COMMON_RESET_DIS (1 << 31) @@ -1672,6 +1686,9 @@ enum skl_disp_power_wells { #define GEN7_TLB_RD_ADDR _MMIO(0x4700) +#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) +#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18) + #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) @@ -2171,6 +2188,9 @@ enum skl_disp_power_wells { #define FBC_LL_SIZE (1536) +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN (1<<30) + /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) #define DPFC_CONTROL _MMIO(0x3208) @@ -2461,6 +2481,8 @@ enum skl_disp_power_wells { #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024) + #define _FPA0 0x6040 #define _FPA1 0x6044 #define _FPB0 0x6048 @@ -3032,6 +3054,18 @@ enum skl_disp_power_wells { /* Same as Haswell, but 72064 bytes now. */ #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) +enum { + INTEL_ADVANCED_CONTEXT = 0, + INTEL_LEGACY_32B_CONTEXT, + INTEL_ADVANCED_AD_CONTEXT, + INTEL_LEGACY_64B_CONTEXT +}; + +#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 +#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\ + INTEL_LEGACY_64B_CONTEXT : \ + INTEL_LEGACY_32B_CONTEXT) + #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 @@ -6045,6 +6079,9 @@ enum skl_disp_power_wells { #define FORCE_ARB_IDLE_PLANES (1 << 14) #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) +#define CHICKEN_PAR2_1 _MMIO(0x42090) +#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14) + #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 #define HSW_FBCQ_DIS (1 << 22) @@ -6084,6 +6121,7 @@ enum skl_disp_power_wells { #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) +#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) @@ -6108,7 +6146,14 @@ enum skl_disp_power_wells { #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 #define GEN8_L3SQCREG1 _MMIO(0xB100) -#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 +/* + * Note that on CHV the following has an off-by-one error wrt. to BSpec. + * Using the formula in BSpec leads to a hang, while the formula here works + * fine and matches the formulas for all other platforms. A BSpec change + * request has been filed to clarify this. 
+ */ +#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) +#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) #define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C @@ -7028,7 +7073,8 @@ enum skl_disp_power_wells { #define GEN6_RPDEUC _MMIO(0xA084) #define GEN6_RPDEUCSW _MMIO(0xA088) #define GEN6_RC_STATE _MMIO(0xA094) -#define RC6_STATE (1 << 18) +#define RC_SW_TARGET_STATE_SHIFT 16 +#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT) #define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) #define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) #define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) @@ -7042,13 +7088,17 @@ enum skl_disp_power_wells { #define VLV_RCEDATA _MMIO(0xA0BC) #define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) #define GEN6_PMINTRMSK _MMIO(0xA168) -#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) +#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) +#define GEN8_MISC_CTRL0 _MMIO(0xA180) #define VLV_PWRDWNUPCTL _MMIO(0xA294) #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) #define GEN9_PG_ENABLE _MMIO(0xA210) #define GEN9_RENDER_PG_ENABLE (1<<0) #define GEN9_MEDIA_PG_ENABLE (1<<1) +#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) +#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) +#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) #define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C) #define PIXEL_OVERLAP_CNT_MASK (3 << 30) @@ -7578,14 +7628,15 @@ enum skl_disp_power_wells { #define CDCLK_FREQ_540 (1<<26) #define CDCLK_FREQ_337_308 (2<<26) #define CDCLK_FREQ_675_617 (3<<26) -#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) - #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) +#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) +#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) @@ -8161,6 +8212,8 @@ enum skl_disp_power_wells { #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) +#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) +#define BXT_DPHY_DEFEATURE_EN (1 << 8) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 34e061a9ef06..5cfe4c7716b4 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -31,7 +31,7 @@ static void i915_save_display(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Display arbitration control */ if (INTEL_INFO(dev)->gen <= 4) @@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev) static void i915_restore_display(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask = 0xffffffff; /* Display arbitration */ @@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev) int i915_save_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; 
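The new credit macros above shift a count (in units of two credits) into fixed bit positions. Notably, the removed constant BDW_WA_L3SQCREG1_DEFAULT (0x784000) is exactly what they produce for 30 general plus 2 high-priority credits, so the BDW programming is unchanged. A quick userspace check of the encoding:

#include <stdio.h>
#include <stdint.h>

/* Same encoding as the new L3SQCREG1 helpers. */
#define L3_GENERAL_PRIO_CREDITS(x) ((((uint32_t)(x)) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)    ((((uint32_t)(x)) >> 1) << 14)

int main(void)
{
	uint32_t val = L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2);

	printf("L3SQCREG1 = %#x\n", (unsigned)val);
	/* (15 << 19) | (1 << 14) = 0x784000, the old BDW default */
	return 0;
}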
mutex_lock(&dev->struct_mutex); @@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev) int i915_restore_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2d576b7ff299..d61829e54f93 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -38,12 +38,12 @@ static u32 calc_residency(struct drm_device *dev, i915_reg_t reg) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 raw_time; /* 32b value may overflow during fixed point math */ u64 units = 128ULL, div = 100000ULL; u32 ret; - if (!intel_enable_rc6(dev)) + if (!intel_enable_rc6()) return 0; intel_runtime_pm_get(dev_priv); @@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev, static ssize_t show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = dev_to_drm_minor(kdev); - return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6()); } static ssize_t @@ -167,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; - struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(drm_dev); int slice = (int)(uintptr_t)attr->private; int ret; @@ -203,8 +202,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; - struct drm_i915_private *dev_priv = drm_dev->dev_private; - struct intel_context *ctx; + struct drm_i915_private *dev_priv = to_i915(drm_dev); + struct i915_gem_context *ctx; u32 *temp = NULL; /* Just here to make handling failures easy */ int slice = (int)(uintptr_t)attr->private; int ret; @@ -228,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj, } } - ret = i915_gpu_idle(drm_dev); - if (ret) { - kfree(temp); - mutex_unlock(&drm_dev->struct_mutex); - return ret; - } - /* TODO: Ideally we really want a GPU reset here to make sure errors * aren't propagated. Since I cannot find a stable way to reset the GPU * at this point it is left as a TODO. 
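calc_residency() above multiplies the raw RC6 counter by 128 and divides by 100000 to report milliseconds (each counter tick is 1.28 us on non-VLV parts), and the in-line comment explains why the intermediate must be 64-bit: the 32-bit product overflows after roughly half a minute of residency. A userspace version of the same fixed-point conversion:

#include <stdio.h>
#include <stdint.h>

/* ms = raw * 128 / 100000, rounded up, done in 64-bit like the driver. */
static uint32_t residency_ms(uint32_t raw)
{
	uint64_t units = 128, div = 100000;
	uint64_t time = (uint64_t)raw * units;

	return (uint32_t)((time + div - 1) / div);	/* DIV_ROUND_UP_ULL */
}

int main(void)
{
	printf("%u ms\n", (unsigned)residency_ms(78125));	/* 100 ms */
	printf("%u ms\n", (unsigned)residency_ms(4000000000u));
	/* the second product is ~5.1e11, far beyond 32-bit range */
	return 0;
}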
@@ -276,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -310,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -331,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return snprintf(buf, PAGE_SIZE, "%d\n", @@ -342,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -360,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; ssize_t ret; @@ -397,7 +389,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -410,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -428,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; ssize_t ret; @@ -461,7 +453,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. 
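The max-frequency store path above clamps the current request into the new software window and, per the retained comment, still calls intel_set_rps() even when the resulting frequency is unchanged, so the interrupt limits and PMINTRMSK get refreshed. A loose model of that flow; the structure and field names are invented for the sketch:

#include <stdio.h>

struct rps { unsigned int min_soft, max_soft, cur; };

static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Always re-request: the real set path also reprograms interrupt limits
 * and PMINTRMSK, even for an unchanged frequency. */
static void set_rps(struct rps *r, unsigned int val) { r->cur = val; }

static void store_max(struct rps *r, unsigned int new_max)
{
	r->max_soft = new_max;
	set_rps(r, clampu(r->cur, r->min_soft, r->max_soft));
}

int main(void)
{
	struct rps r = { .min_soft = 300, .max_soft = 1200, .cur = 1100 };

	store_max(&r, 900);
	printf("cur=%u\n", r.cur);	/* clamped to the new 900 cap */
	return 0;
}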
*/ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -488,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (attr == &dev_attr_gt_RP0_freq_mhz) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index dc0def210097..534154e05fbe 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink, ), TP_fast_assign( - __entry->dev = i915->dev->primary->index; + __entry->dev = i915->drm.primary->index; __entry->target = target; __entry->flags = flags; ), @@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ), TP_fast_assign( - __entry->dev = from->dev->primary->index; + __entry->dev = from->i915->drm.primary->index; __entry->sync_from = from->id; __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); @@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->flags = flags; - i915_trace_irq_get(engine, req); + intel_engine_enable_signaling(req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->engine->dev->primary->index; + __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; @@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -560,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->drm.primary->index; __entry->ring = engine->id; - __entry->seqno = engine->get_seqno(engine); + __entry->seqno = intel_engine_get_seqno(engine); ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->blocking = - mutex_is_locked(&engine->dev->struct_mutex); + mutex_is_locked(&req->i915->drm.struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -740,19 +734,19 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release, * the context. 
*/ DECLARE_EVENT_CLASS(i915_context, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx), TP_STRUCT__entry( __field(u32, dev) - __field(struct intel_context *, ctx) + __field(struct i915_gem_context *, ctx) __field(struct i915_address_space *, vm) ), TP_fast_assign( __entry->ctx = ctx; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; - __entry->dev = ctx->i915->dev->primary->index; + __entry->dev = ctx->i915->drm.primary->index; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p", @@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context, ) DEFINE_EVENT(i915_context, i915_context_create, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); DEFINE_EVENT(i915_context, i915_context_free, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); @@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free, * called only if full ppgtt is enabled. */ TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), + TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to), TP_ARGS(engine, to), TP_STRUCT__entry( __field(u32, ring) - __field(struct intel_context *, to) + __field(struct i915_gem_context *, to) __field(struct i915_address_space *, vm) __field(u32, dev) ), @@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm, __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->drm.primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index d02efb8cad4d..f6acb5a0e701 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -53,20 +53,19 @@ /** * i915_check_vgpu - detect virtual GPU - * @dev: drm device * + * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_check_vgpu(struct drm_device *dev) +void i915_check_vgpu(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint64_t magic; uint32_t version; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - if (!IS_HASWELL(dev)) + if (!IS_HASWELL(dev_priv)) return; magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); @@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info; * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. */ -void intel_vgt_deballoon(void) +void intel_vgt_deballoon(struct drm_i915_private *dev_priv) { int i; + if (!intel_vgpu_active(dev_priv)) + return; + DRM_DEBUG("VGT deballoon.\n"); for (i = 0; i < 4; i++) { @@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm, * of its graphic space being zero. Yet there are some portions ballooned out( * the shadow part, which are marked as reserved by drm allocator). From the * host point of view, the graphic address space is partitioned by multiple - * vGPUs in different VMs. + * vGPUs in different VMs. 
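i915_check_vgpu() above probes for a hypervisor by reading the 64-bit magic (the bytes of 0x4776544776544776 spell "vGTvGTvG" little-endian) and the interface version from the PVINFO page, enabling vGPU mode only when both match. A sketch of the decision flow with the MMIO reads stubbed out:

#include <stdio.h>
#include <stdint.h>

#define VGT_MAGIC 0x4776544776544776ULL	/* "vGTvGTvG" */

/* Stand-ins for the PVINFO MMIO reads the real driver performs. */
static uint64_t read_magic(void)   { return VGT_MAGIC; }
static uint32_t read_version(void) { return (1u << 16) | 0; }	/* 1.0 */

int main(void)
{
	if (read_magic() != VGT_MAGIC) {
		puts("bare metal: no vGPU");
		return 0;
	}
	if (read_version() != ((1u << 16) | 0)) {
		puts("vGPU interface version mismatch");
		return 0;
	}
	puts("virtual GPU detected");
	return 0;
}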
:: * * vGPU1 view Host view * 0 ------> +-----------+ +-----------+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * mappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * v |///////////| | Host | + * | |###########| | | + * v |###########| | Host | * +=======+===========+ +===========+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * unmappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * | |///////////| | Host | - * v |///////////| | | + * | |###########| | | + * | |###########| | Host | + * v |###########| | | * total GM size ------> +-----------+ +-----------+ * * Returns: * zero on success, non-zero if configuration invalid or ballooning failed */ -int intel_vgt_balloon(struct drm_device *dev) +int intel_vgt_balloon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; @@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev) unsigned long unmappable_base, unmappable_size, unmappable_end; int ret; + if (!intel_vgpu_active(dev_priv)) + return 0; + mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); @@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev) err: DRM_ERROR("VGT balloon fail\n"); - intel_vgt_deballoon(); + intel_vgt_deballoon(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 3c83b47b5f69..3c3b2d24e830 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -24,94 +24,10 @@ #ifndef _I915_VGPU_H_ #define _I915_VGPU_H_ -/* The MMIO offset of the shared info between guest and host emulator */ -#define VGT_PVINFO_PAGE 0x78000 -#define VGT_PVINFO_SIZE 0x1000 +#include "i915_pvinfo.h" -/* - * The following structure pages are defined in GEN MMIO space - * for virtualization. (One page for now) - */ -#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ -#define VGT_VERSION_MAJOR 1 -#define VGT_VERSION_MINOR 0 - -#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) -#define INTEL_VGT_IF_VERSION \ - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) - -/* - * notifications from guest to vgpu device model - */ -enum vgt_g2v_type { - VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, - VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, - VGT_G2V_EXECLIST_CONTEXT_CREATE, - VGT_G2V_EXECLIST_CONTEXT_DESTROY, - VGT_G2V_MAX, -}; - -struct vgt_if { - uint64_t magic; /* VGT_MAGIC */ - uint16_t version_major; - uint16_t version_minor; - uint32_t vgt_id; /* ID of vGT instance */ - uint32_t rsv1[12]; /* pad to offset 0x40 */ - /* - * Data structure to describe the balooning info of resources. - * Each VM can only have one portion of continuous area for now. 
- * (May support scattered resource in future) - * (starting from offset 0x40) - */ - struct { - /* Aperture register balooning */ - struct { - uint32_t base; - uint32_t size; - } mappable_gmadr; /* aperture */ - /* GMADR register balooning */ - struct { - uint32_t base; - uint32_t size; - } nonmappable_gmadr; /* non aperture */ - /* allowed fence registers */ - uint32_t fence_num; - uint32_t rsv2[3]; - } avail_rs; /* available/assigned resource */ - uint32_t rsv3[0x200 - 24]; /* pad to half page */ - /* - * The bottom half page is for response from Gfx driver to hypervisor. - */ - uint32_t rsv4; - uint32_t display_ready; /* ready for display owner switch */ - - uint32_t rsv5[4]; - - uint32_t g2v_notify; - uint32_t rsv6[7]; - - struct { - uint32_t lo; - uint32_t hi; - } pdp[4]; - - uint32_t execlist_context_descriptor_lo; - uint32_t execlist_context_descriptor_hi; - - uint32_t rsv7[0x200 - 24]; /* pad to one page */ -} __packed; - -#define vgtif_reg(x) \ - _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)) - -/* vGPU display status to be used by the host side */ -#define VGT_DRV_DISPLAY_NOT_READY 0 -#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ - -extern void i915_check_vgpu(struct drm_device *dev); -extern int intel_vgt_balloon(struct drm_device *dev); -extern void intel_vgt_deballoon(void); +void i915_check_vgpu(struct drm_i915_private *dev_priv); +int intel_vgt_balloon(struct drm_i915_private *dev_priv); +void intel_vgt_deballoon(struct drm_i915_private *dev_priv); #endif /* _I915_VGPU_H_ */ diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 50ff90aea721..c5a166752eda 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev, /* plane scaler case: assign as a plane scaler */ /* find the plane that set the bit as scaler_user */ - plane = drm_state->planes[i]; + plane = drm_state->planes[i].ptr; /* * to enable/disable hq mode, add planes that are using scaler @@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev, continue; } - plane_state = to_intel_plane_state(drm_state->plane_states[i]); + plane_state = intel_atomic_get_existing_plane_state(drm_state, + intel_plane); scaler_id = &plane_state->scaler_id; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 02a7527ce7bb..6700a7be7f78 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc, { if (((mode->clock == TMDS_297M) || (mode->clock == TMDS_296M)) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) return true; else return false; @@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, i915_reg_t reg_elda, uint32_t bits_elda, i915_reg_t reg_edid) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint8_t *eld = connector->eld; uint32_t tmp; int i; @@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, static void g4x_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); uint32_t eldv, tmp; DRM_DEBUG_KMS("Disable audio codec\n"); @@ -210,7 +210,7 @@ static void 
g4x_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint8_t *eld = connector->eld; uint32_t eldv; uint32_t tmp; @@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector, static void hsw_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum pipe pipe = intel_crtc->pipe; uint32_t tmp; @@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; I915_WRITE(HSW_AUD_CFG(pipe), tmp); @@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum pipe pipe = intel_crtc->pipe; struct i915_audio_component *acomp = dev_priv->audio_component; @@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); @@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, static void ilk_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); @@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; I915_WRITE(aud_config, tmp); @@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); @@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= 
AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); @@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct drm_connector *connector; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); enum port port = intel_dig_port->port; @@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /* ELD Conn_Type */ connector->eld[5] &= ~(3 << 2); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(crtc->config)) connector->eld[5] |= (1 << 2); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); enum port port = intel_dig_port->port; @@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev, static int i915_audio_component_get_cdclk_freq(struct device *dev) { struct drm_i915_private *dev_priv = dev_to_i915(dev); - int ret; if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) return -ENODEV; - intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - ret = dev_priv->display.get_display_clock_speed(dev_priv->dev); - - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); - - return ret; + return dev_priv->cdclk_freq; } static int i915_audio_component_sync_audio_rate(struct device *dev, @@ -755,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev, if (WARN_ON(acomp->ops || acomp->dev)) return -EEXIST; - drm_modeset_lock_all(dev_priv->dev); + drm_modeset_lock_all(&dev_priv->drm); acomp->ops = &i915_audio_component_ops; acomp->dev = i915_dev; BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; dev_priv->audio_component = acomp; - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); return 0; } @@ -773,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev, struct i915_audio_component *acomp = data; struct drm_i915_private *dev_priv = dev_to_i915(i915_dev); - drm_modeset_lock_all(dev_priv->dev); + drm_modeset_lock_all(&dev_priv->drm); acomp->ops = NULL; acomp->dev = NULL; dev_priv->audio_component = NULL; - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); } static const struct component_ops i915_audio_component_bind_ops = { @@ -805,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops); + ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ @@ -827,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) if (!dev_priv->audio_component_registered) return; - 
component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops); + component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); dev_priv->audio_component_registered = false; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b9022fa053d6..c6e69e4cfa83 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -218,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; - ret = intel_opregion_get_panel_type(dev_priv->dev); + ret = intel_opregion_get_panel_type(dev_priv); if (ret >= 0) { WARN_ON(ret > 0xf); panel_type = ret; @@ -323,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, return; } + dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; + if (bdb->version >= 191 && + get_blocksize(backlight_data) >= sizeof(*backlight_data)) { + const struct bdb_lfp_backlight_control_method *method; + + method = &backlight_data->backlight_control[panel_type]; + dev_priv->vbt.backlight.type = method->type; + } + dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; @@ -768,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv, return; } + /* + * These fields are introduced from the VBT version 197 onwards, + * so making sure that these bits are set zero in the previous + * versions. + */ + if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) { + dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0; + dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0; + } + /* We have mandatory mipi config blocks. Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; } @@ -1407,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) int intel_bios_init(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; const struct vbt_header *vbt = dev_priv->opregion.vbt; const struct bdb_header *bdb; u8 __iomem *bios = NULL; diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index ab0ea315eddb..8405b5a367d7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -30,6 +30,14 @@ #ifndef _INTEL_BIOS_H_ #define _INTEL_BIOS_H_ +enum intel_backlight_type { + INTEL_BACKLIGHT_PMIC, + INTEL_BACKLIGHT_LPSS, + INTEL_BACKLIGHT_DISPLAY_DDI, + INTEL_BACKLIGHT_DSI_DCS, + INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE, +}; + struct edp_power_seq { u16 t1_t3; u16 t8; @@ -113,7 +121,13 @@ struct mipi_config { u16 dual_link:2; u16 lane_cnt:2; u16 pixel_overlap:3; - u16 rsvd3:9; + u16 rgb_flip:1; +#define DL_DCS_PORT_A 0x00 +#define DL_DCS_PORT_C 0x01 +#define DL_DCS_PORT_A_AND_C 0x02 + u16 dl_dcs_cabc_ports:2; + u16 dl_dcs_backlight_ports:2; + u16 rsvd3:4; u16 rsvd4; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c new file mode 100644 index 000000000000..b074f3d6d127 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -0,0 +1,595 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, 
sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/kthread.h> + +#include "i915_drv.h" + +static void intel_breadcrumbs_fake_irq(unsigned long data) +{ + struct intel_engine_cs *engine = (struct intel_engine_cs *)data; + + /* + * The timer persists in case we cannot enable interrupts, + * or if we have previously seen seqno/interrupt incoherency + * ("missed interrupt" syndrome). Here the worker will wake up + * every jiffie in order to kick the oldest waiter to do the + * coherent seqno check. + */ + rcu_read_lock(); + if (intel_engine_wakeup(engine)) + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); + rcu_read_unlock(); +} + +static void irq_enable(struct intel_engine_cs *engine) +{ + /* Enabling the IRQ may miss the generation of the interrupt, but + * we still need to force the barrier before reading the seqno, + * just in case. + */ + engine->breadcrumbs.irq_posted = true; + + spin_lock_irq(&engine->i915->irq_lock); + engine->irq_enable(engine); + spin_unlock_irq(&engine->i915->irq_lock); +} + +static void irq_disable(struct intel_engine_cs *engine) +{ + spin_lock_irq(&engine->i915->irq_lock); + engine->irq_disable(engine); + spin_unlock_irq(&engine->i915->irq_lock); + + engine->breadcrumbs.irq_posted = false; +} + +static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) +{ + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + struct drm_i915_private *i915 = engine->i915; + + assert_spin_locked(&b->lock); + if (b->rpm_wakelock) + return; + + /* Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. For completeness, + * record an rpm reference for ourselves to cover the + * interrupt we unmask. + */ + intel_runtime_pm_get_noresume(i915); + b->rpm_wakelock = true; + + /* No interrupts? Kick the waiter every jiffie! */ + if (intel_irqs_enabled(i915)) { + if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) + irq_enable(engine); + b->irq_enabled = true; + } + + if (!b->irq_enabled || + test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) + mod_timer(&b->fake_irq, jiffies + 1); + + /* Ensure that even if the GPU hangs, we get woken up. + * + * However, note that if no one is waiting, we never notice + * a gpu hang. Eventually, we will have to wait for a resource + * held by the GPU and so trigger a hangcheck. In the most + * pathological case, this will be upon memory starvation! 
+ */ + i915_queue_hangcheck(i915); +} + +static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) +{ + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + + assert_spin_locked(&b->lock); + if (!b->rpm_wakelock) + return; + + if (b->irq_enabled) { + irq_disable(engine); + b->irq_enabled = false; + } + + intel_runtime_pm_put(engine->i915); + b->rpm_wakelock = false; +} + +static inline struct intel_wait *to_wait(struct rb_node *node) +{ + return container_of(node, struct intel_wait, node); +} + +static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, + struct intel_wait *wait) +{ + assert_spin_locked(&b->lock); + + /* This request is completed, so remove it from the tree, mark it as + * complete, and *then* wake up the associated task. + */ + rb_erase(&wait->node, &b->waiters); + RB_CLEAR_NODE(&wait->node); + + wake_up_process(wait->tsk); /* implicit smp_wmb() */ +} + +static bool __intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node **p, *parent, *completed; + bool first; + u32 seqno; + + /* Insert the request into the retirement ordered list + * of waiters by walking the rbtree. If we are the oldest + * seqno in the tree (the first to be retired), then + * set ourselves as the bottom-half. + * + * As we descend the tree, prune completed branches since we hold the + * spinlock we know that the first_waiter must be delayed and can + * reduce some of the sequential wake up latency if we take action + * ourselves and wake up the completed tasks in parallel. Also, by + * removing stale elements in the tree, we may be able to reduce the + * ping-pong between the old bottom-half and ourselves as first-waiter. + */ + first = true; + parent = NULL; + completed = NULL; + seqno = intel_engine_get_seqno(engine); + + /* If the request completed before we managed to grab the spinlock, + * return now before adding ourselves to the rbtree. We let the + * current bottom-half handle any pending wakeups and instead + * try and get out of the way quickly. + */ + if (i915_seqno_passed(seqno, wait->seqno)) { + RB_CLEAR_NODE(&wait->node); + return first; + } + + p = &b->waiters.rb_node; + while (*p) { + parent = *p; + if (wait->seqno == to_wait(parent)->seqno) { + /* We have multiple waiters on the same seqno, select + * the highest priority task (that with the smallest + * task->prio) to serve as the bottom-half for this + * group. + */ + if (wait->tsk->prio > to_wait(parent)->tsk->prio) { + p = &parent->rb_right; + first = false; + } else { + p = &parent->rb_left; + } + } else if (i915_seqno_passed(wait->seqno, + to_wait(parent)->seqno)) { + p = &parent->rb_right; + if (i915_seqno_passed(seqno, to_wait(parent)->seqno)) + completed = parent; + else + first = false; + } else { + p = &parent->rb_left; + } + } + rb_link_node(&wait->node, parent, p); + rb_insert_color(&wait->node, &b->waiters); + GEM_BUG_ON(!first && !b->irq_seqno_bh); + + if (completed) { + struct rb_node *next = rb_next(completed); + + GEM_BUG_ON(!next && !first); + if (next && next != &wait->node) { + GEM_BUG_ON(first); + b->first_wait = to_wait(next); + smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + /* As there is a delay between reading the current + * seqno, processing the completed tasks and selecting + * the next waiter, we may have missed the interrupt + * and so need for the next bottom-half to wakeup. 
+ * + * Also as we enable the IRQ, we may miss the + * interrupt for that seqno, so we have to wake up + * the next bottom-half in order to do a coherent check + * in case the seqno passed. + */ + __intel_breadcrumbs_enable_irq(b); + if (READ_ONCE(b->irq_posted)) + wake_up_process(to_wait(next)->tsk); + } + + do { + struct intel_wait *crumb = to_wait(completed); + completed = rb_prev(completed); + __intel_breadcrumbs_finish(b, crumb); + } while (completed); + } + + if (first) { + GEM_BUG_ON(rb_first(&b->waiters) != &wait->node); + b->first_wait = wait; + smp_store_mb(b->irq_seqno_bh, wait->tsk); + /* After assigning ourselves as the new bottom-half, we must + * perform a cursory check to prevent a missed interrupt. + * Either we miss the interrupt whilst programming the hardware, + * or if there was a previous waiter (for a later seqno) they + * may be woken instead of us (due to the inherent race + * in the unlocked read of b->irq_seqno_bh in the irq handler) + * and so we miss the wake up. + */ + __intel_breadcrumbs_enable_irq(b); + } + GEM_BUG_ON(!b->irq_seqno_bh); + GEM_BUG_ON(!b->first_wait); + GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node); + + return first; +} + +bool intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + bool first; + + spin_lock(&b->lock); + first = __intel_engine_add_wait(engine, wait); + spin_unlock(&b->lock); + + return first; +} + +void intel_engine_enable_fake_irq(struct intel_engine_cs *engine) +{ + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); +} + +static inline bool chain_wakeup(struct rb_node *rb, int priority) +{ + return rb && to_wait(rb)->tsk->prio <= priority; +} + +static inline int wakeup_priority(struct intel_breadcrumbs *b, + struct task_struct *tsk) +{ + if (tsk == b->signaler) + return INT_MIN; + else + return tsk->prio; +} + +void intel_engine_remove_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + /* Quick check to see if this waiter was already decoupled from + * the tree by the bottom-half to avoid contention on the spinlock + * by the herd. + */ + if (RB_EMPTY_NODE(&wait->node)) + return; + + spin_lock(&b->lock); + + if (RB_EMPTY_NODE(&wait->node)) + goto out_unlock; + + if (b->first_wait == wait) { + const int priority = wakeup_priority(b, wait->tsk); + struct rb_node *next; + + GEM_BUG_ON(b->irq_seqno_bh != wait->tsk); + + /* We are the current bottom-half. Find the next candidate, + * the first waiter in the queue on the remaining oldest + * request. As multiple seqnos may complete in the time it + * takes us to wake up and find the next waiter, we have to + * wake up that waiter for it to perform its own coherent + * completion check. + */ + next = rb_next(&wait->node); + if (chain_wakeup(next, priority)) { + /* If the next waiter is already complete, + * wake it up and continue onto the next waiter. So + * if have a small herd, they will wake up in parallel + * rather than sequentially, which should reduce + * the overall latency in waking all the completed + * clients. + * + * However, waking up a chain adds extra latency to + * the first_waiter. This is undesirable if that + * waiter is a high priority task. 
+ */ + u32 seqno = intel_engine_get_seqno(engine); + + while (i915_seqno_passed(seqno, to_wait(next)->seqno)) { + struct rb_node *n = rb_next(next); + + __intel_breadcrumbs_finish(b, to_wait(next)); + next = n; + if (!chain_wakeup(next, priority)) + break; + } + } + + if (next) { + /* In our haste, we may have completed the first waiter + * before we enabled the interrupt. Do so now as we + * have a second waiter for a future seqno. Afterwards, + * we have to wake up that waiter in case we missed + * the interrupt, or if we have to handle an + * exception rather than a seqno completion. + */ + b->first_wait = to_wait(next); + smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + if (b->first_wait->seqno != wait->seqno) + __intel_breadcrumbs_enable_irq(b); + wake_up_process(b->irq_seqno_bh); + } else { + b->first_wait = NULL; + WRITE_ONCE(b->irq_seqno_bh, NULL); + __intel_breadcrumbs_disable_irq(b); + } + } else { + GEM_BUG_ON(rb_first(&b->waiters) == &wait->node); + } + + GEM_BUG_ON(RB_EMPTY_NODE(&wait->node)); + rb_erase(&wait->node, &b->waiters); + +out_unlock: + GEM_BUG_ON(b->first_wait == wait); + GEM_BUG_ON(rb_first(&b->waiters) != + (b->first_wait ? &b->first_wait->node : NULL)); + GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters)); + spin_unlock(&b->lock); +} + +static bool signal_complete(struct drm_i915_gem_request *request) +{ + if (!request) + return false; + + /* If another process served as the bottom-half it may have already + * signalled that this wait is already completed. + */ + if (intel_wait_complete(&request->signaling.wait)) + return true; + + /* Carefully check if the request is complete, giving time for the + * seqno to be visible or if the GPU hung. + */ + if (__i915_request_irq_complete(request)) + return true; + + return false; +} + +static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) +{ + return container_of(rb, struct drm_i915_gem_request, signaling.node); +} + +static void signaler_set_rtpriority(void) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, &param); +} + +static int intel_breadcrumbs_signaler(void *arg) +{ + struct intel_engine_cs *engine = arg; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct drm_i915_gem_request *request; + + /* Install ourselves with high priority to reduce signalling latency */ + signaler_set_rtpriority(); + + do { + set_current_state(TASK_INTERRUPTIBLE); + + /* We are either woken up by the interrupt bottom-half, + * or by a client adding a new signaller. In both cases, + * the GPU seqno may have advanced beyond our oldest signal. + * If it has, propagate the signal, remove the waiter and + * check again with the next oldest signal. Otherwise we + * need to wait for a new interrupt from the GPU or for + * a new client. + */ + request = READ_ONCE(b->first_signal); + if (signal_complete(request)) { + /* Wake up all other completed waiters and select the + * next bottom-half for the next user interrupt. + */ + intel_engine_remove_wait(engine, + &request->signaling.wait); + + /* Find the next oldest signal. Note that as we have + * not been holding the lock, another client may + * have installed an even older signal than the one + * we just completed - so double check we are still + * the oldest before picking the next one. + */ + spin_lock(&b->lock); + if (request == b->first_signal) { + struct rb_node *rb = + rb_next(&request->signaling.node); + b->first_signal = rb ?
to_signaler(rb) : NULL; + } + rb_erase(&request->signaling.node, &b->signals); + spin_unlock(&b->lock); + + i915_gem_request_unreference(request); + } else { + if (kthread_should_stop()) + break; + + schedule(); + } + } while (1); + __set_current_state(TASK_RUNNING); + + return 0; +} + +void intel_engine_enable_signaling(struct drm_i915_gem_request *request) +{ + struct intel_engine_cs *engine = request->engine; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node *parent, **p; + bool first, wakeup; + + if (unlikely(READ_ONCE(request->signaling.wait.tsk))) + return; + + spin_lock(&b->lock); + if (unlikely(request->signaling.wait.tsk)) { + wakeup = false; + goto unlock; + } + + request->signaling.wait.tsk = b->signaler; + request->signaling.wait.seqno = request->seqno; + i915_gem_request_reference(request); + + /* First add ourselves into the list of waiters, but register our + * bottom-half as the signaller thread. As per usual, only the oldest + * waiter (not just signaller) is tasked as the bottom-half waking + * up all completed waiters after the user interrupt. + * + * If we are the oldest waiter, enable the irq (after which we + * must double check that the seqno did not complete). + */ + wakeup = __intel_engine_add_wait(engine, &request->signaling.wait); + + /* Now insert ourselves into the retirement ordered list of signals + * on this engine. We track the oldest seqno as that will be the + * first signal to complete. + */ + parent = NULL; + first = true; + p = &b->signals.rb_node; + while (*p) { + parent = *p; + if (i915_seqno_passed(request->seqno, + to_signaler(parent)->seqno)) { + p = &parent->rb_right; + first = false; + } else { + p = &parent->rb_left; + } + } + rb_link_node(&request->signaling.node, parent, p); + rb_insert_color(&request->signaling.node, &b->signals); + if (first) + smp_store_mb(b->first_signal, request); + +unlock: + spin_unlock(&b->lock); + + if (wakeup) + wake_up_process(b->signaler); +} + +int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct task_struct *tsk; + + spin_lock_init(&b->lock); + setup_timer(&b->fake_irq, + intel_breadcrumbs_fake_irq, + (unsigned long)engine); + + /* Spawn a thread to provide a common bottom-half for all signals. + * As this is an asynchronous interface we cannot steal the current + * task for handling the bottom-half to the user interrupt, therefore + * we create a thread to do the coherent seqno dance after the + * interrupt and then signal the waitqueue (via the dma-buf/fence). + */ + tsk = kthread_run(intel_breadcrumbs_signaler, engine, + "i915/signal:%d", engine->id); + if (IS_ERR(tsk)) + return PTR_ERR(tsk); + + b->signaler = tsk; + + return 0; +} + +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + if (!IS_ERR_OR_NULL(b->signaler)) + kthread_stop(b->signaler); + + del_timer_sync(&b->fake_irq); +} + +unsigned int intel_kick_waiters(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int mask = 0; + + /* To avoid the task_struct disappearing beneath us as we wake up + * the process, we must first inspect the task_struct->state under the + * RCU lock, i.e. as we call wake_up_process() we must be holding the + * rcu_read_lock(). 
+ */ + rcu_read_lock(); + for_each_engine(engine, i915) + if (unlikely(intel_engine_wakeup(engine))) + mask |= intel_engine_flag(engine); + rcu_read_unlock(); + + return mask; +} + +unsigned int intel_kick_signalers(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int mask = 0; + + for_each_engine(engine, i915) { + if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) { + wake_up_process(engine->breadcrumbs.signaler); + mask |= intel_engine_flag(engine); + } + } + + return mask; +} diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 1b3f97449395..bc0fef3d3335 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) { struct drm_crtc *crtc = crtc_state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int i, pipe = intel_crtc->pipe; uint16_t coeffs[9] = { 0, }; @@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = to_intel_crtc(crtc)->pipe; uint32_t mode; @@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) void intel_color_set_csc(struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->display.load_csc_matrix) dev_priv->display.load_csc_matrix(crtc_state); @@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc, struct drm_property_blob *blob) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; int i; if (HAS_GMCH_DISPLAY(dev)) { - if (intel_crtc->config->has_dsi_encoder) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state) { struct drm_crtc *crtc = crtc_state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state); @@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *intel_state = to_intel_crtc_state(state); enum pipe pipe = to_intel_crtc(crtc)->pipe; uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; @@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; 
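
The conversions running through the surrounding hunks, from dev->dev_private to to_i915(dev) and from dev_priv->dev to &dev_priv->drm, all rest on struct drm_device being embedded inside struct drm_i915_private as its drm member. A minimal self-contained sketch of that accessor pattern, with stub struct layouts standing in for the real drm types (only the embedded 'drm' member mirrors the actual code):

#include <stddef.h>

/* Stub layouts for illustration; the real structs carry far more state. */
struct drm_device { int dummy; };
struct drm_i915_private { struct drm_device drm; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Recover the i915 wrapper from an embedded drm_device: no
 * ->dev_private pointer chase and no separate allocation to track. */
static struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}
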
struct drm_color_lut *lut; uint32_t i, lut_size; @@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) void intel_color_load_luts(struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->display.load_luts(crtc_state); } @@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc, void intel_color_init(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); drm_mode_crtc_set_gamma_size(crtc, 256); @@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc) /* Enable color management support when we have degamma & gamma LUTs. */ if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && INTEL_INFO(dev)->color.gamma_lut_size != 0) - drm_helper_crtc_enable_color_mgmt(crtc, + drm_crtc_enable_color_mgmt(crtc, INTEL_INFO(dev)->color.degamma_lut_size, + true, INTEL_INFO(dev)->color.gamma_lut_size); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3fbb6fc66451..827b6ef4e9ae 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); enum intel_display_power_domain power_domain; u32 tmp; @@ -98,7 +98,7 @@ out: static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; @@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; @@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; @@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { @@ -326,11 +328,26 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + bool reenable_hpd; u32 adpa; bool ret; u32 save_adpa; 
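
The CRT hotplug hunks above and below replace open-coded wait_for() polls with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). A rough user-space sketch of that contract follows, with the MMIO read abstracted as a callback; the in-kernel helper additionally does a fast busy-wait phase and sleeps between slow polls, which this sketch omits:

#include <errno.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll rd(reg) until the bits selected by 'mask' equal 'value', e.g.
 * (..., ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, 1000) waits up to a second
 * for the force-trigger bit to clear. Returns 0 on success. */
static int wait_for_register(uint32_t (*rd)(uint32_t), uint32_t reg,
			     uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	const uint64_t deadline = now_ms() + timeout_ms;

	for (;;) {
		if ((rd(reg) & mask) == value)
			return 0;
		if (now_ms() > deadline)
			return -ETIMEDOUT;
	}
}
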
+ /* + * Doing a force trigger causes a hpd interrupt to get sent, which can + * get us stuck in a loop if we're polling: + * - We enable power wells and reset the ADPA + * - output_poll_exec does force probe on VGA, triggering a hpd + * - HPD handler waits for poll to unlock dev->mode_config.mutex + * - output_poll_exec shuts off the ADPA, unlocks + * dev->mode_config.mutex + * - HPD handler runs, resets ADPA and brings us back to the start + * + * Just disable HPD interrupts here to prevent this + */ + reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); + save_adpa = adpa = I915_READ(crt->adpa_reg); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); @@ -338,8 +355,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) { + if (intel_wait_for_register(dev_priv, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) { DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); I915_WRITE(crt->adpa_reg, save_adpa); } @@ -353,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); + if (reenable_hpd) + intel_hpd_enable(dev_priv, crt->base.hpd_pin); + return ret; } @@ -367,7 +389,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 stat; bool ret = false; int i, tries = 0; @@ -394,9 +416,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (wait_for((I915_READ(PORT_HOTPLUG_EN) & - CRT_HOTPLUG_FORCE_DETECT) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN, + CRT_HOTPLUG_FORCE_DETECT, 0, + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); } @@ -449,7 +471,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; @@ -485,7 +507,7 @@ static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) { struct drm_device *dev = crt->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t save_bclrpat; uint32_t save_vtotal; uint32_t vtotal, vactive; @@ -600,7 +622,7 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; enum intel_display_power_domain power_domain; @@ -681,7 +703,7 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; enum intel_display_power_domain power_domain; @@ -713,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector, return 0; } -static void intel_crt_reset(struct drm_connector *connector) +void intel_crt_reset(struct drm_encoder *encoder) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crt *crt = intel_attached_crt(connector); + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); if (INTEL_INFO(dev)->gen >= 5) { u32 adpa; @@ -739,10 +761,11 @@ static void intel_crt_reset(struct drm_connector *connector) */ static const struct drm_connector_funcs intel_crt_connector_funcs = { - .reset = intel_crt_reset, .dpms = drm_atomic_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -753,10 +776,10 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { + .reset = intel_crt_reset, .destroy = intel_encoder_destroy, }; @@ -791,7 +814,7 @@ void intel_crt_init(struct drm_device *dev) struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t adpa_reg; u32 adpa; @@ -839,7 +862,7 @@ void intel_crt_init(struct drm_device *dev) &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC, NULL); + DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); @@ -876,12 +899,9 @@ void intel_crt_init(struct drm_device *dev) crt->base.get_hw_state = intel_crt_get_hw_state; } intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - drm_connector_register(connector); - if (!I915_HAS_HOTPLUG(dev)) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; @@ -902,5 +922,5 @@ void intel_crt_init(struct drm_device *dev) dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; } - intel_crt_reset(connector); + intel_crt_reset(&crt->base.base); } diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 2b3b428d9cd2..3edb9580928e 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; uint32_t i; uint32_t *dmc_payload; - uint32_t required_min_version; + uint32_t required_version; if (!fw) return NULL; @@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = 
css_header->version; if (IS_KABYLAKE(dev_priv)) { - required_min_version = KBL_CSR_VERSION_REQUIRED; + required_version = KBL_CSR_VERSION_REQUIRED; } else if (IS_SKYLAKE(dev_priv)) { - required_min_version = SKL_CSR_VERSION_REQUIRED; + required_version = SKL_CSR_VERSION_REQUIRED; } else if (IS_BROXTON(dev_priv)) { - required_min_version = BXT_CSR_VERSION_REQUIRED; + required_version = BXT_CSR_VERSION_REQUIRED; } else { MISSING_CASE(INTEL_REVID(dev_priv)); - required_min_version = 0; + required_version = 0; } - if (csr->version < required_min_version) { - DRM_INFO("Refusing to load old DMC firmware v%u.%u," - " please upgrade to v%u.%u or later" - " [" FIRMWARE_URL "].\n", + if (csr->version != required_version) { + DRM_INFO("Refusing to load DMC firmware v%u.%u," + " please use v%u.%u [" FIRMWARE_URL "].\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version), - CSR_VERSION_MAJOR(required_min_version), - CSR_VERSION_MINOR(required_min_version)); + CSR_VERSION_MAJOR(required_version), + CSR_VERSION_MINOR(required_version)); return NULL; } @@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work) csr = &dev_priv->csr; ret = request_firmware(&fw, dev_priv->csr.fw_path, - &dev_priv->dev->pdev->dev); + &dev_priv->drm.pdev->dev); if (fw) dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); @@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work) CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); } else { - dev_notice(dev_priv->dev->dev, + dev_notice(dev_priv->drm.dev, "Failed to load DMC firmware" " [" FIRMWARE_URL "]," " disabling runtime power management.\n"); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 01e523df363b..dd1d6fe12297 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, default: WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); /* fallthrough and treat as unknown */ - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_UNKNOWN: @@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) ddi_translations = ddi_translations_edp; size = n_edp_entries; break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: ddi_translations = ddi_translations_dp; size = n_dp_entries; @@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, void hsw_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; u32 temp, i, rx_ctl_val; @@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) if (pipe_config->has_pch_encoder) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->fdi_m_n); - else if (pipe_config->has_dp_encoder) + else if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) @@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) static void skl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = 
encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; uint32_t dpll_ctl1, dpll; @@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, static void hsw_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; u32 val, pll; @@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, { struct intel_shared_dpll *pll; struct intel_dpll_hw_state *state; - intel_clock_t clock; + struct dpll clock; /* For DDI ports we always use a shared PLL. */ if (WARN_ON(dpll == DPLL_ID_PRIVATE)) @@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, static void bxt_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_ddi_get_encoder_port(encoder); uint32_t dpll = port; @@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; int type = intel_encoder->type; uint32_t temp; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { WARN_ON(transcoder_is_dsi(cpu_transcoder)); temp = TRANS_MSA_SYNC_CLK; @@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; uint32_t temp; temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); @@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; enum port port = intel_ddi_get_encoder_port(intel_encoder); @@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) temp |= TRANS_DDI_MODE_SELECT_FDI; temp |= (intel_crtc->config->fdi_lanes - 1) << 1; - } else if (type == INTEL_OUTPUT_DISPLAYPORT || + } else if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = 
to_i915(dev); struct intel_encoder *intel_encoder = intel_connector->encoder; int type = intel_connector->base.connector_type; enum port port = intel_ddi_get_encoder_port(intel_encoder); @@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(encoder); enum intel_display_power_domain power_domain; u32 tmp; @@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); out: + if (ret && IS_BROXTON(dev_priv)) { + tmp = I915_READ(BXT_PHY_CTL(port)); + if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | + BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) + DRM_ERROR("Port %c enabled but PHY powered down? " + "(PHY_CTL %08x)\n", port_name(port), tmp); + } + intel_display_power_put(dev_priv, power_domain); return ret; @@ -1351,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); enum port port = intel_ddi_get_encoder_port(intel_encoder); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -1363,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) { - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; if (cpu_transcoder != TRANSCODER_EDP) @@ -1384,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; - if (type == INTEL_OUTPUT_DISPLAYPORT) { + if (type == INTEL_OUTPUT_DP) { if (dp_iboost) { iboost = dp_iboost; } else { @@ -1442,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); ddi_translations = bxt_ddi_translations_edp; - } else if (type == INTEL_OUTPUT_DISPLAYPORT + } else if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); ddi_translations = bxt_ddi_translations_dp; @@ -1616,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_ddi_clk_select(intel_encoder, crtc->config); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_set_link_params(intel_dp, crtc->config); @@ -1640,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; uint32_t val; @@ -1661,7 +1669,7 @@ static void intel_ddi_post_disable(struct 
intel_encoder *intel_encoder) if (wait) intel_wait_ddi_buf_idle(dev_priv, port); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); intel_edp_panel_vdd_on(intel_dp); @@ -1687,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; @@ -1726,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int type = intel_encoder->type; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (intel_crtc->config->has_audio) { intel_audio_codec_disable(intel_encoder); @@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) } } -static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { + enum port port; + if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) return false; @@ -1770,38 +1780,51 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, return false; } + for_each_port_masked(port, + phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) : + BIT(PORT_A)) { + u32 tmp = I915_READ(BXT_PHY_CTL(port)); + + if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { + DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " + "for port %c powered down " + "(PHY_CTL %08x)\n", + phy, port_name(port), tmp); + + return false; + } + } + return true; } -static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) { u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; } -static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) + if (intel_wait_for_register(dev_priv, + BXT_PORT_REF_DW3(phy), + GRC_DONE, GRC_DONE, + 10)) DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); } -static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy); - -static void broxton_phy_init(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - enum port port; - u32 ports, val; + u32 val; - if (broxton_phy_is_enabled(dev_priv, phy)) { + if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { /* Still read out the GRC value for state verification */ if (phy == DPIO_PHY0) - dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); + dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy); - if (broxton_phy_verify_state(dev_priv, phy)) { + if (bxt_ddi_phy_verify_state(dev_priv, phy)) { DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " "won't reprogram it\n", phy); @@ -1810,8 +1833,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, 
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " "force reprogramming it\n", phy); - } else { - DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy); } val = I915_READ(BXT_P_CR_GT_DISP_PWRON); @@ -1831,28 +1852,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, DRM_ERROR("timeout during PHY%d power on\n", phy); } - if (phy == DPIO_PHY0) - ports = BIT(PORT_B) | BIT(PORT_C); - else - ports = BIT(PORT_A); - - for_each_port_masked(port, ports) { - int lane; - - for (lane = 0; lane < 4; lane++) { - val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); - /* - * Note that on CHV this flag is called UPAR, but has - * the same function. - */ - val &= ~LATENCY_OPTIM; - if (lane != 1) - val |= LATENCY_OPTIM; - - I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); - } - } - /* Program PLL Rcomp code offset */ val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); val &= ~IREF0RC_OFFSET_MASK; @@ -1899,10 +1898,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, * the corresponding calibrated value from PHY1, and disable * the automatic calibration on PHY0. */ - broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); - - val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, - DPIO_PHY1); + val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1); grc_code = val << GRC_CODE_FAST_SHIFT | val << GRC_CODE_SLOW_SHIFT | val; @@ -1912,31 +1908,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, val |= GRC_DIS | GRC_RDY_OVRD; I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); } - /* - * During PHY1 init delay waiting for GRC calibration to finish, since - * it can happen in parallel with the subsequent PHY0 init. - */ val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); val |= COMMON_RESET_DIS; I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); -} - -void broxton_ddi_phy_init(struct drm_i915_private *dev_priv) -{ - /* Enable PHY1 first since it provides Rcomp for PHY0 */ - broxton_phy_init(dev_priv, DPIO_PHY1); - broxton_phy_init(dev_priv, DPIO_PHY0); - /* - * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the - * PHY1 GRC calibration to finish, so wait for it here. 
- */ - broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); + if (phy == DPIO_PHY1) + bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1); } -static void broxton_phy_uninit(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { uint32_t val; @@ -1949,12 +1930,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv, I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); } -void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv) -{ - broxton_phy_uninit(dev_priv, DPIO_PHY1); - broxton_phy_uninit(dev_priv, DPIO_PHY0); -} - static bool __printf(6, 7) __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, i915_reg_t reg, u32 mask, u32 expected, @@ -1982,11 +1957,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, return false; } -static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - enum port port; - u32 ports; uint32_t mask; bool ok; @@ -1994,27 +1967,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) - /* We expect the PHY to be always enabled */ - if (!broxton_phy_is_enabled(dev_priv, phy)) + if (!bxt_ddi_phy_is_enabled(dev_priv, phy)) return false; ok = true; - if (phy == DPIO_PHY0) - ports = BIT(PORT_B) | BIT(PORT_C); - else - ports = BIT(PORT_A); - - for_each_port_masked(port, ports) { - int lane; - - for (lane = 0; lane < 4; lane++) - ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane), - LATENCY_OPTIM, - lane != 1 ? LATENCY_OPTIM : 0, - "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane); - } - /* PLL Rcomp code offset */ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, @@ -2058,11 +2015,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, #undef _CHK } -void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) +static uint8_t +bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) { - if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || - !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) - i915_report_error(dev_priv, "DDI PHY state mismatch\n"); + switch (pipe_config->lane_count) { + case 1: + return 0; + case 2: + return BIT(2) | BIT(0); + case 4: + return BIT(3) | BIT(2) | BIT(0); + default: + MISSING_CASE(pipe_config->lane_count); + + return 0; + } +} + +static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + enum port port = dport->port; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + int lane; + + for (lane = 0; lane < 4; lane++) { + u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); + + /* + * Note that on CHV this flag is called UPAR, but has + * the same function. 
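The per-lane LATENCY_OPTIM programming moved into bxt_ddi_pre_pll_enable() now honours the precomputed lane_lat_optim_mask instead of the old hardcoded "every lane except lane 1" rule from PHY init. The two agree for a full-width port, which is why the move is safe; a worked check of the masks returned by bxt_ddi_phy_calc_lane_lat_optim_mask() (illustrative arithmetic only):

	/*
	 * lane_count == 4: mask = BIT(3) | BIT(2) | BIT(0) = 0b1101,
	 *   so lanes 0, 2 and 3 get LATENCY_OPTIM and lane 1 does not,
	 *   matching the removed hardcoded (lane != 1) test.
	 * lane_count == 2: mask = 0b0101, lanes 0 and 2 only.
	 * lane_count == 1: mask = 0, no lane gets the bit.
	 */

The difference only shows up on narrower configurations, where the old code would have set the bit on lanes the port does not actually drive.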
+ */ + val &= ~LATENCY_OPTIM; + if (intel_crtc->config->lane_lat_optim_mask & BIT(lane)) + val |= LATENCY_OPTIM; + + I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); + } +} + +static uint8_t +bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + enum port port = dport->port; + int lane; + uint8_t mask; + + mask = 0; + for (lane = 0; lane < 4; lane++) { + u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); + + if (val & LATENCY_OPTIM) + mask |= BIT(lane); + } + + return mask; } void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -2113,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) void intel_ddi_fdi_disable(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); uint32_t val; @@ -2146,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_hdmi *intel_hdmi; @@ -2200,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; case TRANS_DDI_MODE_SELECT_DP_SST: case TRANS_DDI_MODE_SELECT_DP_MST: - pipe_config->has_dp_encoder = true; pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; intel_dp_get_m_n(intel_crtc, pipe_config); @@ -2236,13 +2246,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder, } intel_ddi_clock_get(encoder, pipe_config); + + if (IS_BROXTON(dev_priv)) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_get_lane_lat_optim_mask(encoder); } static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int type = encoder->type; int port = intel_ddi_get_encoder_port(encoder); + int ret; WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); @@ -2250,9 +2266,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, pipe_config->cpu_transcoder = TRANSCODER_EDP; if (type == INTEL_OUTPUT_HDMI) - return intel_hdmi_compute_config(encoder, pipe_config); + ret = intel_hdmi_compute_config(encoder, pipe_config); else - return intel_dp_compute_config(encoder, pipe_config); + ret = intel_dp_compute_config(encoder, pipe_config); + + if (IS_BROXTON(dev_priv) && ret) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, + pipe_config); + + return ret; + } static const struct drm_encoder_funcs intel_ddi_funcs = { @@ -2297,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) void intel_ddi_init(struct drm_device *dev, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -2347,10 +2371,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port) encoder = &intel_encoder->base; drm_encoder_init(dev, encoder, 
&intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; + if (IS_BROXTON(dev_priv)) + intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; intel_encoder->post_disable = intel_ddi_post_disable; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c new file mode 100644 index 000000000000..cba137f9ad3e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -0,0 +1,388 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" + +void intel_device_info_dump(struct drm_i915_private *dev_priv) +{ + const struct intel_device_info *info = &dev_priv->info; + +#define PRINT_S(name) "%s" +#define SEP_EMPTY +#define PRINT_FLAG(name) info->name ? #name "," : "" +#define SEP_COMMA , + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" + DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), + info->gen, + dev_priv->drm.pdev->device, + dev_priv->drm.pdev->revision, + DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); +#undef PRINT_S +#undef SEP_EMPTY +#undef PRINT_FLAG +#undef SEP_COMMA +} + +static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + u32 fuse, eu_dis; + + fuse = I915_READ(CHV_FUSE_GT); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? + info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. 
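intel_device_info_dump() above builds its whole format string and argument list at compile time by expanding DEV_INFO_FOR_EACH_FLAG twice: once with PRINT_S/SEP_EMPTY to produce one "%s" per flag (adjacent string literals concatenate, so the separator is empty), and once with PRINT_FLAG/SEP_COMMA to produce the matching argument for each "%s". A self-contained toy version of the same X-macro trick; FOR_EACH_FLAG, the flag names and main() are all invented for illustration:

#include <stdio.h>

#define FOR_EACH_FLAG(func, sep) \
	func(has_llc) sep \
	func(has_snoop) sep \
	func(is_mobile)
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) (flags.name ? #name "," : "")
#define SEP_COMMA ,

static struct { int has_llc, has_snoop, is_mobile; } flags = { 1, 0, 1 };

int main(void)
{
	/* Expands to printf("flags=%s%s%s\n", arg0, arg1, arg2). */
	printf("flags=" FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
	       FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
	return 0;	/* prints: flags=has_llc,is_mobile, */
}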
+ */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + +static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + int s_max = 3, ss_max = 4, eu_max = 8; + int s, ss; + u32 fuse2, s_enable, ss_disable, eu_disable; + u8 eu_mask = 0xff; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT; + + info->slice_total = hweight32(s_enable); + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & BIT(s))) + /* skip disabled slice */ + continue; + + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + for (ss = 0; ss < ss_max; ss++) { + int eu_per_ss; + + if (ss_disable & BIT(ss)) + /* skip disabled subslice */ + continue; + + eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & + eu_mask); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_per_ss == 7) + info->subslice_7eu[s] |= BIT(ss); + + info->eu_total += eu_per_ss; + } + } + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. BXT is expected to be perfectly uniform in EU + * distribution. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, + info->subslice_total) : 0; + /* + * SKL supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. BXT supports subslice + * power gating on devices with more than one subslice, and + * supports EU power gating on devices with more than one EU + * pair per subslice. + */ + info->has_slice_pg = + (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + info->slice_total > 1; + info->has_subslice_pg = + IS_BROXTON(dev_priv) && info->subslice_total > 1; + info->has_eu_pg = info->eu_per_subslice > 2; + + if (IS_BROXTON(dev_priv)) { +#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss)) + /* + * There is a HW issue in 2x6 fused down parts that requires + * Pooled EU to be enabled as a WA. The pool configuration + * changes depending upon which subslice is fused down. This + * doesn't affect if the device has all 3 subslices enabled. 
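To make the gen9 fuse arithmetic above concrete, take a hypothetical Broxton-like part with s_enable = 0b001 and ss_disable = 0b1100: one slice, subslice_per_slice = 4 - hweight32(0b1100) = 2, and subslice_total = 1 * 2 = 2. If that slice's eu_disable register reads 0x0303, each enabled subslice loses hweight8(0x03) = 2 EUs, so eu_total = 2 * (8 - 2) = 12 and eu_per_subslice = DIV_ROUND_UP(12, 2) = 6. With subslice 2 fused off and a pre-C0 stepping (so has_pooled_eu is set), the pooled-EU rule that follows would pick min_eu_in_pool = 3.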
+ */ + /* WaEnablePooledEuFor2x6:bxt */ + info->has_pooled_eu = ((info->subslice_per_slice == 3) || + (info->subslice_per_slice == 2 && + INTEL_REVID(dev_priv) < BXT_REVID_C0)); + + info->min_eu_in_pool = 0; + if (info->has_pooled_eu) { + if (IS_SS_DISABLED(ss_disable, 0) || + IS_SS_DISABLED(ss_disable, 2)) + info->min_eu_in_pool = 3; + else if (IS_SS_DISABLED(ss_disable, 1)) + info->min_eu_in_pool = 6; + else + info->min_eu_in_pool = 9; + } +#undef IS_SS_DISABLED + } +} + +static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + const int s_max = 3, ss_max = 3, eu_max = 8; + int s, ss; + u32 fuse2, eu_disable[s_max], s_enable, ss_disable; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; + + eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; + eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << + (32 - GEN8_EU_DIS0_S1_SHIFT)); + eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << + (32 - GEN8_EU_DIS1_S2_SHIFT)); + + info->slice_total = hweight32(s_enable); + + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + for (ss = 0; ss < ss_max; ss++) { + u32 n_disabled; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); + + /* + * Record which subslices have 7 EUs. + */ + if (eu_max - n_disabled == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_max - n_disabled; + } + } + + /* + * BDW is expected to always have a uniform distribution of EU across + * subslices with the exception that any one EU in any one subslice may + * be fused off for die recovery. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; + + /* + * BDW supports slice power gating on devices with more than + * one slice. + */ + info->has_slice_pg = (info->slice_total > 1); + info->has_subslice_pg = 0; + info->has_eu_pg = 0; +} + +/* + * Determine various intel_device_info fields at runtime. + * + * Use it when either: + * - it's judged too laborious to fill n static structures with the limit + * when a simple if statement does the job, + * - run-time checks (eg read fuse/strap registers) are needed. + * + * This function needs to be called: + * - after the MMIO has been setup as we are reading registers, + * - after the PCH has been detected, + * - before the first usage of the fields it can tweak. + */ +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + enum pipe pipe; + + /* + * Skylake and Broxton currently don't expose the topmost plane as its + * use is exclusive with the legacy cursor and we only want to expose + * one of those, not both. 
Until we can safely expose the topmost plane + * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, + * we don't expose the topmost plane at all to prevent ABI breakage + * down the line. + */ + if (IS_BROXTON(dev_priv)) { + info->num_sprites[PIPE_A] = 2; + info->num_sprites[PIPE_B] = 2; + info->num_sprites[PIPE_C] = 1; + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 2; + else + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 1; + + if (i915.disable_display) { + DRM_INFO("Display disabled (module parameter)\n"); + info->num_pipes = 0; + } else if (info->num_pipes > 0 && + (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && + HAS_PCH_SPLIT(dev_priv)) { + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 sfuse_strap = I915_READ(SFUSE_STRAP); + + /* + * SFUSE_STRAP is supposed to have a bit signalling the display + * is fused off. Unfortunately it seems that, at least in + * certain cases, fused off display means that PCH display + * reads don't land anywhere. In that case, we read 0s. + * + * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK + * should be set when taking over after the firmware. + */ + if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || + sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || + (dev_priv->pch_type == PCH_CPT && + !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { + DRM_INFO("Display fused off, disabling\n"); + info->num_pipes = 0; + } else if (fuse_strap & IVB_PIPE_C_DISABLE) { + DRM_INFO("PipeC fused off\n"); + info->num_pipes -= 1; + } + } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { + u32 dfsm = I915_READ(SKL_DFSM); + u8 disabled_mask = 0; + bool invalid; + int num_bits; + + if (dfsm & SKL_DFSM_PIPE_A_DISABLE) + disabled_mask |= BIT(PIPE_A); + if (dfsm & SKL_DFSM_PIPE_B_DISABLE) + disabled_mask |= BIT(PIPE_B); + if (dfsm & SKL_DFSM_PIPE_C_DISABLE) + disabled_mask |= BIT(PIPE_C); + + num_bits = hweight8(disabled_mask); + + switch (disabled_mask) { + case BIT(PIPE_A): + case BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_C): + invalid = true; + break; + default: + invalid = false; + } + + if (num_bits > info->num_pipes || invalid) + DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", + disabled_mask); + else + info->num_pipes -= num_bits; + } + + /* Initialize slice/subslice/EU info */ + if (IS_CHERRYVIEW(dev_priv)) + cherryview_sseu_info_init(dev_priv); + else if (IS_BROADWELL(dev_priv)) + broadwell_sseu_info_init(dev_priv); + else if (INTEL_INFO(dev_priv)->gen >= 9) + gen9_sseu_info_init(dev_priv); + + info->has_snoop = !info->has_llc; + + /* Snooping is broken on BXT A stepping. */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) + info->has_snoop = false; + + DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); + DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); + DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); + DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); + DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); + DRM_DEBUG_DRIVER("has slice power gating: %s\n", + info->has_slice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has subslice power gating: %s\n", + info->has_subslice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has EU power gating: %s\n", + info->has_eu_pg ? 
"y" : "n"); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3074c56a643d..c457eed76f1f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -36,6 +36,7 @@ #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_gem_dmabuf.h" #include "intel_dsi.h" #include "i915_trace.h" #include <drm/drm_atomic.h> @@ -46,7 +47,11 @@ #include <drm/drm_rect.h> #include <linux/dma_remapping.h> #include <linux/reservation.h> -#include <linux/dma-buf.h> + +static bool is_mmio_work(struct intel_flip_work *work) +{ + return work->mmio_work.func; +} /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { @@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); +static int ilk_max_pixel_rate(struct drm_atomic_state *state); +static int bxt_calc_cdclk(int max_pixclk); -typedef struct { - int min, max; -} intel_range_t; - -typedef struct { - int dot_limit; - int p2_slow, p2_fast; -} intel_p2_t; - -typedef struct intel_limit intel_limit_t; struct intel_limit { - intel_range_t dot, vco, n, m, m1, m2, p, p1; - intel_p2_t p2; + struct { + int min, max; + } dot, vco, n, m, m1, m2, p, p1; + + struct { + int dot_limit; + int p2_slow, p2_fast; + } p2; }; /* returns HPLL frequency in kHz */ @@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv) static int intel_vlv_hrawclk(struct drm_i915_private *dev_priv) { + /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } @@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv) } } -static void intel_update_rawclk(struct drm_i915_private *dev_priv) +void intel_update_rawclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); @@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv, return 270000; } -static const intel_limit_t intel_limits_i8xx_dac = { +static const struct intel_limit intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = { .p2_slow = 4, .p2_fast = 2 }, }; -static const intel_limit_t intel_limits_i8xx_dvo = { +static const struct intel_limit intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p2_slow = 4, .p2_fast = 4 }, }; -static const intel_limit_t intel_limits_i8xx_lvds = { +static const struct intel_limit intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = { .p2_slow = 14, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_i9xx_sdvo = { +static const struct intel_limit intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .p2_slow = 10, .p2_fast = 
5 }, }; -static const intel_limit_t intel_limits_i9xx_lvds = { +static const struct intel_limit intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = { }; -static const intel_limit_t intel_limits_g4x_sdvo = { +static const struct intel_limit intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = { }, }; -static const intel_limit_t intel_limits_g4x_hdmi = { +static const struct intel_limit intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_g4x_single_channel_lvds = { +static const struct intel_limit intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { }, }; -static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { +static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { }, }; -static const intel_limit_t intel_limits_pineview_sdvo = { +static const struct intel_limit intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_pineview_lvds = { +static const struct intel_limit intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const intel_limit_t intel_limits_ironlake_dac = { +static const struct intel_limit intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_ironlake_single_lvds = { +static const struct intel_limit intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds = { +static const struct intel_limit intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. 
*/ -static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p2_slow = 7, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_vlv = { +static const struct intel_limit intel_limits_vlv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = { .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; -static const intel_limit_t intel_limits_chv = { +static const struct intel_limit intel_limits_chv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = { .p2 = { .p2_slow = 1, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_bxt = { +static const struct intel_limit intel_limits_bxt = { /* FIXME: find real dot limits */ .dot = { .min = 0, .max = INT_MAX }, .vco = { .min = 4800000, .max = 6700000 }, @@ -526,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state) return drm_atomic_crtc_needs_modeset(state); } -/** - * Returns whether any output on the specified pipe is of the specified type - */ -bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) -{ - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *encoder; - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->type == type) - return true; - - return false; -} - -/** - * Returns whether any output on the specified pipe will have the specified - * type after a staged modeset is complete, i.e., the same as - * intel_pipe_has_type() but looking at encoder->new_crtc instead of - * encoder->crtc. - */ -static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, - int type) -{ - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - struct intel_encoder *encoder; - int i, num_connectors = 0; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - num_connectors++; - - encoder = to_intel_encoder(connector_state->best_encoder); - if (encoder->type == type) - return true; - } - - WARN_ON(num_connectors == 0); - - return false; -} - /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast @@ -581,7 +539,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, * divided-down version of it. 
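Both helpers removed above answered "does this pipe drive output type X?", one against the current encoder list and one against a staged atomic state; callers throughout the file now use the single intel_crtc_has_type(crtc_state, type) instead. One plausible shape for that replacement, assuming the crtc state caches a bitmask of the output types attached to it (the output_types field name is an assumption here, not confirmed by this diff):

static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
		    enum intel_output_type type)
{
	/* One bit per intel_output_type, filled in during atomic check. */
	return crtc_state->output_types & BIT(type);
}

Reading the answer out of the state rather than walking encoder->crtc links is what lets the same check serve both the committed configuration (crtc->config) and a state still being computed.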
*/ /* m1 is reserved as 0 in Pineview, n is a ring counter */ -static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -598,7 +556,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } -static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) +static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; @@ -610,7 +568,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot; } -static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -622,7 +580,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot / 5; } -int chv_calc_dpll_params(int refclk, intel_clock_t *clock) +int chv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -642,8 +600,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock) */ static bool intel_PLL_is_valid(struct drm_device *dev, - const intel_limit_t *limit, - const intel_clock_t *clock) + const struct intel_limit *limit, + const struct dpll *clock) { if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); @@ -678,13 +636,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static int -i9xx_select_p2_div(const intel_limit_t *limit, +i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { struct drm_device *dev = crtc_state->base.crtc->dev; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. * We haven't figured out how to reliably set up different @@ -713,13 +671,13 @@ i9xx_select_p2_div(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -i9xx_find_best_dpll(const intel_limit_t *limit, +i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -770,13 +728,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -pnv_find_best_dpll(const intel_limit_t *limit, +pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -825,13 +783,13 @@ pnv_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. 
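The calc helpers above implement the classic PLL equation dot = refclk * m / (n + 2) / p, where the i9xx feedback divider is m = 5 * (m1 + 2) + (m2 + 2) and the post divider is p = p1 * p2. A worked example with purely illustrative divider values (not taken from any limits table): refclk = 96000 kHz, m1 = 12, m2 = 6, n = 3, p1 = 2, p2 = 7 gives m = 5 * 14 + 8 = 78, vco = 96000 * 78 / 5 = 1497600 kHz, p = 14, and dot = 1497600 / 14 ≈ 106971 kHz. intel_PLL_is_valid() then only has to range-check each intermediate (n, m1, m2, vco, dot, ...) against the per-platform struct intel_limit.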
*/ static bool -g4x_find_best_dpll(const intel_limit_t *limit, +g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int max_n; bool found = false; /* approximately equals target * 0.00585 */ @@ -877,8 +835,8 @@ g4x_find_best_dpll(const intel_limit_t *limit, * best configuration and error found so far. Return the calculated error. */ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, - const intel_clock_t *calculated_clock, - const intel_clock_t *best_clock, + const struct dpll *calculated_clock, + const struct dpll *best_clock, unsigned int best_error_ppm, unsigned int *error_ppm) { @@ -918,14 +876,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -vlv_find_best_dpll(const intel_limit_t *limit, +vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; - intel_clock_t clock; + struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ int max_n = min(limit->n.max, refclk / 19200); @@ -977,15 +935,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -chv_find_best_dpll(const intel_limit_t *limit, +chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; - intel_clock_t clock; + struct dpll clock; uint64_t m2; int found = false; @@ -1035,10 +993,10 @@ chv_find_best_dpll(const intel_limit_t *limit, } bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock) + struct dpll *best_clock) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_bxt; + const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, target_clock, refclk, NULL, best_clock); @@ -1076,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; u32 line_mask; @@ -1112,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; @@ -1120,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) i915_reg_t reg = 
PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, - 100)) + if (intel_wait_for_register(dev_priv, + reg, I965_PIPECONF_ACTIVE, 0, + 100)) WARN(1, "pipe_off wait timed out\n"); } else { /* Wait for the display line to settle */ @@ -1203,7 +1162,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv)->gen == 5) + if (IS_GEN5(dev_priv)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -1230,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = PIPE_A; @@ -1272,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, static void assert_cursor(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; bool cur_state; if (IS_845G(dev) || IS_I865G(dev)) @@ -1334,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv, static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i; /* Primary planes are fixed to pipes on gen4+ */ @@ -1360,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int sprite; if (INTEL_INFO(dev)->gen >= 9) { @@ -1540,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc, POSTING_READ(DPLL(pipe)); udelay(150); - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + if (intel_wait_for_register(dev_priv, + DPLL(pipe), + DPLL_LOCK_VLV, + DPLL_LOCK_VLV, + 1)) DRM_ERROR("DPLL %d failed to lock\n", pipe); } @@ -1589,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc, I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); /* Check PLL is locked */ - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + if (intel_wait_for_register(dev_priv, + DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, + 1)) DRM_ERROR("PLL %d failed to lock\n", pipe); } @@ -1635,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev) struct intel_crtc *crtc; int count = 0; - for_each_intel_crtc(dev, crtc) + for_each_intel_crtc(dev, crtc) { count += crtc->base.state->active && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); + } return count; } @@ -1645,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev) static void i9xx_enable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc->config->dpll_hw_state.dpll; @@ -1717,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) static void i9xx_disable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; /* Disable 
DVO 2x clock on both PLLs if necessary */ if (IS_I830(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && !intel_num_dvo_pipes(dev)) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); @@ -1809,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, BUG(); } - if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) + if (intel_wait_for_register(dev_priv, + dpll_reg, port_mask, expected_mask, + 1000)) WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); } @@ -1817,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); i915_reg_t reg; @@ -1850,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, * here for both 8bpc and 12bpc. */ val &= ~PIPECONF_BPC_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) val |= PIPECONF_8BPC; else val |= pipeconf_val & PIPECONF_BPC_MASK; @@ -1859,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) if (HAS_PCH_IBX(dev_priv) && - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; @@ -1867,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= TRANS_PROGRESSIVE; I915_WRITE(reg, val | TRANS_ENABLE); - if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) + if (intel_wait_for_register(dev_priv, + reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, + 100)) DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); } @@ -1895,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= TRANS_PROGRESSIVE; I915_WRITE(LPT_TRANSCONF, val); - if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) + if (intel_wait_for_register(dev_priv, + LPT_TRANSCONF, + TRANS_STATE_ENABLE, + TRANS_STATE_ENABLE, + 100)) DRM_ERROR("Failed to enable PCH transcoder\n"); } static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; i915_reg_t reg; uint32_t val; @@ -1918,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_ENABLE; I915_WRITE(reg, val); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) + if (intel_wait_for_register(dev_priv, + reg, TRANS_STATE_ENABLE, 0, + 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); if (HAS_PCH_CPT(dev)) { @@ -1938,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) val &= ~TRANS_ENABLE; I915_WRITE(LPT_TRANSCONF, val); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) + if (intel_wait_for_register(dev_priv, + 
LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, + 50)) DRM_ERROR("Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. */ @@ -1957,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) static void intel_enable_pipe(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pch_transcoder; @@ -1981,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * need the check. */ if (HAS_GMCH_DISPLAY(dev_priv)) - if (crtc->config->has_dsi_encoder) + if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -2030,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) */ static void intel_disable_pipe(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; i915_reg_t reg; @@ -2068,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc) intel_wait_for_pipe_off(crtc); } -static bool need_vtd_wa(struct drm_device *dev) -{ -#ifdef CONFIG_INTEL_IOMMU - if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) - return true; -#endif - return false; -} - static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) { return IS_GEN2(dev_priv) ? 2048 : 4096; @@ -2241,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; u32 alignment; @@ -2258,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * we should always have valid PTE following the scanout preventing * the VT-d warning. 
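The open-coded need_vtd_wa() removed a few hunks up is replaced by intel_scanout_needs_vtd_wa(), used just below for the 256 KiB alignment bump. Presumably the new helper centralizes the exact check the old one made; a sketch under that assumption, where the body mirrors the deleted function and only the dev_priv-based spelling is new:

static inline bool
intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* gen6+ scanout through the IOMMU needs the padded-PTE WA */
	if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}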
*/ - if (need_vtd_wa(dev) && alignment < 256 * 1024) + if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) alignment = 256 * 1024; /* @@ -2309,7 +2278,7 @@ err_pm: return ret; } -static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; @@ -2543,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct intel_crtc *i; struct drm_i915_gem_object *obj; @@ -2639,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, const struct intel_plane_state *plane_state) { struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -2752,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary, struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane; @@ -2769,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, const struct intel_plane_state *plane_state) { struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -2897,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); @@ -3007,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -3091,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = to_intel_crtc(crtc)->pipe; I915_WRITE(PLANE_CTL(pipe, 0), 0); @@ -3110,17 +3079,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENODEV; } -static void intel_complete_page_flips(struct drm_device *dev) +static void intel_complete_page_flips(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc; - - for_each_crtc(dev, 
crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum plane plane = intel_crtc->plane; + struct intel_crtc *crtc; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip_plane(dev, plane); - } + for_each_intel_crtc(&dev_priv->drm, crtc) + intel_finish_page_flip_cs(dev_priv, crtc->pipe); } static void intel_update_primary_planes(struct drm_device *dev) @@ -3143,41 +3107,39 @@ static void intel_update_primary_planes(struct drm_device *dev) } } -void intel_prepare_reset(struct drm_device *dev) +void intel_prepare_reset(struct drm_i915_private *dev_priv) { /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev); + drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(dev); + intel_display_suspend(&dev_priv->drm); } -void intel_finish_reset(struct drm_device *dev) +void intel_finish_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space * will get its events and not get stuck. */ - intel_complete_page_flips(dev); + intel_complete_page_flips(dev_priv); /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* * Flips in the rings have been nuked by the reset, * so update the base address of all primary @@ -3187,7 +3149,7 @@ void intel_finish_reset(struct drm_device *dev) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. 
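The dev->dev_private to to_i915(dev) and dev_priv->dev to &dev_priv->drm conversions running through this file are two directions of the same relationship: the dev_priv->drm.pdev use in intel_device_info_dump() earlier shows that the drm_device is now embedded in drm_i915_private rather than pointed to. A minimal sketch of the pair, assuming to_i915() is the usual container_of() accessor (its exact definition is not part of this diff):

struct drm_i915_private {
	struct drm_device drm;	/* embedded, so no ->dev_private hop */
	/* ... */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

With the embedding, both conversions compile down to pure pointer arithmetic, and there is no separately-allocated dev_private pointer left to be NULL during early init.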
*/ - intel_update_primary_planes(dev); + intel_update_primary_planes(&dev_priv->drm); return; } @@ -3198,18 +3160,18 @@ void intel_finish_reset(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(&dev_priv->drm); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); + intel_display_resume(&dev_priv->drm); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(&dev_priv->drm); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -3224,7 +3186,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) return false; spin_lock_irq(&dev->event_lock); - pending = to_intel_crtc(crtc)->unpin_work != NULL; + pending = to_intel_crtc(crtc)->flip_work != NULL; spin_unlock_irq(&dev->event_lock); return pending; @@ -3234,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); @@ -3275,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, static void intel_fdi_normal_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3318,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) static void ironlake_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3419,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = { static void gen6_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3552,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3671,7 +3633,7 @@ train_done: static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -3708,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int 
pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -3738,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3803,7 +3765,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) if (atomic_read(&crtc->unpin_work_count) == 0) continue; - if (crtc->unpin_work) + if (crtc->flip_work) intel_wait_for_vblank(dev, crtc->pipe); return true; @@ -3815,11 +3777,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) static void page_flip_completed(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_unpin_work *work = intel_crtc->unpin_work; + struct intel_flip_work *work = intel_crtc->flip_work; - /* ensure that the unpin work is consistent wrt ->pending. */ - smp_rmb(); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; if (work->event) drm_crtc_send_vblank_event(&intel_crtc->base, work->event); @@ -3827,7 +3787,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) drm_crtc_vblank_put(&intel_crtc->base); wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + queue_work(dev_priv->wq, &work->unpin_work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -3836,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); long ret; WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); @@ -3851,9 +3811,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + work = intel_crtc->flip_work; + if (work && !is_mmio_work(work)) { WARN_ONCE(1, "Removing stuck page flip\n"); page_flip_completed(intel_crtc); } @@ -3997,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, enum pipe pch_transcoder) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), @@ -4019,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t temp; temp = I915_READ(SOUTH_CHICKEN1); @@ -4069,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) struct intel_encoder *encoder; for_each_encoder_on_crtc(dev, crtc, encoder) { - if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_EDP) return enc_to_dig_port(&encoder->base)->port; } @@ -4088,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) static void ironlake_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 temp; @@ -4138,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { + if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) { const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; @@ -4178,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) static void lpt_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -4194,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc) static void cpt_verify_modeset(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t dslreg = PIPEDSL(pipe); u32 temp; @@ -4281,8 +4243,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); + DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", + intel_crtc->base.base.id, intel_crtc->base.name, + intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), @@ -4312,9 +4275,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, bool force_detach = !fb || !plane_state->visible; - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_crtc->pipe, - drm_plane_index(&intel_plane->base)); + DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", + intel_plane->base.base.id, intel_plane->base.name, + intel_crtc->pipe, drm_plane_index(&intel_plane->base)); ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), @@ -4330,8 +4293,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* check colorkey */ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { - DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", - intel_plane->base.base.id); + DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); return -EINVAL; } @@ -4350,8 +4314,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_VYUY: break; default: - DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, fb->base.id, fb->pixel_format); + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->pixel_format); return -EINVAL; } @@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc) static void skylake_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct 
drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; struct intel_crtc_scaler_state *scaler_state = &crtc->config->scaler_state; @@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) static void ironlake_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; if (crtc->config->pch_pfit.enabled) { @@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) void hsw_enable_ips(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->ips_enabled) return; @@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc) * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) + if (intel_wait_for_register(dev_priv, + IPS_CTL, IPS_ENABLE, IPS_ENABLE, + 50)) DRM_ERROR("Timed out waiting for IPS enable\n"); } } @@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc) void hsw_disable_ips(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->ips_enabled) return; @@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc) WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->rps.hw_lock); /* wait for pcode to finish disabling IPS, which may take up to 42ms */ - if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) + if (intel_wait_for_register(dev_priv, + IPS_CTL, IPS_ENABLE, 0, + 42)) DRM_ERROR("Timed out waiting for IPS disable\n"); } else { I915_WRITE(IPS_CTL, 0); @@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) { if (intel_crtc->overlay) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev->struct_mutex); dev_priv->mm.interruptible = false; @@ -4508,7 +4477,7 @@ static void intel_post_enable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4540,7 +4509,7 @@ static void intel_pre_disable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4567,7 +4536,7 @@ static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = 
crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); struct drm_atomic_state *old_state = old_crtc_state->base.state; @@ -4641,14 +4610,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_plane_state *old_primary_state = to_intel_plane_state(old_pri_state); - intel_fbc_pre_update(crtc); + intel_fbc_pre_update(crtc, pipe_config, primary_state); if (old_primary_state->visible && (modeset || !primary_state->visible)) intel_pre_disable_primary(&crtc->base); } - if (pipe_config->disable_cxsr) { + if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) { crtc->wm.cxsr_allowed = false; /* @@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) intel_prepare_shared_dpll(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) static void haswell_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe, hsw_workaround_pipe; @@ -4841,13 +4810,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + if (intel_crtc->config->shared_dpll) intel_enable_shared_dpll(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_set_pipe_timings(intel_crtc); intel_set_pipe_src_size(intel_crtc); @@ -4863,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) &intel_crtc->config->fdi_m_n, NULL); } - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) haswell_set_pipeconf(crtc); haswell_set_pipemisc(crtc); @@ -4885,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) dev_priv->display.fdi_link_train(crtc); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_pipe_clock(intel_crtc); if (INTEL_INFO(dev)->gen >= 9) @@ -4900,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_color_load_luts(&pipe_config->base); intel_ddi_set_pipe_settings(crtc); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_transcoder_func(crtc); if (dev_priv->display.initial_watermarks != NULL) @@ -4909,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) 
intel_update_watermarks(crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. */ - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) @@ -4946,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; /* To avoid upsetting the power well on haswell only disable the pfit if @@ -4961,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -5024,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) static void haswell_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -5042,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) assert_vblank_disabled(crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. */ - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_disable_pipe(intel_crtc); if (intel_crtc->config->dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, false); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); if (INTEL_INFO(dev)->gen >= 9) @@ -5056,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) else ironlake_pfit_disable(intel_crtc, false); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_pipe_clock(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -5076,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) static void i9xx_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = crtc->config; if (!pipe_config->gmch_pfit.control) @@ -5146,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) case INTEL_OUTPUT_UNKNOWN: /* Only DDI platforms should ever use this output type */ WARN_ON_ONCE(!HAS_DDI(dev)); - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); @@ -5180,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) * run the DP detection too. 
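Throughout these hunks the per-state has_dsi_encoder flag gives way to transcoder_is_dsi(cpu_transcoder): the DSI question is answered from state the pipe already carries instead of a shadow boolean that has to be kept in sync. A sketch of the shape of that predicate; the enum values here are illustrative stand-ins, not the driver's real transcoder ids:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative transcoder ids; the real enum lives in the driver. */
enum transcoder {
	TRANSCODER_A,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
};

/* One predicate on existing state instead of a separate flag. */
static bool transcoder_is_dsi(enum transcoder t)
{
	return t == TRANSCODER_DSI_A || t == TRANSCODER_DSI_C;
}

int main(void)
{
	printf("EDP:   %d\n", transcoder_is_dsi(TRANSCODER_EDP));	/* 0 */
	printf("DSI A: %d\n", transcoder_is_dsi(TRANSCODER_DSI_A));	/* 1 */
	return 0;
}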
*/ WARN_ON_ONCE(!HAS_DDI(dev)); - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); return port_to_aux_power_domain(intel_dig_port->port); @@ -5228,7 +5201,7 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum intel_display_power_domain domain; unsigned long domains, new_domains, old_domains; @@ -5269,21 +5242,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) return max_cdclk_freq*90/100; } +static int skl_calc_cdclk(int max_pixclk, int vco); + static void intel_update_max_cdclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + int max_cdclk, vco; + + vco = dev_priv->skl_preferred_vco_freq; + WARN_ON(vco != 8100000 && vco != 8640000); + /* + * Use the lower (vco 8640) cdclk values as a + * first guess. skl_calc_cdclk() will correct it + * if the preferred vco is 8100 instead. + */ if (limit == SKL_DFSM_CDCLK_LIMIT_675) - dev_priv->max_cdclk_freq = 675000; + max_cdclk = 617143; else if (limit == SKL_DFSM_CDCLK_LIMIT_540) - dev_priv->max_cdclk_freq = 540000; + max_cdclk = 540000; else if (limit == SKL_DFSM_CDCLK_LIMIT_450) - dev_priv->max_cdclk_freq = 450000; + max_cdclk = 432000; else - dev_priv->max_cdclk_freq = 337500; + max_cdclk = 308571; + + dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROXTON(dev)) { dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev)) { @@ -5321,267 +5307,322 @@ static void intel_update_max_cdclk(struct drm_device *dev) static void intel_update_cdclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", - dev_priv->cdclk_freq); + + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n", + dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco, + dev_priv->cdclk_pll.ref); + else + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", + dev_priv->cdclk_freq); /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. + * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): + * Programmng [sic] note: bit[9:2] should be programmed to the number + * of cdclk that generates 4MHz reference clock freq which is used to + * generate GMBus clock. This will vary with the cdclk freq. */ - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. 
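The reworked intel_update_max_cdclk() above deliberately stores the lower, 8640-VCO member of each DFSM pair and lets skl_calc_cdclk() bump it when the preferred VCO is 8100. Since skl_calc_cdclk() is pure arithmetic (it is added verbatim later in this patch), the correction can be checked standalone:

#include <stdio.h>

/* skl_calc_cdclk() as added by this patch: the smallest cdclk on the
 * given VCO's ladder that covers max_pixclk (all values in kHz). */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000)
			return 617143;
		else if (max_pixclk > 432000)
			return 540000;
		else if (max_pixclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (max_pixclk > 540000)
			return 675000;
		else if (max_pixclk > 450000)
			return 540000;
		else if (max_pixclk > 337500)
			return 450000;
		else
			return 337500;
	}
}

int main(void)
{
	/* the 8640-based first guesses taken from the DFSM fuse */
	static const int guess[] = { 617143, 540000, 432000, 308571 };
	int i;

	for (i = 0; i < 4; i++)
		printf("guess %6d: vco 8100 -> %6d, vco 8640 -> %6d kHz\n",
		       guess[i],
		       skl_calc_cdclk(guess[i], 8100000),
		       skl_calc_cdclk(guess[i], 8640000));
	return 0;
}

With vco 8100000 the guesses 617143 and 432000 round up to 675000 and 450000; with vco 8640000 they are already on the ladder and pass through unchanged.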
- */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); - } +} - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev); +/* convert from kHz to .1 fixpoint MHz with -1MHz offset */ +static int skl_cdclk_decimal(int cdclk) +{ + return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) +static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - uint32_t divider; - uint32_t ratio; - uint32_t current_freq; - int ret; + int ratio; - /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ - switch (frequency) { + if (cdclk == dev_priv->cdclk_pll.ref) + return 0; + + switch (cdclk) { + default: + MISSING_CASE(cdclk); case 144000: + case 288000: + case 384000: + case 576000: + ratio = 60; + break; + case 624000: + ratio = 65; + break; + } + + return dev_priv->cdclk_pll.ref * ratio; +} + +static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(BXT_DE_PLL_ENABLE, 0); + + /* Timeout 200us */ + if (intel_wait_for_register(dev_priv, + BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0, + 1)) + DRM_ERROR("timeout waiting for DE PLL unlock\n"); + + dev_priv->cdclk_pll.vco = 0; +} + +static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +{ + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref); + u32 val; + + val = I915_READ(BXT_DE_PLL_CTL); + val &= ~BXT_DE_PLL_RATIO_MASK; + val |= BXT_DE_PLL_RATIO(ratio); + I915_WRITE(BXT_DE_PLL_CTL, val); + + I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + + /* Timeout 200us */ + if (intel_wait_for_register(dev_priv, + BXT_DE_PLL_ENABLE, + BXT_DE_PLL_LOCK, + BXT_DE_PLL_LOCK, + 1)) + DRM_ERROR("timeout waiting for DE PLL lock\n"); + + dev_priv->cdclk_pll.vco = vco; +} + +static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) +{ + u32 val, divider; + int vco, ret; + + vco = bxt_de_pll_vco(dev_priv, cdclk); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); + + /* cdclk = vco / 2 / div{1,1.5,2,4} */ + switch (DIV_ROUND_CLOSEST(vco, cdclk)) { + case 8: divider = BXT_CDCLK_CD2X_DIV_SEL_4; - ratio = BXT_DE_PLL_RATIO(60); break; - case 288000: + case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; - ratio = BXT_DE_PLL_RATIO(60); break; - case 384000: + case 3: divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; - ratio = BXT_DE_PLL_RATIO(60); - break; - case 576000: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(60); break; - case 624000: + case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(65); - break; - case 19200: - /* - * Bypass frequency with DE PLL disabled. Init ratio, divider - * to suppress GCC warning. 
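bxt_de_pll_vco() above collapses the old per-frequency table into one rule: ratio 65 for 624 MHz, ratio 60 for everything else, with vco = ref * ratio on the 19.2 MHz reference (bypass, cdclk == ref, returns 0 and is handled separately). The arithmetic is easy to sanity-check standalone; DIV_ROUND_CLOSEST is written out here since this sketch pulls in no kernel headers:

#include <stdio.h>

#define BXT_REF_KHZ		19200
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	static const int cdclk[] = { 144000, 288000, 384000, 576000, 624000 };
	int i;

	for (i = 0; i < 5; i++) {
		/* every frequency but 624 MHz uses ratio 60 */
		int ratio = cdclk[i] == 624000 ? 65 : 60;
		int vco = BXT_REF_KHZ * ratio;

		printf("cdclk %6d kHz: ratio %d, vco %7d kHz, vco/cdclk %d\n",
		       cdclk[i], ratio, vco,
		       DIV_ROUND_CLOSEST(vco, cdclk[i]));
	}
	return 0;
}

Ratio 60 gives a 1152000 kHz VCO, ratio 65 gives 1248000 kHz, so the vco/cdclk quotient always lands on 2, 3, 4 or 8, which is what the divider switch in bxt_set_cdclk() relies on.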
- */ - ratio = 0; - divider = 0; break; default: - DRM_ERROR("unsupported CDCLK freq %d", frequency); + WARN_ON(cdclk != dev_priv->cdclk_pll.ref); + WARN_ON(vco != 0); - return; + divider = BXT_CDCLK_CD2X_DIV_SEL_1; + break; } - mutex_lock(&dev_priv->rps.hw_lock); /* Inform power controller of upcoming frequency change */ + mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } - current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; - /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ - current_freq = current_freq * 500 + 1000; + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_disable(dev_priv); - /* - * DE PLL has to be disabled when - * - setting to 19.2MHz (bypass, PLL isn't used) - * - before setting to 624MHz (PLL needs toggling) - * - before setting to any frequency from 624MHz (PLL needs toggling) - */ - if (frequency == 19200 || frequency == 624000 || - current_freq == 624000) { - I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), - 1)) - DRM_ERROR("timout waiting for DE PLL unlock\n"); - } - - if (frequency != 19200) { - uint32_t val; - - val = I915_READ(BXT_DE_PLL_CTL); - val &= ~BXT_DE_PLL_RATIO_MASK; - val |= ratio; - I915_WRITE(BXT_DE_PLL_CTL, val); - - I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL lock\n"); - - val = I915_READ(CDCLK_CTL); - val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; - val |= divider; - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (frequency >= 500000) - val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + if (dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_enable(dev_priv, vco); - val &= ~CDCLK_FREQ_DECIMAL_MASK; - /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ - val |= (frequency - 1000) / 500; - I915_WRITE(CDCLK_CTL, val); - } + val = divider | skl_cdclk_decimal(cdclk); + /* + * FIXME if only the cd2x divider needs changing, it could be done + * without shutting off the pipe (if only one pipe is active). + */ + val |= BXT_CDCLK_CD2X_PIPE_NONE; + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. 
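The divider switch above is the counterpart: with the VCO pinned by the ratio rule, DIV_ROUND_CLOSEST(vco, cdclk) can only come out to 2, 3, 4 or 8, each mapping to one CD2X divider, and SSA precharge is enabled only at 500 MHz and up. A sketch of that mapping with symbolic stand-ins for the BXT_CDCLK_CD2X_DIV_SEL_* encodings (bypass, vco == 0, folds into the /1 default as in the patch):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Symbolic stand-ins for the hardware divider field encodings. */
enum cd2x { DIV_1, DIV_1_5, DIV_2, DIV_4 };

static const char *cd2x_name[] = { "/1", "/1.5", "/2", "/4" };

static enum cd2x bxt_cd2x_div_sel(int vco, int cdclk)
{
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:  return DIV_4;
	case 4:  return DIV_2;
	case 3:  return DIV_1_5;
	case 2:  return DIV_1;
	default: return DIV_1;	/* bypass: vco == 0, cdclk == ref */
	}
}

int main(void)
{
	int vco = 1152000;	/* ratio 60 on the 19.2 MHz reference */
	static const int cdclk[] = { 144000, 288000, 384000, 576000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("cdclk %6d: cd2x %s, SSA precharge %s\n", cdclk[i],
		       cd2x_name[bxt_cd2x_div_sel(vco, cdclk[i])],
		       cdclk[i] >= 500000 ? "on" : "off");
	return 0;
}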
+ */ + if (cdclk >= 500000) + val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + I915_WRITE(CDCLK_CTL, val); mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - DIV_ROUND_UP(frequency, 25000)); + DIV_ROUND_UP(cdclk, 25000)); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); } -static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { - if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) - return false; + u32 cdctl, expected; - /* TODO: Check for a valid CDCLK rate */ + intel_update_cdclk(&dev_priv->drm); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) + goto sanitize; - return false; - } + /* DPLL okay; verify the cdclock + * + * Some BIOS versions leave an incorrect decimal frequency value and + * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, + * so sanitize this register. + */ + cdctl = I915_READ(CDCLK_CTL); + /* + * Let's ignore the pipe field, since BIOS could have configured the + * dividers both synching to an active pipe, or asynchronously + * (PIPE_NONE). + */ + cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); + expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. + */ + if (dev_priv->cdclk_freq >= 500000) + expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - return false; - } + if (cdctl == expected) + /* All well; nothing to sanitize */ + return; - return true; -} +sanitize: + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) -{ - return broxton_cdclk_is_enabled(dev_priv); + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } -void broxton_init_cdclk(struct drm_i915_private *dev_priv) +void bxt_init_cdclk(struct drm_i915_private *dev_priv) { - /* check if cd clock is enabled */ - if (broxton_cdclk_is_enabled(dev_priv)) { - DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n"); - return; - } + bxt_sanitize_cdclk(dev_priv); - DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) + return; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
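bxt_sanitize_cdclk() above introduces the pattern both platforms now use: recompute what CDCLK_CTL should contain for the cdclk that was just read back, mask out the CD2X pipe-select bits, and compare; any mismatch forces a full reprogram via the cdclk_freq = 0 and vco = -1 sentinels. The comparison itself is pure bit arithmetic; the register layout below is made up for the sketch, the real masks live in i915_reg.h:

#include <stdbool.h>
#include <stdio.h>

/* Made-up bit positions, only to show the shape of the check. */
#define CD2X_PIPE_MASK	(3u << 20)	/* pipe select: don't care */
#define DIV_SEL_MASK	(3u << 22)	/* kept as programmed */
#define SSA_PRECHARGE	(1u << 16)

static unsigned int skl_cdclk_decimal(int cdclk)
{
	return (cdclk - 1000 + 250) / 500;	/* DIV_ROUND_CLOSEST */
}

/* The sanitize comparison: diff the readback against the value the
 * driver itself would have written for this cdclk. */
static bool bxt_cdclk_ctl_sane(unsigned int cdctl, int cdclk)
{
	unsigned int expected;

	cdctl &= ~CD2X_PIPE_MASK;
	expected = (cdctl & DIV_SEL_MASK) | skl_cdclk_decimal(cdclk);
	if (cdclk >= 500000)
		expected |= SSA_PRECHARGE;
	return cdctl == expected;
}

int main(void)
{
	unsigned int ctl = (1u << 22) | SSA_PRECHARGE |
			   skl_cdclk_decimal(576000);

	printf("clean BIOS: %d\n", bxt_cdclk_ctl_sane(ctl, 576000));	/* 1 */
	/* stale decimal field left behind: forces reprogramming */
	printf("stale ctl:  %d\n", bxt_cdclk_ctl_sane(ctl ^ 1, 576000));	/* 0 */
	return 0;
}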
- * - check if setting the max (or any) cdclk freq is really necessary - * here, it belongs to modeset time */ - broxton_set_cdclk(dev_priv, 624000); - - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); +} - udelay(10); +void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) +{ + bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); +} - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout!\n"); +static int skl_calc_cdclk(int max_pixclk, int vco) +{ + if (vco == 8640000) { + if (max_pixclk > 540000) + return 617143; + else if (max_pixclk > 432000) + return 540000; + else if (max_pixclk > 308571) + return 432000; + else + return 308571; + } else { + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; + } } -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +static void +skl_dpll0_update(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + u32 val; - udelay(10); + dev_priv->cdclk_pll.ref = 24000; + dev_priv->cdclk_pll.vco = 0; - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout!\n"); + val = I915_READ(LCPLL1_CTL); + if ((val & LCPLL_PLL_ENABLE) == 0) + return; - /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ - broxton_set_cdclk(dev_priv, 19200); -} + if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) + return; -static const struct skl_cdclk_entry { - unsigned int freq; - unsigned int vco; -} skl_cdclk_frequencies[] = { - { .freq = 308570, .vco = 8640 }, - { .freq = 337500, .vco = 8100 }, - { .freq = 432000, .vco = 8640 }, - { .freq = 450000, .vco = 8100 }, - { .freq = 540000, .vco = 8100 }, - { .freq = 617140, .vco = 8640 }, - { .freq = 675000, .vco = 8100 }, -}; + val = I915_READ(DPLL_CTRL1); -static unsigned int skl_cdclk_decimal(unsigned int freq) -{ - return (freq - 1000) / 500; + if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | + DPLL_CTRL1_SSC(SKL_DPLL0) | + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) + return; + + switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8100000; + break; + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8640000; + break; + default: + MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); + break; + } } -static unsigned int skl_cdclk_get_vco(unsigned int freq) +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) { - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { - const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; + bool changed = dev_priv->skl_preferred_vco_freq != vco; - if (e->freq == freq) - return e->vco; - } + dev_priv->skl_preferred_vco_freq = vco; - return 8100; + if (changed) + intel_update_max_cdclk(&dev_priv->drm); } static void -skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) +skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - unsigned int 
min_freq; + int min_cdclk = skl_calc_cdclk(0, vco); u32 val; - /* select the minimum CDCLK before enabling DPLL 0 */ - val = I915_READ(CDCLK_CTL); - val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; - val |= CDCLK_FREQ_337_308; - - if (required_vco == 8640) - min_freq = 308570; - else - min_freq = 337500; - - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); + WARN_ON(vco != 8100000 && vco != 8640000); + /* select the minimum CDCLK before enabling DPLL 0 */ + val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); I915_WRITE(CDCLK_CTL, val); POSTING_READ(CDCLK_CTL); @@ -5592,14 +5633,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. * The modeset code is responsible for the selection of the exact link * rate later on, with the constraint of choosing a frequency that - * works with required_vco. + * works with vco. */ val = I915_READ(DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - if (required_vco == 8640) + if (vco == 8640000) val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0); else @@ -5611,8 +5652,27 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); - if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) + if (intel_wait_for_register(dev_priv, + LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, + 5)) DRM_ERROR("DPLL0 not locked\n"); + + dev_priv->cdclk_pll.vco = vco; + + /* We'll want to keep using the current vco from now on. */ + skl_set_preferred_cdclk_vco(dev_priv, vco); +} + +static void +skl_dpll0_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); + if (intel_wait_for_register(dev_priv, + LCPLL1_CTL, LCPLL_PLL_LOCK, 0, + 1)) + DRM_ERROR("Couldn't disable DPLL0\n"); + + dev_priv->cdclk_pll.vco = 0; } static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) @@ -5642,12 +5702,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) return false; } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) +static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 freq_select, pcu_ack; - DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); + WARN_ON((cdclk == 24000) != (vco == 0)); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { DRM_ERROR("failed to inform PCU about cdclk change\n"); @@ -5655,7 +5717,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) } /* set CDCLK_CTL */ - switch(freq) { + switch (cdclk) { case 450000: case 432000: freq_select = CDCLK_FREQ_450_432; @@ -5665,20 +5727,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) freq_select = CDCLK_FREQ_540; pcu_ack = 2; break; - case 308570: + case 308571: case 337500: default: freq_select = CDCLK_FREQ_337_308; pcu_ack = 0; break; - case 617140: + case 617143: case 675000: freq_select = CDCLK_FREQ_675_617; pcu_ack = 3; break; } - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + skl_dpll0_disable(dev_priv); + + if (dev_priv->cdclk_pll.vco 
!= vco) + skl_dpll0_enable(dev_priv, vco); + + I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ @@ -5689,52 +5758,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) intel_update_cdclk(dev); } +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); + void skl_uninit_cdclk(struct drm_i915_private *dev_priv) { - /* disable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); - - udelay(10); - - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout\n"); - - /* disable DPLL0 */ - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) - DRM_ERROR("Couldn't disable DPLL0\n"); + skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0); } void skl_init_cdclk(struct drm_i915_private *dev_priv) { - unsigned int required_vco; + int cdclk, vco; - /* DPLL0 not enabled (happens on early BIOS versions) */ - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { - /* enable DPLL0 */ - required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); - skl_dpll0_enable(dev_priv, required_vco); - } + skl_sanitize_cdclk(dev_priv); - /* set CDCLK to the frequency the BIOS chose */ - skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); - - /* enable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) { + /* + * Use the current vco as our initial + * guess as to what the preferred vco is. + */ + if (dev_priv->skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(dev_priv, + dev_priv->cdclk_pll.vco); + return; + } - udelay(10); + vco = dev_priv->skl_preferred_vco_freq; + if (vco == 0) + vco = 8100000; + cdclk = skl_calc_cdclk(0, vco); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); + skl_set_cdclk(dev_priv, cdclk, vco); } -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - int freq = dev_priv->skl_boot_cdclk; + uint32_t cdctl, expected; /* * check if the pre-os intialized the display @@ -5744,8 +5802,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; + intel_update_cdclk(&dev_priv->drm); /* Is PLL enabled and locked ? */ - if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) goto sanitize; /* DPLL okay; verify the cdclock @@ -5754,25 +5814,26 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. 
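skl_init_cdclk() above now defers to the sanitize pass and only programs the hardware when the BIOS state did not survive it; a valid BIOS VCO is additionally adopted as the preferred VCO. A model of that decision flow, reduced to three integers (the 8100000 default VCO and skl_calc_cdclk(0, vco) results are taken from the hunks above; everything else is scaffolding):

#include <stdio.h>

/* vco == 0 models DPLL0 being off; cdclk_freq == 0 models "unknown". */
struct skl_state { int cdclk_freq, vco, preferred_vco; };

static int skl_calc_cdclk0(int vco)	/* skl_calc_cdclk(0, vco) */
{
	return vco == 8640000 ? 308571 : 337500;
}

static void skl_init_cdclk_model(struct skl_state *s)
{
	if (s->cdclk_freq != 0 && s->vco != 0) {
		/* BIOS state survived sanitize: keep it, and adopt its
		 * vco as the preferred one if none was set yet. */
		if (s->preferred_vco == 0)
			s->preferred_vco = s->vco;
		return;
	}

	if (s->preferred_vco == 0)
		s->preferred_vco = 8100000;
	s->vco = s->preferred_vco;
	s->cdclk_freq = skl_calc_cdclk0(s->vco);
}

int main(void)
{
	struct skl_state bios_ok = { 540000, 8100000, 0 };
	struct skl_state untouched = { 0, 0, 0 };

	skl_init_cdclk_model(&bios_ok);
	skl_init_cdclk_model(&untouched);
	printf("bios ok:   %d kHz, vco %d\n", bios_ok.cdclk_freq, bios_ok.vco);
	printf("untouched: %d kHz, vco %d\n", untouched.cdclk_freq,
	       untouched.vco);
	return 0;
}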
*/ - if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) + cdctl = I915_READ(CDCLK_CTL); + expected = (cdctl & CDCLK_FREQ_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + if (cdctl == expected) /* All well; nothing to sanitize */ - return false; + return; + sanitize: - /* - * As of now initialize with max cdclk till - * we get dynamic cdclk support - * */ - dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; - skl_init_cdclk(dev_priv); + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - /* we did have to sanitize */ - return true; + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } /* Adjust CDclk dividers to allow high res or save power if possible */ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; WARN_ON(dev_priv->display.get_display_clock_speed(dev) @@ -5837,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; WARN_ON(dev_priv->display.get_display_clock_speed(dev) @@ -5906,21 +5967,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, return 200000; } -static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, - int max_pixclk) +static int bxt_calc_cdclk(int max_pixclk) { - /* - * FIXME: - * - remove the guardband, it's not needed on BXT - * - set 19.2MHz bypass frequency if there are no active pipes - */ - if (max_pixclk > 576000*9/10) + if (max_pixclk > 576000) return 624000; - else if (max_pixclk > 384000*9/10) + else if (max_pixclk > 384000) return 576000; - else if (max_pixclk > 288000*9/10) + else if (max_pixclk > 288000) return 384000; - else if (max_pixclk > 144000*9/10) + else if (max_pixclk > 144000) return 288000; else return 144000; @@ -5931,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev, struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; unsigned max_pixclk = 0, i; @@ -5958,14 +6013,11 @@ static int intel_mode_max_pixclk(struct drm_device *dev, static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int max_pixclk = intel_mode_max_pixclk(dev, state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5975,22 +6027,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) return 0; } -static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) +static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int max_pixclk = intel_mode_max_pixclk(dev, state); + int max_pixclk = ilk_max_pixel_rate(state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if 
(max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = - broxton_calc_cdclk(dev_priv, max_pixclk); + bxt_calc_cdclk(max_pixclk); if (!intel_state->active_crtcs) - intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + intel_state->dev_cdclk = bxt_calc_cdclk(0); return 0; } @@ -6034,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); unsigned req_cdclk = old_intel_state->dev_cdclk; @@ -6073,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (WARN_ON(intel_crtc->active)) return; - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); intel_set_pipe_src_size(intel_crtc); if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); I915_WRITE(CHV_CANVAS(pipe), 0); @@ -6125,7 +6172,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) static void i9xx_set_pll_dividers(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); @@ -6146,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) i9xx_set_pll_dividers(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -6182,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) static void i9xx_pfit_disable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->gmch_pfit.control) return; @@ -6197,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc) static void i9xx_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -6223,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - if (!intel_crtc->config->has_dsi_encoder) { + if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev)) @@ -6252,7 +6299,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; if (to_intel_plane_state(crtc->primary->state)->visible) { - WARN_ON(intel_crtc->unpin_work); + WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); @@ -6262,8 +6309,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", - crtc->base.id); + 
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", + crtc->base.id, crtc->name); WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); crtc->state->active = false; @@ -6541,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); pipe_config->ips_enabled = i915.enable_ips && hsw_crtc_supports_ips(crtc) && @@ -6561,12 +6608,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int clock_limit = dev_priv->max_dotclk_freq; - /* FIXME should check pixel clock limits on all platforms */ if (INTEL_INFO(dev)->gen < 4) { - int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; + clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -6574,16 +6621,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, */ if (intel_crtc_supports_double_wide(crtc) && adjusted_mode->crtc_clock > clock_limit) { - clock_limit *= 2; + clock_limit = dev_priv->max_dotclk_freq; pipe_config->double_wide = true; } + } - if (adjusted_mode->crtc_clock > clock_limit) { - DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, - yesno(pipe_config->double_wide)); - return -EINVAL; - } + if (adjusted_mode->crtc_clock > clock_limit) { + DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); + return -EINVAL; } /* @@ -6592,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * - LVDS dual channel mode * - Double wide pipe */ - if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && + if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) pipe_config->pipe_src_w &= ~1; @@ -6615,81 +6662,103 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int skylake_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t linkrate; + uint32_t cdctl; - if (!(lcpll1 & LCPLL_PLL_ENABLE)) - return 24000; /* 24MHz is the cd freq with NSSC ref */ + skl_dpll0_update(dev_priv); - if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) - return 540000; + if (dev_priv->cdclk_pll.vco == 0) + return dev_priv->cdclk_pll.ref; - linkrate = (I915_READ(DPLL_CTRL1) & - DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; + cdctl = I915_READ(CDCLK_CTL); - if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || - linkrate == DPLL_CTRL1_LINK_RATE_1080) { - /* vco 8640 */ + if (dev_priv->cdclk_pll.vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 432000; case CDCLK_FREQ_337_308: - return 308570; + return 308571; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: - return 617140; + return 617143; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } else { - /* vco 8100 */ switch (cdctl & 
CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 450000; case CDCLK_FREQ_337_308: return 337500; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: return 675000; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } - /* error case, do as if DPLL0 isn't enabled */ - return 24000; + return dev_priv->cdclk_pll.ref; +} + +static void bxt_de_pll_update(struct drm_i915_private *dev_priv) +{ + u32 val; + + dev_priv->cdclk_pll.ref = 19200; + dev_priv->cdclk_pll.vco = 0; + + val = I915_READ(BXT_DE_PLL_ENABLE); + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + return; + + if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) + return; + + val = I915_READ(BXT_DE_PLL_CTL); + dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * + dev_priv->cdclk_pll.ref; } static int broxton_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); - int cdclk; + u32 divider; + int div, vco; + + bxt_de_pll_update(dev_priv); - if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) - return 19200; + vco = dev_priv->cdclk_pll.vco; + if (vco == 0) + return dev_priv->cdclk_pll.ref; - cdclk = 19200 * pll_ratio / 2; + divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { + switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: - return cdclk; /* 576MHz or 624MHz */ + div = 2; + break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - return cdclk * 2 / 3; /* 384MHz */ + div = 3; + break; case BXT_CDCLK_CD2X_DIV_SEL_2: - return cdclk / 2; /* 288MHz */ + div = 4; + break; case BXT_CDCLK_CD2X_DIV_SEL_4: - return cdclk / 4; /* 144MHz */ + div = 8; + break; + default: + MISSING_CASE(divider); + return dev_priv->cdclk_pll.ref; } - /* error case, do as if DE PLL isn't enabled */ - return 19200; + return DIV_ROUND_CLOSEST(vco, div); } static int broadwell_get_display_clock_speed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6709,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev) static int haswell_get_display_clock_speed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6843,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev) static unsigned int intel_hpll_vco(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); static const unsigned int blb_vco[8] = { [0] = 3200000, [1] = 4000000, @@ -7063,7 +7132,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) static void i9xx_update_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; u32 fp, fp2 = 0; @@ -7081,7 +7150,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && reduced_clock) { 
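/*
 * FP0 above holds the divisors for the normal dot clock; when an LVDS
 * panel has a reduced_clock, FP1 is loaded with the downclocked
 * divisors and lowfreq_avail records that a lower-refresh mode can be
 * selected later.
 */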
crtc_state->dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; @@ -7123,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); @@ -7137,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; enum transcoder transcoder = crtc->config->cpu_transcoder; @@ -7200,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc, pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!pipe_config->has_dsi_encoder) + if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; @@ -7217,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc, pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!pipe_config->has_dsi_encoder) + if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; pipe_config->dpll_hw_state.dpll_md = @@ -7228,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; u32 mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; @@ -7287,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, /* Set HBR and RBR LPF coefficients */ if (pipe_config->port_clock == 162000 || - intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x00d0000f); - if (pipe_config->has_dp_encoder) { + if (intel_crtc_has_dp_encoder(pipe_config)) { /* Use SSC source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), @@ -7315,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + if (intel_crtc_has_dp_encoder(crtc->config)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); @@ -7328,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 loopfilter, tribuf_calcntr; @@ -7487,22 +7555,18 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 
- intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll; - bool is_sdvo; struct dpll *clock = &crtc_state->dpll; i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); - is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) || - intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI); - dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -7512,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, << SDVO_MULTIPLIER_SHIFT_HIRES; } - if (is_sdvo) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; - if (crtc_state->has_dp_encoder) + if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -7545,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -7563,10 +7628,10 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, static void i8xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll; struct dpll *clock = &crtc_state->dpll; @@ -7574,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) @@ -7585,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -7601,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -7618,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) crtc_vtotal -= 1; crtc_vblank_end -= 1; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; else vsyncshift = adjusted_mode->crtc_hsync_start - @@ 
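A recurring change in these hunks is dev->dev_private becoming to_i915(dev). A plausible sketch of that accessor, assuming struct drm_i915_private embeds its struct drm_device as the drm member, which the &dev_priv->drm conversions later in this diff suggest:

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

Compared with chasing the dev_private pointer, the container_of() form is a compile-time offset and cannot be NULL for an embedded device.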
-7663,7 +7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; /* pipesrc controls the size that is scaled from, which should @@ -7678,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; uint32_t tmp; @@ -7713,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; tmp = I915_READ(PIPESRC(crtc->pipe)); @@ -7751,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t pipeconf; pipeconf = 0; @@ -7797,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (INTEL_INFO(dev)->gen < 4 || - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; @@ -7816,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + struct drm_i915_private *dev_priv = to_i915(dev); + const struct intel_limit *limit; int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } limit = &intel_limits_i8xx_lvds; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { limit = &intel_limits_i8xx_dvo; } else { limit = &intel_limits_i8xx_dac; @@ -7852,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + struct drm_i915_private *dev_priv = to_i915(dev); + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -7869,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, limit = 
&intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || - intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else { /* The option is for other outputs */ @@ -7895,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + struct drm_i915_private *dev_priv = to_i915(dev); + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -7929,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + struct drm_i915_private *dev_priv = to_i915(dev); + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -7963,7 +8028,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_chv; + const struct intel_limit *limit = &intel_limits_chv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -7984,7 +8049,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_vlv; + const struct intel_limit *limit = &intel_limits_vlv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -8005,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) @@ -8032,9 +8097,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; - intel_clock_t clock; + struct dpll clock; u32 mdiv; int refclk = 100000; @@ -8060,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct 
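The *_crtc_compute_clock() variants above all follow the same pattern: start from a platform default reference clock and override it only for LVDS panels with SSC enabled. A purely hypothetical helper, collecting just the defaults visible in these hunks (the driver keeps them per-platform instead):

/* Hypothetical summary only; values in kHz, taken from the hunks above. */
static int example_default_refclk(struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 48000;	/* i8xx */
	if (IS_GEN5(dev_priv))
		return 120000;	/* ironlake */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 100000;
	return 96000;		/* g4x/pnv/i9xx */
}

With SSC the override is refclk = dev_priv->vbt.lvds_ssc_freq, as each hunk shows.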
drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset; int pipe = crtc->pipe, plane = crtc->plane; int fourcc, pixel_format; @@ -8128,10 +8193,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); - intel_clock_t clock; + struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; @@ -8162,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -8273,7 +8338,7 @@ out: static void ironlake_init_pch_refclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; int i; u32 val, final; @@ -8544,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, bool with_fdi) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) @@ -8583,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, /* Sequence to disable CLKOUT_DP */ static void lpt_disable_clkout_dp(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; mutex_lock(&dev_priv->sb_lock); @@ -8704,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev) static void ironlake_set_pipeconf(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; uint32_t val; @@ -8746,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) static void haswell_set_pipeconf(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; u32 val = 0; @@ -8765,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) static void haswell_set_pipemisc(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { @@ -8814,41 +8879,17 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_atomic_state *state = crtc_state->base.state; - 
struct drm_connector *connector; - struct drm_connector_state *connector_state; - struct intel_encoder *encoder; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll, fp, fp2; - int factor, i; - bool is_lvds = false, is_sdvo = false; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - break; - default: - break; - } - } + int factor; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; - if (is_lvds) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) @@ -8872,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, dpll = 0; - if (is_lvds) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -8880,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, dpll |= (crtc_state->pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; - if (is_sdvo) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; - if (crtc_state->has_dp_encoder) + + if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -8905,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, break; } - if (is_lvds && intel_panel_use_ssc(dev_priv)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -8921,11 +8965,11 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - intel_clock_t reduced_clock; + struct drm_i915_private *dev_priv = to_i915(dev); + struct dpll reduced_clock; bool has_reduced_clock = false; struct intel_shared_dpll *pll; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 120000; memset(&crtc_state->dpll_hw_state, 0, @@ -8937,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->has_pch_encoder) return 0; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", dev_priv->vbt.lvds_ssc_freq); @@ -8976,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && has_reduced_clock) crtc->lowfreq_avail = true; @@ -8987,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); @@ -9005,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc 
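The M/N readout here mirrors the set_m_n programming earlier in this section. For orientation, the registers encode a data-rate ratio; a hedged sketch of the arithmetic only, not the driver's helper (the real intel_link_compute_m_n() also reduces the ratio to fit the register width):

/* Illustrative: DP data M/N is (bpp/8 * pixel_clock) / (nlanes * link_clock). */
static void example_data_m_n(int bpp, int pixel_clock_khz,
			     int nlanes, int link_clock_khz,
			     u32 *m, u32 *n)
{
	*m = bpp * pixel_clock_khz;
	*n = 8 * nlanes * link_clock_khz;
}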
*crtc, struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; if (INTEL_INFO(dev)->gen >= 5) { @@ -9063,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; uint32_t ps_ctrl = 0; int id = -1; @@ -9094,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset, stride_mult, tiling; int pipe = crtc->pipe; int fourcc, pixel_format; @@ -9177,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; tmp = I915_READ(PF_CTL(crtc->pipe)); @@ -9202,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset; int pipe = crtc->pipe; int fourcc, pixel_format; @@ -9270,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -9320,6 +9364,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. 
+ */ pll_id = (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); @@ -9361,7 +9409,7 @@ out: static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) @@ -9395,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (IS_HASWELL(dev)) return I915_READ(D_COMP_HSW); @@ -9405,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (IS_HASWELL(dev)) { mutex_lock(&dev_priv->rps.hw_lock); @@ -9451,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, I915_WRITE(LCPLL_CTL, val); POSTING_READ(LCPLL_CTL); - if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) + if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) DRM_ERROR("LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); @@ -9506,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_PLL_DISABLE; I915_WRITE(LCPLL_CTL, val); - if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) + if (intel_wait_for_register(dev_priv, + LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, + 5)) DRM_ERROR("LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { @@ -9520,7 +9570,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) } intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); } /* @@ -9548,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Enabling package C8+\n"); @@ -9565,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Disabling package C8+\n"); @@ -9580,21 +9630,21 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) } } -static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) +static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); unsigned int req_cdclk = old_intel_state->dev_cdclk; - broxton_set_cdclk(to_i915(dev), req_cdclk); + bxt_set_cdclk(to_i915(dev), req_cdclk); } /* compute the max rate for new configuration */ static int ilk_max_pixel_rate(struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(state->dev); struct drm_crtc *crtc; struct drm_crtc_state *cstate; struct intel_crtc_state *crtc_state; @@ -9630,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
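The LCPLL lock waits above switch from the open-coded wait_for() macro to intel_wait_for_register(dev_priv, reg, mask, value, timeout), which polls until (read & mask) == value. A minimal sketch of those semantics under that assumption; the driver's helper may additionally handle forcewake and fast/slow polling:

static int example_wait_for_register(struct drm_i915_private *dev_priv,
				     i915_reg_t reg, u32 mask, u32 value,
				     unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((I915_READ(reg) & mask) == value)
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
}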
drm_i915_private *dev_priv = to_i915(dev); uint32_t val, data; int ret; @@ -9707,6 +9757,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) cdclk, dev_priv->cdclk_freq); } +static int broadwell_calc_cdclk(int max_pixclk) +{ + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; +} + static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); @@ -9718,14 +9780,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. */ - if (max_pixclk > 540000) - cdclk = 675000; - else if (max_pixclk > 450000) - cdclk = 540000; - else if (max_pixclk > 337500) - cdclk = 450000; - else - cdclk = 337500; + cdclk = broadwell_calc_cdclk(max_pixclk); if (cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", @@ -9735,7 +9790,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk = intel_state->dev_cdclk = cdclk; if (!intel_state->active_crtcs) - intel_state->dev_cdclk = 337500; + intel_state->dev_cdclk = broadwell_calc_cdclk(0); return 0; } @@ -9750,13 +9805,51 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) broadwell_set_cdclk(dev, req_cdclk); } +static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->dev); + const int max_pixclk = ilk_max_pixel_rate(state); + int vco = intel_state->cdclk_pll_vco; + int cdclk; + + /* + * FIXME should also account for plane ratio + * once 64bpp pixel formats are supported. + */ + cdclk = skl_calc_cdclk(max_pixclk, vco); + + /* + * FIXME move the cdclk calculation to + * compute_config() so we can fail gracefully. + */ + if (cdclk > dev_priv->max_cdclk_freq) { + DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", + cdclk, dev_priv->max_cdclk_freq); + cdclk = dev_priv->max_cdclk_freq; + } + + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = skl_calc_cdclk(0, vco); + + return 0; +} + +static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) +{ + struct drm_i915_private *dev_priv = to_i915(old_state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); + unsigned int req_cdclk = intel_state->dev_cdclk; + unsigned int req_vco = intel_state->cdclk_pll_vco; + + skl_set_cdclk(dev_priv, req_cdclk, req_vco); +} + static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct intel_encoder *intel_encoder = - intel_ddi_get_crtc_new_encoder(crtc_state); - - if (intel_encoder->type != INTEL_OUTPUT_DSI) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { if (!intel_ddi_pll_select(crtc, crtc_state)) return -EINVAL; } @@ -9866,10 +9959,14 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, unsigned long *power_domain_mask) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 tmp; + /* + * The pipe->transcoder mapping is fixed with the exception of the eDP + * transcoder handled below.
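Factoring the ladder into broadwell_calc_cdclk() makes the threshold mapping easy to spot-check, and lets the no-active-crtcs case reuse the same function instead of a hardcoded 337500:

/* Spot checks of the ladder above; all values in kHz. */
static void example_cdclk_checks(void)
{
	WARN_ON(broadwell_calc_cdclk(620000) != 675000);	/* > 540000 */
	WARN_ON(broadwell_calc_cdclk(460000) != 540000);	/* > 450000 */
	WARN_ON(broadwell_calc_cdclk(400000) != 450000);	/* > 337500 */
	WARN_ON(broadwell_calc_cdclk(0) != 337500);	/* minimum, no active crtcs */
}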
+ */ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; /* @@ -9913,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, unsigned long *power_domain_mask) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; enum port port; enum transcoder cpu_transcoder; u32 tmp; - pipe_config->has_dsi_encoder = false; - for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { if (port == PORT_A) cpu_transcoder = TRANSCODER_DSI_A; @@ -9952,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, continue; pipe_config->cpu_transcoder = cpu_transcoder; - pipe_config->has_dsi_encoder = true; break; } - return pipe_config->has_dsi_encoder; + return transcoder_is_dsi(pipe_config->cpu_transcoder); } static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll; enum port port; uint32_t tmp; @@ -10006,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; unsigned long power_domain_mask; bool active; @@ -10020,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); - if (IS_BROXTON(dev_priv)) { - bxt_get_dsi_transcoder_state(crtc, pipe_config, - &power_domain_mask); - WARN_ON(active && pipe_config->has_dsi_encoder); - if (pipe_config->has_dsi_encoder) - active = true; + if (IS_BROXTON(dev_priv) && + bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { + WARN_ON(active); + active = true; } if (!active) goto out; - if (!pipe_config->has_dsi_encoder) { + if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -10082,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; @@ -10145,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; uint32_t cntl = 0; @@ -10193,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 base = intel_crtc->cursor_addr; @@ -10337,10 +10429,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_i915_gem_object *obj; struct 
drm_mode_fb_cmd2 mode_cmd = { 0 }; - obj = i915_gem_alloc_object(dev, + obj = i915_gem_object_create(dev, intel_framebuffer_size_for_mode(mode, bpp)); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + if (IS_ERR(obj)) + return ERR_CAST(obj); mode_cmd.width = mode->hdisplay; mode_cmd.height = mode->vdisplay; @@ -10360,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev, struct drm_display_mode *mode) { #ifdef CONFIG_DRM_FBDEV_EMULATION - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; @@ -10630,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, static int i9xx_pll_refclk(struct drm_device *dev, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) @@ -10648,11 +10740,11 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; - intel_clock_t clock; + struct dpll clock; int port_clock; int refclk = i9xx_pll_refclk(dev, pipe_config); @@ -10774,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; struct drm_display_mode *mode; @@ -10826,48 +10918,20 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -void intel_mark_busy(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->mm.busy) - return; - - intel_runtime_pm_get(dev_priv); - i915_update_gfx_val(dev_priv); - if (INTEL_INFO(dev)->gen >= 6) - gen6_rps_busy(dev_priv); - dev_priv->mm.busy = true; -} - -void intel_mark_idle(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->mm.busy) - return; - - dev_priv->mm.busy = false; - - if (INTEL_INFO(dev)->gen >= 6) - gen6_rps_idle(dev->dev_private); - - intel_runtime_pm_put(dev_priv); -} - static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = intel_crtc->unpin_work; - intel_crtc->unpin_work = NULL; + work = intel_crtc->flip_work; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); if (work) { - cancel_work_sync(&work->work); + cancel_work_sync(&work->mmio_work); + cancel_work_sync(&work->unpin_work); kfree(work); } @@ -10878,12 +10942,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) static void intel_unpin_work_fn(struct work_struct *__work) { - struct intel_unpin_work *work = - container_of(__work, struct intel_unpin_work, work); + struct intel_flip_work *work = + container_of(__work, struct intel_flip_work, unpin_work); struct intel_crtc *crtc = to_intel_crtc(work->crtc); 
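intel_unpin_work becomes intel_flip_work here, with two work items: MMIO flips run mmio_work first and CS flips never use it, which is why the destroy/unpin paths flush or cancel both. A rough sketch of the shape these hunks imply; the layout is a guess beyond the fields they actually reference:

struct intel_flip_work {
	struct work_struct unpin_work;	/* cleanup after the flip completes */
	struct work_struct mmio_work;	/* performs the MMIO flip itself */
	struct drm_crtc *crtc;
	struct drm_framebuffer *old_fb;
	struct drm_i915_gem_object *pending_flip_obj;
	struct drm_i915_gem_request *flip_queued_req;
	struct drm_pending_vblank_event *event;
	atomic_t pending;
	u32 flip_queued_vblank;
	u32 flip_ready_vblank;
	u32 flip_count;
	u32 gtt_offset;
	unsigned int rotation;
};

/* One plausible is_mmio_work(), assuming CS flips leave mmio_work zeroed. */
static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func != NULL;
}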
struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; + if (is_mmio_work(work)) + flush_work(&work->mmio_work); + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); drm_gem_object_unreference(&work->pending_flip_obj->base); @@ -10902,63 +10969,17 @@ static void intel_unpin_work_fn(struct work_struct *__work) kfree(work); } -static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - unsigned long flags; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - /* - * This is called both by irq handlers and the reset code (to complete - * lost pageflips) so needs the full irqsave spinlocks. - */ - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - /* Ensure we don't miss a work->pending update ... */ - smp_rmb(); - - if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - page_flip_completed(intel_crtc); - - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -void intel_finish_page_flip(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - - do_intel_finish_page_flip(dev, crtc); -} - -void intel_finish_page_flip_plane(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - - do_intel_finish_page_flip(dev, crtc); -} - /* Is 'a' after or equal to 'b'? */ static bool g4x_flip_count_after_eq(u32 a, u32 b) { return !((a - b) & 0x80000000); } -static bool page_flip_finished(struct intel_crtc *crtc) +static bool __pageflip_finished_cs(struct intel_crtc *crtc, + struct intel_flip_work *work) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned reset_counter; reset_counter = i915_reset_counter(&dev_priv->gpu_error); @@ -10997,40 +11018,103 @@ static bool page_flip_finished(struct intel_crtc *crtc) * anyway, we don't really care. */ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == - crtc->unpin_work->gtt_offset && + crtc->flip_work->gtt_offset && g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), - crtc->unpin_work->flip_count); + crtc->flip_work->flip_count); } -void intel_prepare_page_flip(struct drm_device *dev, int plane) +static bool +__pageflip_finished_mmio(struct intel_crtc *crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + /* + * MMIO work completes when vblank is different from + * flip_queued_vblank. + * + * Reset counter value doesn't matter, this is handled by + * i915_wait_request finishing early, so no need to handle + * reset here. 
+ */ + return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; +} + + +static bool pageflip_finished(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + if (!atomic_read(&work->pending)) + return false; + + smp_rmb(); + + if (is_mmio_work(work)) + return __pageflip_finished_mmio(crtc, work); + else + return __pageflip_finished_cs(crtc, work); +} + +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = &dev_priv->drm; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; unsigned long flags; + /* Ignore early vblank irqs */ + if (!crtc) + return; /* * This is called both by irq handlers and the reset code (to complete * lost pageflips) so needs the full irqsave spinlocks. - * - * NB: An MMIO update of the plane base pointer will also - * generate a page-flip completion irq, i.e. every modeset - * is also accompanied by a spurious intel_prepare_page_flip(). */ spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) - atomic_inc_not_zero(&intel_crtc->unpin_work->pending); + work = intel_crtc->flip_work; + + if (work != NULL && + !is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + spin_unlock_irqrestore(&dev->event_lock, flags); } -static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) { + struct drm_device *dev = &dev_priv->drm; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; + unsigned long flags; + + /* Ignore early vblank irqs */ + if (!crtc) + return; + + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. + */ + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->flip_work; + + if (work != NULL && + is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); + /* Ensure that the work item is consistent when activating it ... */ - smp_wmb(); - atomic_set(&work->pending, INTEL_FLIP_PENDING); - /* and that it is marked active as soon as the irq could fire. 
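The pending handshake between intel_mark_page_flip_active() and pageflip_finished() is ordering-sensitive; the barrier pairing, in miniature (a sketch of the intent as shown in these hunks, not new code):

/*
 * Writer, at flip activation:
 *	work->flip_queued_vblank = vblank;
 *	smp_mb__before_atomic();	 orders the stores before pending
 *	atomic_set(&work->pending, 1);
 *
 * Reader, in the irq and stall-check paths:
 *	if (!atomic_read(&work->pending))
 *		return false;
 *	smp_rmb();			 pairs with the writer's barrier
 *	... work->flip_queued_vblank is now safe to use ...
 */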
*/ - smp_wmb(); + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); } static int intel_gen2_queue_flip(struct drm_device *dev, @@ -11061,10 +11145,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, 0); /* aux display base address, unused */ - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11093,10 +11176,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, MI_NOOP); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11108,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, uint32_t flags) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; int ret; @@ -11124,7 +11206,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11135,7 +11217,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11147,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, uint32_t flags) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; int ret; @@ -11159,7 +11240,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11171,7 +11252,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11263,16 +11343,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, (MI_NOOP)); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } static bool use_mmio_flip(struct 
intel_engine_cs *engine, struct drm_i915_gem_object *obj) { + struct reservation_object *resv; + /* * This is not being used for older platforms, because * non-availability of flip done interrupt forces us to use @@ -11284,7 +11365,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (engine == NULL) return true; - if (INTEL_INFO(engine->dev)->gen < 5) + if (INTEL_GEN(engine->i915) < 5) return false; if (i915.use_mmio_flip < 0) @@ -11293,20 +11374,20 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, return true; else if (i915.enable_execlists) return true; - else if (obj->base.dma_buf && - !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, - false)) + + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv && !reservation_object_test_signaled_rcu(resv, false)) return true; - else - return engine != i915_gem_request_get_engine(obj->last_write_req); + + return engine != i915_gem_request_get_engine(obj->last_write_req); } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, unsigned int rotation, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb = intel_crtc->base.primary->fb; const enum pipe pipe = intel_crtc->pipe; u32 ctl, stride, tile_height; @@ -11355,10 +11436,10 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_framebuffer *intel_fb = to_intel_framebuffer(intel_crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; @@ -11378,78 +11459,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, POSTING_READ(DSPSURF(intel_crtc->plane)); } -/* - * XXX: This is the temporary way to update the plane registers until we get - * around to using the usual plane update functions for MMIO flips - */ -static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) -{ - struct intel_crtc *crtc = mmio_flip->crtc; - struct intel_unpin_work *work; - - spin_lock_irq(&crtc->base.dev->event_lock); - work = crtc->unpin_work; - spin_unlock_irq(&crtc->base.dev->event_lock); - if (work == NULL) - return; - - intel_mark_page_flip_active(work); - - intel_pipe_update_start(crtc); - - if (INTEL_INFO(mmio_flip->i915)->gen >= 9) - skl_do_mmio_flip(crtc, mmio_flip->rotation, work); - else - /* use_mmio_flip() retricts MMIO flips to ilk+ */ - ilk_do_mmio_flip(crtc, work); - - intel_pipe_update_end(crtc); -} - -static void intel_mmio_flip_work_func(struct work_struct *work) +static void intel_mmio_flip_work_func(struct work_struct *w) { - struct intel_mmio_flip *mmio_flip = - container_of(work, struct intel_mmio_flip, work); + struct intel_flip_work *work = + container_of(w, struct intel_flip_work, mmio_work); + struct intel_crtc *crtc = to_intel_crtc(work->crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_framebuffer *intel_fb = - to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); + to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; + struct reservation_object *resv; - if (mmio_flip->req) { - WARN_ON(__i915_wait_request(mmio_flip->req, + if (work->flip_queued_req) 
+ WARN_ON(__i915_wait_request(work->flip_queued_req, false, NULL, - &mmio_flip->i915->rps.mmioflips)); - i915_gem_request_unreference__unlocked(mmio_flip->req); - } + &dev_priv->rps.mmioflips)); /* For framebuffer backed by dmabuf, wait for fence */ - if (obj->base.dma_buf) - WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, - false, false, + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv) + WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, MAX_SCHEDULE_TIMEOUT) < 0); - intel_do_mmio_flip(mmio_flip); - kfree(mmio_flip); -} - -static int intel_queue_mmio_flip(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_i915_gem_object *obj) -{ - struct intel_mmio_flip *mmio_flip; - - mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL); - if (mmio_flip == NULL) - return -ENOMEM; - - mmio_flip->i915 = to_i915(dev); - mmio_flip->req = i915_gem_request_reference(obj->last_write_req); - mmio_flip->crtc = to_intel_crtc(crtc); - mmio_flip->rotation = crtc->primary->state->rotation; + intel_pipe_update_start(crtc); - INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); - schedule_work(&mmio_flip->work); + if (INTEL_GEN(dev_priv) >= 9) + skl_do_mmio_flip(crtc, work->rotation, work); + else + /* use_mmio_flip() restricts MMIO flips to ilk+ */ + ilk_do_mmio_flip(crtc, work); - return 0; + intel_pipe_update_end(crtc, work); } static int intel_default_queue_flip(struct drm_device *dev, @@ -11462,37 +11502,32 @@ static int intel_default_queue_flip(struct drm_device *dev, return -ENODEV; } -static bool __intel_pageflip_stall_check(struct drm_device *dev, - struct drm_crtc *crtc) +static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work = intel_crtc->unpin_work; - u32 addr; - - if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) - return true; + u32 addr, vblank; - if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) + if (!atomic_read(&work->pending)) return false; - if (!work->enable_stall_check) - return false; + smp_rmb(); + vblank = intel_crtc_get_vblank_counter(intel_crtc); if (work->flip_ready_vblank == 0) { if (work->flip_queued_req && - !i915_gem_request_completed(work->flip_queued_req, true)) + !i915_gem_request_completed(work->flip_queued_req)) return false; - work->flip_ready_vblank = drm_crtc_vblank_count(crtc); + work->flip_ready_vblank = vblank; } - if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) + if (vblank - work->flip_ready_vblank < 3) return false; /* Potential stall - if we see that the flip has happened, * assume a missed interrupt.
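Both counters in the stall check rely on wrap-safe unsigned arithmetic: the vblank age test subtracts before comparing, and g4x_flip_count_after_eq() (defined a little earlier in this diff) masks the sign bit of the difference. Worked values:

/* g4x_flip_count_after_eq(a, b) is !((a - b) & 0x80000000). */
static void example_wrap_checks(void)
{
	u32 vblank = 0x00000001, ready = 0xffffffff;

	WARN_ON(!g4x_flip_count_after_eq(0x00000001, 0xffffffff)); /* a is 2 past b, across the wrap */
	WARN_ON(g4x_flip_count_after_eq(0xffffffff, 0x00000001));  /* a is behind b */
	WARN_ON(vblank - ready != 2); /* so the "< 3" stall threshold still holds across the wrap */
}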
*/ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); else addr = I915_READ(DSPADDR(intel_crtc->plane)); @@ -11504,12 +11539,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, return addr == work->gtt_offset; } -void intel_check_page_flip(struct drm_device *dev, int pipe) +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; + struct intel_flip_work *work; WARN_ON(!in_interrupt()); @@ -11517,16 +11552,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) return; spin_lock(&dev->event_lock); - work = intel_crtc->unpin_work; - if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { - WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - work->flip_queued_vblank, drm_vblank_count(dev, pipe)); + work = intel_crtc->flip_work; + + if (work != NULL && !is_mmio_work(work) && + __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { + WARN_ONCE(1, + "Kicking stuck page flip: queued at %d, now %d\n", + work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); page_flip_completed(intel_crtc); work = NULL; } - if (work != NULL && - drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) - intel_queue_rps_boost_for_request(dev, work->flip_queued_req); + + if (work != NULL && !is_mmio_work(work) && + intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) + intel_queue_rps_boost_for_request(work->flip_queued_req); spin_unlock(&dev->event_lock); } @@ -11536,13 +11575,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *old_fb = crtc->primary->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *primary = crtc->primary; enum pipe pipe = intel_crtc->pipe; - struct intel_unpin_work *work; + struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request = NULL; @@ -11579,19 +11618,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; work->old_fb = old_fb; - INIT_WORK(&work->work, intel_unpin_work_fn); + INIT_WORK(&work->unpin_work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); if (ret) goto free_work; - /* We borrow the event spin lock for protecting unpin_work */ + /* We borrow the event spin lock for protecting flip_work */ spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + if (intel_crtc->flip_work) { /* Before declaring the flip queue wedged, check if * the hardware completed the operation behind our backs. 
*/ - if (__intel_pageflip_stall_check(dev, crtc)) { + if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); page_flip_completed(intel_crtc); } else { @@ -11603,7 +11642,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; } } - intel_crtc->unpin_work = work; + intel_crtc->flip_work = work; spin_unlock_irq(&dev->event_lock); if (atomic_read(&intel_crtc->unpin_work_count) >= 2) @@ -11615,7 +11654,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->primary->fb = fb; update_state_fb(crtc->primary); - intel_fbc_pre_update(intel_crtc); + + intel_fbc_pre_update(intel_crtc, intel_crtc->config, + to_intel_plane_state(primary->state)); work->pending_flip_obj = obj; @@ -11658,6 +11699,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, */ if (!mmio_flip) { ret = i915_gem_object_sync(obj, engine, &request); + if (!ret && !request) { + request = i915_gem_request_alloc(engine, NULL); + ret = PTR_ERR_OR_ZERO(request); + } + if (ret) goto cleanup_pending; } @@ -11669,38 +11715,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj, 0); work->gtt_offset += intel_crtc->dspaddr_offset; + work->rotation = crtc->primary->state->rotation; if (mmio_flip) { - ret = intel_queue_mmio_flip(dev, crtc, obj); - if (ret) - goto cleanup_unpin; + INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); i915_gem_request_assign(&work->flip_queued_req, obj->last_write_req); - } else { - if (!request) { - request = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(request)) { - ret = PTR_ERR(request); - goto cleanup_unpin; - } - } + schedule_work(&work->mmio_work); + } else { + i915_gem_request_assign(&work->flip_queued_req, request); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) goto cleanup_unpin; - i915_gem_request_assign(&work->flip_queued_req, request); - } + intel_mark_page_flip_active(intel_crtc, work); - if (request) i915_add_request_no_flush(request); + } - work->flip_queued_vblank = drm_crtc_vblank_count(crtc); - work->enable_stall_check = true; - - i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, + i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); @@ -11726,7 +11762,7 @@ cleanup: drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); @@ -11828,15 +11864,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); - int idx = intel_crtc->base.base.id, ret; bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; + int ret; - if (crtc_state && INTEL_INFO(dev)->gen >= 9 && - plane->type != DRM_PLANE_TYPE_CURSOR) { + if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { ret = skl_update_scaler_plane( to_intel_crtc_state(crtc_state), to_intel_plane_state(plane_state)); @@ -11854,6 +11889,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * Visibility is calculated as if the crtc was on, but * after scaler setup 
everything depends on it being off * when the crtc isn't active. + * + * FIXME this is wrong for watermarks. Watermarks should also + * be computed as if the pipe would be active. Perhaps move + * per-plane wm computation to the .check_plane() hook, and + * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) to_intel_plane_state(plane_state)->visible = visible = false; @@ -11867,11 +11907,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, - plane->base.id, fb ? fb->base.id : -1); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + intel_crtc->base.base.id, + intel_crtc->base.name, + plane->base.id, plane->name, + fb ? fb->base.id : -1); - DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", - plane->base.id, was_visible, visible, + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + plane->base.id, plane->name, + was_visible, visible, turn_off, turn_on, mode_changed); if (turn_on) { @@ -11944,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state, return true; } -static bool check_encoder_cloning(struct drm_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_encoder *encoder; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int i; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != &crtc->base) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - if (!check_single_encoder_cloning(state, crtc, encoder)) - return false; - } - - return true; -} - static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); @@ -11976,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, int ret; bool mode_changed = needs_modeset(crtc_state); - if (mode_changed && !check_encoder_cloning(state, intel_crtc)) { - DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); - return -EINVAL; - } - if (mode_changed && !crtc_state->active) pipe_config->update_wm_post = true; @@ -12033,7 +12052,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } } else if (dev_priv->display.compute_intermediate_wm) { if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) - pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; + pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } if (INTEL_INFO(dev)->gen >= 9) { @@ -12168,7 +12187,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", + crtc->base.base.id, crtc->base.name, context, pipe_config, pipe_name(crtc->pipe)); DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); @@ -12181,14 +12201,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, pipe_config->fdi_m_n.tu); DRM_DEBUG_KMS("dp: %i, 
lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - pipe_config->has_dp_encoder, + intel_crtc_has_dp_encoder(pipe_config), pipe_config->lane_count, pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, pipe_config->dp_m_n.tu); DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", - pipe_config->has_dp_encoder, + intel_crtc_has_dp_encoder(pipe_config), pipe_config->lane_count, pipe_config->dp_m2_n2.gmch_m, pipe_config->dp_m2_n2.gmch_n, @@ -12269,29 +12289,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, state = to_intel_plane_state(plane->state); fb = state->base.fb; if (!fb) { - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " - "disabled, scaler_id = %d\n", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1, - drm_plane_index(plane), state->scaler_id); + DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", + plane->base.id, plane->name, state->scaler_id); continue; } - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - crtc->base.primary == plane ? 0 : intel_plane->plane + 1, - drm_plane_index(plane)); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", - fb->base.id, fb->width, fb->height, fb->pixel_format); - DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", - state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), drm_rect_height(&state->dst)); + DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", + plane->base.id, plane->name); + DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->pixel_format)); + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->src.x1 >> 16, state->src.y1 >> 16, + drm_rect_width(&state->src) >> 16, + drm_rect_height(&state->src) >> 16, + state->dst.x1, state->dst.y1, + drm_rect_width(&state->dst), + drm_rect_height(&state->dst)); } } @@ -12326,7 +12341,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) case INTEL_OUTPUT_UNKNOWN: if (WARN_ON(!HAS_DDI(dev))) break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: port_mask = 1 << enc_to_dig_port(&encoder->base)->port; @@ -12423,6 +12438,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, &pipe_config->pipe_src_w, &pipe_config->pipe_src_h); + for_each_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc != crtc) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + + if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { + DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); + goto fail; + } + + /* + * Determine output_types before calling the .compute_config() + * hooks so that the hooks can use this information safely. + */ + pipe_config->output_types |= 1 << encoder->type; + } + encoder_retry: /* Ensure the port clock defaults are reset when retrying. 
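For context: this hunk is part of replacing the old has_dp_encoder/has_dsi_encoder booleans with the output_types bitmask filled in just above. A minimal sketch of the predicate helpers the new calls rely on (declared in intel_drv.h elsewhere in this series; shown here for illustration only):

static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
		    enum intel_output_type type)
{
	return crtc_state->output_types & (1 << type);
}

static inline bool
intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
{
	/* Any DP-like output sets one of these bits in output_types. */
	return crtc_state->output_types &
		((1 << INTEL_OUTPUT_DP) |
		 (1 << INTEL_OUTPUT_DP_MST) |
		 (1 << INTEL_OUTPUT_EDP));
}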
*/ pipe_config->port_clock = 0; @@ -12708,8 +12741,8 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(fdi_lanes); PIPE_CONF_CHECK_M_N(fdi_m_n); - PIPE_CONF_CHECK_I(has_dp_encoder); PIPE_CONF_CHECK_I(lane_count); + PIPE_CONF_CHECK_X(lane_lat_optim_mask); if (INTEL_INFO(dev)->gen < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); @@ -12719,7 +12752,7 @@ intel_pipe_config_compare(struct drm_device *dev, } else PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); - PIPE_CONF_CHECK_I(has_dsi_encoder); + PIPE_CONF_CHECK_X(output_types); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); @@ -12838,7 +12871,7 @@ static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation hw_ddb, *sw_ddb; struct skl_ddb_entry *hw_entry, *sw_entry; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -12944,7 +12977,7 @@ verify_crtc_state(struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config, *sw_config; @@ -12958,7 +12991,7 @@ verify_crtc_state(struct drm_crtc *crtc, pipe_config->base.crtc = crtc; pipe_config->base.state = old_state; - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); @@ -12987,8 +13020,10 @@ verify_crtc_state(struct drm_crtc *crtc, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); - if (active) + if (active) { + pipe_config->output_types |= 1 << encoder->type; encoder->get_config(encoder, pipe_config); + } } if (!new_crtc_state->active) @@ -13067,7 +13102,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); @@ -13106,7 +13141,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc, static void verify_disabled_dpll_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_shared_dpll; i++) @@ -13153,7 +13188,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) crtc->scanline_offset = vtotal - 1; } else if (HAS_DDI(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; } else crtc->scanline_offset = 1; @@ -13288,7 +13323,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) static int intel_modeset_checks(struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(state->dev); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int ret = 0, i; @@ -13306,6 +13341,9 @@ static int intel_modeset_checks(struct 
drm_atomic_state *state) intel_state->active_crtcs |= 1 << i; else intel_state->active_crtcs &= ~(1 << i); + + if (crtc_state->active != crtc->state->active) + intel_state->active_pipe_changes |= drm_crtc_mask(crtc); } /* @@ -13316,9 +13354,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. */ if (dev_priv->display.modeset_calc_cdclk) { + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco; + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq; + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret < 0) + return ret; - if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) + if (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) ret = intel_modeset_all_pipes(state); if (ret < 0) @@ -13342,38 +13388,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * phase. The code here should be run after the per-crtc and per-plane 'check' * handlers to ensure that all derived state has been updated. */ -static void calc_watermark_data(struct drm_atomic_state *state) +static int calc_watermark_data(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - struct drm_plane *plane; - struct drm_plane_state *pstate; - - /* - * Calculate watermark configuration details now that derived - * plane/crtc state is all properly updated. - */ - drm_for_each_crtc(crtc, dev) { - cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: - crtc->state; - - if (cstate->active) - intel_state->wm_config.num_pipes_active++; - } - drm_for_each_legacy_plane(plane, dev) { - pstate = drm_atomic_get_existing_plane_state(state, plane) ?: - plane->state; + struct drm_i915_private *dev_priv = to_i915(dev); - if (!to_intel_plane_state(pstate)->visible) - continue; + /* Is there platform-specific watermark information to calculate? */ + if (dev_priv->display.compute_global_watermarks) + return dev_priv->display.compute_global_watermarks(state); - intel_state->wm_config.sprites_enabled = true; - if (pstate->crtc_w != pstate->src_w >> 16 || - pstate->crtc_h != pstate->src_h >> 16) - intel_state->wm_config.sprites_scaled = true; - } + return 0; } /** @@ -13403,14 +13427,13 @@ static int intel_atomic_check(struct drm_device *dev, if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; - if (!crtc_state->enable) { - if (needs_modeset(crtc_state)) - any_ms = true; + if (!needs_modeset(crtc_state)) continue; - } - if (!needs_modeset(crtc_state)) + if (!crtc_state->enable) { + any_ms = true; continue; + } /* FIXME: For only active_changed we shouldn't need to do any * state recomputation at all. 
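The calc_watermark_data() rewrite above turns global watermark derivation into an optional per-platform hook. As a sketch, a platform opts in during display setup roughly like this (assuming the gen9 implementation is named skl_compute_wm, as elsewhere in this series):

	/* in intel_init_pm() or similar; illustrative wiring only */
	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.compute_global_watermarks = skl_compute_wm;

Platforms that leave the hook NULL simply skip global watermark recomputation, which is exactly what the early return in the new calc_watermark_data() implements.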
*/ @@ -13420,8 +13443,11 @@ static int intel_atomic_check(struct drm_device *dev, return ret; ret = intel_modeset_pipe_config(crtc, pipe_config); - if (ret) + if (ret) { + intel_dump_pipe_config(to_intel_crtc(crtc), + pipe_config, "[failed]"); return ret; + } if (i915.fastboot && intel_pipe_config_compare(dev, @@ -13431,13 +13457,12 @@ static int intel_atomic_check(struct drm_device *dev, to_intel_crtc_state(crtc_state)->update_pipe = true; } - if (needs_modeset(crtc_state)) { + if (needs_modeset(crtc_state)) any_ms = true; - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - return ret; - } + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + return ret; intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, needs_modeset(crtc_state) ? @@ -13457,27 +13482,20 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, state); - calc_watermark_data(state); - - return 0; + return calc_watermark_data(state); } static int intel_atomic_prepare_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_plane_state *plane_state; struct drm_crtc_state *crtc_state; struct drm_plane *plane; struct drm_crtc *crtc; int i, ret; - if (nonblock) { - DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n"); - return -EINVAL; - } - for_each_crtc_in_state(state, crtc, crtc_state, i) { if (state->legacy_cursor_update) continue; @@ -13521,6 +13539,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, return ret; } +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + + if (!dev->max_vblank_count) + return drm_accurate_vblank_count(&crtc->base); + + return dev->driver->get_vblank_counter(dev, crtc->pipe); +} + static void intel_atomic_wait_for_vblanks(struct drm_device *dev, struct drm_i915_private *dev_priv, unsigned crtc_mask) @@ -13586,45 +13614,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) return false; } -/** - * intel_atomic_commit - commit validated state object - * @dev: DRM device - * @state: the top-level driver state object - * @nonblock: nonblocking commit - * - * This function commits a top-level state object that has been validated - * with drm_atomic_helper_check(). - * - * FIXME: Atomic modeset support for i915 is not yet complete. At the moment - * we can only handle plane-related operations and do not yet support - * nonblocking commit. - * - * RETURNS - * Zero for success or -errno. 
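The new intel_crtc_get_vblank_counter() helper above hides the difference between platforms with a usable hardware frame counter and those where dev->max_vblank_count is zero, where it falls back to the software-interpolated drm_accurate_vblank_count(). A hypothetical caller (field name illustrative) can then stamp work items unconditionally:

	/* sketch: record the vblank at which the flip was queued */
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(intel_crtc);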
- */ -static int intel_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) +static void intel_atomic_commit_tail(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc_state *old_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; - int ret = 0, i; + struct drm_plane *plane; + struct drm_plane_state *plane_state; bool hw_check = intel_state->modeset; unsigned long put_domains[I915_MAX_PIPES] = {}; unsigned crtc_vblank_mask = 0; + int i, ret; - ret = intel_atomic_prepare_commit(dev, state, nonblock); - if (ret) { - DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); - return ret; + for_each_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *intel_plane_state = + to_intel_plane_state(plane_state); + + if (!intel_plane_state->wait_req) + continue; + + ret = __i915_wait_request(intel_plane_state->wait_req, + true, NULL, NULL); + /* EIO should be eaten, and we can't get interrupted in the + * worker, and blocking commits have waited already. */ + WARN_ON(ret); } - drm_atomic_helper_swap_state(dev, state); - dev_priv->wm.config = intel_state->wm_config; - intel_shared_dpll_commit(state); + drm_atomic_helper_wait_for_dependencies(state); if (intel_state->modeset) { memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, @@ -13679,7 +13698,8 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_update_legacy_modeset_state(state->dev, state); if (dev_priv->display.modeset_commit_cdclk && - intel_state->dev_cdclk != dev_priv->cdclk_freq) + (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) dev_priv->display.modeset_commit_cdclk(state); intel_modeset_verify_disabled(dev); @@ -13691,30 +13711,44 @@ static int intel_atomic_commit(struct drm_device *dev, bool modeset = needs_modeset(crtc->state); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); - bool update_pipe = !modeset && pipe_config->update_pipe; if (modeset && crtc->state->active) { update_scanline_offset(to_intel_crtc(crtc)); dev_priv->display.crtc_enable(crtc); } + /* Complete events for now-disabled pipes here. */ + if (modeset && !crtc->state->active && crtc->state->event) { + spin_lock_irq(&dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&dev->event_lock); + + crtc->state->event = NULL; + } + if (!modeset) intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); if (crtc->state->active && drm_atomic_get_existing_plane_state(state, crtc->primary)) - intel_fbc_enable(intel_crtc); + intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); - if (crtc->state->active && - (crtc->state->planes_changed || update_pipe)) + if (crtc->state->active) drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); if (pipe_config->base.active && needs_vblank_wait(pipe_config)) crtc_vblank_mask |= 1 << i; } - /* FIXME: add subpixel order */ - + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here + * already, but still need the state for the delayed optimization. To + * fix this: + * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+ - schedule that vblank worker _before_ calling hw_done + - at the start of commit_tail, cancel it _synchronously + - switch over to the vblank wait helper in the core after that since + we don't need our special handling any more. + */ if (!state->legacy_cursor_update) intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); @@ -13741,6 +13775,8 @@ static int intel_atomic_commit(struct drm_device *dev, intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); } + drm_atomic_helper_commit_hw_done(state); + if (intel_state->modeset) intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); @@ -13748,6 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_cleanup_planes(dev, state); mutex_unlock(&dev->struct_mutex); + drm_atomic_helper_commit_cleanup_done(state); + drm_atomic_state_free(state); /* As one of the primary mmio accessors, KMS has a high likelihood @@ -13762,6 +13800,86 @@ static int intel_atomic_commit(struct drm_device *dev, * can happen also when the device is completely off. */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); +} + +static void intel_atomic_commit_work(struct work_struct *work) +{ + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + intel_atomic_commit_tail(state); +} + +static void intel_atomic_track_fbs(struct drm_atomic_state *state) +{ + struct drm_plane_state *old_plane_state; + struct drm_plane *plane; + struct drm_i915_gem_object *obj, *old_obj; + struct intel_plane *intel_plane; + int i; + + mutex_lock(&state->dev->struct_mutex); + for_each_plane_in_state(state, plane, old_plane_state, i) { + obj = intel_fb_obj(plane->state->fb); + old_obj = intel_fb_obj(old_plane_state->fb); + intel_plane = to_intel_plane(plane); + + i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + } + mutex_unlock(&state->dev->struct_mutex); +} + +/** + * intel_atomic_commit - commit validated state object + * @dev: DRM device + * @state: the top-level driver state object + * @nonblock: nonblocking commit + * + * This function commits a top-level state object that has been validated + * with drm_atomic_helper_check(). + * + * FIXME: Atomic modeset support for i915 is not yet complete. At the moment + * nonblocking commits are only safe for pure plane updates. Everything else + * should work though. + * + * RETURNS + * Zero for success or -errno.
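The nonblocking machinery added here relies on the drm_atomic_helper commit-tracking calls pairing up in a fixed order. Condensed from intel_atomic_commit_tail() above, the contract is:

	drm_atomic_helper_wait_for_dependencies(state);	/* order vs. prior commits */
	/* ... program planes, pipes, cdclk ... */
	drm_atomic_helper_commit_hw_done(state);	/* unblocks the next commit */
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);	/* commit object now idle */
	drm_atomic_state_free(state);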
+ */ +static int intel_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(dev); + int ret = 0; + + if (intel_state->modeset && nonblock) { + DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); + return -EINVAL; + } + + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, intel_atomic_commit_work); + + ret = intel_atomic_prepare_commit(dev, state, nonblock); + if (ret) { + DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); + return ret; + } + + drm_atomic_helper_swap_state(state, true); + dev_priv->wm.distrust_bios_wm = false; + dev_priv->wm.skl_results = intel_state->wm_results; + intel_shared_dpll_commit(state); + intel_atomic_track_fbs(state); + + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + intel_atomic_commit_tail(state); return 0; } @@ -13775,8 +13893,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc) state = drm_atomic_state_alloc(dev); if (!state) { - DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", + crtc->base.id, crtc->name); return; } @@ -13806,8 +13924,50 @@ out: #undef for_each_intel_crtc_masked +/* + * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling + * drm_atomic_helper_legacy_gamma_set() directly. + */ +static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + struct drm_crtc_state *state; + int ret; + + ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size); + if (ret) + return ret; + + /* + * Make sure we update the legacy properties so this works when + * atomic is not enabled. + */ + + state = crtc->state; + + drm_object_property_set_value(&crtc->base, + config->degamma_lut_property, + (state->degamma_lut) ? + state->degamma_lut->base.id : 0); + + drm_object_property_set_value(&crtc->base, + config->ctm_property, + (state->ctm) ? + state->ctm->base.id : 0); + + drm_object_property_set_value(&crtc->base, + config->gamma_lut_property, + (state->gamma_lut) ? 
+ state->gamma_lut->base.id : 0); + + return 0; +} + static const struct drm_crtc_funcs intel_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, + .gamma_set = intel_atomic_legacy_gamma_set, .set_config = drm_atomic_helper_set_config, .set_property = drm_atomic_helper_crtc_set_property, .destroy = intel_crtc_destroy, @@ -13836,9 +13996,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, { struct drm_device *dev = plane->dev; struct drm_framebuffer *fb = new_state->fb; - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); + struct reservation_object *resv; int ret = 0; if (!obj && !old_obj) @@ -13868,12 +14028,15 @@ intel_prepare_plane_fb(struct drm_plane *plane, } } + if (!obj) + return 0; + /* For framebuffer backed by dmabuf, wait for fence */ - if (obj && obj->base.dma_buf) { + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv) { long lret; - lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, - false, true, + lret = reservation_object_wait_timeout_rcu(resv, false, true, MAX_SCHEDULE_TIMEOUT); if (lret == -ERESTARTSYS) return lret; @@ -13881,9 +14044,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, WARN(lret < 0, "waiting returns %li\n", lret); } - if (!obj) { - ret = 0; - } else if (plane->type == DRM_PLANE_TYPE_CURSOR && + if (plane->type == DRM_PLANE_TYPE_CURSOR && INTEL_INFO(dev)->cursor_needs_physical) { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); @@ -13894,15 +14055,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, } if (ret == 0) { - if (obj) { - struct intel_plane_state *plane_state = - to_intel_plane_state(new_state); + struct intel_plane_state *plane_state = + to_intel_plane_state(new_state); - i915_gem_request_assign(&plane_state->wait_req, - obj->last_write_req); - } - - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + i915_gem_request_assign(&plane_state->wait_req, + obj->last_write_req); } return ret; @@ -13922,7 +14079,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, const struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; - struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane_state *old_intel_state; struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); @@ -13936,11 +14092,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, !INTEL_INFO(dev)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state->rotation); - /* prepare_fb aborted? 
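The prepare_fb change above waits for dma-buf fences through the object's reservation object instead of reaching into obj->base.dma_buf->resv directly. Stripped of driver detail, the wait pattern is (sketch; error handling condensed):

	struct reservation_object *resv = i915_gem_object_get_dmabuf_resv(obj);

	if (resv) {
		/* wait_all=false: only the exclusive (write) fence matters;
		 * intr=true: the wait may be interrupted by signals */
		long lret = reservation_object_wait_timeout_rcu(resv,
						false, true,
						MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;
		WARN(lret < 0, "waiting returns %li\n", lret);
	}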
*/ - if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || - (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit))) - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); - i915_gem_request_assign(&old_intel_state->wait_req, NULL); } @@ -13948,15 +14099,11 @@ int skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) { int max_scale; - struct drm_device *dev; - struct drm_i915_private *dev_priv; int crtc_clock, cdclk; if (!intel_crtc || !crtc_state->base.enable) return DRM_PLANE_HELPER_NO_SCALING; - dev = intel_crtc->base.dev; - dev_priv = dev->dev_private; crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; @@ -13996,6 +14143,7 @@ intel_check_primary_plane(struct drm_plane *plane, return drm_plane_helper_check_update(plane, crtc, fb, &state->src, &state->dst, &state->clip, + state->base.rotation, min_scale, max_scale, can_position, true, &state->visible); @@ -14032,7 +14180,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(intel_crtc, NULL); } /** @@ -14044,9 +14192,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, */ void intel_plane_destroy(struct drm_plane *plane) { - struct intel_plane *intel_plane = to_intel_plane(plane); + if (!plane) + return; + drm_plane_cleanup(plane); - kfree(intel_plane); + kfree(to_intel_plane(plane)); } const struct drm_plane_funcs intel_plane_funcs = { @@ -14118,10 +14268,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, - intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane 1%c", pipe_name(pipe)); + else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "primary %c", pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane %c", plane_name(primary->plane)); if (ret) goto fail; @@ -14171,6 +14335,7 @@ intel_check_cursor_plane(struct drm_plane *plane, ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, &state->dst, &state->clip, + state->base.rotation, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true, &state->visible); @@ -14279,7 +14444,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR, NULL); + DRM_PLANE_TYPE_CURSOR, + "cursor %c", pipe_name(pipe)); if (ret) goto fail; @@ -14327,7 +14493,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr static void intel_crtc_init(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state = NULL; struct drm_plane *primary = NULL; @@ -14364,7 +14530,8 @@ static void 
intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs, NULL); + cursor, &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); if (ret) goto fail; @@ -14398,10 +14565,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) return; fail: - if (primary) - drm_plane_cleanup(primary); - if (cursor) - drm_plane_cleanup(cursor); + intel_plane_destroy(primary); + intel_plane_destroy(cursor); kfree(crtc_state); kfree(intel_crtc); } @@ -14427,11 +14592,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct intel_crtc *crtc; drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); - - if (!drmmode_crtc) { - DRM_ERROR("no such CRTC id\n"); + if (!drmmode_crtc) return -ENOENT; - } crtc = to_intel_crtc(drmmode_crtc); pipe_from_crtc_id->pipe = crtc->pipe; @@ -14458,7 +14620,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) static bool has_edp_a(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!IS_MOBILE(dev)) return false; @@ -14474,7 +14636,7 @@ static bool has_edp_a(struct drm_device *dev) static bool intel_crt_present(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen >= 9) return false; @@ -14500,10 +14662,15 @@ static bool intel_crt_present(struct drm_device *dev) static void intel_setup_outputs(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; + /* + * intel_edp_init_connector() depends on this completing first, to + * prevent the registration of both eDP and LVDS and the incorrect + * sharing of the PPS.
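Condensed, the probe ordering this comment establishes in intel_setup_outputs() is simply (sketch; the CRT call is illustrative, the code continues below):

	intel_lvds_init(dev);		/* must run before any eDP probing */

	if (intel_crt_present(dev))
		intel_crt_init(dev);
	/* ... DDI/DP/HDMI/TV output probing follows ... */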
+ */ intel_lvds_init(dev); if (intel_crt_present(dev)) @@ -15088,12 +15255,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - if (IS_BROADWELL(dev_priv)) { - dev_priv->display.modeset_commit_cdclk = - broadwell_modeset_commit_cdclk; - dev_priv->display.modeset_calc_cdclk = - broadwell_modeset_calc_cdclk; - } + } + + if (IS_BROADWELL(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + broadwell_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + broadwell_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; @@ -15101,9 +15269,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) valleyview_modeset_calc_cdclk; } else if (IS_BROXTON(dev_priv)) { dev_priv->display.modeset_commit_cdclk = - broxton_modeset_commit_cdclk; + bxt_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = - broxton_modeset_calc_cdclk; + bxt_modeset_calc_cdclk; + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + skl_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + skl_modeset_calc_cdclk; } switch (INTEL_INFO(dev_priv)->gen) { @@ -15142,7 +15315,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) */ static void quirk_pipea_force(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_PIPEA_FORCE; DRM_INFO("applying pipe a force quirk\n"); @@ -15150,7 +15323,7 @@ static void quirk_pipea_force(struct drm_device *dev) static void quirk_pipeb_force(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_PIPEB_FORCE; DRM_INFO("applying pipe b force quirk\n"); @@ -15161,7 +15334,7 @@ static void quirk_pipeb_force(struct drm_device *dev) */ static void quirk_ssc_force_disable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; DRM_INFO("applying lvds SSC disable quirk\n"); } @@ -15172,7 +15345,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev) */ static void quirk_invert_brightness(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; DRM_INFO("applying inverted panel brightness quirk\n"); } @@ -15180,7 +15353,7 @@ static void quirk_invert_brightness(struct drm_device *dev) /* Some VBT's incorrectly indicate no backlight is present */ static void quirk_backlight_present(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; DRM_INFO("applying backlight present quirk\n"); } @@ -15306,7 +15479,7 @@ static void intel_init_quirks(struct drm_device *dev) /* Disable the VGA plane that we never use */ static void i915_disable_vga(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u8 sr1; i915_reg_t vga_reg = i915_vgacntrl_reg(dev); @@ 
-15324,14 +15497,14 @@ static void i915_disable_vga(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_update_cdclk(dev); dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); } /* @@ -15401,7 +15574,6 @@ retry: } /* Write calculated watermark values back */ - to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; for_each_crtc_in_state(state, crtc, cstate, i) { struct intel_crtc_state *cs = to_intel_crtc_state(cstate); @@ -15499,11 +15671,13 @@ void intel_modeset_init(struct drm_device *dev) } intel_update_czclk(dev_priv); - intel_update_rawclk(dev_priv); intel_update_cdclk(dev); intel_shared_dpll_init(dev); + if (dev_priv->max_cdclk_freq == 0) + intel_update_max_cdclk(dev); + /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); @@ -15571,7 +15745,7 @@ static bool intel_check_plane_mapping(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (INTEL_INFO(dev)->num_pipes == 1) @@ -15611,7 +15785,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder) static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; /* Clear any frame start delays used for debugging left by the BIOS */ @@ -15644,8 +15818,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { bool plane; - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", - crtc->base.base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", + crtc->base.base.id, crtc->base.name); /* Pipe has the wrong plane attached and the plane is active. 
* Temporarily change the plane mapping and disable everything @@ -15736,7 +15910,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) void i915_redisable_vga_power_on(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t vga_reg = i915_vgacntrl_reg(dev); if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { @@ -15747,7 +15921,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev) void i915_redisable_vga(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well @@ -15787,7 +15961,7 @@ static void readout_plane_state(struct intel_crtc *crtc) static void intel_modeset_readout_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -15813,26 +15987,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active) { dev_priv->active_crtcs |= 1 << crtc->pipe; - if (IS_BROADWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) pixclk = ilk_pipe_pixel_rate(crtc_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } else if (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_BROXTON(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pixclk = crtc_state->base.adjusted_mode.crtc_clock; else WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); } dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", - crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, crtc->active ? 
"enabled" : "disabled"); } @@ -15858,6 +16030,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; + crtc->config->output_types |= 1 << encoder->type; encoder->get_config(encoder, crtc->config); } else { encoder->base.crtc = NULL; @@ -15942,7 +16115,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) static void intel_modeset_setup_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -16063,15 +16236,16 @@ retry: void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; - intel_init_gt_powersave(dev); + intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); - intel_setup_overlay(dev); + intel_setup_overlay(dev_priv); /* * Make sure any fbs we allocated at startup are properly @@ -16097,26 +16271,36 @@ void intel_modeset_gem_init(struct drm_device *dev) c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); } } +} + +int intel_connector_register(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; + + ret = intel_backlight_device_register(intel_connector); + if (ret) + goto err; - intel_backlight_register(dev); + return 0; + +err: + return ret; } -void intel_connector_unregister(struct intel_connector *intel_connector) +void intel_connector_unregister(struct drm_connector *connector) { - struct drm_connector *connector = &intel_connector->base; + struct intel_connector *intel_connector = to_intel_connector(connector); + intel_backlight_device_unregister(intel_connector); intel_panel_destroy_backlight(connector); - drm_connector_unregister(connector); } void intel_modeset_cleanup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_connector *connector; - - intel_disable_gt_powersave(dev); + struct drm_i915_private *dev_priv = to_i915(dev); - intel_backlight_unregister(dev); + intel_disable_gt_powersave(dev_priv); /* * Interrupts and polling as the first thing to avoid creating havoc. @@ -16138,27 +16322,15 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); - /* destroy the backlight and sysfs files before encoders/connectors */ - for_each_intel_connector(dev, connector) - connector->unregister(connector); - drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev); + intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev); + intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev); } -/* - * Return which encoder is currently attached for connector. - */ -struct drm_encoder *intel_best_encoder(struct drm_connector *connector) -{ - return &intel_attached_encoder(connector)->base; -} - void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { @@ -16172,7 +16344,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, */ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; @@ -16242,9 +16414,8 @@ struct intel_display_error_state { }; struct intel_display_error_state * -intel_display_capture_error_state(struct drm_device *dev) +intel_display_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -16254,14 +16425,14 @@ intel_display_capture_error_state(struct drm_device *dev) }; int i; - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(dev_priv, i) { @@ -16277,25 +16448,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) error->plane[i].addr = I915_READ(DSPADDR(i)); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev)->num_pipes; + error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. 
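Most of the churn in these error-state hunks is one mechanical idiom applied driver-wide; in isolation:

	struct drm_i915_private *dev_priv;

	dev_priv = dev->dev_private;	/* old: untyped reach-through */
	dev_priv = to_i915(dev);	/* new: typed container_of() upcast */

	/* feature tests likewise move off the drm_device... */
	if (INTEL_INFO(dev)->gen >= 4) { }
	/* ...onto the i915 private, often via the INTEL_GEN() shorthand */
	if (INTEL_GEN(dev_priv) >= 4) { }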
*/ @@ -16329,7 +16500,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, struct drm_device *dev, struct intel_display_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; if (!error) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 891107f92d9f..21b04c3eda41 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe); static void intel_dp_unset_edid(struct intel_dp *intel_dp); -static unsigned int intel_dp_unused_lane_mask(int lane_count) -{ - return ~((1 << lane_count) - 1) & 0xf; -} - static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { @@ -267,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; /* @@ -285,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; mutex_unlock(&dev_priv->pps_mutex); @@ -299,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_dp->pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); @@ -373,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); enum pipe pipe; @@ -431,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) return intel_dp->pps_pipe; } +static int +bxt_power_sequencer_idx(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* We should never land here with regular DP ports */ + WARN_ON(!is_edp(intel_dp)); + + /* + * TODO: BXT has 2 PPS instances. The correct port->PPS instance + * mapping needs to be retrieved from VBT, for now just hard-code to + * use instance #0 always. + */ + if (!intel_dp->pps_reset) + return 0; + + intel_dp->pps_reset = false; + + /* + * Only the HW needs to be reprogrammed, the SW state is fixed and + * has been setup during connector init. 
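With the pps_registers lookup introduced just below, each per-register accessor reduces to a table lookup; a hypothetical additional accessor (not in this patch) would follow the same shape as the new _pp_ctrl_reg()/_pp_stat_reg():

static i915_reg_t _pp_on_reg(struct intel_dp *intel_dp)	/* illustrative */
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)),
				intel_dp, &regs);

	return regs.pp_on;
}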
+ */ + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + + return 0; +} + typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -480,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; lockdep_assert_held(&dev_priv->pps_mutex); @@ -512,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); } -void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_encoder *encoder; - if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))) + if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && + !IS_BROXTON(dev))) return; /* @@ -537,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) continue; intel_dp = enc_to_intel_dp(&encoder->base); - intel_dp->pps_pipe = INVALID_PIPE; + if (IS_BROXTON(dev)) + intel_dp->pps_reset = true; + else + intel_dp->pps_pipe = INVALID_PIPE; + } +} + +struct pps_registers { + i915_reg_t pp_ctrl; + i915_reg_t pp_stat; + i915_reg_t pp_on; + i915_reg_t pp_off; + i915_reg_t pp_div; +}; + +static void intel_pps_get_registers(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp, + struct pps_registers *regs) +{ + memset(regs, 0, sizeof(*regs)); + + if (IS_BROXTON(dev_priv)) { + int idx = bxt_power_sequencer_idx(intel_dp); + + regs->pp_ctrl = BXT_PP_CONTROL(idx); + regs->pp_stat = BXT_PP_STATUS(idx); + regs->pp_on = BXT_PP_ON_DELAYS(idx); + regs->pp_off = BXT_PP_OFF_DELAYS(idx); + } else if (HAS_PCH_SPLIT(dev_priv)) { + regs->pp_ctrl = PCH_PP_CONTROL; + regs->pp_stat = PCH_PP_STATUS; + regs->pp_on = PCH_PP_ON_DELAYS; + regs->pp_off = PCH_PP_OFF_DELAYS; + regs->pp_div = PCH_PP_DIVISOR; + } else { + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + + regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe); + regs->pp_stat = VLV_PIPE_PP_STATUS(pipe); + regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe); + regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe); + regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe); } } static i915_reg_t _pp_ctrl_reg(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct pps_registers regs; - if (IS_BROXTON(dev)) - return BXT_PP_CONTROL(0); - else if (HAS_PCH_SPLIT(dev)) - return PCH_PP_CONTROL; - else - return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); + intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, + ®s); + + return regs.pp_ctrl; } static i915_reg_t _pp_stat_reg(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct pps_registers regs; - if (IS_BROXTON(dev)) - return BXT_PP_STATUS(0); - else if (HAS_PCH_SPLIT(dev)) - return PCH_PP_STATUS; - else - return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); + intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, + ®s); + + return regs.pp_stat; } /* Reboot notifier handler to shutdown panel power to guarantee T12 timing @@ -575,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 
edp_notifier); struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!is_edp(intel_dp) || code != SYS_RESTART) return 0; @@ -606,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); lockdep_assert_held(&dev_priv->pps_mutex); @@ -620,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); lockdep_assert_held(&dev_priv->pps_mutex); @@ -635,7 +699,7 @@ static void intel_dp_check_edp(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!is_edp(intel_dp)) return; @@ -653,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t status; bool done; @@ -775,6 +839,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_1600us | DP_AUX_CH_CTL_RECEIVE_ERROR | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); } @@ -785,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t aux_clock_divider; int i, ret, recv_bytes; @@ -1181,48 +1246,21 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp) static void intel_dp_aux_fini(struct intel_dp *intel_dp) { - drm_dp_aux_unregister(&intel_dp->aux); kfree(intel_dp->aux.name); } -static int +static void intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->port; - int ret; intel_aux_reg_init(intel_dp); + drm_dp_aux_init(&intel_dp->aux); + /* Failure to allocate our preferred name is not critical */ intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); - if (!intel_dp->aux.name) - return -ENOMEM; - - intel_dp->aux.dev = connector->base.kdev; intel_dp->aux.transfer = intel_dp_aux_transfer; - - DRM_DEBUG_KMS("registering %s bus for %s\n", - intel_dp->aux.name, - connector->base.kdev->kobj.name); - - ret = drm_dp_aux_register(&intel_dp->aux); - if (ret < 0) { - DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n", - intel_dp->aux.name, ret); - kfree(intel_dp->aux.name); - return ret; - } - - return 0; -} - -static void -intel_dp_connector_unregister(struct intel_connector *intel_connector) -{ - struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - - 
intel_dp_aux_fini(intel_dp); - intel_connector_unregister(intel_connector); } static int @@ -1435,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; @@ -1463,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; - pipe_config->has_dp_encoder = true; pipe_config->has_drrs = false; pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; @@ -1582,6 +1619,27 @@ found: &pipe_config->dp_m2_n2); } + /* + * DPLL0 VCO may need to be adjusted to get the correct + * clock for eDP. This will affect cdclk as well. + */ + if (is_edp(intel_dp) && + (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) { + int vco; + + switch (pipe_config->port_clock / 2) { + case 108000: + case 216000: + vco = 8640000; + break; + default: + vco = 8100000; + break; + } + + to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco; + } + if (!HAS_DDI(dev)) intel_dp_set_clock(encoder, pipe_config); @@ -1598,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, static void intel_dp_prepare(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); @@ -1686,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder) #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) +static void intel_pps_verify_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp); + static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); + intel_pps_verify_state(dev_priv, intel_dp); + pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -1704,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, - 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) + if (intel_wait_for_register(dev_priv, + pp_stat_reg, mask, value, + 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); @@ -1765,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 control; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1788,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) struct drm_device *dev = 
intel_dp_to_dev(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; @@ -1861,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; @@ -1930,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) */ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { - struct drm_i915_private *dev_priv = - intel_dp_to_dev(intel_dp)->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); lockdep_assert_held(&dev_priv->pps_mutex); @@ -1952,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) static void edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2013,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 pp; i915_reg_t pp_ctrl_reg; @@ -2065,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2106,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2222,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) * 2. 
Program DP PLL enable */ if (IS_GEN5(dev_priv)) - intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); + intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe); intel_dp->DP |= DP_PLL_ENABLE; @@ -2287,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 tmp; bool ret; @@ -2340,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); u32 tmp, flags = 0; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); @@ -2378,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder, !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - pipe_config->has_dp_encoder = true; - pipe_config->lane_count = ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; @@ -2460,55 +2521,11 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder) intel_dp_link_down(intel_dp); } -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } -} - static void chv_post_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_dp_link_down(intel_dp); @@ -2527,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = 
intel_dig_port->port; if (HAS_DDI(dev)) { @@ -2607,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, static void intel_dp_enable_port(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc); @@ -2636,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); uint32_t dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; @@ -2709,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder) static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum pipe pipe = intel_dp->pps_pipe; i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); @@ -2735,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2773,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); lockdep_assert_held(&dev_priv->pps_mutex); @@ -2811,266 +2828,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) static void vlv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - 
int pipe = intel_crtc->pipe; - intel_dp_prepare(encoder); - /* Program Tx lane resets to default */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_pll_enable(encoder); } static void chv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - /* Program Tx lane latency optimal setting*/ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - /* Set the upar bit */ - if (intel_crtc->config->lane_count == 1) - data = 0x0; - else - data = (i == 1) ? 
0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - if (intel_crtc->config->lane_count > 2) { - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - } - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - unsigned int lane_mask = - intel_dp_unused_lane_mask(intel_crtc->config->lane_count); - u32 val; - intel_dp_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. 
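The removed stagger programming above picks a delay code from the configured port clock; as a standalone sketch of that mapping (thresholds copied verbatim from the hunk above, the helper name is hypothetical — the driver open-codes this inside the new PHY helper):

        /* Hypothetical helper: map a CHV port clock (in kHz) to the data
         * lane stagger code, mirroring the thresholds in the removed hunk. */
        static unsigned int chv_stagger_code(int port_clock)
        {
                if (port_clock > 270000)
                        return 0x18;
                else if (port_clock > 135000)
                        return 0xd;
                else if (port_clock > 67500)
                        return 0x7;
                else if (port_clock > 33750)
                        return 0x4;
                else
                        return 0x2;
        }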
- */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, lane_mask); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - } - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_dp_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } /* @@ -3089,7 +2878,7 @@ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = dp_to_dig_port(intel_dp)->port; if (IS_BROXTON(dev)) @@ -3178,16 +2967,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = - to_intel_crtc(dport->base.base.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3262,37 +3045,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), - uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, + uniqtranscale_reg_value, 0); return 0; } -static bool chv_need_uniq_trans_scale(uint8_t train_set) -{ - return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 && - (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3; -} - static uint32_t chv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); - u32 deemph_reg_value, margin_reg_value, val; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u32 deemph_reg_value, margin_reg_value; + bool uniq_trans_scale = false; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - int i; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3312,7 +3076,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: deemph_reg_value = 128; margin_reg_value = 154; - /* FIXME extra to set for 1200 */ + uniq_trans_scale = true; break; default: return 0; @@ -3364,88 +3128,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - 
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); - } - - /* Program swing deemph */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - /* Program swing margin */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. 
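Both the VLV and CHV signal-level functions key off the same link-training byte; a minimal sketch of the field decode their switch statements rely on (mask macros are the standard ones from include/drm/drm_dp_helper.h):

        /* Sketch: the first training byte carries both fields inspected
         * by the vswing/pre-emphasis switch statements above. */
        uint8_t train_set = intel_dp->train_set[0];
        uint8_t vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
        uint8_t preemph = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;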
- */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - if (chv_need_uniq_trans_scale(train_set)) - val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; - else - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, deemph_reg_value, + margin_reg_value, uniq_trans_scale); return 0; } @@ -3612,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; uint32_t val; @@ -3634,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A) return; - if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), - 1)) + if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), + DP_TP_STATUS_IDLE_DONE, + DP_TP_STATUS_IDLE_DONE, + 1)) DRM_ERROR("Timed out waiting for DP idle patterns\n"); } @@ -3646,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); enum port port = intel_dig_port->port; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev))) @@ -3698,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -3713,8 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint8_t rev; + struct drm_i915_private *dev_priv = to_i915(dev); if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -3771,6 +3456,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.psr2_support ?
"supported" : "not supported"); } + + /* Read the eDP Display control capabilities registers */ + memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd)); + if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == + sizeof(intel_dp->edp_dpcd))) + DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); } DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", @@ -3778,10 +3472,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) yesno(drm_dp_tps3_supported(intel_dp->dpcd))); /* Intermediate frequency support */ - if (is_edp(intel_dp) && - (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && - (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && - (rev >= 0x03)) { /* eDp v1.4 or higher */ + if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */ __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; int i; @@ -4559,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) } if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; intel_dp_probe_oui(intel_dp); @@ -4635,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) /* MST devices are disconnected from a monitor POV */ intel_dp_unset_edid(intel_dp); if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; return connector_status_disconnected; } @@ -4674,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector) intel_display_power_put(dev_priv, power_domain); if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; } static int intel_dp_get_modes(struct drm_connector *connector) @@ -4723,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); @@ -4811,6 +4502,32 @@ done: return 0; } +static int +intel_dp_connector_register(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + i915_debugfs_connector_add(connector); + + DRM_DEBUG_KMS("registering %s bus for %s\n", + intel_dp->aux.name, connector->kdev->kobj.name); + + intel_dp->aux.dev = connector->kdev; + return drm_dp_aux_register(&intel_dp->aux); +} + +static void +intel_dp_connector_unregister(struct drm_connector *connector) +{ + drm_dp_aux_unregister(&intel_attached_dp(connector)->aux); + intel_connector_unregister(connector); +} + static void intel_dp_connector_destroy(struct drm_connector *connector) { @@ -4851,6 +4568,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) intel_dp->edp_notifier.notifier_call = NULL; } } + + intel_dp_aux_fini(intel_dp); + drm_encoder_cleanup(encoder); kfree(intel_dig_port); } @@ -4876,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = 
dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; lockdep_assert_held(&dev_priv->pps_mutex); @@ -4929,6 +4649,8 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_dp_connector_register, + .early_unregister = intel_dp_connector_unregister, .destroy = intel_dp_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -4937,7 +4659,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -4951,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; enum irqreturn ret = IRQ_NONE; if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && intel_dig_port->base.type != INTEL_OUTPUT_HDMI) - intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; + intel_dig_port->base.type = INTEL_OUTPUT_DP; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { /* @@ -5019,7 +4740,7 @@ put_power: /* check the VBT to see whether the eDP is on another port */ bool intel_dp_is_edp(struct drm_device *dev, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * eDP not supported on g4x. so bail out early just @@ -5061,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) } static void -intel_dp_init_panel_power_sequencer(struct drm_device *dev, - struct intel_dp *intel_dp) +intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp, struct edp_power_seq *seq) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct edp_power_seq cur, vbt, spec, - *final = &intel_dp->pps_delays; u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; - i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; - - lockdep_assert_held(&dev_priv->pps_mutex); - - /* already initialized? */ - if (final->t11_t12 != 0) - return; - - if (IS_BROXTON(dev)) { - /* - * TODO: BXT has 2 sets of PPS registers. - * Correct Register for Broxton need to be identified - * using VBT. 
hardcoding for now - */ - pp_ctrl_reg = BXT_PP_CONTROL(0); - pp_on_reg = BXT_PP_ON_DELAYS(0); - pp_off_reg = BXT_PP_OFF_DELAYS(0); - } else if (HAS_PCH_SPLIT(dev)) { - pp_ctrl_reg = PCH_PP_CONTROL; - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_div_reg = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + struct pps_registers regs; - pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); - pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); - pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); - } + intel_pps_get_registers(dev_priv, intel_dp, &regs); /* Workaround: Need to write PP_CONTROL with the unlock key as * the very first thing. */ pp_ctl = ironlake_get_pp_control(intel_dp); - pp_on = I915_READ(pp_on_reg); - pp_off = I915_READ(pp_off_reg); - if (!IS_BROXTON(dev)) { - I915_WRITE(pp_ctrl_reg, pp_ctl); - pp_div = I915_READ(pp_div_reg); + pp_on = I915_READ(regs.pp_on); + pp_off = I915_READ(regs.pp_off); + if (!IS_BROXTON(dev_priv)) { + I915_WRITE(regs.pp_ctrl, pp_ctl); + pp_div = I915_READ(regs.pp_div); } /* Pull timing values out of registers */ - cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; + seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; - cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; - cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; - cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; + seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> BXT_POWER_CYCLE_DELAY_SHIFT; if (tmp > 0) - cur.t11_t12 = (tmp - 1) * 1000; + seq->t11_t12 = (tmp - 1) * 1000; else - cur.t11_t12 = 0; + seq->t11_t12 = 0; } else { - cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; } +} + +static void +intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) +{ + DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + state_name, + seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); +} + +static void +intel_pps_verify_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp) +{ + struct edp_power_seq hw; + struct edp_power_seq *sw = &intel_dp->pps_delays; + + intel_pps_readout_hw_state(dev_priv, intel_dp, &hw); + + if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || + hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { + DRM_ERROR("PPS state mismatch\n"); + intel_pps_dump_state("sw", sw); + intel_pps_dump_state("hw", &hw); + } +} + +static void +intel_dp_init_panel_power_sequencer(struct drm_device *dev, + struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct edp_power_seq cur, vbt, spec, + *final = &intel_dp->pps_delays; + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* already initialized?
*/ + if (final->t11_t12 != 0) + return; - DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + intel_pps_readout_hw_state(dev_priv, intel_dp, &cur); + + intel_pps_dump_state("cur", &cur); vbt = dev_priv->vbt.edp.pps; @@ -5152,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, * too. */ spec.t11_t12 = (510 + 100) * 10; - DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); + intel_pps_dump_state("vbt", &vbt); /* Use the max of the register settings and vbt. If both are * unset, fall back to the spec limits. */ @@ -5181,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + /* + * We override the HW backlight delays to 1 because we do manual waits + * on them. For T8, even BSpec recommends doing it. For T9, if we + * don't do this, we'll end up waiting for the backlight off delay + * twice: once when we do the manual sleep, and once when we disable + * the panel and wait for the PP_STATUS bit to become zero. + */ + final->t8 = 1; + final->t9 = 1; } static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp_on, pp_off, pp_div, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; - i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg; + struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->port; const struct edp_power_seq *seq = &intel_dp->pps_delays; lockdep_assert_held(&dev_priv->pps_mutex); - if (IS_BROXTON(dev)) { - /* - * TODO: BXT has 2 sets of PPS registers. - * Correct Register for Broxton need to be identified - * using VBT. hardcoding for now - */ - pp_ctrl_reg = BXT_PP_CONTROL(0); - pp_on_reg = BXT_PP_ON_DELAYS(0); - pp_off_reg = BXT_PP_OFF_DELAYS(0); - - } else if (HAS_PCH_SPLIT(dev)) { - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_div_reg = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + intel_pps_get_registers(dev_priv, intel_dp, &regs); - pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); - pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); - } - - /* - * And finally store the new values in the power sequencer. The - * backlight delays are set to 1 because we do manual waits on them. For - * T8, even BSpec recommends doing it. For T9, if we don't do this, - * we'll end up waiting for the backlight off delay twice: once when we - * do the manual sleep, and once when we disable the panel and wait for - * the PP_STATUS bit to become zero. - */ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (1 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | + (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); + pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); /* Compute the divisor for the pp clock, simply match the Bspec * formula.
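On Broxton the power-cycle delay round-trips through PP_CONTROL rather than a divisor register; a worked sketch combining the readout above with the write-back below (assuming, per the code shown, that the hardware field counts coarse cycle units with a bias of one):

        /* Readout direction (see intel_pps_readout_hw_state() above). */
        u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                  BXT_POWER_CYCLE_DELAY_SHIFT;
        u32 t11_t12 = tmp > 0 ? (tmp - 1) * 1000 : 0;

        /* Write-back direction (see below): for tmp >= 1,
         * DIV_ROUND_UP((tmp - 1) * 1000 + 1, 1000) == tmp, so the
         * conversion round-trips losslessly. */
        u32 field = DIV_ROUND_UP(t11_t12 + 1, 1000) <<
                    BXT_POWER_CYCLE_DELAY_SHIFT;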
*/ if (IS_BROXTON(dev)) { - pp_div = I915_READ(pp_ctrl_reg); + pp_div = I915_READ(regs.pp_ctrl); pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) << BXT_POWER_CYCLE_DELAY_SHIFT); @@ -5256,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, pp_on |= port_sel; - I915_WRITE(pp_on_reg, pp_on); - I915_WRITE(pp_off_reg, pp_off); + I915_WRITE(regs.pp_on, pp_on); + I915_WRITE(regs.pp_off, pp_off); if (IS_BROXTON(dev)) - I915_WRITE(pp_ctrl_reg, pp_div); + I915_WRITE(regs.pp_ctrl, pp_div); else - I915_WRITE(pp_div_reg, pp_div); + I915_WRITE(regs.pp_div, pp_div); DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - I915_READ(pp_on_reg), - I915_READ(pp_off_reg), + I915_READ(regs.pp_on), + I915_READ(regs.pp_off), IS_BROXTON(dev) ? - (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) : - I915_READ(pp_div_reg)); + (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : + I915_READ(regs.pp_div)); } /** @@ -5285,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, */ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; struct intel_digital_port *dig_port = NULL; struct intel_dp *intel_dp = dev_priv->drrs.dp; @@ -5384,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) void intel_edp_drrs_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5416,7 +5129,7 @@ unlock: void intel_edp_drrs_disable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5431,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp) } if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - intel_dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + intel_dp->attached_connector->panel. + fixed_mode->vrefresh); dev_priv->drrs.dp = NULL; mutex_unlock(&dev_priv->drrs.mutex); @@ -5463,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work) goto unlock; if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - intel_dp->attached_connector->panel. - downclock_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + intel_dp->attached_connector->panel. 
+ downclock_mode->vrefresh); unlock: mutex_unlock(&dev_priv->drrs.mutex); @@ -5484,7 +5197,7 @@ unlock: void intel_edp_drrs_invalidate(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5507,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, /* invalidate means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - dev_priv->drrs.dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + dev_priv->drrs.dp->attached_connector->panel. + fixed_mode->vrefresh); mutex_unlock(&dev_priv->drrs.mutex); } @@ -5529,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5552,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev, /* flush means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - dev_priv->drrs.dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + dev_priv->drrs.dp->attached_connector->panel. + fixed_mode->vrefresh); /* * flush also means no more activity hence schedule downclock, if all @@ -5589,14 +5302,14 @@ void intel_edp_drrs_flush(struct drm_device *dev, * * DRRS saves power by switching to low RR based on usage scenarios. * - * eDP DRRS:- - * The implementation is based on frontbuffer tracking implementation. - * When there is a disturbance on the screen triggered by user activity or a - * periodic system activity, DRRS is disabled (RR is changed to high RR). - * When there is no movement on screen, after a timeout of 1 second, a switch - * to low RR is made. - * For integration with frontbuffer tracking code, - * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. 
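The two integration points named above are exercised from the frontbuffer tracking code; a sketch of the call sites (illustrative only — the real callers live in i915's frontbuffer tracking, not in this patch):

        /* Screen activity: force DRRS back to the high refresh rate. */
        intel_edp_drrs_invalidate(dev, frontbuffer_bits);

        /* Activity flushed: upclock now, then let the delayed work
         * drop to the low refresh rate once the screen stays idle. */
        intel_edp_drrs_flush(dev, frontbuffer_bits);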
* * DRRS can be further extended to support other internal panels and also * the scenario of video playback wherein RR is set based on the rate @@ -5622,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, { struct drm_connector *connector = &intel_connector->base; struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); @@ -5660,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; bool has_dpcd; @@ -5671,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; + /* + * On IBX/CPT we may get here with LVDS already registered. Since the + * driver uses the only internal power sequencer available for both + * eDP and LVDS, bail out early in this case to prevent interfering + * with an already powered-on LVDS power sequencer. + */ + if (intel_get_lvds_encoder(dev)) { + WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); + DRM_INFO("LVDS was detected, not registering eDP\n"); + + return false; + } + pps_lock(intel_dp); + + intel_dp_init_panel_power_timestamps(intel_dp); + + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + vlv_initial_power_sequencer_setup(intel_dp); + } else { + intel_dp_init_panel_power_sequencer(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + } + intel_edp_panel_vdd_sanitize(intel_dp); + pps_unlock(intel_dp); /* Cache DPCD and EDID for edp. */ @@ -5686,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); - return false; + goto out_vdd_off; } - /* We now know it's not a ghost, init power sequence regs. */ - pps_lock(intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); - pps_unlock(intel_dp); - mutex_lock(&dev->mode_config.mutex); edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { @@ -5761,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_setup_backlight(connector, pipe); return true; + +out_vdd_off: + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + /* + * vdd might still be enabled due to the delayed vdd off. + * Make sure vdd is actually turned off here.
+ */ + pps_lock(intel_dp); + edp_panel_vdd_off_sync(intel_dp); + pps_unlock(intel_dp); + + return false; } bool @@ -5771,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; - int type, ret; + int type; if (WARN(intel_dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on port %c\n", @@ -5832,17 +5576,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, connector->interlace_allowed = true; connector->doublescan_allowed = 0; + intel_dp_aux_init(intel_dp, intel_connector); + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_connector_register(connector); if (HAS_DDI(dev)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_dp_connector_unregister; /* Set up the hotplug pin. */ switch (port) { @@ -5867,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, BUG(); } - if (is_edp(intel_dp)) { - pps_lock(intel_dp); - intel_dp_init_panel_power_timestamps(intel_dp); - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - vlv_initial_power_sequencer_setup(intel_dp); - else - intel_dp_init_panel_power_sequencer(dev, intel_dp); - pps_unlock(intel_dp); - } - - ret = intel_dp_aux_init(intel_dp, intel_connector); - if (ret) - goto fail; - /* init MST on ports that can support it */ - if (HAS_DP_MST(dev) && + if (HAS_DP_MST(dev) && !is_edp(intel_dp) && (port == PORT_B || port == PORT_C || port == PORT_D)) intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); @@ -5904,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } - i915_debugfs_connector_add(connector); - return true; fail: - if (is_edp(intel_dp)) { - cancel_delayed_work_sync(&intel_dp->panel_vdd_work); - /* - * vdd might still be enabled do to the delayed vdd off. - * Make sure vdd is actually turned off here. 
- */ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); - } - drm_connector_unregister(connector); drm_connector_cleanup(connector); return false; @@ -5929,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -5947,7 +5664,7 @@ bool intel_dp_init(struct drm_device *dev, encoder = &intel_encoder->base; if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL)) + DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; @@ -5977,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev, intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; if (IS_CHERRYVIEW(dev)) { if (port == PORT_D) intel_encoder->crtc_mask = 1 << 2; @@ -6007,43 +5724,35 @@ err_connector_alloc: void intel_dp_mst_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; /* disable MST */ for (i = 0; i < I915_MAX_PORTS; i++) { struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port) + + if (!intel_dig_port || !intel_dig_port->dp.can_mst) continue; - if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { - if (!intel_dig_port->dp.can_mst) - continue; - if (intel_dig_port->dp.is_mst) - drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); - } + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); } } void intel_dp_mst_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < I915_MAX_PORTS; i++) { struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port) - continue; - if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { - int ret; + int ret; - if (!intel_dig_port->dp.can_mst) - continue; + if (!intel_dig_port || !intel_dig_port->dp.can_mst) + continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); - if (ret != 0) { - intel_dp_check_mst_status(&intel_dig_port->dp); - } - } + ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + if (ret) + intel_dp_check_mst_status(&intel_dig_port->dp); } } diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c new file mode 100644 index 000000000000..6532e226db29 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -0,0 +1,172 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all 
copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "intel_drv.h" + +static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) +{ + uint8_t reg_val = 0; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ? "enable" : "disable"); + } +} + +/* + * Read the current backlight value from DPCD register(s) based + * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported + */ +static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t read_val[2] = { 0x0 }; + uint16_t level = 0; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + level = read_val[0]; + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +/* + * Sends the current backlight level over the aux channel, checking whether + * it's using an 8-bit or a 16-bit value (MSB and LSB) + */ +static void +intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t vals[2] = { 0x0 }; + + vals[0] = level; + + /* Write the MSB and/or LSB */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return; + } +} + +static void intel_dp_aux_enable_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t dpcd_buf = 0; + + set_aux_backlight_enable(intel_dp, true); + + if ((drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) && + ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) == + DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET)) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)); +} + +static void intel_dp_aux_disable_backlight(struct intel_connector *connector) +{ + set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false); +} + +static int intel_dp_aux_setup_backlight(struct intel_connector *connector, + enum pipe pipe) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_panel *panel = &connector->panel; +
intel_dp_aux_enable_backlight(connector); + + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + panel->backlight.max = 0xFFFF; + else + panel->backlight.max = 0xFF; + + panel->backlight.min = 0; + panel->backlight.level = intel_dp_aux_get_backlight(connector); + + panel->backlight.enabled = panel->backlight.level != 0; + + return 0; +} + +static bool +intel_dp_aux_display_control_capable(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + + /* Check the eDP Display control capabilities registers to determine if + * the panel can support backlight control over the aux channel + */ + if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) || + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + return true; + } + return false; +} + +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct intel_panel *panel = &intel_connector->panel; + + if (!i915.enable_dpcd_backlight) + return -ENODEV; + + if (!intel_dp_aux_display_control_capable(intel_connector)) + return -ENODEV; + + panel->backlight.setup = intel_dp_aux_setup_backlight; + panel->backlight.enable = intel_dp_aux_enable_backlight; + panel->backlight.disable = intel_dp_aux_disable_backlight; + panel->backlight.set = intel_dp_aux_set_backlight; + panel->backlight.get = intel_dp_aux_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7a34090cef34..68a005d729e9 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->dp_encoder_is_mst = true; pipe_config->has_pch_encoder = false; - pipe_config->has_dp_encoder = true; bpp = 24; /* * for MST we always configure max link bw - the spec doesn't @@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; int ret; uint32_t temp; @@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; int ret; DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); - if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), - 1)) + if (intel_wait_for_register(dev_priv, + DP_TP_STATUS(port), + DP_TP_STATUS_ACT_SENT, + DP_TP_STATUS_ACT_SENT, + 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); ret = drm_dp_check_act_status(&intel_dp->mst_mgr); @@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_device *dev = encoder->base.dev; - struct 
drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 temp, flags = 0; - pipe_config->has_dp_encoder = true; - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -336,6 +336,8 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_mst_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_dp_mst_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -455,7 +457,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); - intel_connector->unregister = intel_connector_unregister; intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; @@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; + drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); + drm_connector_register(&intel_connector->base); } @@ -489,7 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; - intel_connector->unregister(intel_connector); + drm_connector_unregister(connector); /* need to nuke the connector */ drm_modeset_lock_all(dev); @@ -534,7 +537,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->primary = intel_dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST, NULL); + DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->crtc_mask = 0x7; diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c new file mode 100644 index 000000000000..047f48748944 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -0,0 +1,470 @@ +/* + * Copyright © 2014-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "intel_drv.h" + +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + int i; + + mutex_lock(&dev_priv->sb_lock); + + /* Clear calc init */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + } + + /* Program swing deemph */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); + val &= ~DPIO_SWING_DEEMPH9P5_MASK; + val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); + } + + /* Program swing margin */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); + + val &= ~DPIO_SWING_MARGIN000_MASK; + val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; + + /* + * Supposedly this value shouldn't matter when unique transition + * scale is disabled, but in fact it does matter. Let's just + * always program the same value and hope it's OK. + */ + val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); + val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; + + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); + } + + /* + * The document said it needs to set bit 27 for ch0 and bit 26 + * for ch1. Might be a typo in the doc. + * For now, for this unique transition scale selection, set bit + * 27 for ch0 and ch1. 
+ */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); + if (uniq_trans_scale) + val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; + else + val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); + } + + /* Start swing calculation */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + mutex_unlock(&dev_priv->sb_lock); + +} + +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + enum pipe pipe = crtc->pipe; + uint32_t val; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); + } +} + +void chv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + unsigned int lane_mask = + intel_dp_unused_lane_mask(intel_crtc->config->lane_count); + u32 val; + + /* + * Must trick the second common lane into life. + * Otherwise we can't even access the PLL. 
+ */ + if (ch == DPIO_CH0 && pipe == PIPE_B) + dport->release_cl2_override = + !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); + + chv_phy_powergate_lanes(encoder, true, lane_mask); + + mutex_lock(&dev_priv->sb_lock); + + /* Assert data lane reset */ + chv_data_lane_soft_reset(encoder, true); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + } + + /* + * This is a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + int data, i, stagger; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* allow hardware to manage TX FIFO reset source */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + /* Program Tx lane latency optimal setting */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + /* Set the upar bit */ + if (intel_crtc->config->lane_count == 1) + data = 0x0; + else + data = (i == 1) ? 
0x0 : 0x1; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), + data << DPIO_UPAR_SHIFT); + } + + /* Data lane stagger programming */ + if (intel_crtc->config->port_clock > 270000) + stagger = 0x18; + else if (intel_crtc->config->port_clock > 135000) + stagger = 0xd; + else if (intel_crtc->config->port_clock > 67500) + stagger = 0x7; + else if (intel_crtc->config->port_clock > 33750) + stagger = 0x4; + else + stagger = 0x2; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(6) | + DPIO_TX2_STAGGER_MULT(0)); + + if (intel_crtc->config->lane_count > 2) { + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(7) | + DPIO_TX2_STAGGER_MULT(5)); + } + + /* Deassert data lane reset */ + chv_data_lane_soft_reset(encoder, false); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_release_cl2_override(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dport->release_cl2_override) { + chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); + dport->release_cl2_override = false; + } +} + +void chv_phy_post_pll_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* disable left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + mutex_unlock(&dev_priv->sb_lock); + + /* + * Leave the power down bit cleared for at least one + * lane so that chv_phy_powergate_ch() will power + * on something when the channel is otherwise unused. + * When the port is off and the override is removed + * the lanes power down anyway, so otherwise it doesn't + * really matter what the state of power down bits is + * after this. 
+ */ + chv_phy_powergate_lanes(encoder, false, 0x0); +} + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), + uniqtranscale_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); + + if (tx3_demph) + vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); + + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), + DPIO_PCS_TX_LANE2_RESET | + DPIO_PCS_TX_LANE1_RESET); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), + DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | + DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | + (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | + DPIO_PCS_CLK_SOFT_RESET); + + /* Fix up inter-pair skew failure */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* Enable clock channels for this port */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); + val = 0; + if (pipe) + val |= (1<<21); + else + val &= ~(1<<21); + val |= 0x001000c4; + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + + /* Program lane clock */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); + + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_reset_lanes(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + 
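The stagger selection in chv_phy_pre_encoder_enable() above is a simple ladder: each port_clock threshold is half the previous one, and the programmed stagger value roughly halves with it. Restated as a standalone helper for clarity (chv_stagger_for_clock is a hypothetical name; port_clock is in kHz, as in struct intel_crtc_state):

#include <stdint.h>

static uint32_t chv_stagger_for_clock(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	if (port_clock > 135000)
		return 0xd;
	if (port_clock > 67500)
		return 0x7;
	if (port_clock > 33750)
		return 0x4;
	return 0x2;
}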
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); + mutex_unlock(&dev_priv->sb_lock); +} diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 58f60b27837e..5c1f2d235ffa 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, void intel_prepare_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; if (WARN_ON(pll == NULL)) @@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) void intel_enable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); unsigned old_mask; @@ -151,7 +151,7 @@ out: void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); @@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc, enum intel_dpll_id range_min, enum intel_dpll_id range_max) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; struct intel_shared_dpll_config *shared_dpll; enum intel_dpll_id i; @@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, if (memcmp(&crtc_state->dpll_hw_state, &shared_dpll[i].hw_state, sizeof(crtc_state->dpll_hw_state)) == 0) { - DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", - crtc->base.base.id, pll->name, + DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", + crtc->base.base.id, crtc->base.name, pll->name, shared_dpll[i].crtc_mask, pll->active_mask); return pll; @@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, for (i = range_min; i <= range_max; i++) { pll = &dev_priv->shared_dplls[i]; if (shared_dpll[i].crtc_mask == 0) { - DRM_DEBUG_KMS("CRTC:%d allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", - crtc->base.base.id, crtc->base.name, pll->name); return pll; } } @@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; /* Make sure no transcoder is still depending on us. 
*/ @@ -358,8 +358,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) crtc->pipe; pll = &dev_priv->shared_dplls[i]; - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); } else { pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_PCH_PLL_A, @@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_DP_MST || encoder->type == INTEL_OUTPUT_EDP) { enum intel_dpll_id pll_id; @@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(regs[pll->id].ctl, I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); - if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5)) + if (intel_wait_for_register(dev_priv, + DPLL_STATUS, + DPLL_LOCK(pll->id), + DPLL_LOCK(pll->id), + 5)) DRM_ERROR("DPLL %d not locked\n", pll->id); } @@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_DP_MST || encoder->type == INTEL_OUTPUT_EDP) { switch (crtc_state->port_clock / 2) { @@ -1239,9 +1243,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case 162000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); break; - /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which - results in CDCLK change. 
Need to handle the change of CDCLK by - disabling pipes and re-enabling them */ case 108000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); break; @@ -1511,7 +1512,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int clock = crtc_state->port_clock; if (encoder->type == INTEL_OUTPUT_HDMI) { - intel_clock_t best_clock; + struct dpll best_clock; /* Calculate HDMI div */ /* @@ -1533,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, clk_div.m2_frac_en = clk_div.m2_frac != 0; vco = best_clock.vco; - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_EDP) { int i; @@ -1616,8 +1617,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) intel_dig_port->port; pll = intel_get_shared_dpll_by_id(dev_priv, i); - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); intel_reference_shared_dpll(pll, crtc_state); @@ -1635,19 +1636,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { static void intel_ddi_pll_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { - int cdclk_freq; - - cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - dev_priv->skl_boot_cdclk = cdclk_freq; - if (skl_sanitize_cdclk(dev_priv)) - DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) - DRM_ERROR("LCPLL1 is disabled\n"); - } else if (!IS_BROXTON(dev_priv)) { + struct drm_i915_private *dev_priv = to_i915(dev); + + if (INTEL_GEN(dev_priv) < 9) { + uint32_t val = I915_READ(LCPLL_CTL); + /* * The LCPLL register should be turned on by the BIOS. For now * let's just check its state and print errors in case @@ -1730,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { void intel_shared_dpll_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f7f0f01814f6..cc937a19b1ba 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -69,39 +69,63 @@ }) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) -#define wait_for_us(COND, US) _wait_for((COND), (US), 1) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. 
*/ #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) -# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic()) +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else -# define _WAIT_FOR_ATOMIC_CHECK do { } while (0) +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) #endif -#define _wait_for_atomic(COND, US) ({ \ - unsigned long end__; \ - int ret__ = 0; \ - _WAIT_FOR_ATOMIC_CHECK; \ +#define _wait_for_atomic(COND, US, ATOMIC) \ +({ \ + int cpu, ret, timeout = (US) * 1000; \ + u64 base; \ + _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \ BUILD_BUG_ON((US) > 50000); \ - end__ = (local_clock() >> 10) + (US) + 1; \ - while (!(COND)) { \ - if (time_after((unsigned long)(local_clock() >> 10), end__)) { \ - /* Unlike the regular wait_for(), this atomic variant \ - * cannot be preempted (and we'll just ignore the issue\ - * of irq interruptions) and so we know that no time \ - * has passed since the last check of COND and can \ - * immediately report the timeout. \ - */ \ - ret__ = -ETIMEDOUT; \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + cpu = smp_processor_id(); \ + } \ + base = local_clock(); \ + for (;;) { \ + u64 now = local_clock(); \ + if (!(ATOMIC)) \ + preempt_enable(); \ + if (COND) { \ + ret = 0; \ + break; \ + } \ + if (now - base >= timeout) { \ + ret = -ETIMEDOUT; \ break; \ } \ cpu_relax(); \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + if (unlikely(cpu != smp_processor_id())) { \ + timeout -= now - base; \ + cpu = smp_processor_id(); \ + base = local_clock(); \ + } \ + } \ } \ + ret; \ +}) + +#define wait_for_us(COND, US) \ +({ \ + int ret__; \ + BUILD_BUG_ON(!__builtin_constant_p(US)); \ + if ((US) > 10) \ + ret__ = _wait_for((COND), (US), 10); \ + else \ + ret__ = _wait_for_atomic((COND), (US), 0); \ ret__; \ }) -#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000) -#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US)) +#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1) +#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1) #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) @@ -135,7 +159,7 @@ enum intel_output_type { INTEL_OUTPUT_LVDS = 4, INTEL_OUTPUT_TVOUT = 5, INTEL_OUTPUT_HDMI = 6, - INTEL_OUTPUT_DISPLAYPORT = 7, + INTEL_OUTPUT_DP = 7, INTEL_OUTPUT_EDP = 8, INTEL_OUTPUT_DSI = 9, INTEL_OUTPUT_UNKNOWN = 10, @@ -159,6 +183,7 @@ struct intel_framebuffer { struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; + async_cookie_t cookie; int preferred_bpp; }; @@ -242,14 +267,6 @@ struct intel_connector { * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); - /* - * Removes all interfaces through which the connector is accessible - * - like sysfs, debugfs entries -, so that no new operations can be - * started on the connector. Also makes sure all currently pending - * operations finish before returing. - */ - void (*unregister)(struct intel_connector *); - /* Panel info for eDP and LVDS */ struct intel_panel panel; @@ -266,7 +283,7 @@ struct intel_connector { struct intel_dp *mst_port; }; -typedef struct dpll { +struct dpll { /* given values */ int n; int m1, m2; @@ -276,7 +293,7 @@ typedef struct dpll { int vco; int m; int p; -} intel_clock_t; +}; struct intel_atomic_state { struct drm_atomic_state base; @@ -291,17 +308,32 @@ struct intel_atomic_state { bool dpll_set, modeset; + /* + * Does this transaction change the pipes that are active? 
This mask + * tracks which CRTCs have changed their active state at the end of + * the transaction (not counting the temporary disable during modesets). + * This mask should only be non-zero when intel_state->modeset is true, + * but the converse is not necessarily true; simply changing a mode may + * not flip the final active status of any CRTC. + */ + unsigned int active_pipe_changes; + unsigned int active_crtcs; unsigned int min_pixclk[I915_MAX_PIPES]; + /* SKL/KBL Only */ + unsigned int cdclk_pll_vco; + struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; - struct intel_wm_config wm_config; /* * Current watermarks can't be trusted during hardware readout, so * don't bother calculating intermediate watermarks. */ bool skip_intermediate_wm; + + /* Gen9+ only */ + struct skl_wm_values wm_results; }; struct intel_plane_state { @@ -405,6 +437,48 @@ struct skl_pipe_wm { uint32_t linetime; }; +struct intel_crtc_wm_state { + union { + struct { + /* + * Intermediate watermarks; these can be + * programmed immediately since they satisfy + * both the current configuration we're + * switching away from and the new + * configuration we're switching to. + */ + struct intel_pipe_wm intermediate; + + /* + * Optimal watermarks, programmed post-vblank + * when this state is committed. + */ + struct intel_pipe_wm optimal; + } ilk; + + struct { + /* gen9+ only needs 1-step wm programming */ + struct skl_pipe_wm optimal; + + /* cached plane data rate */ + unsigned plane_data_rate[I915_MAX_PLANES]; + unsigned plane_y_data_rate[I915_MAX_PLANES]; + + /* minimum block allocation */ + uint16_t minimum_blocks[I915_MAX_PLANES]; + uint16_t minimum_y_blocks[I915_MAX_PLANES]; + } skl; + }; + + /* + * Platforms with two-step watermark programming will need to + * update watermark programming post-vblank to switch from the + * safe intermediate watermarks to the optimal final + * watermarks. + */ + bool need_postvbl_update; +}; + struct intel_crtc_state { struct drm_crtc_state base; @@ -448,12 +522,10 @@ struct intel_crtc_state { */ bool limited_color_range; - /* DP has a bunch of special case unfortunately, so mark the pipe - * accordingly. */ - bool has_dp_encoder; - - /* DSI has special cases */ - bool has_dsi_encoder; + /* Bitmask of encoder types (enum intel_output_type) + * driven by the pipe. + */ + unsigned int output_types; /* Whether we should send NULL infoframes. Required for audio. */ bool has_hdmi_sink; @@ -522,6 +594,12 @@ struct intel_crtc_state { uint8_t lane_count; + /* + * Used by platforms having DP/HDMI PHY with programmable lane + * latency optimization. + */ + uint8_t lane_lat_optim_mask; + /* Panel fitter controls for gen2-gen4 + VLV */ struct { u32 control; @@ -558,32 +636,7 @@ struct intel_crtc_state { /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ bool disable_lp_wm; - struct { - /* - * Optimal watermarks, programmed post-vblank when this state - * is committed. - */ - union { - struct intel_pipe_wm ilk; - struct skl_pipe_wm skl; - } optimal; - - /* - * Intermediate watermarks; these can be programmed immediately - * since they satisfy both the current configuration we're - * switching away from and the new configuration we're switching - * to. - */ - struct intel_pipe_wm intermediate; - - /* - * Platforms with two-step watermark programming will need to - * update watermark programming post-vblank to switch from the - * safe intermediate watermarks to the optimal final - * watermarks. 
- */ - bool need_postvbl_update; - } wm; + struct intel_crtc_wm_state wm; /* Gamma mode programmed on the pipe */ uint32_t gamma_mode; @@ -598,14 +651,6 @@ struct vlv_wm_state { bool cxsr; }; -struct intel_mmio_flip { - struct work_struct work; - struct drm_i915_private *i915; - struct drm_i915_gem_request *req; - struct intel_crtc *crtc; - unsigned int rotation; -}; - struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -620,7 +665,7 @@ struct intel_crtc { unsigned long enabled_power_domains; bool lowfreq_avail; struct intel_overlay *overlay; - struct intel_unpin_work *unpin_work; + struct intel_flip_work *flip_work; atomic_t unpin_work_count; @@ -815,6 +860,7 @@ struct intel_dp { uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ uint8_t num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -838,6 +884,11 @@ struct intel_dp { * this port. Only relevant on VLV/CHV. */ enum pipe pps_pipe; + /* + * Set if the sequencer may be reset due to a power transition, + * requiring a reinitialization. Only relevant on BXT. + */ + bool pps_reset; struct edp_power_seq pps_delays; bool can_mst; /* this port supports mst */ @@ -934,33 +985,32 @@ vlv_pipe_to_channel(enum pipe pipe) static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return dev_priv->pipe_to_crtc_mapping[pipe]; } static inline struct drm_crtc * intel_get_crtc_for_plane(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return dev_priv->plane_to_crtc_mapping[plane]; } -struct intel_unpin_work { - struct work_struct work; +struct intel_flip_work { + struct work_struct unpin_work; + struct work_struct mmio_work; + struct drm_crtc *crtc; struct drm_framebuffer *old_fb; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; atomic_t pending; -#define INTEL_FLIP_INACTIVE 0 -#define INTEL_FLIP_PENDING 1 -#define INTEL_FLIP_COMPLETE 2 u32 flip_count; u32 gtt_offset; struct drm_i915_gem_request *flip_queued_req; u32 flip_queued_vblank; u32 flip_ready_vblank; - bool enable_stall_check; + unsigned int rotation; }; struct intel_load_detect_pipe { @@ -1029,9 +1079,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_reset_rps_interrupts(struct drm_device *dev); -void gen6_enable_rps_interrupts(struct drm_device *dev); -void gen6_disable_rps_interrupts(struct drm_device *dev); +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); @@ -1052,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, /* intel_crt.c */ void 
intel_crt_init(struct drm_device *dev); - +void intel_crt_reset(struct drm_encoder *encoder); /* intel_ddi.c */ void intel_ddi_clk_select(struct intel_encoder *encoder, @@ -1110,14 +1160,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); /* intel_display.c */ +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco); +void intel_update_rawclk(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); -void intel_mark_busy(struct drm_device *dev); -void intel_mark_idle(struct drm_device *dev); +void intel_mark_busy(struct drm_i915_private *dev_priv); +void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); int intel_display_suspend(struct drm_device *dev); void intel_encoder_destroy(struct drm_encoder *encoder); @@ -1126,7 +1178,6 @@ struct intel_connector *intel_connector_alloc(void); bool intel_connector_get_hw_state(struct intel_connector *connector); void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); -struct drm_encoder *intel_best_encoder(struct drm_connector *connector); struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); @@ -1134,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); -bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type); +static inline bool +intel_crtc_has_type(const struct intel_crtc_state *crtc_state, + enum intel_output_type type) +{ + return crtc_state->output_types & (1 << type); +} +static inline bool +intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_types & + ((1 << INTEL_OUTPUT_DP) | + (1 << INTEL_OUTPUT_DP_MST) | + (1 << INTEL_OUTPUT_EDP)); +} static inline void intel_wait_for_vblank(struct drm_device *dev, int pipe) { @@ -1149,6 +1213,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe) if (crtc->active) intel_wait_for_vblank(dev, pipe); } + +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); + int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, @@ -1162,14 +1229,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); -void intel_prepare_page_flip(struct drm_device *dev, int plane); -void intel_finish_page_flip(struct drm_device *dev, int pipe); -void intel_finish_page_flip_plane(struct drm_device *dev, int plane); -void intel_check_page_flip(struct drm_device *dev, int pipe); 
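The intel_crtc_has_type()/intel_crtc_has_dp_encoder() helpers added above replace the old has_dp_encoder/has_dsi_encoder booleans with a single output_types bitmask, one bit per enum intel_output_type value. A self-contained userspace sketch of the same idiom (the INTEL_OUTPUT_DP_MST value of 11 is an assumption here; the hunk above truncates the enum):

#include <stdio.h>

enum intel_output_type {
	INTEL_OUTPUT_DP = 7,
	INTEL_OUTPUT_EDP = 8,
	INTEL_OUTPUT_DP_MST = 11,	/* assumed value; not shown in this hunk */
};

int main(void)
{
	unsigned int output_types = 0;

	/* each encoder driven by the pipe contributes its own bit */
	output_types |= 1 << INTEL_OUTPUT_EDP;

	/* intel_crtc_has_type() equivalent */
	if (output_types & (1 << INTEL_OUTPUT_EDP))
		printf("pipe drives eDP\n");

	/* intel_crtc_has_dp_encoder() equivalent: any DP-class output */
	if (output_types & ((1 << INTEL_OUTPUT_DP) |
			    (1 << INTEL_OUTPUT_DP_MST) |
			    (1 << INTEL_OUTPUT_EDP)))
		printf("program DP M/N values\n");

	return 0;
}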
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe); +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe); +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe); int intel_prepare_plane_fb(struct drm_plane *plane, const struct drm_plane_state *new_state); void intel_cleanup_plane_fb(struct drm_plane *plane, @@ -1226,23 +1293,25 @@ u32 intel_compute_tile_offset(int *x, int *y, const struct drm_framebuffer *fb, int plane, unsigned int pitch, unsigned int rotation); -void intel_prepare_reset(struct drm_device *dev); -void intel_finish_reset(struct drm_device *dev); +void intel_prepare_reset(struct drm_i915_private *dev_priv); +void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); -void broxton_init_cdclk(struct drm_i915_private *dev_priv); -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); +void bxt_init_cdclk(struct drm_i915_private *dev_priv); +void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); +void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); +void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); +bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy); +bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy); void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); void bxt_enable_dc9(struct drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv); void gen9_enable_dc5(struct drm_i915_private *dev_priv); void skl_init_cdclk(struct drm_i915_private *dev_priv); -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); void skl_uninit_cdclk(struct drm_i915_private *dev_priv); +unsigned int skl_cdclk_get_vco(unsigned int freq); void skl_enable_dc6(struct drm_i915_private *dev_priv); void skl_disable_dc6(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, @@ -1250,8 +1319,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock); -int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); + struct dpll *best_clock); +int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); bool intel_crtc_active(struct drm_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc); @@ -1310,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); -void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp); @@ -1337,15 +1406,27 @@ bool intel_dp_source_supports_hbr2(struct intel_dp 
*intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); +static inline unsigned int intel_dp_unused_lane_mask(int lane_count) +{ + return ~((1 << lane_count) - 1) & 0xf; +} + +/* intel_dp_aux_backlight.c */ +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); + /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ void intel_dsi_init(struct drm_device *dev); +/* intel_dsi_dcs_backlight.c */ +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dvo.c */ void intel_dvo_init(struct drm_device *dev); +/* intel_hotplug.c */ +void intel_hpd_poll_init(struct drm_i915_private *dev_priv); /* legacy fbdev emulation in intel_fbdev.c */ @@ -1383,11 +1464,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct drm_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -void intel_fbc_pre_update(struct intel_crtc *crtc); +void intel_fbc_pre_update(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); void intel_fbc_post_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); -void intel_fbc_enable(struct intel_crtc *crtc); +void intel_fbc_enable(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); void intel_fbc_disable(struct intel_crtc *crtc); void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, @@ -1409,6 +1494,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); /* intel_lvds.c */ void intel_lvds_init(struct drm_device *dev); +struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev); @@ -1422,13 +1508,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector); /* intel_overlay.c */ -void intel_setup_overlay(struct drm_device *dev); -void intel_cleanup_overlay(struct drm_device *dev); +void intel_setup_overlay(struct drm_i915_private *dev_priv); +void intel_cleanup_overlay(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); @@ -1447,7 +1533,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc, int fitting_mode); void intel_panel_set_backlight_acpi(struct intel_connector *connector, u32 level, u32 max); -int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); +int intel_panel_setup_backlight(struct drm_connector *connector, + enum pipe pipe); void intel_panel_enable_backlight(struct intel_connector *connector); void intel_panel_disable_backlight(struct intel_connector *connector); void 
intel_panel_destroy_backlight(struct drm_connector *connector); @@ -1456,8 +1543,19 @@ extern struct drm_display_mode *intel_find_panel_downclock( struct drm_device *dev, struct drm_display_mode *fixed_mode, struct drm_connector *connector); -void intel_backlight_register(struct drm_device *dev); -void intel_backlight_unregister(struct drm_device *dev); + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +int intel_backlight_device_register(struct intel_connector *connector); +void intel_backlight_device_unregister(struct intel_connector *connector); +#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +static inline int intel_backlight_device_register(struct intel_connector *connector) +{ + return 0; +} +static inline void intel_backlight_device_unregister(struct intel_connector *connector) +{ +} +#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* intel_psr.c */ @@ -1599,21 +1697,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_device *dev); -void intel_cleanup_gt_powersave(struct drm_device *dev); -void intel_enable_gt_powersave(struct drm_device *dev); -void intel_disable_gt_powersave(struct drm_device *dev); -void intel_suspend_gt_powersave(struct drm_device *dev); -void intel_reset_gt_powersave(struct drm_device *dev); -void gen6_update_ring_freq(struct drm_device *dev); +void intel_init_gt_powersave(struct drm_i915_private *dev_priv); +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv); +void gen6_update_ring_freq(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv, struct intel_rps_client *rps, unsigned long submitted); -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req); +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void vlv_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev); @@ -1621,7 +1718,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); bool ilk_disable_lp_wm(struct drm_device *dev); -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); +static inline int intel_enable_rc6(void) +{ + return i915.enable_rc6; +} /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, @@ -1629,11 +1730,13 @@ bool intel_sdvo_init(struct drm_device *dev, /* intel_sprite.c */ +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs); int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); -void intel_pipe_update_end(struct intel_crtc *crtc); 
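The #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) block above is the standard kernel pattern for compile-time optional features: callers invoke the hooks unconditionally, and when the feature is compiled out the stubs vanish. Header stubs want static inline so that each translation unit including the header does not warn about an unused static function. A generic sketch with made-up names (CONFIG_EXAMPLE_FEATURE, example_register) rather than real i915 symbols:

struct example_ctx;	/* opaque to callers */

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
int example_register(struct example_ctx *ctx);	/* real definition in a .c file */
void example_unregister(struct example_ctx *ctx);
#else
static inline int example_register(struct example_ctx *ctx)
{
	return 0;
}
static inline void example_unregister(struct example_ctx *ctx)
{
}
#endif /* CONFIG_EXAMPLE_FEATURE */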
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); /* intel_tv.c */ void intel_tv_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 4756ef639648..de8e9fb51595 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100)) + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), mask, mask, + 100)) DRM_ERROR("DPI FIFOs are not empty\n"); } @@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, { struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dsi_host->port; struct mipi_dsi_packet packet; ssize_t ret; @@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* note: this is never true for reads */ if (packet.payload_length) { - - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50)) + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), + data_mask, 0, + 50)) DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); write_data(dev_priv, data_reg, packet.payload, @@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); } - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) { + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), + ctrl_mask, 0, + 50)) { DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); } @@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50)) + if (intel_wait_for_register(dev_priv, + MIPI_INTR_STAT(port), + data_mask, data_mask, + 50)) DRM_ERROR("Timeout waiting for read data.\n"); read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); @@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; /* XXX: pipe, hs */ @@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, I915_WRITE(MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100)) + if (intel_wait_for_register(dev_priv, + MIPI_INTR_STAT(port), mask, mask, + 100)) DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); return 0; @@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) static bool intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private 
*dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; @@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("\n"); - pipe_config->has_dsi_encoder = true; - if (fixed_mode) { intel_fixed_panel_mode(fixed_mode, adjusted_mode); @@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, static void bxt_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) static void vlv_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) static void intel_dsi_port_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) static void intel_dsi_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -528,11 +538,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder); static void intel_dsi_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); enum port port; - u32 tmp; DRM_DEBUG_KMS("\n"); @@ -551,11 +560,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) msleep(intel_dsi->panel_on_delay); - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + u32 val; + /* Disable DPOunit clock gating, can stall pipe */ - tmp = I915_READ(DSPCLK_GATE_D); - tmp |= DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, tmp); + val = I915_READ(DSPCLK_GATE_D); + val |= DPOUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); } /* put device in ready state */ @@ -601,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder) 
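Every conversion in this file follows one contract: poll a register until (read(reg) & mask) == value, or give up after a timeout. wait_for_dsi_fifo_empty() above waits for all four FIFO-empty bits to be set (value == mask), while the LP-00 check below waits for AFE_LATCHOUT to clear (value == 0). A userspace sketch of that contract (wait_for_reg is a hypothetical stand-in that polls once per millisecond):

#include <stdint.h>
#include <time.h>

static int wait_for_reg(volatile uint32_t *reg, uint32_t mask,
			uint32_t value, unsigned int timeout_ms)
{
	struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };

	for (;;) {
		if ((*reg & mask) == value)
			return 0;		/* condition met */
		if (!timeout_ms--)
			return -1;		/* -ETIMEDOUT in the kernel */
		nanosleep(&one_ms, NULL);
	}
}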
static void intel_dsi_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 temp; @@ -640,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder) static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -666,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) /* Wait till Clock lanes are in LP-00 state for MIPI Port A * only. MIPI Port C has no similar bit for checking */ - if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT) - == 0x00000), 30)) + if (intel_wait_for_register(dev_priv, + port_ctrl, AFE_LATCHOUT, 0, + 30)) DRM_ERROR("DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ @@ -684,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) static void intel_dsi_post_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); DRM_DEBUG_KMS("\n"); @@ -693,7 +705,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) intel_dsi_clear_device_ready(encoder); - if (!IS_BROXTON(dev_priv)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 val; val = I915_READ(DSPCLK_GATE_D); @@ -719,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_device *dev = encoder->base.dev; enum intel_display_power_domain power_domain; @@ -793,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; @@ -953,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, u32 pclk; DRM_DEBUG_KMS("\n"); - pipe_config->has_dsi_encoder = true; - if (IS_BROXTON(dev)) bxt_dsi_get_pipe_config(encoder, pipe_config); @@ -1012,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1098,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -1171,6 +1181,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; + if (IS_BROXTON(dev_priv)) { + tmp |= BXT_DPHY_DEFEATURE_EN; + if (!is_cmd_mode(intel_dsi)) + tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; + } + for_each_dsi_port(port, intel_dsi->ports) { I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); @@ -1378,12 +1394,13 @@ static const struct drm_encoder_funcs intel_dsi_funcs = { static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, .mode_valid = intel_dsi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dsi_detect, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_dsi_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dsi_set_property, @@ -1413,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev) struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_display_mode *scan, *fixed_mode = NULL; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port; unsigned int i; @@ -1449,7 +1466,7 @@ void intel_dsi_init(struct drm_device *dev) connector = &intel_connector->base; drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, - NULL); + "DSI %c", port_name(port)); intel_encoder->compute_config = intel_dsi_compute_config; intel_encoder->pre_enable = intel_dsi_pre_enable; @@ -1460,7 +1477,6 @@ void intel_dsi_init(struct drm_device *dev) intel_encoder->get_config = intel_dsi_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; /* * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI @@ -1473,10 +1489,42 @@ void intel_dsi_init(struct drm_device *dev) else intel_encoder->crtc_mask = BIT(PIPE_B); - if (dev_priv->vbt.dsi.config->dual_link) + if (dev_priv->vbt.dsi.config->dual_link) { intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); - else + + switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_backlight_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + + switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_cabc_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + } else { intel_dsi->ports = BIT(port); + intel_dsi->dcs_backlight_ports = BIT(port); + intel_dsi->dcs_cabc_ports = BIT(port); + } + + if (!dev_priv->vbt.dsi.config->cabc_supported) + intel_dsi->dcs_cabc_ports = 0; /* Create a DSI host (and a device) for each port. 
*/ for_each_dsi_port(port, intel_dsi->ports) { @@ -1549,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev) connector->display_info.height_mm = fixed_mode->height_mm; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); + intel_panel_setup_backlight(connector, INVALID_PIPE); intel_dsi_add_properties(intel_connector); - drm_connector_register(connector); - - intel_panel_setup_backlight(connector, INVALID_PIPE); - return; err: diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 61a6957fc6c2..5967ea6d6045 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -78,6 +78,10 @@ struct intel_dsi { u8 escape_clk_div; u8 dual_link; + + u16 dcs_backlight_ports; + u16 dcs_cabc_ports; + u8 pixel_overlap; u32 port_bits; u32 bw_timer; diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c new file mode 100644 index 000000000000..ac7c6020c443 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -0,0 +1,179 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Author: Deepak M <m.deepak at intel.com> + */ + +#include "intel_drv.h" +#include "intel_dsi.h" +#include "i915_drv.h" +#include <video/mipi_display.h> +#include <drm/drm_mipi_dsi.h> + +#define CONTROL_DISPLAY_BCTRL (1 << 5) +#define CONTROL_DISPLAY_DD (1 << 3) +#define CONTROL_DISPLAY_BL (1 << 2) + +#define POWER_SAVE_OFF (0 << 0) +#define POWER_SAVE_LOW (1 << 0) +#define POWER_SAVE_MEDIUM (2 << 0) +#define POWER_SAVE_HIGH (3 << 0) +#define POWER_SAVE_OUTDOOR_MODE (4 << 0) + +#define PANEL_PWM_MAX_VALUE 0xFF + +static u32 dcs_get_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + break; + } + + return data; +} + +static void dcs_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data = level; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + } +} + +static void dcs_disable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + enum port port; + + dcs_set_backlight(connector, 0); + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_OFF; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl &= ~CONTROL_DISPLAY_BL; + ctrl &= ~CONTROL_DISPLAY_DD; + ctrl &= ~CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } +} + +static void dcs_enable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_panel *panel = &connector->panel; + struct mipi_dsi_device *dsi_device; + enum port port; + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl |= CONTROL_DISPLAY_BL; + ctrl |= CONTROL_DISPLAY_DD; + ctrl |= CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_MEDIUM; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + dcs_set_backlight(connector, panel->backlight.level); +} + 
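Both FIXME comments above flag that only 8-bit brightness is handled. As a hedged sketch only (dcs_set_backlight_16 is a hypothetical helper, not part of this patch), a 16-bit variant would send a two-byte, MSB-first payload for MIPI_DCS_SET_DISPLAY_BRIGHTNESS:

	static void dcs_set_backlight_16(struct intel_dsi *intel_dsi, u16 level)
	{
		/* two-byte brightness payload, most significant byte first */
		u8 data[2] = { level >> 8, level & 0xff };
		enum port port;

		for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
			struct mipi_dsi_device *dsi_device =
				intel_dsi->dsi_hosts[port]->device;

			mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					   data, sizeof(data));
		}
	}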
+static int dcs_setup_backlight(struct intel_connector *connector, + enum pipe unused) +{ + struct intel_panel *panel = &connector->panel; + + panel->backlight.max = PANEL_PWM_MAX_VALUE; + panel->backlight.level = PANEL_PWM_MAX_VALUE; + + return 0; +} + +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *encoder = intel_connector->encoder; + struct intel_panel *panel = &intel_connector->panel; + + if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) + return -ENODEV; + + if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI)) + return -EINVAL; + + panel->backlight.setup = dcs_setup_backlight; + panel->backlight.enable = dcs_enable_backlight; + panel->backlight.disable = dcs_disable_backlight; + panel->backlight.set = dcs_set_backlight; + panel->backlight.get = dcs_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index e498f1c3221e..cd154ce6b6c1 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; +#define CHV_GPIO_IDX_START_N 0 +#define CHV_GPIO_IDX_START_E 73 +#define CHV_GPIO_IDX_START_SW 100 +#define CHV_GPIO_IDX_START_SE 198 + +#define CHV_VBT_MAX_PINS_PER_FMLY 15 + +#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8) +#define CHV_GPIO_GPIOEN (1 << 15) +#define CHV_GPIO_GPIOCFG_GPIO (0 << 8) +#define CHV_GPIO_GPIOCFG_GPO (1 << 8) +#define CHV_GPIO_GPIOCFG_GPI (2 << 8) +#define CHV_GPIO_GPIOCFG_HIZ (3 << 8) +#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1) + +#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) +#define CHV_GPIO_CFGLOCK (1 << 31) + static inline enum port intel_dsi_seq_port_to_port(u8 port) { return port ? PORT_C : PORT_A; @@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, map = &vlv_gpio_table[gpio_index]; if (dev_priv->vbt.dsi.seq_version >= 3) { - DRM_DEBUG_KMS("GPIO element v3 not supported\n"); - return; + /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ + port = IOSF_PORT_GPIO_NC; } else { if (gpio_source == 0) { port = IOSF_PORT_GPIO_NC; } else if (gpio_source == 1) { - port = IOSF_PORT_GPIO_SC; + DRM_DEBUG_KMS("SC gpio not supported\n"); + return; } else { DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); return; @@ -231,10 +250,60 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, mutex_unlock(&dev_priv->sb_lock); } +static void chv_exec_gpio(struct drm_i915_private *dev_priv, + u8 gpio_source, u8 gpio_index, bool value) +{ + u16 cfg0, cfg1; + u16 family_num; + u8 port; + + if (dev_priv->vbt.dsi.seq_version >= 3) { + if (gpio_index >= CHV_GPIO_IDX_START_SE) { + /* XXX: it's unclear whether 255->57 is part of SE. 
*/ + gpio_index -= CHV_GPIO_IDX_START_SE; + port = CHV_IOSF_PORT_GPIO_SE; + } else if (gpio_index >= CHV_GPIO_IDX_START_SW) { + gpio_index -= CHV_GPIO_IDX_START_SW; + port = CHV_IOSF_PORT_GPIO_SW; + } else if (gpio_index >= CHV_GPIO_IDX_START_E) { + gpio_index -= CHV_GPIO_IDX_START_E; + port = CHV_IOSF_PORT_GPIO_E; + } else { + port = CHV_IOSF_PORT_GPIO_N; + } + } else { + /* XXX: The spec is unclear about CHV GPIO on seq v2 */ + if (gpio_source != 0) { + DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + return; + } + + if (gpio_index >= CHV_GPIO_IDX_START_E) { + DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n", + gpio_index); + return; + } + + port = CHV_IOSF_PORT_GPIO_N; + } + + family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; + gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY; + + cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index); + cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index); + + mutex_lock(&dev_priv->sb_lock); + vlv_iosf_sb_write(dev_priv, port, cfg1, 0); + vlv_iosf_sb_write(dev_priv, port, cfg0, + CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); + mutex_unlock(&dev_priv->sb_lock); +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u8 gpio_source, gpio_index; bool value; @@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); + else if (IS_CHERRYVIEW(dev_priv)) + chv_exec_gpio(dev_priv, gpio_source, gpio_index, value); else DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); @@ -398,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel) struct vbt_panel *vbt_panel = to_vbt_panel(panel); struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *mode; if (!panel->connector) @@ -426,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = { struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) { struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; @@ -578,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) ); /* - * Exit zero is unified val ths_zero and ths_exit + * Exit zero is unified val ths_zero and ths_exit * minimum value for ths_exit = 110ns * min (exit_zero_cnt * 2) = 110/UI * exit_zero_cnt = 55/UI */ - if (exit_zero_cnt < (55 * ui_den / ui_num)) - if ((55 * ui_den) % ui_num) - exit_zero_cnt += 1; + if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num) + exit_zero_cnt += 1; /* clk zero count */ clk_zero_cnt = DIV_ROUND_UP( diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 1765e6e18f2c..6ab58a01b18e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, struct intel_crtc_state *config, int target_dsi_clk) { - unsigned int 
calc_m = 0, calc_p = 0; unsigned int m_min, m_max, p_min = 2, p_max = 6; unsigned int m, n, p; - int ref_clk; - int delta = target_dsi_clk; - u32 m_seed; + unsigned int calc_m, calc_p; + int delta, ref_clk; /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { @@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, m_max = 92; } + calc_p = p_min; + calc_m = m_min; + delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n)); + for (m = m_min; m <= m_max && delta; m++) { for (p = p_min; p <= p_max && delta; p++) { /* @@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, } /* register has log2(N1), this works fine for powers of two */ - n = ffs(n) - 1; - m_seed = lfsr_converts[calc_m - 62]; config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); - config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT | - m_seed << DSI_PLL_M1_DIV_SHIFT; + config->dsi_pll.div = + (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT | + (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT; return 0; } @@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, static int vlv_compute_dsi_pll(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); int ret; u32 dsi_clk; @@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder) * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. */ - if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE) - & BXT_DSI_PLL_LOCKED) == 0, 1)) + if (intel_wait_for_register(dev_priv, + BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, + 0, + 1)) DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); } @@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, u32 dsi_clk; u32 dsi_ratio; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); /* Divide by zero */ if (!pipe_bpp) { @@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); temp = I915_READ(MIPI_CTRL(port)); @@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; u32 dsi_rate = 0; u32 pll_ratio = 0; @@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder, static void bxt_enable_dsi_pll(struct intel_encoder *encoder, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder, I915_WRITE(BXT_DSI_PLL_ENABLE, val); 
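A recurring conversion in this diff, including the PLL-lock wait that follows, replaces open-coded wait_for() polling with intel_wait_for_register(). The two spellings check the same masked condition; side by side, using the values from the bxt_disable_dsi_pll hunk above:

	/* before: caller open-codes the masked read and compare */
	if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED) == 0, 1))
		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");

	/* after: helper takes register, mask, expected value, timeout in ms */
	if (intel_wait_for_register(dev_priv,
				    BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 0,
				    1))
		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");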
/* Timeout and fail if PLL not locked */ - if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) { + if (intel_wait_for_register(dev_priv, + BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, + BXT_DSI_PLL_LOCKED, + 1)) { DRM_ERROR("Timed out waiting for DSI PLL to lock\n"); return; } @@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 tmp; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Clear old configurations */ tmp = I915_READ(BXT_MIPI_CLOCK_CTL); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 286baec979c8..47bdf9dad0d3 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); u32 tmp; @@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; @@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp, flags = 0; @@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_disable_dvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder) static void intel_enable_dvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; @@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, static void intel_dvo_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); @@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force) static int intel_dvo_get_modes(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = 
to_i915(connector->dev); const struct drm_display_mode *fixed_mode = to_intel_connector(connector)->panel.fixed_mode; @@ -341,6 +341,8 @@ static void intel_dvo_destroy(struct drm_connector *connector) static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dvo_detect, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_connector_atomic_get_property, @@ -351,7 +353,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) @@ -378,7 +379,7 @@ static struct drm_display_mode * intel_dvo_get_current_mode(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = intel_attached_dvo(connector); uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); struct drm_display_mode *mode = NULL; @@ -406,9 +407,21 @@ intel_dvo_get_current_mode(struct drm_connector *connector) return mode; } +static char intel_dvo_port_name(i915_reg_t dvo_reg) +{ + if (i915_mmio_reg_equal(dvo_reg, DVOA)) + return 'A'; + else if (i915_mmio_reg_equal(dvo_reg, DVOB)) + return 'B'; + else if (i915_mmio_reg_equal(dvo_reg, DVOC)) + return 'C'; + else + return '?'; +} + void intel_dvo_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; @@ -428,8 +441,6 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->attached_connector = intel_connector; intel_encoder = &intel_dvo->base; - drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type, NULL); intel_encoder->disable = intel_disable_dvo; intel_encoder->enable = intel_enable_dvo; @@ -438,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->compute_config = intel_dvo_compute_config; intel_encoder->pre_enable = intel_dvo_pre_enable; intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { @@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev) if (!dvoinit) continue; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type, + "DVO %c", intel_dvo_port_name(dvo->dvo_reg)); + intel_encoder->type = INTEL_OUTPUT_DVO; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { @@ -537,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->panel_wants_dither = true; } - drm_connector_register(connector); return; } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 647127f3aaff..6a7ad3ed1463 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if 
(wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { + if (intel_wait_for_register(dev_priv, + FBC_STATUS, FBC_STAT_COMPRESSING, 0, + 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); return; } @@ -374,8 +376,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) * @dev_priv: i915 device instance * * This function is used to verify the current state of FBC. + * * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. + * instead of queried at runtime for most callers. */ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) { @@ -389,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_work *work = &fbc->work; struct intel_crtc *crtc = fbc->crtc; - struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; + struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; if (drm_crtc_vblank_get(&crtc->base)) { DRM_ERROR("vblank not available for FBC on pipe %c\n", @@ -442,7 +445,7 @@ out: static void intel_fbc_schedule_activation(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_work *work = &fbc->work; @@ -480,10 +483,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) intel_fbc_hw_deactivate(dev_priv); } -static bool multiple_pipes_ok(struct intel_crtc *crtc) +static bool multiple_pipes_ok(struct intel_crtc *crtc, + struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_plane *primary = crtc->base.primary; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; enum pipe pipe = crtc->pipe; @@ -491,9 +494,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc) if (!no_fbc_on_multiple_pipes(dev_priv)) return true; - WARN_ON(!drm_modeset_is_locked(&primary->mutex)); - - if (to_intel_plane_state(primary->state)->visible) + if (plane_state->visible) fbc->visible_pipes_mask |= (1 << pipe); else fbc->visible_pipes_mask &= ~(1 << pipe); @@ -554,7 +555,7 @@ again: static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct drm_mm_node *uninitialized_var(compressed_llb); int size, fb_cpp, ret; @@ -685,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, */ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; @@ -708,21 +709,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } -static void intel_fbc_update_state_cache(struct intel_crtc *crtc) +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = 
&fbc->state_cache; - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(crtc->base.primary->state); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj; - WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); - WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); - cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) cache->crtc.hsw_bdw_pixel_rate = @@ -740,7 +736,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc) /* FIXME: We lack the proper locking here, so only run this on the * platforms that need. */ - if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) + if (IS_GEN(dev_priv, 5, 6)) cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; @@ -750,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc) static bool intel_fbc_can_activate(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; @@ -822,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) static bool intel_fbc_can_choose(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; - bool enable_by_default = IS_BROADWELL(dev_priv); - if (intel_vgpu_active(dev_priv->dev)) { + if (intel_vgpu_active(dev_priv)) { fbc->no_fbc_reason = "VGPU is active"; return false; } - if (i915.enable_fbc < 0 && !enable_by_default) { - fbc->no_fbc_reason = "disabled per chip default"; - return false; - } - if (!i915.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param"; + fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -857,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) static void intel_fbc_get_reg_params(struct intel_crtc *crtc, struct intel_fbc_reg_params *params) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; @@ -886,9 +876,11 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, return memcmp(params1, params2, sizeof(*params1)) == 0; } -void intel_fbc_pre_update(struct intel_crtc *crtc) +void intel_fbc_pre_update(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -896,7 +888,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) mutex_lock(&fbc->lock); - if (!multiple_pipes_ok(crtc)) { + if (!multiple_pipes_ok(crtc, plane_state)) { fbc->no_fbc_reason = "more than one pipe active"; goto deactivate; } @@ -904,7 +896,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) if (!fbc->enabled || fbc->crtc != crtc) goto unlock; - intel_fbc_update_state_cache(crtc); + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); deactivate: 
intel_fbc_deactivate(dev_priv); @@ -914,7 +906,7 @@ unlock: static void __intel_fbc_post_update(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_reg_params old_params; @@ -947,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) void intel_fbc_post_update(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -996,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) - return; - mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + goto out; + if (!fbc->busy_bits && fbc->enabled && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) @@ -1011,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, __intel_fbc_post_update(fbc->crtc); } +out: mutex_unlock(&fbc->lock); } @@ -1088,9 +1081,11 @@ out: * intel_fbc_enable multiple times for the same pipe without an * intel_fbc_disable in the middle, as long as it is deactivated. */ -void intel_fbc_enable(struct intel_crtc *crtc) +void intel_fbc_enable(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -1101,19 +1096,19 @@ void intel_fbc_enable(struct intel_crtc *crtc) if (fbc->enabled) { WARN_ON(fbc->crtc == NULL); if (fbc->crtc == crtc) { - WARN_ON(!crtc->config->enable_fbc); + WARN_ON(!crtc_state->enable_fbc); WARN_ON(fbc->active); } goto out; } - if (!crtc->config->enable_fbc) + if (!crtc_state->enable_fbc) goto out; WARN_ON(fbc->active); WARN_ON(fbc->crtc != NULL); - intel_fbc_update_state_cache(crtc); + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); if (intel_fbc_alloc_cfb(crtc)) { fbc->no_fbc_reason = "not enough stolen memory"; goto out; @@ -1161,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -1215,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) if (!no_fbc_on_multiple_pipes(dev_priv)) return; - for_each_intel_crtc(dev_priv->dev, crtc) + for_each_intel_crtc(&dev_priv->drm, crtc) if (intel_crtc_active(&crtc->base) && to_intel_plane_state(crtc->base.primary->state)->visible) dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); } +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. 
+ */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (i915.enable_fbc >= 0) + return !!i915.enable_fbc; + + if (IS_BROADWELL(dev_priv)) + return 1; + + return 0; +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1238,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; fbc->work.scheduled = false; + i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); + DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); + if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; return; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index ab8d09a81f14..86b00c6db1a6 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (size * 2 < ggtt->stolen_usable_size) obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, size); - if (!obj) { + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); - ret = -ENOMEM; + ret = PTR_ERR(obj); goto out; } @@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper, struct i915_ggtt *ggtt = &dev_priv->ggtt; struct fb_info *info; struct drm_framebuffer *fb; + struct i915_vma *vma; struct drm_i915_gem_object *obj; - int size, ret; bool prealloc = false; + void *vaddr; + int ret; if (intel_fb && (sizes->fb_width > intel_fb->base.width || @@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper, } obj = intel_fb->obj; - size = obj->base.size; mutex_lock(&dev->struct_mutex); @@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; + vma = i915_gem_obj_to_ggtt(obj); + /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = ggtt->mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); - info->fix.smem_len = size; + info->fix.smem_start = dev->mode_config.fb_base + vma->node.start; + info->fix.smem_len = vma->node.size; - info->screen_base = - ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), - size); - if (!info->screen_base) { + vaddr = i915_vma_pin_iomap(vma); + if (IS_ERR(vaddr)) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); - ret = -ENOSPC; + ret = PTR_ERR(vaddr); goto out_destroy_fbi; } - info->screen_size = size; + info->screen_base = vaddr; + info->screen_size = vma->node.size; /* This driver doesn't need a VT switch to restore the mode on resume */ info->skip_vt_switch = true; @@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - i915_gem_object_ggtt_unpin(obj); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -360,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_device *dev = fb_helper->dev; + unsigned long conn_configured, mask; + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; bool *save_enabled; bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; - uint64_t conn_configured = 0, mask; int pass 
= 0; - save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool), - GFP_KERNEL); + save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); if (!save_enabled) return false; - memcpy(save_enabled, enabled, fb_helper->connector_count); - mask = (1 << fb_helper->connector_count) - 1; + memcpy(save_enabled, enabled, count); + mask = BIT(count) - 1; + conn_configured = 0; retry: - for (i = 0; i < fb_helper->connector_count; i++) { + for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; struct drm_encoder *encoder; @@ -386,7 +389,7 @@ retry: fb_conn = fb_helper->connector_info[i]; connector = fb_conn->connector; - if (conn_configured & (1 << i)) + if (conn_configured & BIT(i)) continue; if (pass == 0 && !connector->has_tile) @@ -398,7 +401,7 @@ retry: if (!enabled[i]) { DRM_DEBUG_KMS("connector %s not enabled, skipping\n", connector->name); - conn_configured |= (1 << i); + conn_configured |= BIT(i); continue; } @@ -417,7 +420,7 @@ retry: DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); enabled[i] = false; - conn_configured |= (1 << i); + conn_configured |= BIT(i); continue; } @@ -430,14 +433,15 @@ retry: intel_crtc->lut_b[j] = j; } - new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); + new_crtc = intel_fb_helper_crtc(fb_helper, + connector->state->crtc); /* * Make sure we're not trying to drive multiple connectors * with a single CRTC, since our cloning support may not * match the BIOS. */ - for (j = 0; j < fb_helper->connector_count; j++) { + for (j = 0; j < count; j++) { if (crtcs[j] == new_crtc) { DRM_DEBUG_KMS("fallback: cloned configuration\n"); goto bail; @@ -488,15 +492,15 @@ retry: } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", + DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", connector->name, - pipe_name(to_intel_crtc(connector->state->crtc)->pipe), connector->state->crtc->base.id, + connector->state->crtc->name, modes[i]->hdisplay, modes[i]->vdisplay, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); fallback = false; - conn_configured |= (1 << i); + conn_configured |= BIT(i); } if ((conn_configured & mask) != mask) { @@ -520,7 +524,7 @@ retry: if (fallback) { bail: DRM_DEBUG_KMS("Not using firmware configuration\n"); - memcpy(enabled, save_enabled, fb_helper->connector_count); + memcpy(enabled, save_enabled, count); kfree(save_enabled); return false; } @@ -536,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intelfb_create, }; -static void intel_fbdev_destroy(struct drm_device *dev, - struct intel_fbdev *ifbdev) +static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) { /* We rely on the object-free to release the VMA pinning for * the info->screen_base mmaping. 
Leaking the VMA is simpler than @@ -550,9 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); if (ifbdev->fb) { - drm_framebuffer_unregister_private(&ifbdev->fb->base); + mutex_lock(&ifbdev->helper.dev->struct_mutex); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); + mutex_unlock(&ifbdev->helper.dev->struct_mutex); + drm_framebuffer_remove(&ifbdev->fb->base); } + + kfree(ifbdev); } /* @@ -685,9 +693,9 @@ out: static void intel_fbdev_suspend_worker(struct work_struct *work) { - intel_fbdev_set_suspend(container_of(work, - struct drm_i915_private, - fbdev_suspend_work)->dev, + intel_fbdev_set_suspend(&container_of(work, + struct drm_i915_private, + fbdev_suspend_work)->drm, FBINFO_STATE_RUNNING, true); } @@ -695,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) @@ -717,8 +725,6 @@ int intel_fbdev_init(struct drm_device *dev) return ret; } - ifbdev->helper.atomic = true; - dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); @@ -729,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev) static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) { - struct drm_i915_private *dev_priv = data; - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = data; /* Due to peculiar init order wrt to hpd handling this is separate. */ if (drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp)) - intel_fbdev_fini(dev_priv->dev); + intel_fbdev_fini(ifbdev->helper.dev); } void intel_fbdev_initial_config_async(struct drm_device *dev) { - async_schedule(intel_fbdev_initial_config, to_i915(dev)); + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + + ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); +} + +static void intel_fbdev_sync(struct intel_fbdev *ifbdev) +{ + if (!ifbdev->cookie) + return; + + /* Only serialises with all preceding async calls, hence +1 */ + async_synchronize_cookie(ifbdev->cookie + 1); + ifbdev->cookie = 0; } void intel_fbdev_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->fbdev) + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_fbdev *ifbdev = dev_priv->fbdev; + + if (!ifbdev) return; flush_work(&dev_priv->fbdev_suspend_work); - if (!current_is_async()) - async_synchronize_full(); - intel_fbdev_destroy(dev, dev_priv->fbdev); - kfree(dev_priv->fbdev); + intel_fbdev_sync(ifbdev); + + intel_fbdev_destroy(ifbdev); dev_priv->fbdev = NULL; } void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; @@ -809,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->fbdev) drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } @@ -817,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_restore_mode(struct drm_device *dev) { int 
ret; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev = dev_priv->fbdev; struct drm_fb_helper *fb_helper; if (!ifbdev) return; + intel_fbdev_sync(ifbdev); + fb_helper = &ifbdev->helper; ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 9be839a242f9..2aa744081f09 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -50,7 +50,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; enum pipe pipe; @@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) static bool cpt_can_enable_serr_int(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; @@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0xffff0000; @@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; @@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); @@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); @@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; @@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { I915_WRITE(SERR_INT, @@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool old; @@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, bool ret; spin_lock_irqsave(&dev_priv->irq_lock, flags); - ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, + ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe, enable); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, intel_crtc->pch_fifo_underrun_disabled = !enable; if (HAS_PCH_IBX(dev_priv)) - ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + ibx_set_fifo_underrun_reporting(&dev_priv->drm, + pch_transcoder, enable); else - cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + cpt_set_fifo_underrun_reporting(&dev_priv->drm, + pch_transcoder, enable, old); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - for_each_intel_crtc(dev_priv->dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->cpu_fifo_underrun_disabled) continue; @@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - for_each_intel_crtc(dev_priv->dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->pch_fifo_underrun_disabled) continue; diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 9d79c4c3e256..3e3e743740c0 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -26,6 +26,7 @@ #include "intel_guc_fwif.h" #include "i915_guc_reg.h" +#include "intel_ringbuffer.h" struct drm_i915_gem_request; @@ -48,14 +49,23 @@ struct drm_i915_gem_request; * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. * - * Finally, we also keep a few statistics here, including the number of - * submissions to each engine, and a record of the last submission failure - * (if any). + * We also keep a few statistics on failures. Ideally, these should all + * be zero! + * no_wq_space: times that the submission pre-check found no space was + * available in the work queue (note, the queue is shared, + * not per-engine). It is OK for this to be nonzero, but + * it should not be huge! + * q_fail: failed to enqueue a work item. This should never happen, + * because we check for space beforehand. + * b_fail: failed to ring the doorbell. This should never happen, unless + * somehow the hardware misbehaves, or maybe if the GuC firmware + * crashes? We probably need to reset the GPU to recover. 
+ * retcode: errno from last guc_submit() */ struct i915_guc_client { struct drm_i915_gem_object *client_obj; void *client_base; /* first page (only) of above */ - struct intel_context *owner; + struct i915_gem_context *owner; struct intel_guc *guc; uint32_t priority; uint32_t ctx_index; @@ -71,12 +81,13 @@ struct i915_guc_client { uint32_t wq_tail; uint32_t unused; /* Was 'wq_head' */ - /* GuC submission statistics & status */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; - uint32_t q_fail; + uint32_t no_wq_space; + uint32_t q_fail; /* No longer used */ uint32_t b_fail; int retcode; - int spare; /* pad to 32 DWords */ + + /* Per-engine counts of GuC submissions */ + uint64_t submissions[I915_NUM_ENGINES]; }; enum intel_guc_fw_status { @@ -133,25 +144,24 @@ struct intel_guc { uint32_t action_fail; /* Total number of failures */ int32_t action_err; /* Last error code */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; - uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; + uint64_t submissions[I915_NUM_ENGINES]; + uint32_t last_seqno[I915_NUM_ENGINES]; }; /* intel_guc_loader.c */ -extern void intel_guc_ucode_init(struct drm_device *dev); -extern int intel_guc_ucode_load(struct drm_device *dev); -extern void intel_guc_ucode_fini(struct drm_device *dev); +extern void intel_guc_init(struct drm_device *dev); +extern int intel_guc_setup(struct drm_device *dev); +extern void intel_guc_fini(struct drm_device *dev); extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); extern int intel_guc_suspend(struct drm_device *dev); extern int intel_guc_resume(struct drm_device *dev); /* i915_guc_submission.c */ -int i915_guc_submission_init(struct drm_device *dev); -int i915_guc_submission_enable(struct drm_device *dev); -int i915_guc_submit(struct i915_guc_client *client, - struct drm_i915_gem_request *rq); -void i915_guc_submission_disable(struct drm_device *dev); -void i915_guc_submission_fini(struct drm_device *dev); -int i915_guc_wq_check_space(struct i915_guc_client *client); +int i915_guc_submission_init(struct drm_i915_private *dev_priv); +int i915_guc_submission_enable(struct drm_i915_private *dev_priv); +int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); +int i915_guc_submit(struct drm_i915_gem_request *rq); +void i915_guc_submission_disable(struct drm_i915_private *dev_priv); +void i915_guc_submission_fini(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 2de57ffe5e18..944786d7075b 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -71,7 +71,8 @@ #define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) #define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) +#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ +#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) #define GUC_DOORBELL_ENABLED 1 #define GUC_DOORBELL_DISABLED 0 diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 876e5da44c4e..605c69658d2c 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,9 +59,15 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" +#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin" MODULE_FIRMWARE(I915_SKL_GUC_UCODE); +#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" +MODULE_FIRMWARE(I915_BXT_GUC_UCODE); + +#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin" +MODULE_FIRMWARE(I915_KBL_GUC_UCODE); + /* 
User-friendly representation of an enum */ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) { @@ -84,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; int irqs; - /* tell all command streamers NOT to forward interrupts and vblank to GuC */ + /* tell all command streamers NOT to forward interrupts or vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv) @@ -100,10 +106,10 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; int irqs; + u32 tmp; - /* tell all command streamers to forward interrupts and vblank to GuC */ - irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); - irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); + /* tell all command streamers to forward interrupts (but not vblank) to GuC */ + irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv) I915_WRITE(RING_MODE_GEN7(engine), irqs); @@ -114,6 +120,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) I915_WRITE(GUC_BCS_RCS_IER, ~irqs); I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); I915_WRITE(GUC_WD_VECS_IER, ~irqs); + + /* + * If GuC has routed PM interrupts to itself, don't keep the redirect + * bit in pm_intr_keep, but do keep the other interrupts that GuC has + * unmasked. + */ + tmp = I915_READ(GEN6_PMINTRMSK); + if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) { + dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP); + dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + } } static u32 get_gttype(struct drm_i915_private *dev_priv) @@ -281,13 +297,24 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) return ret; } +static u32 guc_wopcm_size(struct drm_i915_private *dev_priv) +{ + u32 wopcm_size = GUC_WOPCM_TOP; + + /* On BXT, the top of WOPCM is reserved for RC6 context */ + if (IS_BROXTON(dev_priv)) + wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED; + + return wopcm_size; +} + /* * Load the GuC firmware blob into the MinuteIA. */ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int ret; ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); @@ -308,7 +335,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* init WOPCM */ - I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); + I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv)); I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); /* Enable MIA caching. GuC clock gating is disabled. */ @@ -372,66 +399,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv) } /** - * intel_guc_ucode_load() - load GuC uCode into the device + * intel_guc_setup() - finish preparing the GuC for activity * @dev: drm device * * Called from gem_init_hw() during driver loading and also after a GPU reset. * + * The main action required here is to load the GuC uCode into the device. * The firmware image should have already been fetched into memory by the - * earlier call to intel_guc_ucode_init(), so here we need only check that - * is succeeded, and then transfer the image to the h/w.
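The guc_wopcm_size() helper above also bounds the firmware image later in this patch, where guc_fw_fetch() rejects blobs that cannot fit. A minimal sketch of that fit check, using only helpers from the patch (guc_fw_fits() itself is an illustrative name, not part of the driver):

static int guc_fw_fits(struct drm_i915_private *dev_priv,
                       u32 header_size, u32 ucode_size)
{
        /* header and uCode are loaded into WOPCM together */
        u32 size = header_size + ucode_size;

        if (size > guc_wopcm_size(dev_priv))
                return -E2BIG;  /* firmware too large for WOPCM */

        return 0;
}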
* * Return: non-zero code on error */ -int intel_guc_ucode_load(struct drm_device *dev) +int intel_guc_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - int retries, err = 0; + const char *fw_path = guc_fw->guc_fw_path; + int retries, ret, err; - if (!i915.enable_guc_submission) - return 0; - - DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n", + fw_path, intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); - direct_interrupts_to_host(dev_priv); - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) - return 0; - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && - guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) - return -ENOEXEC; - - guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; - - DRM_DEBUG_DRIVER("GuC fw fetch status %s\n", - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); - - switch (guc_fw->guc_fw_fetch_status) { - case GUC_FIRMWARE_FAIL: - /* something went wrong :( */ - err = -EIO; + /* Loading forbidden, or no firmware to load? */ + if (!i915.enable_guc_loading) { + err = 0; goto fail; - - case GUC_FIRMWARE_NONE: - case GUC_FIRMWARE_PENDING: - default: - /* "can't happen" */ - WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n", - guc_fw->guc_fw_path, - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), - guc_fw->guc_fw_fetch_status); + } else if (fw_path == NULL) { + /* Device is known to have no uCode (e.g. no GuC) */ err = -ENXIO; goto fail; + } else if (*fw_path == '\0') { + /* Device has a GuC but we don't know what f/w to load? */ + DRM_INFO("No GuC firmware known for this platform\n"); + err = -ENODEV; + goto fail; + } - case GUC_FIRMWARE_SUCCESS: - break; + /* Fetch failed, or already fetched but failed to load? */ + if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) { + err = -EIO; + goto fail; + } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) { + err = -ENOEXEC; + goto fail; } - err = i915_guc_submission_init(dev); + direct_interrupts_to_host(dev_priv); + + guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; + + DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), + intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); + + err = i915_guc_submission_init(dev_priv); if (err) goto fail; @@ -448,7 +472,7 @@ int intel_guc_ucode_load(struct drm_device *dev) */ err = i915_reset_guc(dev_priv); if (err) { - DRM_ERROR("GuC reset failed, err %d\n", err); + DRM_ERROR("GuC reset failed: %d\n", err); goto fail; } @@ -459,8 +483,8 @@ int intel_guc_ucode_load(struct drm_device *dev) if (--retries == 0) goto fail; - DRM_INFO("GuC fw load failed, err %d; will reset and " - "retry %d more time(s)\n", err, retries); + DRM_INFO("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", err, retries); } guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; @@ -470,10 +494,7 @@ int intel_guc_ucode_load(struct drm_device *dev) intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); if (i915.enable_guc_submission) { - /* The execbuf_client will be recreated. Release it first. 
*/ - i915_guc_submission_disable(dev); - - err = i915_guc_submission_enable(dev); + err = i915_guc_submission_enable(dev_priv); if (err) goto fail; direct_interrupts_to_guc(dev_priv); @@ -482,15 +503,50 @@ int intel_guc_ucode_load(struct drm_device *dev) return 0; fail: - DRM_ERROR("GuC firmware load failed, err %d\n", err); if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; direct_interrupts_to_host(dev_priv); - i915_guc_submission_disable(dev); - i915_guc_submission_fini(dev); + i915_guc_submission_disable(dev_priv); + i915_guc_submission_fini(dev_priv); - return err; + /* + * We've failed to load the firmware :( + * + * Decide whether to disable GuC submission and fall back to + * execlist mode, and whether to hide the error by returning + * zero or to return -EIO, which the caller will treat as a + * nonfatal error (i.e. it doesn't prevent driver load, but + * marks the GPU as wedged until reset). + */ + if (i915.enable_guc_loading > 1) { + ret = -EIO; + } else if (i915.enable_guc_submission > 1) { + ret = -EIO; + } else { + ret = 0; + } + + if (err == 0 && !HAS_GUC_UCODE(dev)) + ; /* Don't mention the GuC! */ + else if (err == 0) + DRM_INFO("GuC firmware load skipped\n"); + else if (ret != -EIO) + DRM_INFO("GuC firmware load failed: %d\n", err); + else + DRM_ERROR("GuC firmware load failed: %d\n", err); + + if (i915.enable_guc_submission) { + if (fw_path == NULL) + DRM_INFO("GuC submission without firmware not supported\n"); + if (ret == 0) + DRM_INFO("Falling back from GuC submission to execlist mode\n"); + else + DRM_ERROR("GuC init failed: %d\n", ret); + } + i915.enable_guc_submission = 0; + + return ret; } static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) @@ -552,9 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) /* Header and uCode will be loaded to WOPCM. Size of the two. */ size = guc_fw->header_size + guc_fw->ucode_size; - - /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */ - if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) { + if (size > guc_wopcm_size(to_i915(dev))) { DRM_ERROR("Firmware is too large to fit in WOPCM\n"); goto fail; } @@ -617,22 +671,25 @@ fail: } /** - * intel_guc_ucode_init() - define parameters and fetch firmware + * intel_guc_init() - define parameters and fetch firmware * @dev: drm device * * Called early during driver load, but after GEM is initialised. * * The firmware will be transferred to the GuC's memory later, - * when intel_guc_ucode_load() is called. + * when intel_guc_setup() is called. 
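The fail path above encodes the module-parameter policy: a value above 1 makes GuC loading (or submission) mandatory, so a load failure becomes -EIO and the GPU stays wedged until reset, while lower values quietly fall back to execlist mode. A compact restatement, assuming the conventional -1/0/1/2 = auto/never/if-available/required meaning of these parameters (the helper name is illustrative):

/* How fatal should a failed GuC firmware load be? */
static int guc_load_fail_policy(void)
{
        if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
                return -EIO;    /* firmware was required: wedge the GPU */

        return 0;               /* optional: fall back to execlists */
}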
*/ -void intel_guc_ucode_init(struct drm_device *dev) +void intel_guc_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; - if (!HAS_GUC_SCHED(dev)) - i915.enable_guc_submission = false; + /* A negative value means "use platform default" */ + if (i915.enable_guc_loading < 0) + i915.enable_guc_loading = HAS_GUC_UCODE(dev); + if (i915.enable_guc_submission < 0) + i915.enable_guc_submission = HAS_GUC_SCHED(dev); if (!HAS_GUC_UCODE(dev)) { fw_path = NULL; @@ -640,27 +697,30 @@ void intel_guc_ucode_init(struct drm_device *dev) fw_path = I915_SKL_GUC_UCODE; guc_fw->guc_fw_major_wanted = 6; guc_fw->guc_fw_minor_wanted = 1; + } else if (IS_BROXTON(dev)) { + fw_path = I915_BXT_GUC_UCODE; + guc_fw->guc_fw_major_wanted = 8; + guc_fw->guc_fw_minor_wanted = 7; + } else if (IS_KABYLAKE(dev)) { + fw_path = I915_KBL_GUC_UCODE; + guc_fw->guc_fw_major_wanted = 9; + guc_fw->guc_fw_minor_wanted = 14; } else { - i915.enable_guc_submission = false; fw_path = ""; /* unknown device */ } - if (!i915.enable_guc_submission) - return; - guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; + /* Early (and silent) return if GuC loading is disabled */ + if (!i915.enable_guc_loading) + return; if (fw_path == NULL) return; - - if (*fw_path == '\0') { - DRM_ERROR("No GuC firmware known for this platform\n"); - guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; + if (*fw_path == '\0') return; - } guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); @@ -669,18 +729,18 @@ void intel_guc_ucode_init(struct drm_device *dev) } /** - * intel_guc_ucode_fini() - clean up all allocated resources + * intel_guc_fini() - clean up all allocated resources * @dev: drm device */ -void intel_guc_ucode_fini(struct drm_device *dev) +void intel_guc_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; mutex_lock(&dev->struct_mutex); direct_interrupts_to_host(dev_priv); - i915_guc_submission_disable(dev); - i915_guc_submission_fini(dev); + i915_guc_submission_disable(dev_priv); + i915_guc_submission_fini(dev_priv); if (guc_fw->guc_fw_obj) drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c new file mode 100644 index 000000000000..434f4d5c553d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -0,0 +1,104 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "i915_drv.h" +#include "intel_gvt.h" + +/** + * DOC: Intel GVT-g host support + * + * Intel GVT-g is a graphics virtualization technology which shares the + * GPU among multiple virtual machines on a time-sharing basis. Each + * virtual machine is presented a virtual GPU (vGPU), which has features + * equivalent to the underlying physical GPU (pGPU), so the i915 driver can + * run seamlessly in a virtual machine. This file provides the enlightenments + * of GVT and the necessary components used by GVT in the i915 driver. + */ + +static bool is_supported_device(struct drm_i915_private *dev_priv) +{ + if (IS_BROADWELL(dev_priv)) + return true; + return false; +} + +/** + * intel_gvt_init - initialize GVT components + * @dev_priv: drm i915 private data + * + * This function is called at the initialization stage to create a GVT device. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_gvt_init(struct drm_i915_private *dev_priv) +{ + int ret; + + if (!i915.enable_gvt) { + DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); + return 0; + } + + if (!is_supported_device(dev_priv)) { + DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); + goto bail; + } + + /* + * If we're not in a host, or we fail to find an MPT module, disable GVT-g + */ + ret = intel_gvt_init_host(); + if (ret) { + DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); + goto bail; + } + + ret = intel_gvt_init_device(dev_priv); + if (ret) { + DRM_DEBUG_DRIVER("Failed to init GVT device\n"); + goto bail; + } + + return 0; + +bail: + i915.enable_gvt = 0; + return 0; +} + +/** + * intel_gvt_cleanup - cleanup GVT components when the i915 driver is unloading + * @dev_priv: drm i915 private data + * + * This function is called at the i915 driver unloading stage, to shut down + * GVT components and release the related resources. + */ +void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return; + + intel_gvt_clean_device(dev_priv); +} diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h new file mode 100644 index 000000000000..960211df74db --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -0,0 +1,45 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _INTEL_GVT_H_ +#define _INTEL_GVT_H_ + +#include "gvt/gvt.h" + +#ifdef CONFIG_DRM_I915_GVT +int intel_gvt_init(struct drm_i915_private *dev_priv); +void intel_gvt_cleanup(struct drm_i915_private *dev_priv); +int intel_gvt_init_device(struct drm_i915_private *dev_priv); +void intel_gvt_clean_device(struct drm_i915_private *dev_priv); +int intel_gvt_init_host(void); +#else +static inline int intel_gvt_init(struct drm_i915_private *dev_priv) +{ + return 0; +} +static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +{ +} +#endif + +#endif /* _INTEL_GVT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a8844702d11b..4df9f384910c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -47,7 +47,7 @@ static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t enabled_bits; enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; @@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val = I915_READ(VIDEO_DIP_CTL); int i; @@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; i915_reg_t ctl_reg = 
HSW_TVIDEO_DIP_CTL(cpu_transcoder); @@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; @@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp, static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg; u32 val = 0; @@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; @@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); @@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); @@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); @@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) static void intel_hdmi_prepare(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; @@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; @@ -931,7 +931,7 @@ 
static void intel_hdmi_get_config(struct intel_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; int dotclock; @@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder) static void g4x_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; @@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder) static void ibx_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; @@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder) static void cpt_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum pipe pipe = crtc->pipe; @@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder) static void intel_disable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector, static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->base.crtc->dev; - struct drm_atomic_state *state; - struct intel_encoder *encoder; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int count = 0, count_hdmi = 0; - int i; if (HAS_GMCH_DISPLAY(dev)) return false; - state = crtc_state->base.state; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; - count++; - } - /* * HDMI 12bpc affects the clocks, so it's only possible * when not cloning with other encoder types. 
*/ - return count_hdmi > 0 && count_hdmi == count; + return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; } bool intel_hdmi_compute_config(struct intel_encoder *encoder, @@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector, struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); int ret; ret = drm_object_property_set_value(&connector->base, property, val); @@ -1674,39 +1656,16 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - /* Enable clock channels for this port */ - mutex_lock(&dev_priv->sb_lock); - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + vlv_phy_pre_encoder_enable(encoder); /* HDMI 1.0V-2dB */ - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); - vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - - /* Program lane clock */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, + 0x2b247878); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -1719,213 +1678,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - intel_hdmi_prepare(encoder); - /* Program Tx lane resets to default */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 
0x40400000); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - mutex_unlock(&dev_priv->sb_lock); -} - -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } + vlv_phy_pre_pll_enable(encoder); } static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - u32 val; - intel_hdmi_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. 
- */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, 0x0); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - /* Reset lanes to avoid HDMI flicker (VLV w/a) */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_reset_lanes(encoder); } static void chv_hdmi_post_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev_priv->sb_lock); @@ -1940,142 +1719,16 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - /* Program Tx latency optimal setting */ - for (i = 0; i < 4; i++) { - /* Set the upar bit */ - data = (i == 1) ? 
0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + chv_phy_pre_encoder_enable(encoder); /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= 102 << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. 
- */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, 128, 102, false); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -2086,10 +1739,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, dport, 0x0); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -2106,6 +1756,8 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -2114,7 +1766,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { @@ -2138,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; @@ -2242,12 +1893,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_connector_register(connector); intel_hdmi->attached_connector = intel_connector; /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -2280,7 +1929,7 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index bee673005d48..f48957ea100d 100644 --- 
a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; @@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; int i; @@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) } } if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); @@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.hotplug_work); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; @@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work) /** * intel_hpd_irq_handler - main hotplug irq handler - * @dev: drm device + * @dev_priv: drm_i915_private * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * @@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work) * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. */ -void intel_hpd_irq_handler(struct drm_device *dev, +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; enum port port; bool storm_detected = false; @@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ - WARN_ONCE(!HAS_GMCH_DISPLAY(dev), + WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), "Received HPD interrupt on pin %d although disabled\n", i); continue; } @@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, } if (storm_detected) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); /* @@ -453,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_device *dev, * * This is a separate step from interrupt enabling to simplify the locking rules * in the driver load and resume code. 
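For context, the storm detection that intel_hpd_irq_handler() relies on (intel_hpd_irq_storm_detect(), touched at the top of this hunk) amounts to a per-pin pulse counter inside a time window. A sketch under the assumption that the per-pin stats also record the start of the current window in a last_jiffies field; the window and threshold constants are illustrative:

#define HPD_STORM_PERIOD_MS     1000    /* illustrative */
#define HPD_STORM_THRESHOLD     5       /* illustrative */

static bool hpd_storm_detect(struct drm_i915_private *dev_priv,
                             enum hpd_pin pin)
{
        unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_PERIOD_MS);

        if (!time_in_range(jiffies, start, end)) {
                /* quiet long enough: open a fresh window */
                dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
                dev_priv->hotplug.stats[pin].count = 0;
                return false;
        }

        /* too many pulses in one window: treat the pin as storming */
        return ++dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD;
}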
+ * + * Also see: intel_hpd_poll_init(), which enables connector polling */ void intel_hpd_init(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *connector; int i; for_each_hpd_pin(i) { dev_priv->hotplug.stats[i].count = 0; dev_priv->hotplug.stats[i].state = HPD_ENABLED; } + + WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); + schedule_work(&dev_priv->hotplug.poll_init_work); + + /* + * Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked checks happy. + */ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); +} + +void i915_hpd_poll_init_work(struct work_struct *work) { + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, + hotplug.poll_init_work); + struct drm_device *dev = &dev_priv->drm; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + bool enabled; + + mutex_lock(&dev->mode_config.mutex); + + enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); + list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_connector *intel_connector = + to_intel_connector(connector); connector->polled = intel_connector->polled; /* MST has a dynamic intel_connector->encoder and it's reprobing @@ -475,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) continue; if (!connector->polled && I915_HAS_HOTPLUG(dev) && - intel_connector->encoder->hpd_pin > HPD_NONE) - connector->polled = DRM_CONNECTOR_POLL_HPD; + intel_connector->encoder->hpd_pin > HPD_NONE) { + connector->polled = enabled ? + DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT : + DRM_CONNECTOR_POLL_HPD; + } } + if (enabled) + drm_kms_helper_poll_enable_locked(dev); + + mutex_unlock(&dev->mode_config.mutex); + /* - * Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked checks happy. + * We might have missed any hotplugs that happened while we were + * in the middle of disabling polling. */ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irq(&dev_priv->irq_lock); + if (!enabled) + drm_helper_hpd_irq_event(dev); +} + +/** + * intel_hpd_poll_init - enables polling for connectors with hpd + * @dev_priv: i915 device instance + * + * This function enables polling for all connectors, regardless of whether or + * not they support hotplug detection. Under certain conditions HPD may not be + * functional. On most Intel GPUs, this happens when we enter runtime suspend. + * On Valleyview and Cherryview systems, this also happens when we shut off all + * of the powerwells. + * + * Since this function can get called in contexts where we're already holding + * dev->mode_config.mutex, we do the actual hotplug enabling in a separate + * worker. + * + * Also see: intel_hpd_init(), which restores hpd handling.
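The two entry points are intended to be used as a pair around power transitions: polling takes over whenever the HPD pins lose power, and pin-based detection returns on resume. A sketch of that call pattern, with hypothetical runtime-PM hooks standing in for the real callers:

static int hpd_runtime_suspend(struct drm_i915_private *dev_priv)
{
        /* HPD pins go dead with the power wells: switch to polling */
        intel_hpd_poll_init(dev_priv);
        return 0;
}

static int hpd_runtime_resume(struct drm_i915_private *dev_priv)
{
        /* interrupts work again: re-arm the pins, drop polling */
        intel_hpd_init(dev_priv);
        return 0;
}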
+ */ +void intel_hpd_poll_init(struct drm_i915_private *dev_priv) +{ + WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); + + /* + * We might already be holding dev->mode_config.mutex, so do this in a + * separate worker. + * As well, there's no issue if we race here, since we always reschedule + * this worker anyway. + */ + schedule_work(&dev_priv->hotplug.poll_init_work); } void intel_hpd_init_work(struct drm_i915_private *dev_priv) { INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); + INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); } @@ -509,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) cancel_work_sync(&dev_priv->hotplug.dig_port_work); cancel_work_sync(&dev_priv->hotplug.hotplug_work); + cancel_work_sync(&dev_priv->hotplug.poll_init_work); cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); } + +bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) +{ + bool ret = false; + + if (pin == HPD_NONE) + return false; + + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { + dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + ret = true; + } + spin_unlock_irq(&dev_priv->irq_lock); + + return ret; +} + +void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) +{ + if (pin == HPD_NONE) + return; + + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + spin_unlock_irq(&dev_priv->irq_lock); +} diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 81de23098be7..1f266d7df2ec 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c) void intel_i2c_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(GMBUS0, 0); I915_WRITE(GMBUS4, 0); @@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) static u32 get_reserved(struct intel_gmbus *bus) { struct drm_i915_private *dev_priv = bus->dev_priv; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ @@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter) adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - intel_i2c_reset(dev_priv->dev); + intel_i2c_reset(&dev_priv->drm); intel_i2c_quirk_set(dev_priv, true); set_data(bus, 1); set_clock(bus, 1); @@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) { int ret; -#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0) - if (!HAS_GMBUS_IRQ(dev_priv)) - return wait_for(C, 10); + return intel_wait_for_register(dev_priv, + GMBUS2, GMBUS_ACTIVE, 0, + 10); /* Important: The hw handles only the first bit, so set only one!
*/ I915_WRITE(GMBUS4, GMBUS_IDLE_EN); - ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, + (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0, msecs_to_jiffies_timeout(10)); I915_WRITE(GMBUS4, 0); @@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) return 0; else return -ETIMEDOUT; -#undef C } static int @@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = { */ int intel_setup_gmbus(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_gmbus *bus; unsigned int pin; int ret; @@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev) goto err; } - intel_i2c_reset(dev_priv->dev); + intel_i2c_reset(&dev_priv->drm); return 0; @@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) void intel_teardown_gmbus(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_gmbus *bus; unsigned int pin; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7f2d8415ed8b..414ddda43922 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -208,31 +208,27 @@ } while (0) enum { - ADVANCED_CONTEXT = 0, - LEGACY_32B_CONTEXT, - ADVANCED_AD_CONTEXT, - LEGACY_64B_CONTEXT -}; -#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 -#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ - LEGACY_64B_CONTEXT :\ - LEGACY_32B_CONTEXT) -enum { FAULT_AND_HANG = 0, FAULT_AND_HALT, /* Debug only */ FAULT_AND_STREAM, FAULT_AND_CONTINUE /* Unsupported */ }; #define GEN8_CTX_ID_SHIFT 32 +#define GEN8_CTX_ID_WIDTH 21 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 -static int intel_lr_context_pin(struct intel_context *ctx, +/* Typical size of the average request (2 pipecontrols and a MI_BB) */ +#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ + +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); +static int intel_lr_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); /** * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists - * @dev: DRM device. + * @dev_priv: i915 device private * @enable_execlists: value of i915.enable_execlists module parameter. * * Only certain platforms support Execlists (the prerequisites being @@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx, * * Return: 1 if Execlists is supported and has to be enabled. */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) { - WARN_ON(i915.enable_ppgtt == -1); - /* On platforms with execlist available, vGPU will only * support execlist mode, no ring buffer mode. 
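Stepping back to the intel_i2c.c hunk above: the open-coded GMBUS idle poll becomes a call to intel_wait_for_register(), which conceptually polls until (register & mask) == value or the timeout in milliseconds expires. Roughly, in terms of the driver's wait_for() macro (the real helper also tries a fast atomic pre-wait first):

static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg, u32 mask, u32 value,
                                    unsigned int timeout_ms)
{
        /* 0 on success, -ETIMEDOUT if the condition never became true */
        return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}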
*/ - if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) return 1; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return 1; if (enable_execlists == 0) return 0; - if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && + USES_PPGTT(dev_priv) && i915.use_mmio_flip >= 0) return 1; @@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists static void logical_ring_init_platform_invariants(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = engine->i915; - if (IS_GEN8(dev) || IS_GEN9(dev)) + if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) engine->idle_lite_restore_wa = ~0; - engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) && (engine->id == VCS || engine->id == VCS2); engine->ctx_desc_template = GEN8_CTX_VALID; - engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << - GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(dev)) + if (IS_GEN8(dev_priv)) engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; @@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * descriptor for a pinned context * * @ctx: Context to work on - * @ring: Engine the descriptor will be used with + * @engine: Engine the descriptor will be used with * * The context descriptor encodes various attributes of a context, * including its GTT address and some flags. Because it's fairly @@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * which remains valid until the context is unpinned. * * This is what a descriptor looks like, from LSB to MSB: - * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 
- * bits 52-63: reserved, may encode the engine ID (for GuC) + * bits 32-52: ctx ID, a globally unique tag + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 */ static void -intel_lr_context_descriptor_update(struct intel_context *ctx, +intel_lr_context_descriptor_update(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - uint64_t lrca, desc; + struct intel_context *ce = &ctx->engine[engine->id]; + u64 desc; - lrca = ctx->engine[engine->id].lrc_vma->node.start + - LRC_PPHWSP_PN * PAGE_SIZE; + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); - desc = engine->ctx_desc_template; /* bits 0-11 */ - desc |= lrca; /* bits 12-31 */ - desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ + desc = ctx->desc_template; /* bits 3-4 */ + desc |= engine->ctx_desc_template; /* bits 0-11 */ + desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; + /* bits 12-31 */ + desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ - ctx->engine[engine->id].lrc_desc = desc; + ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { return ctx->engine[engine->id].lrc_desc; } -/** - * intel_execlists_ctx_id() - get the Execlists Context ID - * @ctx: Context to get the ID for - * @ring: Engine to get the ID for - * - * Do not confuse with ctx->id! Unfortunately we have a name overload - * here: the old context ID we pass to userspace as a handler so that - * they can refer to a context, and the new context ID we pass to the - * ELSP so that the GPU can inform us of the context status via - * interrupts. - * - * The context ID is a portion of the context descriptor, so we can - * just extract the required part from the cached descriptor. - * - * Return: 20-bits globally unique context ID. - */ -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT; -} - static void execlists_elsp_write(struct drm_i915_gem_request *rq0, struct drm_i915_gem_request *rq1) { struct intel_engine_cs *engine = rq0->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = rq0->i915; uint64_t desc[2]; if (rq1) { @@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0, spin_unlock_irq(&dev_priv->uncore.lock); } +static inline void execlists_context_status_change( + struct drm_i915_gem_request *rq, + unsigned long status) +{ + /* + * This is only used when GVT-g is enabled; when GVT-g is disabled, + * the compiler should eliminate this function as dead code. + */ + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return; + + atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); +} + static void execlists_context_unqueue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; @@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * If irqs are not active generate a warning as batches that finish * without the irqs may get lost and a GPU Hang may occur.
*/ - WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); + WARN_ON(!intel_irqs_enabled(engine->i915)); /* Try to read in pairs */ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, @@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) /* Same ctx: ignore first request, as second request * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; - list_move_tail(&req0->execlist_link, - &engine->execlist_retired_req_list); + list_del(&req0->execlist_link); + i915_gem_request_unreference(req0); req0 = cursor; } else { + if (IS_ENABLED(CONFIG_DRM_I915_GVT)) { + /* + * req0's ctx (after merging) requires single + * submission; stop picking + */ + if (req0->ctx->execlists_force_single_submission) + break; + /* + * req0's ctx doesn't require single submission, + * but the next req's ctx does; stop picking + */ + if (cursor->ctx->execlists_force_single_submission) + break; + } req1 = cursor; WARN_ON(req1->elsp_submitted); break; @@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) if (unlikely(!req0)) return; + execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN); + + if (req1) + execlists_context_status_change(req1, + INTEL_CONTEXT_SCHEDULE_IN); + if (req0->elsp_submitted & engine->idle_lite_restore_wa) { /* * WaIdleLiteRestore: make sure we never cause a lite restore @@ -486,7 +493,7 @@ } static unsigned int -execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) +execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id) { struct drm_i915_gem_request *head_req; @@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) struct drm_i915_gem_request, execlist_link); - if (!head_req) - return 0; - - if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) - return 0; + if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id))) + return 0; WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); if (--head_req->elsp_submitted > 0) return 0; - list_move_tail(&head_req->execlist_link, - &engine->execlist_retired_req_list); + execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT); + + list_del(&head_req->execlist_link); + i915_gem_request_unreference(head_req); return 1; } @@ -517,7 +523,7 @@ static u32 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, u32 *context_id) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status; read_pointer %= GEN8_CSB_ENTRIES; @@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, /** * intel_lrc_irq_handler() - handle Context Switch interrupts - * @engine: Engine Command Streamer to handle. + * @data: the engine, passed to the tasklet handler as an unsigned long * * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly.
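/*
 * [Editor's illustrative sketch, not part of the patch: the Context
 * Status Buffer (CSB) draining loop that intel_lrc_irq_handler()
 * performs, reduced to its core. The write pointer can wrap around the
 * six-entry ring, so it is advanced by the ring size before walking.
 * All "example_" names below are hypothetical.]
 */
#define EXAMPLE_CSB_ENTRIES 6 /* mirrors GEN8_CSB_ENTRIES on gen8/gen9 */

static void example_drain_csb(unsigned int read_pointer,
			      unsigned int write_pointer,
			      const unsigned int csb[][2])
{
	if (read_pointer > write_pointer)
		write_pointer += EXAMPLE_CSB_ENTRIES; /* ring wrapped */

	while (read_pointer < write_pointer) {
		unsigned int i = ++read_pointer % EXAMPLE_CSB_ENTRIES;
		unsigned int status = csb[i][0];
		unsigned int ctx_id = csb[i][1];

		/* React to (status, ctx_id), e.g. pop the head request. */
		(void)status;
		(void)ctx_id;
	}
}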
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, static void intel_lrc_irq_handler(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status_pointer; unsigned int read_pointer, write_pointer; u32 csb[GEN8_CSB_ENTRIES][2]; @@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) struct drm_i915_gem_request *cursor; int num_elements = 0; - if (request->ctx != request->i915->kernel_context) - intel_lr_context_pin(request->ctx, engine); - - i915_gem_request_reference(request); - spin_lock_bh(&engine->execlist_lock); list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) @@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) if (request->ctx == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); - list_move_tail(&tail_req->execlist_link, - &engine->execlist_retired_req_list); + list_del(&tail_req->execlist_link); + i915_gem_request_unreference(tail_req); } } + i915_gem_request_reference(request); list_add_tail(&request->execlist_link, &engine->execlist_queue); + request->ctx_hw_id = request->ctx->hw_id; if (num_elements == 0) execlists_context_unqueue(engine); @@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - int ret = 0; + struct intel_engine_cs *engine = request->engine; + struct intel_context *ce = &request->ctx->engine[engine->id]; + int ret; - request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += EXECLISTS_REQUEST_SIZE; + + if (!ce->state) { + ret = execlists_context_deferred_alloc(request->ctx, engine); + if (ret) + return ret; + } + + request->ringbuf = ce->ringbuf; if (i915.enable_guc_submission) { /* @@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request * going any further, as the i915_add_request() call * later on mustn't fail ... */ - struct intel_guc *guc = &request->i915->guc; - - ret = i915_guc_wq_check_space(guc->execbuf_client); + ret = i915_guc_wq_check_space(request); if (ret) return ret; } - if (request->ctx != request->i915->kernel_context) - ret = intel_lr_context_pin(request->ctx, request->engine); + ret = intel_lr_context_pin(request->ctx, engine); + if (ret) + return ret; + + ret = intel_ring_begin(request, 0); + if (ret) + goto err_unpin; + + if (!ce->initialised) { + ret = engine->init_context(request); + if (ret) + goto err_unpin; + + ce->initialised = true; + } + + /* Note that after this point, we have committed to using + * this request, as it is being used to track both the + * state of engine initialisation and the liveness of the + * golden renderstate above. Think twice before you try + * to cancel/unwind this request now.
+ */ + request->reserved_space -= EXECLISTS_REQUEST_SIZE; + return 0; + +err_unpin: + intel_lr_context_unpin(request->ctx, engine); return ret; } @@ -734,7 +774,6 @@ static int intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct drm_i915_private *dev_priv = request->i915; struct intel_engine_cs *engine = request->engine; intel_logical_ring_advance(ringbuf); @@ -750,54 +789,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_advance(ringbuf); - if (intel_engine_stopped(engine)) - return 0; - - if (engine->last_context != request->ctx) { - if (engine->last_context) - intel_lr_context_unpin(engine->last_context, engine); - if (request->ctx != request->i915->kernel_context) { - intel_lr_context_pin(request->ctx, engine); - engine->last_context = request->ctx; - } else { - engine->last_context = NULL; - } - } + /* We keep the previous context alive until we retire the following + * request. This ensures that the context object is still pinned + * for any residual writes the HW makes into it on the context switch + * into the next object following the breadcrumb. Otherwise, we may + * retire the context too early. + */ + request->previous_context = engine->last_context; + engine->last_context = request->ctx; - if (dev_priv->guc.execbuf_client) - i915_guc_submit(dev_priv->guc.execbuf_client, request); + if (i915.enable_guc_submission) + i915_guc_submit(request); else execlists_context_queue(request); return 0; } -int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. - */ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - - return intel_ring_begin(request, 0); -} - /** * execlists_submission() - submit a batchbuffer for execution, Execlists style - * @dev: DRM device. - * @file: DRM file. - * @ring: Engine Command Streamer to submit to. - * @ctx: Context to employ for this submission. + * @params: execbuffer call parameters. * @args: execbuffer call arguments. * @vmas: list of vmas. - * @batch_obj: the batchbuffer to submit. - * @exec_start: batchbuffer start virtual address pointer. - * @dispatch_flags: translated execbuffer call flags. * * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts * away the submission details of the execbuffer ioctl call.
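/*
 * [Editor's illustrative sketch, not part of the patch: the
 * reserved-space idiom used by intel_logical_ring_alloc_request_extras()
 * above. Worst-case ring space is reserved before any commands are
 * emitted so that emitting the final breadcrumb cannot fail; once the
 * request is committed, the reservation is released. Types and names
 * here are hypothetical.]
 */
struct example_request {
	unsigned int reserved_space;
};

#define EXAMPLE_REQUEST_SIZE 64 /* worst case, cf. EXECLISTS_REQUEST_SIZE */

static int example_prepare_request(struct example_request *rq,
				   int (*emit_prefix)(struct example_request *))
{
	int ret;

	/* Account for the breadcrumb before emitting anything. */
	rq->reserved_space += EXAMPLE_REQUEST_SIZE;

	ret = emit_prefix(rq);
	if (ret)
		return ret; /* caller unwinds the whole request */

	/* Committed: the breadcrumb now owns the reserved space. */
	rq->reserved_space -= EXAMPLE_REQUEST_SIZE;
	return 0;
}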
@@ -810,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, { struct drm_device *dev = params->dev; struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; u64 exec_start; int instp_mode; @@ -881,28 +894,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, return 0; } -void intel_execlists_retire_requests(struct intel_engine_cs *engine) +void intel_execlists_cancel_requests(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req, *tmp; - struct list_head retired_list; + LIST_HEAD(cancel_list); - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); - if (list_empty(&engine->execlist_retired_req_list)) - return; + WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex)); - INIT_LIST_HEAD(&retired_list); spin_lock_bh(&engine->execlist_lock); - list_replace_init(&engine->execlist_retired_req_list, &retired_list); + list_replace_init(&engine->execlist_queue, &cancel_list); spin_unlock_bh(&engine->execlist_lock); - list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { - struct intel_context *ctx = req->ctx; - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - - if (ctx_obj && (ctx != req->i915->kernel_context)) - intel_lr_context_unpin(ctx, engine); - + list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) { list_del(&req->execlist_link); i915_gem_request_unreference(req); } @@ -910,7 +913,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine) void intel_logical_ring_stop(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; if (!intel_engine_initialized(engine)) @@ -923,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine) /* TODO: Is this correct with Execlists enabled? 
*/ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + if (intel_wait_for_register(dev_priv, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, MODE_IDLE, + 1000)) { DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); return; } @@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) return 0; } -static int intel_lr_context_do_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int intel_lr_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; + struct drm_i915_private *dev_priv = ctx->i915; + struct intel_context *ce = &ctx->engine[engine->id]; void *vaddr; u32 *lrc_reg_state; int ret; - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); + lockdep_assert_held(&ctx->i915->drm.struct_mutex); - ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + if (ce->pin_count++) + return 0; + + ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN, + PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) - return ret; + goto err; - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ce->state); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); goto unpin_ctx_obj; @@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf); if (ret) goto unpin_map; - ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + i915_gem_context_reference(ctx); + ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state); intel_lr_context_descriptor_update(ctx, engine); - lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; - ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; - ctx_obj->dirty = true; + + lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start; + ce->lrc_reg_state = lrc_reg_state; + ce->state->dirty = true; /* Invalidate GuC TLB. 
*/ if (i915.enable_guc_submission) I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); - return ret; + return 0; unpin_map: - i915_gem_object_unpin_map(ctx_obj); + i915_gem_object_unpin_map(ce->state); unpin_ctx_obj: - i915_gem_object_ggtt_unpin(ctx_obj); - + i915_gem_object_ggtt_unpin(ce->state); +err: + ce->pin_count = 0; return ret; } -static int intel_lr_context_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +void intel_lr_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - int ret = 0; + struct intel_context *ce = &ctx->engine[engine->id]; - if (ctx->engine[engine->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ctx, engine); - if (ret) - goto reset_pin_count; + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + GEM_BUG_ON(ce->pin_count == 0); - i915_gem_context_reference(ctx); - } - return ret; + if (--ce->pin_count) + return; -reset_pin_count: - ctx->engine[engine->id].pin_count = 0; - return ret; -} + intel_unpin_ringbuffer_obj(ce->ringbuf); -void intel_lr_context_unpin(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + i915_gem_object_unpin_map(ce->state); + i915_gem_object_ggtt_unpin(ce->state); - WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); - if (--ctx->engine[engine->id].pin_count == 0) { - i915_gem_object_unpin_map(ctx_obj); - intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - ctx->engine[engine->id].lrc_vma = NULL; - ctx->engine[engine->id].lrc_desc = 0; - ctx->engine[engine->id].lrc_reg_state = NULL; + ce->lrc_vma = NULL; + ce->lrc_desc = 0; + ce->lrc_reg_state = NULL; - i915_gem_context_unreference(ctx); - } + i915_gem_context_unreference(ctx); } static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) @@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) int ret, i; struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; if (w->count == 0) return 0; @@ -1103,7 +1097,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t index) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); /* @@ -1165,7 +1159,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, /** * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA * - * @ring: only applicable for RCS + * @engine: only applicable for RCS * @wa_ctx: structure representing wa_ctx * offset: specifies start of the batch, should be cache-aligned. This is updated * with the offset value received as input. 
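/*
 * [Editor's illustrative sketch, not part of the patch: the pin-count
 * idiom used by intel_lr_context_pin()/intel_lr_context_unpin() above.
 * Only the first pin does real work, and a failed first pin resets the
 * count so a retry starts clean; only the last unpin releases the
 * resources. Types and names here are hypothetical.]
 */
struct example_ce {
	unsigned int pin_count;
};

static int example_pin(struct example_ce *ce, int (*do_pin)(void))
{
	int ret;

	if (ce->pin_count++)
		return 0; /* already pinned; just bump the count */

	ret = do_pin();
	if (ret)
		ce->pin_count = 0; /* unwind the failed first pin */

	return ret;
}

static void example_unpin(struct example_ce *ce, void (*do_unpin)(void))
{
	if (--ce->pin_count)
		return; /* still in use elsewhere */

	do_unpin();
}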
@@ -1202,7 +1196,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ - if (IS_BROADWELL(engine->dev)) { + if (IS_BROADWELL(engine->i915)) { int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (rc < 0) return rc; @@ -1239,7 +1233,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, /** * gen8_init_perctx_bb() - initialize per ctx batch with WA * - * @ring: only applicable for RCS + * @engine: only applicable for RCS * @wa_ctx: structure representing wa_ctx * offset: specifies start of the batch, should be cache-aligned. * size: size of the batch in DWORDS but HW expects in terms of cachelines @@ -1274,13 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, uint32_t *offset) { int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ @@ -1305,6 +1298,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, 0); } + + /* WaMediaPoolStateCmdInWABB:bxt */ + if (HAS_POOLED_EU(engine->i915)) { + /* + * EU pool configuration is set up along with the golden context + * during context initialization. This value depends on the + * device type (2x6 or 3x6) and needs to be updated based + * on which subslice is disabled, especially for 2x6 + * devices. However, it is safe to load the default + * configuration of a 3x6 device instead of masking off the + * corresponding bits, because the HW ignores bits of a disabled + * subslice and drops down to the appropriate config. Please + * see render_state_setup() in i915_gem_render_state.c for the + * possible configurations; to avoid duplication they are + * not shown here again.
+ */ + u32 eu_pool_config = 0x00777000; + wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE); + wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE); + wa_ctx_emit(batch, index, eu_pool_config); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + } + /* Pad to end of cacheline */ while (index % CACHELINE_DWORDS) wa_ctx_emit(batch, index, MI_NOOP); @@ -1317,12 +1335,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t *offset) { - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit(batch, index, @@ -1331,7 +1348,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaClearTdlStateAckDirtyBits:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); @@ -1350,8 +1367,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); @@ -1363,11 +1380,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { int ret; - engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, - PAGE_ALIGN(size)); - if (!engine->wa_ctx.obj) { + engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm, + PAGE_ALIGN(size)); + if (IS_ERR(engine->wa_ctx.obj)) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); - return -ENOMEM; + ret = PTR_ERR(engine->wa_ctx.obj); + engine->wa_ctx.obj = NULL; + return ret; } ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); @@ -1401,9 +1420,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) WARN_ON(engine->id != RCS); /* update this when WA for higher Gen are added */ - if (INTEL_INFO(engine->dev)->gen > 9) { + if (INTEL_GEN(engine->i915) > 9) { DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", - INTEL_INFO(engine->dev)->gen); + INTEL_GEN(engine->i915)); return 0; } @@ -1423,7 +1442,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) batch = kmap_atomic(page); offset = 0; - if (INTEL_INFO(engine->dev)->gen == 8) { + if (IS_GEN8(engine->i915)) { ret = gen8_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1437,7 +1456,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) &offset); if (ret) goto out; - } else if (INTEL_INFO(engine->dev)->gen == 9) { + } else if (IS_GEN9(engine->i915)) { ret = gen9_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1463,7 +1482,7 @@ out: static void lrc_init_hws(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), (u32)engine->status_page.gfx_addr); @@ -1472,8 
+1491,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned int next_context_status_buffer_hw; lrc_init_hws(engine); @@ -1520,8 +1538,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) static int gen8_init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen8_init_common_ring(engine); @@ -1598,7 +1615,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (req->ctx->ppgtt && (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { if (!USES_FULL_48BIT_PPGTT(req->i915) && - !intel_vgpu_active(req->i915->dev)) { + !intel_vgpu_active(req->i915)) { ret = intel_logical_ring_emit_pdps(req); if (ret) return ret; @@ -1624,38 +1641,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) +static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | engine->irq_keep_mask)); - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + struct drm_i915_private *dev_priv = engine->i915; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } -static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) +static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } static int gen8_emit_flush(struct drm_i915_gem_request *request, @@ -1664,8 +1661,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, { struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = request->i915; uint32_t cmd; int ret; @@ -1734,7 +1730,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. 
*/ - if (IS_GEN9(engine->dev)) + if (IS_GEN9(request->i915)) vf_flush_wa = true; /* WaForGAMHang:kbl */ @@ -1793,16 +1789,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return 0; } -static u32 gen8_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - -static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); -} - static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) { /* @@ -1818,14 +1804,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) intel_flush_status_page(engine, I915_GEM_HWS_INDEX); } -static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - - /* See bxt_a_get_seqno() explaining the reason for the clflush. */ - intel_flush_status_page(engine, I915_GEM_HWS_INDEX); -} - /* * Reserve space for 2 NOOPs at the end of each request to be * used as a workaround for not being allowed to do lite @@ -1833,11 +1811,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) */ #define WA_TAIL_DWORDS 2 -static inline u32 hws_seqno_address(struct intel_engine_cs *engine) -{ - return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; -} - static int gen8_emit_request(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; @@ -1853,10 +1826,10 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_logical_ring_emit(ringbuf, - hws_seqno_address(request->engine) | + intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_logical_ring_emit(ringbuf, request->seqno); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_NOOP); return intel_logical_ring_advance_and_submit(request); @@ -1883,7 +1856,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) (PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); + intel_logical_ring_emit(ringbuf, + intel_hws_seqno_address(request->engine)); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); /* We're thrashing one dword of HWS. */ @@ -1945,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req) /** * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer * - * @ring: Engine Command Streamer. + * @engine: Engine Command Streamer. 
* */ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) @@ -1962,7 +1936,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) tasklet_kill(&engine->irq_tasklet); - dev_priv = engine->dev->dev_private; + dev_priv = engine->i915; if (engine->buffer) { intel_logical_ring_stop(engine); @@ -1975,36 +1949,34 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + intel_engine_fini_breadcrumbs(engine); + if (engine->status_page.obj) { i915_gem_object_unpin_map(engine->status_page.obj); engine->status_page.obj = NULL; } + intel_lr_context_unpin(dev_priv->kernel_context, engine); engine->idle_lite_restore_wa = 0; engine->disable_lite_restore_wa = false; engine->ctx_desc_template = 0; lrc_destroy_wa_ctx_obj(engine); - engine->dev = NULL; + engine->i915 = NULL; } static void -logical_ring_default_vfuncs(struct drm_device *dev, - struct intel_engine_cs *engine) +logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ engine->init_hw = gen8_init_common_ring; engine->emit_request = gen8_emit_request; engine->emit_flush = gen8_emit_flush; - engine->irq_get = gen8_logical_ring_get_irq; - engine->irq_put = gen8_logical_ring_put_irq; + engine->irq_enable = gen8_logical_ring_enable_irq; + engine->irq_disable = gen8_logical_ring_disable_irq; engine->emit_bb_start = gen8_emit_bb_start; - engine->get_seqno = gen8_get_seqno; - engine->set_seqno = gen8_set_seqno; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) engine->irq_seqno_barrier = bxt_a_seqno_barrier; - engine->set_seqno = bxt_a_set_seqno; - } } static inline void @@ -2033,60 +2005,28 @@ lrc_setup_hws(struct intel_engine_cs *engine, } static int -logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) +logical_ring_init(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_context *dctx = dev_priv->kernel_context; - enum forcewake_domains fw_domains; + struct i915_gem_context *dctx = engine->i915->kernel_context; int ret; - /* Intentionally left blank. 
*/ - engine->buffer = NULL; - - engine->dev = dev; - INIT_LIST_HEAD(&engine->active_list); - INIT_LIST_HEAD(&engine->request_list); - i915_gem_batch_pool_init(dev, &engine->batch_pool); - init_waitqueue_head(&engine->irq_queue); - - INIT_LIST_HEAD(&engine->buffers); - INIT_LIST_HEAD(&engine->execlist_queue); - INIT_LIST_HEAD(&engine->execlist_retired_req_list); - spin_lock_init(&engine->execlist_lock); - - tasklet_init(&engine->irq_tasklet, - intel_lrc_irq_handler, (unsigned long)engine); - - logical_ring_init_platform_invariants(engine); - - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, - RING_ELSP(engine), - FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_PTR(engine), - FW_REG_READ | FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_BUF_BASE(engine), - FW_REG_READ); - - engine->fw_domains = fw_domains; + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + goto error; ret = i915_cmd_parser_init_ring(engine); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(dctx, engine); + ret = execlists_context_deferred_alloc(dctx, engine); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin(dctx, engine); + ret = intel_lr_context_pin(dctx, engine); if (ret) { - DRM_ERROR( - "Failed to pin and map ringbuffer %s: %d\n", - engine->name, ret); + DRM_ERROR("Failed to pin context for %s: %d\n", + engine->name, ret); goto error; } @@ -2104,26 +2044,16 @@ error: return ret; } -static int logical_render_ring_init(struct drm_device *dev) +static int logical_render_ring_init(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + struct drm_i915_private *dev_priv = engine->i915; int ret; - engine->name = "render ring"; - engine->id = RCS; - engine->exec_id = I915_EXEC_RENDER; - engine->guc_id = GUC_RENDER_ENGINE; - engine->mmio_base = RENDER_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT); - if (HAS_L3_DPF(dev)) + if (HAS_L3_DPF(dev_priv)) engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - logical_ring_default_vfuncs(dev, engine); - /* Override some for render ring. 
*/ - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) engine->init_hw = gen9_init_render_ring; else engine->init_hw = gen8_init_render_ring; @@ -2132,9 +2062,7 @@ static int logical_render_ring_init(struct drm_device *dev) engine->emit_flush = gen8_emit_flush_render; engine->emit_request = gen8_emit_request_render; - engine->dev = dev; - - ret = intel_init_pipe_control(engine); + ret = intel_init_pipe_control(engine, 4096); if (ret) return ret; @@ -2149,7 +2077,7 @@ static int logical_render_ring_init(struct drm_device *dev) ret); } - ret = logical_ring_init(dev, engine); + ret = logical_ring_init(engine); if (ret) { lrc_destroy_wa_ctx_obj(engine); } @@ -2157,133 +2085,164 @@ static int logical_render_ring_init(struct drm_device *dev) return ret; } -static int logical_bsd_ring_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS]; - - engine->name = "bsd ring"; - engine->id = VCS; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE; - engine->mmio_base = GEN6_BSD_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); -} +static const struct logical_ring_info { + const char *name; + unsigned exec_id; + unsigned guc_id; + u32 mmio_base; + unsigned irq_shift; + int (*init)(struct intel_engine_cs *engine); +} logical_rings[] = { + [RCS] = { + .name = "render ring", + .exec_id = I915_EXEC_RENDER, + .guc_id = GUC_RENDER_ENGINE, + .mmio_base = RENDER_RING_BASE, + .irq_shift = GEN8_RCS_IRQ_SHIFT, + .init = logical_render_ring_init, + }, + [BCS] = { + .name = "blitter ring", + .exec_id = I915_EXEC_BLT, + .guc_id = GUC_BLITTER_ENGINE, + .mmio_base = BLT_RING_BASE, + .irq_shift = GEN8_BCS_IRQ_SHIFT, + .init = logical_ring_init, + }, + [VCS] = { + .name = "bsd ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE, + .mmio_base = GEN6_BSD_RING_BASE, + .irq_shift = GEN8_VCS1_IRQ_SHIFT, + .init = logical_ring_init, + }, + [VCS2] = { + .name = "bsd2 ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE2, + .mmio_base = GEN8_BSD2_RING_BASE, + .irq_shift = GEN8_VCS2_IRQ_SHIFT, + .init = logical_ring_init, + }, + [VECS] = { + .name = "video enhancement ring", + .exec_id = I915_EXEC_VEBOX, + .guc_id = GUC_VIDEOENHANCE_ENGINE, + .mmio_base = VEBOX_RING_BASE, + .irq_shift = GEN8_VECS_IRQ_SHIFT, + .init = logical_ring_init, + }, +}; -static int logical_bsd2_ring_init(struct drm_device *dev) +static struct intel_engine_cs * +logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; + const struct logical_ring_info *info = &logical_rings[id]; + struct intel_engine_cs *engine = &dev_priv->engine[id]; + enum forcewake_domains fw_domains; - engine->name = "bsd2 ring"; - engine->id = VCS2; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE2; - engine->mmio_base = GEN8_BSD2_RING_BASE; + engine->id = id; + engine->name = info->name; + engine->exec_id = info->exec_id; + engine->guc_id = info->guc_id; + engine->mmio_base = info->mmio_base; - logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + engine->i915 = dev_priv; - return logical_ring_init(dev, engine); -} + /* Intentionally left blank. 
*/ + engine->buffer = NULL; -static int logical_blt_ring_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[BCS]; + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, + RING_ELSP(engine), + FW_REG_WRITE); - engine->name = "blitter ring"; - engine->id = BCS; - engine->exec_id = I915_EXEC_BLT; - engine->guc_id = GUC_BLITTER_ENGINE; - engine->mmio_base = BLT_RING_BASE; + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_PTR(engine), + FW_REG_READ | FW_REG_WRITE); - logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_BUF_BASE(engine), + FW_REG_READ); - return logical_ring_init(dev, engine); -} + engine->fw_domains = fw_domains; -static int logical_vebox_ring_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VECS]; + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); + INIT_LIST_HEAD(&engine->buffers); + INIT_LIST_HEAD(&engine->execlist_queue); + spin_lock_init(&engine->execlist_lock); - engine->name = "video enhancement ring"; - engine->id = VECS; - engine->exec_id = I915_EXEC_VEBOX; - engine->guc_id = GUC_VIDEOENHANCE_ENGINE; - engine->mmio_base = VEBOX_RING_BASE; + tasklet_init(&engine->irq_tasklet, + intel_lrc_irq_handler, (unsigned long)engine); - logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + logical_ring_init_platform_invariants(engine); + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine, info->irq_shift); + + intel_engine_init_hangcheck(engine); + i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool); - return logical_ring_init(dev, engine); + return engine; } /** * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers * @dev: DRM device. * - * This function inits the engines for an Execlists submission style (the equivalent in the - * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for - * those engines that are present in the hardware. + * This function inits the engines for an Execlists submission style (the + * equivalent in the legacy ringbuffer submission world would be + * i915_gem_init_engines). It does so only for those engines that are present in + * the hardware. * * Return: non-zero if the initialization failed.
*/ int intel_logical_rings_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + unsigned int mask = 0; + unsigned int i; int ret; - ret = logical_render_ring_init(dev); - if (ret) - return ret; + WARN_ON(INTEL_INFO(dev_priv)->ring_mask & + GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); - if (HAS_BSD(dev)) { - ret = logical_bsd_ring_init(dev); - if (ret) - goto cleanup_render_ring; - } + for (i = 0; i < ARRAY_SIZE(logical_rings); i++) { + if (!HAS_ENGINE(dev_priv, i)) + continue; - if (HAS_BLT(dev)) { - ret = logical_blt_ring_init(dev); - if (ret) - goto cleanup_bsd_ring; - } + if (!logical_rings[i].init) + continue; - if (HAS_VEBOX(dev)) { - ret = logical_vebox_ring_init(dev); + ret = logical_rings[i].init(logical_ring_setup(dev_priv, i)); if (ret) - goto cleanup_blt_ring; + goto cleanup; + + mask |= ENGINE_MASK(i); } - if (HAS_BSD2(dev)) { - ret = logical_bsd2_ring_init(dev); - if (ret) - goto cleanup_vebox_ring; + /* + * Catch failures to update the logical_rings table when new engines + * are added to the driver: warn and disable the forgotten + * engines. + */ + if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) { + struct intel_device_info *info = + (struct intel_device_info *)&dev_priv->info; + info->ring_mask = mask; } return 0; -cleanup_vebox_ring: - intel_logical_ring_cleanup(&dev_priv->engine[VECS]); -cleanup_blt_ring: - intel_logical_ring_cleanup(&dev_priv->engine[BCS]); -cleanup_bsd_ring: - intel_logical_ring_cleanup(&dev_priv->engine[VCS]); -cleanup_render_ring: - intel_logical_ring_cleanup(&dev_priv->engine[RCS]); +cleanup: + for (i = 0; i < I915_NUM_ENGINES; i++) + intel_logical_ring_cleanup(&dev_priv->engine[i]); return ret; } static u32 -make_rpcs(struct drm_device *dev) +make_rpcs(struct drm_i915_private *dev_priv) { u32 rpcs = 0; @@ -2291,7 +2250,7 @@ make_rpcs(struct drm_device *dev) * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_GEN(dev_priv) < 9) return 0; /* @@ -2300,24 +2259,24 @@ make_rpcs(struct drm_device *dev) * must make an explicit request through RPCS for full * enablement.
*/ - if (INTEL_INFO(dev)->has_slice_pg) { + if (INTEL_INFO(dev_priv)->has_slice_pg) { rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->slice_total << + rpcs |= INTEL_INFO(dev_priv)->slice_total << GEN8_RPCS_S_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_subslice_pg) { + if (INTEL_INFO(dev_priv)->has_subslice_pg) { rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->subslice_per_slice << + rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice << GEN8_RPCS_SS_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_eu_pg) { - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + if (INTEL_INFO(dev_priv)->has_eu_pg) { + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } @@ -2329,9 +2288,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(engine->dev)->gen) { + switch (INTEL_GEN(engine->i915)) { default: - MISSING_CASE(INTEL_INFO(engine->dev)->gen); + MISSING_CASE(INTEL_GEN(engine->i915)); /* fall through */ case 9: indirect_ctx_offset = @@ -2347,13 +2306,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) } static int -populate_lr_context(struct intel_context *ctx, +populate_lr_context(struct i915_gem_context *ctx, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; void *vaddr; u32 *reg_state; @@ -2391,7 +2349,7 @@ populate_lr_context(struct intel_context *ctx, RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - (HAS_RESOURCE_STREAMER(dev) ? + (HAS_RESOURCE_STREAMER(dev_priv) ? CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0); @@ -2480,7 +2438,7 @@ populate_lr_context(struct intel_context *ctx, if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev)); + make_rpcs(dev_priv)); } i915_gem_object_unpin_map(ctx_obj); @@ -2489,39 +2447,8 @@ populate_lr_context(struct intel_context *ctx, } /** - * intel_lr_context_free() - free the LRC specific bits of a context - * @ctx: the LR context to free. - * - * The real context freeing is done in i915_gem_context_free: this only - * takes care of the bits that are LRC related: the per-engine backing - * objects and the logical ringbuffer. 
- */ -void intel_lr_context_free(struct intel_context *ctx) -{ - int i; - - for (i = I915_NUM_ENGINES; --i >= 0; ) { - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; - struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - - if (!ctx_obj) - continue; - - if (ctx == ctx->i915->kernel_context) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - i915_gem_object_unpin_map(ctx_obj); - } - - WARN_ON(ctx->engine[i].pin_count); - intel_ringbuffer_free(ringbuf); - drm_gem_object_unreference(&ctx_obj->base); - } -} - -/** * intel_lr_context_size() - return the size of the context for an engine - * @ring: which engine to find the context size for + * @engine: which engine to find the context size for * * Each engine may require a different amount of space for a context image, * so when allocating (or copying) an image, this function can be used to @@ -2537,11 +2464,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(engine->dev)->gen < 8); + WARN_ON(INTEL_GEN(engine->i915) < 8); switch (engine->id) { case RCS: - if (INTEL_INFO(engine->dev)->gen >= 9) + if (INTEL_GEN(engine->i915) >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2558,9 +2485,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) } /** - * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context + * execlists_context_deferred_alloc() - create the LRC specific bits of a context * @ctx: LR context to create. - * @ring: engine to be used with the context. + * @engine: engine to be used with the context. * * This function can be called more than once, with different engines, if we plan * to use the context with them. The context backing objects and the ringbuffers @@ -2570,31 +2497,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) * * Return: non-zero on error. 
*/ - -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; struct drm_i915_gem_object *ctx_obj; + struct intel_context *ce = &ctx->engine[engine->id]; uint32_t context_size; struct intel_ringbuffer *ringbuf; int ret; - WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); - WARN_ON(ctx->engine[engine->id].state); + WARN_ON(ce->state); context_size = round_up(intel_lr_context_size(engine), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_alloc_object(dev, context_size); - if (!ctx_obj) { + ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); + if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); - return -ENOMEM; + return PTR_ERR(ctx_obj); } - ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); + ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size); if (IS_ERR(ringbuf)) { ret = PTR_ERR(ringbuf); goto error_deref_obj; @@ -2606,48 +2531,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, goto error_ringbuf; } - ctx->engine[engine->id].ringbuf = ringbuf; - ctx->engine[engine->id].state = ctx_obj; + ce->ringbuf = ringbuf; + ce->state = ctx_obj; + ce->initialised = engine->init_context == NULL; - if (ctx != ctx->i915->kernel_context && engine->init_context) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, ctx); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - DRM_ERROR("ring create req: %d\n", ret); - goto error_ringbuf; - } - - ret = engine->init_context(req); - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("ring init context: %d\n", - ret); - goto error_ringbuf; - } - } return 0; error_ringbuf: intel_ringbuffer_free(ringbuf); error_deref_obj: drm_gem_object_unreference(&ctx_obj->base); - ctx->engine[engine->id].ringbuf = NULL; - ctx->engine[engine->id].state = NULL; + ce->ringbuf = NULL; + ce->state = NULL; return ret; } void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[engine->id].ringbuf; + struct intel_context *ce = &ctx->engine[engine->id]; + struct drm_i915_gem_object *ctx_obj = ce->state; void *vaddr; uint32_t *reg_state; @@ -2666,7 +2572,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, i915_gem_object_unpin_map(ctx_obj); - ringbuf->head = 0; - ringbuf->tail = 0; + ce->ringbuf->head = 0; + ce->ringbuf->tail = 0; } } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 60a7385bc531..2b8255c19dcc 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -57,6 +57,11 @@ #define GEN8_CSB_READ_PTR(csb_status) \ (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) +enum { + INTEL_CONTEXT_SCHEDULE_IN = 0, + INTEL_CONTEXT_SCHEDULE_OUT, +}; + /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); @@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) #define 
LRC_STATE_PN (LRC_PPHWSP_PN + 1) -void intel_lr_context_free(struct intel_context *ctx); +struct i915_gem_context; + uint32_t intel_lr_context_size(struct intel_engine_cs *engine); -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine); -void intel_lr_context_unpin(struct intel_context *ctx, +void intel_lr_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); struct drm_i915_private; void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx); -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, + struct i915_gem_context *ctx); +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine); -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine); - /* Execlists */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, + int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -void intel_execlists_retire_requests(struct intel_engine_cs *engine); +void intel_execlists_cancel_requests(struct intel_engine_cs *engine); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 96281e628d2a..49550470483e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; @@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); u32 tmp, flags = 0; @@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; int pipe = crtc->pipe; @@ -184,13 +184,13 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) * panels behave in the two modes. For now, let's just maintain the * value we got from the BIOS. */ - temp &= ~LVDS_A3_POWER_MASK; - temp |= lvds_encoder->a3_power; + temp &= ~LVDS_A3_POWER_MASK; + temp |= lvds_encoder->a3_power; /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the PIPECONF reg. */ - if (INTEL_INFO(dev)->gen == 4) { + if (IS_GEN4(dev_priv)) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. 
*/ if (crtc->config->dither && crtc->config->pipe_bpp == 18) @@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { @@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); POSTING_READ(lvds_encoder->reg); - if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) + if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(intel_connector); @@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { @@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) } I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) + if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, container_of(nb, struct intel_lvds_connector, lid_notifier); struct drm_connector *connector = &lvds_connector->base.base; struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return NOTIFY_OK; @@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector, static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { @@ -556,6 +555,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -bool intel_is_dual_link_lvds(struct drm_device *dev) +struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev) { - struct intel_encoder *encoder; - struct intel_lvds_encoder *lvds_encoder; + struct intel_encoder *intel_encoder; - for_each_intel_encoder(dev, encoder) { - if (encoder->type == INTEL_OUTPUT_LVDS) { - lvds_encoder = to_lvds_encoder(&encoder->base); + for_each_intel_encoder(dev, intel_encoder) + if (intel_encoder->type == INTEL_OUTPUT_LVDS) + return intel_encoder; - 
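The two panel-power waits above become intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000) and intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000): success means the masked read matches the expected value before the timeout. A userspace model of just that contract (the real helper is more sophisticated, so treat the 1 ms poll interval and callback-based register read as illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Poll until (read_reg(reg) & mask) == value, or timeout_ms elapses. */
    static int wait_for_register(uint32_t (*read_reg)(uint32_t), uint32_t reg,
                                 uint32_t mask, uint32_t value,
                                 unsigned int timeout_ms)
    {
        for (unsigned int elapsed = 0; ; elapsed++) {
            if ((read_reg(reg) & mask) == value)
                return 0;
            if (elapsed >= timeout_ms)
                return -ETIMEDOUT;
            usleep(1000); /* coarse 1 ms poll interval */
        }
    }

    static uint32_t fake_reg;
    static uint32_t read_fake(uint32_t reg) { (void)reg; return fake_reg; }

    int main(void)
    {
        fake_reg = 0x80000000u; /* a PP_ON-style status bit, already set */
        return wait_for_register(read_fake, 0, 0x80000000u, 0x80000000u, 10);
    }

Waiting for a bit to assert and to deassert differ only in the value argument, which is what lets one helper subsume both of the removed open-coded wait_for() conditions.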
return lvds_encoder->is_dual_link; - } - } + return NULL; +} - return false; +bool intel_is_dual_link_lvds(struct drm_device *dev) +{ + struct intel_encoder *encoder = intel_get_lvds_encoder(dev); + + return encoder && to_lvds_encoder(&encoder->base)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { struct drm_device *dev = lvds_encoder->base.base.dev; unsigned int val; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ if (i915.lvds_channel_mode > 0) @@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev) */ void intel_lvds_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder; struct intel_encoder *intel_encoder; struct intel_lvds_connector *lvds_connector; @@ -978,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + DRM_MODE_ENCODER_LVDS, "LVDS"); intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; @@ -992,7 +995,6 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; @@ -1119,6 +1121,7 @@ out: mutex_unlock(&dev->mode_config.mutex); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); + intel_panel_setup_backlight(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); DRM_DEBUG_KMS("detected %s-link lvds configuration\n", @@ -1131,9 +1134,6 @@ out: DRM_DEBUG_KMS("lid notifier registration failed\n"); lvds_connector->lid_notifier.notifier_call = NULL; } - drm_connector_register(connector); - - intel_panel_setup_backlight(connector, INVALID_PIPE); return; diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 6ba4bf7f2a89..927825f5b284 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -66,9 +66,10 @@ struct drm_i915_mocs_table { #define L3_WB 3 /* Target cache */ -#define ELLC 0 -#define LLC 1 -#define LLC_ELLC 2 +#define LE_TC_PAGETABLE 0 +#define LE_TC_LLC 1 +#define LE_TC_LLC_ELLC 2 +#define LE_TC_LLC_ELLC_ALT 3 /* * MOCS tables @@ -96,34 +97,67 @@ struct drm_i915_mocs_table { * end. 
*/ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { - /* { 0x00000009, 0x0010 } */ - { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, - /* { 0x00000038, 0x0030 } */ - { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, - /* { 0x0000003b, 0x0030 } */ - { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } + { /* 0x00000009 */ + .control_value = LE_CACHEABILITY(LE_UC) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + + /* 0x0010 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), + }, + { + /* 0x00000038 */ + .control_value = LE_CACHEABILITY(LE_PAGETABLE) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + /* 0x0030 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), + }, + { + /* 0x0000003b */ + .control_value = LE_CACHEABILITY(LE_WB) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + /* 0x0030 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), + }, }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { - /* { 0x00000009, 0x0010 } */ - { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, - /* { 0x00000038, 0x0030 } */ - { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, - /* { 0x0000003b, 0x0030 } */ - { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } + { + /* 0x00000009 */ + .control_value = LE_CACHEABILITY(LE_UC) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + + /* 0x0010 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), + }, + { + /* 0x00000038 */ + .control_value = LE_CACHEABILITY(LE_PAGETABLE) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + + /* 0x0030 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), + }, + { + /* 0x00000039 */ + .control_value = LE_CACHEABILITY(LE_UC) | + LE_TGT_CACHE(LE_TC_LLC_ELLC) | + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | + LE_PFM(0) | LE_SCF(0), + + /* 0x0030 */ + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), + }, }; /** @@ -156,6 +190,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, "Platform that should have a MOCS table does not.\n"); } + /* WaDisableSkipCaching:skl,bxt,kbl */ + if (IS_GEN9(dev_priv)) { + int i; + + for (i = 0; i < table->size; i++) + if (WARN_ON(table->table[i].l3cc_value & + (L3_ESC(1) | L3_SCC(0x7)))) + return false; + } + return result; } @@ -189,7 +233,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) */ int 
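Each restructured MOCS entry keeps its old packed encoding in a comment, so the conversion can be checked by hand once field offsets are assumed: LE cacheability at bit 0, target cache at bit 2, LRU management at bit 4, and L3 ESC/SCC/cacheability at bits 0/1/4. Those offsets are inferred from the commented hex values, not quoted from the driver's headers. A standalone check of the first and third Skylake entries:

    #include <assert.h>

    #define LE_CACHEABILITY(v) ((v) << 0) /* assumed offsets, see above */
    #define LE_TGT_CACHE(v)    ((v) << 2)
    #define LE_LRUM(v)         ((v) << 4)
    #define L3_ESC(v)          ((v) << 0)
    #define L3_SCC(v)          ((v) << 1)
    #define L3_CACHEABILITY(v) ((v) << 4)

    enum { LE_PAGETABLE, LE_UC, LE_WT, LE_WB };          /* 0..3 */
    enum { LE_TC_PAGETABLE, LE_TC_LLC, LE_TC_LLC_ELLC }; /* 0..2 */
    enum { L3_UC = 1, L3_WB = 3 };

    int main(void)
    {
        /* Entry 0: uncached, LLC/eLLC target -> 0x00000009 / 0x0010. */
        assert((LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LE_TC_LLC_ELLC) |
                LE_LRUM(0)) == 0x00000009);
        assert((L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) == 0x0010);

        /* Entry 2: write-back, LRU age 3 -> 0x0000003b / 0x0030. */
        assert((LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LE_TC_LLC_ELLC) |
                LE_LRUM(3)) == 0x0000003b);
        assert((L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) == 0x0030);
        return 0;
    }

Under the same offsets, the WaDisableSkipCaching check added below (L3_ESC(1) | L3_SCC(0x7)) simply rejects any l3cc_value with a non-zero low nibble.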
intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 38a4c8ce7e63..f2584d0a01ab 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -82,7 +82,7 @@ void intel_attach_force_audio_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->force_audio_property; @@ -109,7 +109,7 @@ void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->broadcast_rgb_property; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 16e209d326b6..adca262d591a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -232,18 +232,36 @@ struct opregion_asle_ext { #define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19) #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21) -#define ACPI_OTHER_OUTPUT (0<<8) -#define ACPI_VGA_OUTPUT (1<<8) -#define ACPI_TV_OUTPUT (2<<8) -#define ACPI_DIGITAL_OUTPUT (3<<8) -#define ACPI_LVDS_OUTPUT (4<<8) +/* + * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices + * Attached to the Display Adapter). + */ +#define ACPI_DISPLAY_INDEX_SHIFT 0 +#define ACPI_DISPLAY_INDEX_MASK (0xf << 0) +#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4 +#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4) +#define ACPI_DISPLAY_TYPE_SHIFT 8 +#define ACPI_DISPLAY_TYPE_MASK (0xf << 8) +#define ACPI_DISPLAY_TYPE_OTHER (0 << 8) +#define ACPI_DISPLAY_TYPE_VGA (1 << 8) +#define ACPI_DISPLAY_TYPE_TV (2 << 8) +#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8) +#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8) +#define ACPI_VENDOR_SPECIFIC_SHIFT 12 +#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12) +#define ACPI_BIOS_CAN_DETECT (1 << 16) +#define ACPI_DEPENDS_ON_VGA (1 << 17) +#define ACPI_PIPE_ID_SHIFT 18 +#define ACPI_PIPE_ID_MASK (7 << 18) +#define ACPI_DEVICE_ID_SCHEME (1 << 31) #define MAX_DSLP 1500 -static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) +static int swsci(struct drm_i915_private *dev_priv, + u32 function, u32 parm, u32 *parm_out) { - struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct pci_dev *pdev = dev_priv->drm.pdev; u32 main_function, sub_function, scic; u16 swsci_val; u32 dslp; @@ -293,16 +311,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) swsci->scic = scic; /* Ensure SCI event is selected and event trigger is cleared. */ - pci_read_config_word(dev->pdev, SWSCI, &swsci_val); + pci_read_config_word(pdev, SWSCI, &swsci_val); if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { swsci_val |= SWSCI_SCISEL; swsci_val &= ~SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); } /* Use event trigger to tell bios to check the mail. 
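swsci() above is a firmware mailbox call: compose the SCIC request, make sure SCI events are selected in the SWSCI PCI config word with the trigger bit clear, write the request word, set the trigger bit (GSSCIE) to ring the doorbell, then poll SCIC until the firmware clears the busy indicator. A hypothetical, heavily reduced model of that doorbell-and-poll shape (the field layout and helper names are invented for illustration; the real function also validates the requested sub-function, extracts an exit-result field, and derives its timeout from the OpRegion DSLP value):

    #include <errno.h>
    #include <stdint.h>

    #define SCIC_INDICATOR 0x1u /* busy while set; firmware clears it */

    struct mailbox { volatile uint32_t scic; }; /* shared with firmware */

    static int swsci_call(struct mailbox *mb, uint32_t request,
                          void (*ring_doorbell)(void), unsigned long max_polls)
    {
        mb->scic = request | SCIC_INDICATOR; /* post request, mark in flight */
        ring_doorbell();                     /* the GSSCIE config write */

        while (mb->scic & SCIC_INDICATOR) {  /* spin until firmware answers */
            if (!max_polls--)
                return -ETIMEDOUT;
        }
        return 0;                            /* response now readable in scic */
    }

    static struct mailbox mb;
    static void doorbell(void) { mb.scic &= ~SCIC_INDICATOR; } /* instant "firmware" */

    int main(void)
    {
        return swsci_call(&mb, 0x0300, doorbell, 1000); /* request value illustrative */
    }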
*/ swsci_val |= SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); /* Poll for the result. */ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) @@ -336,13 +354,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { - struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); u32 parm = 0; u32 type = 0; u32 port; /* don't care about old stuff for now */ - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; if (intel_encoder->type == INTEL_OUTPUT_DSI) @@ -365,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, type = DISPLAY_TYPE_CRT; break; case INTEL_OUTPUT_UNKNOWN: - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; @@ -382,7 +400,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, parm |= type << (16 + port * 3); - return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); + return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); } static const struct { @@ -396,27 +414,28 @@ static const struct { { PCI_D3cold, 0x04 }, }; -int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, + pci_power_t state) { int i; - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { if (state == power_state_map[i].pci_power_state) - return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, + return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE, power_state_map[i].parm, NULL); } return -EINVAL; } -static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) +static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; struct opregion_asle *asle = dev_priv->opregion.asle; + struct drm_device *dev = &dev_priv->drm; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -449,7 +468,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) return 0; } -static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) +static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
1-0xfffe are valid */ @@ -457,13 +476,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) return ASLC_ALS_ILLUM_FAILED; } -static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) +static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } -static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) +static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ @@ -471,13 +490,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) return ASLC_PFIT_FAILED; } -static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) +static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) { DRM_DEBUG_DRIVER("SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } -static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) +static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) { if (!iuer) DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); @@ -495,7 +514,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) return ASLC_BUTTON_ARRAY_FAILED; } -static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) +static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); @@ -505,7 +524,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) return ASLC_CONVERTIBLE_FAILED; } -static u32 asle_set_docking(struct drm_device *dev, u32 iuer) +static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); @@ -515,7 +534,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer) return ASLC_DOCKING_FAILED; } -static u32 asle_isct_state(struct drm_device *dev) +static u32 asle_isct_state(struct drm_i915_private *dev_priv) { DRM_DEBUG_DRIVER("ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; @@ -527,7 +546,6 @@ static void asle_work(struct work_struct *work) container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = container_of(opregion, struct drm_i915_private, opregion); - struct drm_device *dev = dev_priv->dev; struct opregion_asle *asle = dev_priv->opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -544,40 +562,38 @@ static void asle_work(struct work_struct *work) } if (aslc_req & ASLC_SET_ALS_ILLUM) - aslc_stat |= asle_set_als_illum(dev, asle->alsi); + aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi); if (aslc_req & ASLC_SET_BACKLIGHT) - aslc_stat |= asle_set_backlight(dev, asle->bclp); + aslc_stat |= asle_set_backlight(dev_priv, asle->bclp); if (aslc_req & ASLC_SET_PFIT) - aslc_stat |= asle_set_pfit(dev, asle->pfit); + aslc_stat |= asle_set_pfit(dev_priv, asle->pfit); if (aslc_req & ASLC_SET_PWM_FREQ) - aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); + aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb); if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) - aslc_stat |= asle_set_supported_rotation_angles(dev, + aslc_stat |= asle_set_supported_rotation_angles(dev_priv, asle->srot); if (aslc_req & ASLC_BUTTON_ARRAY) - aslc_stat |= asle_set_button_array(dev, asle->iuer); + aslc_stat |= asle_set_button_array(dev_priv, asle->iuer); if (aslc_req & 
ASLC_CONVERTIBLE_INDICATOR) - aslc_stat |= asle_set_convertible(dev, asle->iuer); + aslc_stat |= asle_set_convertible(dev_priv, asle->iuer); if (aslc_req & ASLC_DOCKING_INDICATOR) - aslc_stat |= asle_set_docking(dev, asle->iuer); + aslc_stat |= asle_set_docking(dev_priv, asle->iuer); if (aslc_req & ASLC_ISCT_STATE_CHANGE) - aslc_stat |= asle_isct_state(dev); + aslc_stat |= asle_isct_state(dev_priv); asle->aslc = aslc_stat; } -void intel_opregion_asle_intr(struct drm_device *dev) +void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->opregion.asle) schedule_work(&dev_priv->opregion.asle_work); } @@ -658,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static void intel_didl_outputs(struct drm_device *dev) +static u32 acpi_display_type(struct drm_connector *connector) +{ + u32 display_type; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_DVIA: + display_type = ACPI_DISPLAY_TYPE_VGA; + break; + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_9PinDIN: + case DRM_MODE_CONNECTOR_TV: + display_type = ACPI_DISPLAY_TYPE_TV; + break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_HDMIB: + display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_DSI: + display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_Unknown: + case DRM_MODE_CONNECTOR_VIRTUAL: + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + default: + MISSING_CASE(connector->connector_type); + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + } + + return display_type; +} + +static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; @@ -670,7 +727,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp, max_outputs; int i = 0; - handle = ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; @@ -725,45 +782,25 @@ end: blind_set: i = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - int output_type = ACPI_OTHER_OUTPUT; + list_for_each_entry(connector, + &dev_priv->drm.mode_config.connector_list, head) { + int display_type = acpi_display_type(connector); + if (i >= max_outputs) { DRM_DEBUG_KMS("More than %u outputs in connector list\n", max_outputs); return; } - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_VGA: - case DRM_MODE_CONNECTOR_DVIA: - output_type = ACPI_VGA_OUTPUT; - break; - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_9PinDIN: - output_type = ACPI_TV_OUTPUT; - break; - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_DVID: - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_HDMIB: - output_type = ACPI_DIGITAL_OUTPUT; - break; - case DRM_MODE_CONNECTOR_LVDS: - output_type = ACPI_LVDS_OUTPUT; - 
break; - } + temp = get_did(opregion, i); - set_did(opregion, i, temp | (1 << 31) | output_type | i); + set_did(opregion, i, temp | (1 << 31) | display_type | i); i++; } goto end; } -static void intel_setup_cadls(struct drm_device *dev) +static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; int i = 0; u32 disp_id; @@ -780,17 +817,16 @@ static void intel_setup_cadls(struct drm_device *dev) } while (++i < 8 && disp_id != 0); } -void intel_opregion_init(struct drm_device *dev) +void intel_opregion_register(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { - intel_didl_outputs(dev); - intel_setup_cadls(dev); + intel_didl_outputs(dev_priv); + intel_setup_cadls(dev_priv); /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. @@ -808,9 +844,8 @@ void intel_opregion_init(struct drm_device *dev) } } -void intel_opregion_fini(struct drm_device *dev) +void intel_opregion_unregister(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) @@ -842,9 +877,8 @@ void intel_opregion_fini(struct drm_device *dev) opregion->lid_state = NULL; } -static void swsci_setup(struct drm_device *dev) +static void swsci_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; bool requested_callbacks = false; u32 tmp; @@ -854,7 +888,7 @@ static void swsci_setup(struct drm_device *dev) opregion->swsci_sbcb_sub_functions = 1; /* We use GBDA to ask for supported GBDA calls. */ - if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ tmp <<= 1; opregion->swsci_gbda_sub_functions |= tmp; @@ -865,7 +899,7 @@ static void swsci_setup(struct drm_device *dev) * must not call interfaces that are not specifically requested by the * bios. */ - if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { /* here, the bits already match sub-function codes */ opregion->swsci_sbcb_sub_functions |= tmp; requested_callbacks = true; @@ -876,7 +910,7 @@ static void swsci_setup(struct drm_device *dev) * the callback is _requested_. But we still can't call interfaces that * are not requested. 
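In the blind_set fallback above, each connector's DID becomes temp | (1 << 31) | display_type | i: bit 31 is ACPI_DEVICE_ID_SCHEME from the macros introduced earlier in this patch, the display type occupies bits 8-11, and the list position fills the low index bits. A worked composition for an internal digital panel at an illustrative index (the preserved temp bits are ignored here):

    #include <assert.h>
    #include <stdint.h>

    #define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4u << 8)
    #define ACPI_DEVICE_ID_SCHEME              (1u << 31)

    int main(void)
    {
        uint32_t index = 2; /* third output in the connector list */
        uint32_t did = ACPI_DEVICE_ID_SCHEME |
                       ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL | index;

        assert(did == 0x80000402u);
        return 0;
    }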
*/ - if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ u32 low = tmp & 0x7ff; u32 high = tmp & ~0xfff; /* bit 11 is reserved */ @@ -918,10 +952,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { { } }; -int intel_opregion_setup(struct drm_device *dev) +int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->drm.pdev; u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; @@ -933,7 +967,7 @@ int intel_opregion_setup(struct drm_device *dev) BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); - pci_read_config_dword(dev->pdev, ASLS, &asls); + pci_read_config_dword(pdev, ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); @@ -965,7 +999,7 @@ int intel_opregion_setup(struct drm_device *dev) if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; - swsci_setup(dev); + swsci_setup(dev_priv); } if (mboxes & MBOX_ASLE) { @@ -1014,12 +1048,12 @@ err_out: } int -intel_opregion_get_panel_type(struct drm_device *dev) +intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) { u32 panel_details; int ret; - ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); + ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) { DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", ret); @@ -1044,7 +1078,7 @@ intel_opregion_get_panel_type(struct drm_device *dev) * vswing instead. Low vswing results in some display flickers, so * let's simply ignore the OpRegion panel type on SKL for now. 
*/ - if (IS_SKYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv)) { DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index bd38e49f7334..3212d8806b5a 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -168,7 +168,7 @@ struct overlay_registers { }; struct intel_overlay { - struct drm_device *dev; + struct drm_i915_private *i915; struct intel_crtc *crtc; struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *old_vid_bo; @@ -190,15 +190,15 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_wc(dev_priv->ggtt.mappable, + overlay->flip_addr, + PAGE_SIZE); return regs; } @@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) static void intel_overlay_unmap_regs(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap(regs); } @@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; int ret; WARN_ON(overlay->active); - WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) @@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) intel_ring_emit(engine, flip_addr); intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - if (IS_I830(dev)) { + if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. 
*/ intel_ring_emit(engine, MI_NOOP); @@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev_priv->drm.struct_mutex); /* Only wait if there is actually an old frame to release to * guarantee forward progress. @@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format) } } -static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { mask = 0x1f; shift = 5; } else { @@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) ret <<= 1; ret -= 1; return ret << 2; @@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers __iomem *regs; bool scale_changed = false; - struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->drm.struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; } oconfig = OCONF_CC_OUT_8BIT; - if (IS_GEN4(overlay->dev)) + if (IS_GEN4(dev_priv)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; @@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, tmp_width = params->src_w; swidth = params->src_w; - swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); ostride = params->stride_Y; @@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int uv_vscale = uv_vsubsampling(params->format); u32 tmp_U, tmp_V; swidth |= (params->src_w/uv_hscale) << 16; - tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + tmp_U = calc_swidthsw(dev_priv, params->offset_U, params->src_w/uv_hscale); - tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + tmp_V = calc_swidthsw(dev_priv, params->offset_V, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; @@ -840,7 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; - intel_frontbuffer_flip(dev, + intel_frontbuffer_flip(&dev_priv->drm, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; @@ -852,12 +848,12 @@ out_unpin: int intel_overlay_switch_off(struct intel_overlay *overlay) { + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - struct drm_device *dev = overlay->dev; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->drm.struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -897,15 +893,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { @@ -948,7 +943,7 @@ static int check_overlay_scaling(struct put_image_params *rec) return 0; } -static int check_overlay_src(struct drm_device *dev, +static int check_overlay_src(struct drm_i915_private *dev_priv, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { @@ -959,7 +954,7 @@ static int check_overlay_src(struct drm_device *dev, u32 tmp; /* check src dimensions */ - if (IS_845G(dev) || IS_I830(dev)) { + if (IS_845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; @@ -1011,14 +1006,14 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - if (IS_I830(dev) || IS_845G(dev)) + if (IS_I830(dev_priv) || IS_845G(dev_priv)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_GEN4(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev_priv) && rec->stride_Y < 512) return -EINVAL; tmp = 
(rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? @@ -1063,13 +1058,13 @@ static int check_overlay_src(struct drm_device *dev, * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe(struct drm_device *dev) +static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ - if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) + if (INTEL_GEN(dev_priv) <= 3 && + (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) return -1; pfit_control = I915_READ(PFIT_CONTROL); @@ -1079,18 +1074,18 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) return -1; /* 965 can place panel fitter on either pipe */ - if (IS_GEN4(dev)) + if (IS_GEN4(dev_priv)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ return 1; } -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; @@ -1162,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, /* line too wide, i.e. one-line-mode */ if (mode->hdisplay > 1024 && - intel_panel_fitter_pipe(dev) == crtc->pipe) { + intel_panel_fitter_pipe(dev_priv) == crtc->pipe) { overlay->pfit_active = true; update_pfit_vscale_ratio(overlay); } else @@ -1196,7 +1191,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; } - ret = check_overlay_src(dev, put_image_rec, new_bo); + ret = check_overlay_src(dev_priv, put_image_rec, new_bo); if (ret != 0) goto out_unlock; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; @@ -1284,11 +1279,11 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs) return 0; } -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; struct overlay_registers __iomem *regs; int ret; @@ -1309,7 +1304,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1341,7 +1336,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) goto out_unlock; if (overlay->active) { @@ -1371,37 +1366,37 @@ out_unlock: return ret; } -void intel_setup_overlay(struct drm_device *dev) +void intel_setup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; int ret; - if (!HAS_OVERLAY(dev)) + if 
(!HAS_OVERLAY(dev_priv)) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); if (WARN_ON(dev_priv->overlay)) goto out_free; - overlay->dev = dev; + overlay->i915 = dev_priv; reg_bo = NULL; - if (!OVERLAY_NEEDS_PHYSICAL(dev)) - reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); - if (reg_bo == NULL) - reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) + reg_bo = i915_gem_object_create_stolen(&dev_priv->drm, + PAGE_SIZE); if (reg_bo == NULL) + reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE); + if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; - if (OVERLAY_NEEDS_PHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); @@ -1441,25 +1436,23 @@ void intel_setup_overlay(struct drm_device *dev) intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); DRM_INFO("initialized overlay support\n"); return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev)) + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) i915_gem_object_ggtt_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(®_bo->base); out_free: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); kfree(overlay); return; } -void intel_cleanup_overlay(struct drm_device *dev) +void intel_cleanup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->overlay) return; @@ -1482,18 +1475,17 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. 
*/ regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, + overlay->flip_addr); return regs; } @@ -1501,15 +1493,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap_atomic(regs); } - struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_device *dev) +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; @@ -1523,10 +1513,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; - else - error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); + error->base = overlay->flip_addr; regs = intel_overlay_map_regs_atomic(overlay); if (!regs) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index aba94099886b..96c65d77e886 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -377,7 +377,7 @@ out: enum drm_connector_status intel_panel_detect(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Assume that the BIOS does not lie through the OpRegion... */ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { @@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector) if (panel->backlight.combination_mode) { u8 lbpc; - pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc); + pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc); val *= lbpc; } @@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) lbpc = level * 0xfe / panel->backlight.max + 1; level /= lbpc; - pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc); + pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc); } if (IS_GEN4(dev_priv)) { @@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) * backlight. This will leave the backlight on unnecessarily when * another client is not activated. 
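In the backlight hunks above, combination mode splits brightness across two controls: the LBPC byte in PCI config space scales a legacy brightness, and the PWM duty cycle carries the rest; i9xx_set_backlight derives lbpc = level * 0xfe / max + 1 and divides the duty by it, and i9xx_get_backlight multiplies them back together. A standalone round trip, assuming (as the readback multiply suggests) a combined range that is the PWM range scaled by the 8-bit LBPC range:

    #include <assert.h>
    #include <stdint.h>

    static void split_combination_level(uint32_t level, uint32_t max,
                                        uint8_t *lbpc, uint32_t *duty)
    {
        *lbpc = level * 0xfe / max + 1; /* legacy brightness byte, >= 1 */
        *duty = level / *lbpc;          /* remaining PWM duty */
    }

    int main(void)
    {
        uint8_t lbpc;
        uint32_t duty;

        /* Half of an assumed 255 * 255 combined range. */
        split_combination_level(32512, 65025, &lbpc, &duty);
        assert(lbpc == 127 && duty == 256);
        assert(duty * lbpc == 32512); /* the readback multiply restores it */
        return 0;
    }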
*/ - if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { + if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); return; } @@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_level; int ret; @@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = { .get_brightness = intel_backlight_device_get_brightness, }; -static int intel_backlight_device_register(struct intel_connector *connector) +int intel_backlight_device_register(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; struct backlight_properties props; @@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector) return 0; } -static void intel_backlight_device_unregister(struct intel_connector *connector) +void intel_backlight_device_unregister(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; @@ -1225,14 +1225,6 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) panel->backlight.device = NULL; } } -#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ -static int intel_backlight_device_register(struct intel_connector *connector) -{ - return 0; -} -static void intel_backlight_device_unregister(struct intel_connector *connector) -{ -} #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* @@ -1324,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int clock; if (IS_G4X(dev_priv)) @@ -1724,6 +1716,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) container_of(panel, struct intel_connector, panel); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && + intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && + intel_dsi_dcs_init_backlight_funcs(connector) == 0) + return; + if (IS_BROXTON(dev_priv)) { panel->backlight.setup = bxt_setup_backlight; panel->backlight.enable = bxt_enable_backlight; @@ -1805,19 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel) drm_mode_destroy(intel_connector->base.dev, panel->downclock_mode); } - -void intel_backlight_register(struct drm_device *dev) -{ - struct intel_connector *connector; - - for_each_intel_connector(dev, connector) - intel_backlight_device_register(connector); -} - -void intel_backlight_unregister(struct drm_device *dev) -{ - struct intel_connector *connector; - - for_each_intel_connector(dev, connector) - intel_backlight_device_unregister(connector); -} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2863b92c9da6..f4f3fcc8b3be 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,6 +26,7 @@ */ #include <linux/cpufreq.h> +#include <drm/drm_plane_helper.h> #include "i915_drv.h" #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" @@ -82,7 +83,7 @@ static void 
gen9_init_clock_gating(struct drm_device *dev) static void bxt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen9_init_clock_gating(dev); @@ -108,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev) static void i915_pineview_get_mem_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; tmp = I915_READ(CLKCFG); @@ -147,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev) static void i915_ironlake_get_mem_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u16 ddrpll, csipll; ddrpll = I915_READ16(DDRMPLL1); @@ -318,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 val; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { @@ -374,7 +375,7 @@ static const int pessimal_latency_ns = 5000; static int vlv_get_fifo_size(struct drm_device *dev, enum pipe pipe, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int sprite0_start, sprite1_start, size; switch (pipe) { @@ -425,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev, static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -441,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) static int i830_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -458,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) static int i845_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -636,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) static void pineview_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; const struct cxsr_latency *latency; u32 reg; @@ -933,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate, static void vlv_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* all latencies in usec */ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; @@ -1324,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev, static void vlv_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; struct vlv_wm_values wm = {}; @@ -1380,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = 
crtc->dev; static const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int planea_wm, planeb_wm, cursora_wm, cursorb_wm; int plane_sr, cursor_sr; unsigned int enabled = 0; @@ -1437,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) static void i965_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; int srwm = 1; int cursor_sr = 16; @@ -1511,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) static void i9xx_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_watermark_params *wm_info; uint32_t fwater_lo; uint32_t fwater_hi; @@ -1641,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) static void i845_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; uint32_t fwater_lo; @@ -2040,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } static uint32_t -hsw_compute_linetime_wm(struct drm_device *dev, - struct intel_crtc_state *cstate) +hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) { - struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_atomic_state *intel_state = + to_intel_atomic_state(cstate->base.state); const struct drm_display_mode *adjusted_mode = &cstate->base.adjusted_mode; u32 linetime, ips_linetime; @@ -2052,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; - if (WARN_ON(dev_priv->cdclk_freq == 0)) + if (WARN_ON(intel_state->cdclk == 0)) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -2061,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, adjusted_mode->crtc_clock); ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - dev_priv->cdclk_freq); + intel_state->cdclk); return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | PIPE_WM_LINETIME_TIME(linetime); @@ -2069,7 +2070,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_GEN9(dev)) { uint32_t val; @@ -2174,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; /* WaDoubleCursorLP3Latency:ivb */ @@ -2235,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev, static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, uint16_t wm[5], uint16_t min) { - int level, max_level = 
ilk_wm_max_level(dev_priv->dev); + int level, max_level = ilk_wm_max_level(&dev_priv->drm); if (wm[0] >= min) return false; @@ -2249,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, static void snb_wm_latency_quirk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool changed; /* @@ -2271,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev) static void ilk_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_read_wm_latency(dev, dev_priv->wm.pri_latency); @@ -2293,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev) static void skl_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_read_wm_latency(dev, dev_priv->wm.skl_latency); intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); @@ -2329,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; - const struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane; struct intel_plane_state *pristate = NULL; struct intel_plane_state *sprstate = NULL; @@ -2337,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) int level, max_level = ilk_wm_max_level(dev), usable_level; struct ilk_wm_maximums max; - pipe_wm = &cstate->wm.optimal.ilk; + pipe_wm = &cstate->wm.ilk.optimal; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { struct intel_plane_state *ps; @@ -2380,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) pipe_wm->wm[0] = pipe_wm->raw_wm[0]; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); + pipe_wm->linetime = hsw_compute_linetime_wm(cstate); if (!ilk_validate_pipe_wm(dev, pipe_wm)) return -EINVAL; @@ -2419,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *newstate) { - struct intel_pipe_wm *a = &newstate->wm.intermediate; + struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; int level, max_level = ilk_wm_max_level(dev); @@ -2428,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * currently active watermarks to get values that are safe both before * and after the vblank. */ - *a = newstate->wm.optimal.ilk; + *a = newstate->wm.ilk.optimal; a->pipe_enabled |= b->pipe_enabled; a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; @@ -2457,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. 
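hsw_compute_linetime_wm() above converts horizontal total and clock into the PIPE_WM_LINETIME fields: crtc_clock is in kHz, and the * 1000 * 8 scaling yields the line time in eighth-microsecond units (the granularity the scaling implies for the register field). A worked instance with illustrative 1080p-style timings, using the kernel's rounding macro expanded for unsigned operands:

    #include <assert.h>
    #include <stdint.h>

    #define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

    int main(void)
    {
        uint32_t htotal = 2200;       /* total pixels per scanline */
        uint32_t crtc_clock = 148500; /* pixel clock in kHz (148.5 MHz) */

        /* 2200 / 148.5 MHz = 14.81 us per line = ~118.5 eighth-us. */
        uint32_t linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, crtc_clock);

        assert(linetime == 119);
        return 0;
    }

ips_linetime repeats the same division against the CDCLK frequency, which is why this patch reads intel_state->cdclk from the atomic state instead of a potentially stale dev_priv->cdclk_freq.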
*/ - if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) + if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0) newstate->wm.need_postvbl_update = false; return 0; @@ -2504,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev, const struct ilk_wm_maximums *max, struct intel_pipe_wm *merged) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev); int last_enabled_level = max_level; @@ -2564,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) /* The value we need to program into the WM_LPx latency field */ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) return 2 * level; @@ -2764,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct ilk_wm_values *previous = &dev_priv->wm.hw; unsigned int dirty; uint32_t val; @@ -2839,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, bool ilk_disable_lp_wm(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } @@ -2877,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane) static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, const struct intel_crtc_state *cstate, - const struct intel_wm_config *config, - struct skl_ddb_entry *alloc /* out */) + struct skl_ddb_entry *alloc, /* out */ + int *num_active /* out */) { + struct drm_atomic_state *state = cstate->base.state; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - struct drm_crtc *crtc; unsigned int pipe_size, ddb_size; int nth_active_pipe; + int pipe = to_intel_crtc(for_crtc)->pipe; - if (!cstate->base.active) { + if (WARN_ON(!state) || !cstate->base.active) { alloc->start = 0; alloc->end = 0; + *num_active = hweight32(dev_priv->active_crtcs); return; } + if (intel_state->active_pipe_changes) + *num_active = hweight32(intel_state->active_crtcs); + else + *num_active = hweight32(dev_priv->active_crtcs); + if (IS_BROXTON(dev)) ddb_size = BXT_DDB_SIZE; else @@ -2898,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, ddb_size -= 4; /* 4 blocks for bypass path allocation */ - nth_active_pipe = 0; - for_each_crtc(dev, crtc) { - if (!to_intel_crtc(crtc)->active) - continue; - - if (crtc == for_crtc) - break; - - nth_active_pipe++; + /* + * If the state doesn't change the active CRTC's, then there's + * no need to recalculate; the existing pipe allocation limits + * should remain unchanged. Note that we're safe from racing + * commits since any racing commit that changes the active CRTC + * list would need to grab _all_ crtc locks, including the one + * we currently hold. 
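The per-pipe DDB slice being computed here is an equal split among active pipes, with a pipe's ordinal recovered by popcounting the lower bits of the active-CRTC mask. A self-contained sketch of that arithmetic; the 896-block total is the usual SKL DDB size but is an assumption here (the hunk only shows the BXT constant and the 4-block bypass reservation), and the driver's hweight32()/drm_crtc_mask() become a plain compiler builtin:

#include <stdint.h>
#include <stdio.h>

/* Equal DDB split in the spirit of skl_ddb_get_pipe_allocation_limits():
 * a pipe's ordinal is a popcount of the lower bits of the active mask. */
static void ddb_slice(uint32_t active_mask, unsigned int pipe,
		      uint16_t ddb_size, uint16_t *start, uint16_t *end)
{
	unsigned int num_active = __builtin_popcount(active_mask);
	unsigned int nth = __builtin_popcount(active_mask & ((1u << pipe) - 1));

	*start = nth * ddb_size / num_active;
	*end = *start + ddb_size / num_active;
}

int main(void)
{
	uint16_t s, e;

	/* three active pipes, assumed 896-block DDB minus 4 bypass blocks */
	ddb_slice(0x7, 1, 896 - 4, &s, &e);
	printf("pipe B: [%u, %u)\n", (unsigned)s, (unsigned)e); /* [297, 594) */
	return 0;
}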
+ */ + if (!intel_state->active_pipe_changes) { + *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe]; + return; } - pipe_size = ddb_size / config->num_pipes_active; - alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; + nth_active_pipe = hweight32(intel_state->active_crtcs & + (drm_crtc_mask(for_crtc) - 1)); + pipe_size = ddb_size / hweight32(intel_state->active_crtcs); + alloc->start = nth_active_pipe * ddb_size / *num_active; alloc->end = alloc->start + pipe_size; } -static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) +static unsigned int skl_cursor_allocation(int num_active) { - if (config->num_pipes_active == 1) + if (num_active == 1) return 32; return 8; @@ -2960,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, } } +/* + * Determines the downscale amount of a plane for the purposes of watermark calculations. + * The bspec defines downscale amount as: + * + * """ + * Horizontal down scale amount = maximum[1, Horizontal source size / + * Horizontal destination size] + * Vertical down scale amount = maximum[1, Vertical source size / + * Vertical destination size] + * Total down scale amount = Horizontal down scale amount * + * Vertical down scale amount + * """ + * + * Return value is provided in 16.16 fixed point form to retain fractional part. + * Caller should take care of dividing & rounding off the value. + */ +static uint32_t +skl_plane_downscale_amount(const struct intel_plane_state *pstate) +{ + uint32_t downscale_h, downscale_w; + uint32_t src_w, src_h, dst_w, dst_h; + + if (WARN_ON(!pstate->visible)) + return DRM_PLANE_HELPER_NO_SCALING; + + /* n.b., src is 16.16 fixed point, dst is whole integer */ + src_w = drm_rect_width(&pstate->src); + src_h = drm_rect_height(&pstate->src); + dst_w = drm_rect_width(&pstate->dst); + dst_h = drm_rect_height(&pstate->dst); + if (intel_rotation_90_or_270(pstate->base.rotation)) + swap(dst_w, dst_h); + + downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + + /* Provide result in 16.16 fixed point */ + return (uint64_t)downscale_w * downscale_h >> 16; +} + static unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, const struct drm_plane_state *pstate, @@ -2967,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, { struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; + uint32_t down_scale_amount, data_rate; uint32_t width = 0, height = 0; + unsigned format = fb ? 
fb->pixel_format : DRM_FORMAT_XRGB8888; + + if (!intel_pstate->visible) + return 0; + if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + if (y && format != DRM_FORMAT_NV12) + return 0; width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; @@ -2976,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, swap(width, height); /* for planar format */ - if (fb->pixel_format == DRM_FORMAT_NV12) { + if (format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ - return width * height * - drm_format_plane_cpp(fb->pixel_format, 0); + data_rate = width * height * + drm_format_plane_cpp(format, 0); else /* uv-plane data rate */ - return (width / 2) * (height / 2) * - drm_format_plane_cpp(fb->pixel_format, 1); + data_rate = (width / 2) * (height / 2) * + drm_format_plane_cpp(format, 1); + } else { + /* for packed formats */ + data_rate = width * height * drm_format_plane_cpp(format, 0); } - /* for packed formats */ - return width * height * drm_format_plane_cpp(fb->pixel_format, 0); + down_scale_amount = skl_plane_downscale_amount(intel_pstate); + + return (uint64_t)data_rate * down_scale_amount >> 16; } /* @@ -2995,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, * 3 * 4096 * 8192 * 4 < 2^32 */ static unsigned int -skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) +skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_device *dev = intel_crtc->base.dev; + struct drm_crtc_state *cstate = &intel_cstate->base; + struct drm_atomic_state *state = cstate->state; + struct drm_crtc *crtc = cstate->crtc; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_plane *plane; const struct intel_plane *intel_plane; - unsigned int total_data_rate = 0; + struct drm_plane_state *pstate; + unsigned int rate, total_data_rate = 0; + int id; + int i; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - const struct drm_plane_state *pstate = intel_plane->base.state; + if (WARN_ON(!state)) + return 0; - if (pstate->fb == NULL) - continue; + /* Calculate and cache data rate for each plane */ + for_each_plane_in_state(state, plane, pstate, i) { + id = skl_wm_plane_id(to_intel_plane(plane)); + intel_plane = to_intel_plane(plane); - if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) + if (intel_plane->pipe != intel_crtc->pipe) continue; /* packed/uv */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 0); + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 0); + intel_cstate->wm.skl.plane_data_rate[id] = rate; + + /* y-plane */ + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 1); + intel_cstate->wm.skl.plane_y_data_rate[id] = rate; + } + + /* Calculate CRTC's total data rate from cached values */ + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + int id = skl_wm_plane_id(intel_plane); - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) - /* y-plane */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 1); + /* packed/uv */ + total_data_rate += intel_cstate->wm.skl.plane_data_rate[id]; + total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; } + WARN_ON(cstate->plane_mask && total_data_rate == 0); + return total_data_rate; } -static void +static uint16_t +skl_ddb_min_alloc(const struct drm_plane_state *pstate, + const int y) +{ + 
struct drm_framebuffer *fb = pstate->fb; + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); + uint32_t src_w, src_h; + uint32_t min_scanlines = 8; + uint8_t plane_bpp; + + if (WARN_ON(!fb)) + return 0; + + /* For packed formats, no y-plane, return 0 */ + if (y && fb->pixel_format != DRM_FORMAT_NV12) + return 0; + + /* For Non Y-tile return 8-blocks */ + if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED && + fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED) + return 8; + + src_w = drm_rect_width(&intel_pstate->src) >> 16; + src_h = drm_rect_height(&intel_pstate->src) >> 16; + + if (intel_rotation_90_or_270(pstate->rotation)) + swap(src_w, src_h); + + /* Halve UV plane width and height for NV12 */ + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) { + src_w /= 2; + src_h /= 2; + } + + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1); + else + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); + + if (intel_rotation_90_or_270(pstate->rotation)) { + switch (plane_bpp) { + case 1: + min_scanlines = 32; + break; + case 2: + min_scanlines = 16; + break; + case 4: + min_scanlines = 8; + break; + case 8: + min_scanlines = 4; + break; + default: + WARN(1, "Unsupported pixel depth %u for rotation", + plane_bpp); + min_scanlines = 32; + } + } + + return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; +} + +static int skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb /* out */) { + struct drm_atomic_state *state = cstate->base.state; struct drm_crtc *crtc = cstate->base.crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_wm_config *config = &dev_priv->wm.config; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane; + struct drm_plane *plane; + struct drm_plane_state *pstate; enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; uint16_t alloc_size, start, cursor_blocks; - uint16_t minimum[I915_MAX_PLANES]; - uint16_t y_minimum[I915_MAX_PLANES]; + uint16_t *minimum = cstate->wm.skl.minimum_blocks; + uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks; unsigned int total_data_rate; + int num_active; + int id, i; - skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); + if (WARN_ON(!state)) + return 0; + + if (!cstate->base.active) { + ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; + memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); + memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); + return 0; + } + + skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) { memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); - memset(&ddb->plane[pipe][PLANE_CURSOR], 0, - sizeof(ddb->plane[pipe][PLANE_CURSOR])); - return; + return 0; } - cursor_blocks = skl_cursor_allocation(config); + cursor_blocks = skl_cursor_allocation(num_active); ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; alloc_size -= cursor_blocks; - alloc->end -= cursor_blocks; /* 1. 
Allocate the minimum required blocks for each active plane */ - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - int id = skl_wm_plane_id(intel_plane); + for_each_plane_in_state(state, plane, pstate, i) { + intel_plane = to_intel_plane(plane); + id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(plane->state)->visible) + if (intel_plane->pipe != pipe) continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) + if (!to_intel_plane_state(pstate)->visible) { + minimum[id] = 0; + y_minimum[id] = 0; + continue; + } + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + minimum[id] = 0; + y_minimum[id] = 0; continue; + } + + minimum[id] = skl_ddb_min_alloc(pstate, 0); + y_minimum[id] = skl_ddb_min_alloc(pstate, 1); + } - minimum[id] = 8; - alloc_size -= minimum[id]; - y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; - alloc_size -= y_minimum[id]; + for (i = 0; i < PLANE_CURSOR; i++) { + alloc_size -= minimum[i]; + alloc_size -= y_minimum[i]; } /* @@ -3084,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * FIXME: we may not allocate every single block here. */ total_data_rate = skl_get_total_relative_data_rate(cstate); + if (total_data_rate == 0) + return 0; start = alloc->start; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_plane_state *pstate = intel_plane->base.state; unsigned int data_rate, y_data_rate; uint16_t plane_blocks, y_plane_blocks = 0; int id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(pstate)->visible) - continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; - - data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); + data_rate = cstate->wm.skl.plane_data_rate[id]; /* * allocation for (packed formats) or (uv-plane part of planar format): @@ -3109,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, plane_blocks += div_u64((uint64_t)alloc_size * data_rate, total_data_rate); - ddb->plane[pipe][id].start = start; - ddb->plane[pipe][id].end = start + plane_blocks; + /* Leave disabled planes at (0,0) */ + if (data_rate) { + ddb->plane[pipe][id].start = start; + ddb->plane[pipe][id].end = start + plane_blocks; + } start += plane_blocks; /* * allocation for y_plane part of planar format: */ - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { - y_data_rate = skl_plane_relative_data_rate(cstate, - pstate, - 1); - y_plane_blocks = y_minimum[id]; - y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, - total_data_rate); + y_data_rate = cstate->wm.skl.plane_y_data_rate[id]; + + y_plane_blocks = y_minimum[id]; + y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, + total_data_rate); + if (y_data_rate) { ddb->y_plane[pipe][id].start = start; ddb->y_plane[pipe][id].end = start + y_plane_blocks; - - start += y_plane_blocks; } + start += y_plane_blocks; } + return 0; } static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) @@ -3189,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, return ret; } -static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, - const struct intel_crtc *intel_crtc) +static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, + struct intel_plane_state *pstate) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const struct
skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; + uint64_t adjusted_pixel_rate; + uint64_t downscale_amount; + uint64_t pixel_rate; + + /* Shouldn't reach here on disabled planes... */ + if (WARN_ON(!pstate->visible)) + return 0; /* - * If ddb allocation of pipes changed, it may require recalculation of - * watermarks + * Adjusted plane pixel rate is just the pipe's adjusted pixel rate + * with additional adjustments for plane-specific scaling. */ - if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) - return true; + adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); + downscale_amount = skl_plane_downscale_amount(pstate); + + pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; + WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); - return false; + return pixel_rate; } -static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, - struct intel_plane *intel_plane, - uint16_t ddb_allocation, - int level, - uint16_t *out_blocks, /* out */ - uint8_t *out_lines /* out */) +static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, + struct intel_crtc_state *cstate, + struct intel_plane_state *intel_pstate, + uint16_t ddb_allocation, + int level, + uint16_t *out_blocks, /* out */ + uint8_t *out_lines, /* out */ + bool *enabled /* out */) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - struct intel_plane_state *intel_pstate = - to_intel_plane_state(plane->state); + struct drm_plane_state *pstate = &intel_pstate->base; + struct drm_framebuffer *fb = pstate->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; uint32_t method1, method2; uint32_t plane_bytes_per_line, plane_blocks_per_line; @@ -3225,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t selected_result; uint8_t cpp; uint32_t width = 0, height = 0; + uint32_t plane_pixel_rate; - if (latency == 0 || !cstate->base.active || !intel_pstate->visible) - return false; + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) { + *enabled = false; + return 0; + } width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; - if (intel_rotation_90_or_270(plane->state->rotation)) + if (intel_rotation_90_or_270(pstate->rotation)) swap(width, height); cpp = drm_format_plane_cpp(fb->pixel_format, 0); - method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), - cpp, latency); - method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), + plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); + + method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); + method2 = skl_wm_method2(plane_pixel_rate, cstate->base.adjusted_mode.crtc_htotal, width, cpp, @@ -3252,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { uint32_t min_scanlines = 4; uint32_t y_tile_minimum; - if (intel_rotation_90_or_270(plane->state->rotation)) { + if (intel_rotation_90_or_270(pstate->rotation)) { int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 
drm_format_plane_cpp(fb->pixel_format, 1) : drm_format_plane_cpp(fb->pixel_format, 0); @@ -3288,40 +3464,100 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_blocks++; } - if (res_blocks >= ddb_allocation || res_lines > 31) - return false; + if (res_blocks >= ddb_allocation || res_lines > 31) { + *enabled = false; + + /* + * If there are no valid level 0 watermarks, then we can't + * support this display configuration. + */ + if (level) { + return 0; + } else { + DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); + DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", + to_intel_crtc(cstate->base.crtc)->pipe, + skl_wm_plane_id(to_intel_plane(pstate->plane)), + res_blocks, ddb_allocation, res_lines); + + return -EINVAL; + } + } *out_blocks = res_blocks; *out_lines = res_lines; + *enabled = true; - return true; + return 0; } -static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb, - struct intel_crtc_state *cstate, - int level, - struct skl_wm_level *result) +static int +skl_compute_wm_level(const struct drm_i915_private *dev_priv, + struct skl_ddb_allocation *ddb, + struct intel_crtc_state *cstate, + int level, + struct skl_wm_level *result) { - struct drm_device *dev = dev_priv->dev; + struct drm_atomic_state *state = cstate->base.state; struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_plane *plane; struct intel_plane *intel_plane; + struct intel_plane_state *intel_pstate; uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; + int ret; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + /* + * We'll only calculate watermarks for planes that are actually + * enabled, so make sure all other planes are set as disabled. + */ + memset(result, 0, sizeof(*result)); + + for_each_intel_plane_mask(&dev_priv->drm, + intel_plane, + cstate->base.plane_mask) { int i = skl_wm_plane_id(intel_plane); + plane = &intel_plane->base; + intel_pstate = NULL; + if (state) + intel_pstate = + intel_atomic_get_existing_plane_state(state, + intel_plane); + + /* + * Note: If we start supporting multiple pending atomic commits + * against the same planes/CRTC's in the future, plane->state + * will no longer be the correct pre-state to use for the + * calculations here and we'll need to change where we get the + * 'unchanged' plane data from. + * + * For now this is fine because we only allow one queued commit + * against a CRTC. Even if the plane isn't modified by this + * transaction and we don't have a plane lock, we still have + * the CRTC's lock, so we know that no other transactions are + * racing with us to update it. 
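Several of these hunks work in 16.16 fixed point: skl_plane_downscale_amount() multiplies the per-axis max(1, src/dst) ratios, and skl_adjusted_plane_pixel_rate() scales the pipe pixel rate by the result. A standalone rendering of that arithmetic with invented sizes:

#include <stdint.h>
#include <stdio.h>

#define FP_ONE (1u << 16)	/* 1.0 in 16.16, aka DRM_PLANE_HELPER_NO_SCALING */

/* max(1, src/dst) per axis, multiplied, as in skl_plane_downscale_amount().
 * src sizes are 16.16 (as in the plane state), dst sizes whole pixels. */
static uint32_t downscale_16_16(uint32_t src_w_fp, uint32_t src_h_fp,
				uint32_t dst_w, uint32_t dst_h)
{
	uint32_t dw = src_w_fp / dst_w;
	uint32_t dh = src_h_fp / dst_h;

	if (dw < FP_ONE) dw = FP_ONE;
	if (dh < FP_ONE) dh = FP_ONE;

	return (uint64_t)dw * dh >> 16;
}

int main(void)
{
	/* 3840x2160 source scanned out at 1920x1080: 2x per axis -> 4.0 */
	uint32_t d = downscale_16_16(3840u << 16, 2160u << 16, 1920, 1080);
	uint64_t rate = (uint64_t)148500 * d >> 16; /* adjusted rate, kHz */

	printf("downscale %u.%04u, rate %llu kHz\n", d >> 16,
	       (d & 0xffff) * 10000u / 65536u, (unsigned long long)rate);
	return 0;
}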
+ */ + if (!intel_pstate) + intel_pstate = to_intel_plane_state(plane->state); + + WARN_ON(!intel_pstate->base.fb); + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); - result->plane_en[i] = skl_compute_plane_wm(dev_priv, - cstate, - intel_plane, - ddb_blocks, - level, - &result->plane_res_b[i], - &result->plane_res_l[i]); + ret = skl_compute_plane_wm(dev_priv, + cstate, + intel_pstate, + ddb_blocks, + level, + &result->plane_res_b[i], + &result->plane_res_l[i], + &result->plane_en[i]); + if (ret) + return ret; } + + return 0; } static uint32_t @@ -3355,21 +3591,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, } } -static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, - struct skl_ddb_allocation *ddb, - struct skl_pipe_wm *pipe_wm) +static int skl_build_pipe_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; - const struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev); + int ret; for (level = 0; level <= max_level; level++) { - skl_compute_wm_level(dev_priv, ddb, cstate, - level, &pipe_wm->wm[level]); + ret = skl_compute_wm_level(dev_priv, ddb, cstate, + level, &pipe_wm->wm[level]); + if (ret) + return ret; } pipe_wm->linetime = skl_compute_linetime_wm(cstate); skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); + + return 0; } static void skl_compute_wm_results(struct drm_device *dev, @@ -3442,14 +3683,16 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, static void skl_write_wm_values(struct drm_i915_private *dev_priv, const struct skl_wm_values *new) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) { int i, level, max_level = ilk_wm_max_level(dev); enum pipe pipe = crtc->pipe; - if (!new->dirty[pipe]) + if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0) + continue; + if (!crtc->active) continue; I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); @@ -3537,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old, static void skl_flush_wm_values(struct drm_i915_private *dev_priv, struct skl_wm_values *new_values) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct skl_ddb_allocation *cur_ddb, *new_ddb; bool reallocated[I915_MAX_PIPES] = {}; struct intel_crtc *crtc; @@ -3616,116 +3859,182 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, } } -static bool skl_update_pipe_wm(struct drm_crtc *crtc, - struct skl_ddb_allocation *ddb, /* out */ - struct skl_pipe_wm *pipe_wm /* out */) +static int skl_update_pipe_wm(struct drm_crtc_state *cstate, + struct skl_ddb_allocation *ddb, /* out */ + struct skl_pipe_wm *pipe_wm, /* out */ + bool *changed /* out */) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc); + struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); + int ret; - skl_allocate_pipe_ddb(cstate, ddb); - skl_compute_pipe_wm(cstate, ddb, pipe_wm); + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (ret) + return ret; if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) - return false; + *changed = false; + else + *changed = true; - intel_crtc->wm.active.skl = 
*pipe_wm; + return 0; +} - return true; +static uint32_t +pipes_modified(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + uint32_t i, ret = 0; + + for_each_crtc_in_state(state, crtc, cstate, i) + ret |= drm_crtc_mask(crtc); + + return ret; } -static void skl_update_other_pipe_wm(struct drm_device *dev, - struct drm_crtc *crtc, - struct skl_wm_values *r) +static int +skl_compute_ddb(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *intel_crtc; - struct intel_crtc *this_crtc = to_intel_crtc(crtc); + struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; + uint32_t realloc_pipes = pipes_modified(state); + int ret; /* - * If the WM update hasn't changed the allocation for this_crtc (the - * crtc we are currently computing the new WM values for), other - * enabled crtcs will keep the same allocation and we don't need to - * recompute anything for them. + * If this is our first atomic update following hardware readout, + * we can't trust the DDB that the BIOS programmed for us. Let's + * pretend that all pipes switched active status so that we'll + * ensure a full DDB recompute. */ - if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) - return; + if (dev_priv->wm.distrust_bios_wm) + intel_state->active_pipe_changes = ~0; /* - * Otherwise, because of this_crtc being freshly enabled/disabled, the - * other active pipes need new DDB allocation and WM values. + * If the modeset changes which CRTC's are active, we need to + * recompute the DDB allocation for *all* active pipes, even + * those that weren't otherwise being modified in any way by this + * atomic commit. Due to the shrinking of the per-pipe allocations + * when new active CRTC's are added, it's possible for a pipe that + * we were already using and aren't changing at all here to suddenly + * become invalid if its DDB needs exceeds its new allocation. + * + * Note that if we wind up doing a full DDB recompute, we can't let + * any other display updates race with this transaction, so we need + * to grab the lock on *all* CRTC's. */ - for_each_intel_crtc(dev, intel_crtc) { - struct skl_pipe_wm pipe_wm = {}; - bool wm_changed; - - if (this_crtc->pipe == intel_crtc->pipe) - continue; + if (intel_state->active_pipe_changes) { + realloc_pipes = ~0; + intel_state->wm_results.dirty_pipes = ~0; + } - if (!intel_crtc->active) - continue; + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { + struct intel_crtc_state *cstate; - wm_changed = skl_update_pipe_wm(&intel_crtc->base, - &r->ddb, &pipe_wm); + cstate = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(cstate)) + return PTR_ERR(cstate); - /* - * If we end up re-computing the other pipe WM values, it's - * because it was really needed, so we expect the WM values to - * be different. 
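Also worth pinning down is the skl_ddb_min_alloc() arithmetic from the earlier hunk: for 90/270 rotation on Y-tiled surfaces the minimum scanline count depends on plane bpp, and the result is DIV_ROUND_UP(4 * width * bpp, 512) * min_scanlines / 4 + 3 blocks. A standalone recomputation, values invented:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the rotated Y-tile branch of skl_ddb_min_alloc() above. */
static unsigned int min_alloc_rotated(uint32_t src_w, uint32_t bpp)
{
	uint32_t min_scanlines;

	switch (bpp) {
	case 1: min_scanlines = 32; break;
	case 2: min_scanlines = 16; break;
	case 4: min_scanlines = 8; break;
	case 8: min_scanlines = 4; break;
	default: min_scanlines = 32; break;	/* unsupported depth */
	}

	return (4 * src_w * bpp + 511) / 512 * min_scanlines / 4 + 3;
}

int main(void)
{
	/* 1920-wide 32bpp plane: DIV_ROUND_UP(30720, 512) = 60,
	 * then 60 * 8 / 4 + 3 = 123 blocks */
	printf("%u\n", min_alloc_rotated(1920, 4));
	return 0;
}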
- */ - WARN_ON(!wm_changed); - - skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); - r->dirty[intel_crtc->pipe] = true; + ret = skl_allocate_pipe_ddb(cstate, ddb); + if (ret) + return ret; } + + return 0; } -static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) +static int +skl_compute_wm(struct drm_atomic_state *state) { - watermarks->wm_linetime[pipe] = 0; - memset(watermarks->plane[pipe], 0, - sizeof(uint32_t) * 8 * I915_MAX_PLANES); - memset(watermarks->plane_trans[pipe], - 0, sizeof(uint32_t) * I915_MAX_PLANES); - watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct skl_wm_values *results = &intel_state->wm_results; + struct skl_pipe_wm *pipe_wm; + bool changed = false; + int ret, i; + + /* + * If this transaction isn't actually touching any CRTC's, don't + * bother with watermark calculation. Note that if we pass this + * test, we're guaranteed to hold at least one CRTC state mutex, + * which means we can safely use values like dev_priv->active_crtcs + * since any racing commits that want to update them would need to + * hold _all_ CRTC state mutexes. + */ + for_each_crtc_in_state(state, crtc, cstate, i) + changed = true; + if (!changed) + return 0; + + /* Clear all dirty flags */ + results->dirty_pipes = 0; + + ret = skl_compute_ddb(state); + if (ret) + return ret; + + /* + * Calculate WM's for all pipes that are part of this transaction. + * Note that the DDB allocation above may have added more CRTC's that + * weren't otherwise being modified (and set bits in dirty_pipes) if + * pipe allocations had to change. + * + * FIXME: Now that we're doing this in the atomic check phase, we + * should allow skl_update_pipe_wm() to return failure in cases where + * no suitable watermark values can be found. 
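The skl_allocate_pipe_ddb() loop earlier in this section hands each plane its minimum plus a share of the leftover space proportional to its cached data rate. A sketch of that split with invented plane sizes (div_u64() becomes plain 64-bit division):

#include <stdint.h>
#include <stdio.h>

/* plane_blocks = minimum + alloc_size * rate / total_rate, as in the
 * skl_allocate_pipe_ddb() loop above. */
static unsigned int plane_blocks(unsigned int minimum, unsigned int alloc_size,
				 uint64_t rate, uint64_t total_rate)
{
	return minimum + (unsigned int)(alloc_size * rate / total_rate);
}

int main(void)
{
	/* 500 blocks left after minimums; primary streams 4x the sprite */
	uint64_t primary = 1920ull * 1080 * 4, sprite = 960ull * 540 * 4;
	uint64_t total = primary + sprite;

	printf("primary %u, sprite %u\n",
	       plane_blocks(32, 500, primary, total),	/* 32 + 400 = 432 */
	       plane_blocks(8, 500, sprite, total));	/* 8 + 100 = 108 */
	return 0;
}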
+ */ + for_each_crtc_in_state(state, crtc, cstate, i) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *intel_cstate = + to_intel_crtc_state(cstate); + + pipe_wm = &intel_cstate->wm.skl.optimal; + ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm, + &changed); + if (ret) + return ret; - /* Clear ddb entries for pipe */ - memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); - memset(&watermarks->ddb.plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.y_plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, - sizeof(struct skl_ddb_entry)); + if (changed) + results->dirty_pipes |= drm_crtc_mask(crtc); + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) + /* This pipe's WM's did not change */ + continue; + + intel_cstate->update_wm_pre = true; + skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc); + } + + return 0; } static void skl_update_wm(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_wm_values *results = &dev_priv->wm.skl_results; struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - - /* Clear all dirty flags */ - memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES); - - skl_clear_wm(results, intel_crtc->pipe); - - if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) return; - skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); - results->dirty[intel_crtc->pipe] = true; + intel_crtc->wm.active.skl = *pipe_wm; + + mutex_lock(&dev_priv->wm.wm_mutex); - skl_update_other_pipe_wm(dev, crtc, results); skl_write_wm_values(dev_priv, results); skl_flush_wm_values(dev_priv, results); /* store the new configuration */ dev_priv->wm.skl_hw = *results; + + mutex_unlock(&dev_priv->wm.wm_mutex); } static void ilk_compute_wm_config(struct drm_device *dev, @@ -3748,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev, static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; struct intel_wm_config config = {}; @@ -3785,7 +4094,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate) struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = cstate->wm.intermediate; + intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -3797,7 +4106,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) mutex_lock(&dev_priv->wm.wm_mutex); if (cstate->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; + intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; ilk_program_watermarks(dev_priv); } mutex_unlock(&dev_priv->wm.wm_mutex); @@ -3850,11 +4159,11 @@ static void skl_pipe_wm_active_state(uint32_t val, static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_wm_values *hw = &dev_priv->wm.skl_hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *active = &cstate->wm.optimal.skl; + struct skl_pipe_wm *active = &cstate->wm.skl.optimal; enum pipe pipe = intel_crtc->pipe; int level, i, max_level; uint32_t temp; @@ -3877,7 +4186,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (!intel_crtc->active) return; - hw->dirty[pipe] = true; + hw->dirty_pipes |= drm_crtc_mask(crtc); active->linetime = hw->wm_linetime[pipe]; @@ -3904,23 +4213,31 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) void skl_wm_get_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; struct drm_crtc *crtc; skl_ddb_get_hw_state(dev_priv, ddb); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) skl_pipe_wm_get_hw_state(crtc); + + if (dev_priv->active_crtcs) { + /* Fully recompute DDB on first atomic commit */ + dev_priv->wm.distrust_bios_wm = true; + } else { + /* Easy/common case; just sanitize DDB now if everything off */ + memset(ddb, 0, sizeof(*ddb)); + } } static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; + struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; enum pipe pipe = intel_crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, @@ -4120,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) void ilk_wm_get_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; @@ -4182,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev) */ void intel_update_watermarks(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); if (dev_priv->display.update_wm) dev_priv->display.update_wm(crtc); @@ -4197,9 +4514,8 @@ DEFINE_SPINLOCK(mchdev_lock); * mchdev_lock. 
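The gen6_set_rps_thresholds() hunk a little further on only reflows line breaks, but its LOW/BETWEEN/HIGH ladder is the heart of RPS reclocking: promotion requires the requested frequency to exceed the current one, demotion requires falling below platform pivots. A sketch of that state machine; the pivots are passed in rather than read from dev_priv, and the enum names are local to the sketch:

#include <stdio.h>

enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

/* The promotion/demotion conditions from gen6_set_rps_thresholds(). */
static enum rps_power next_power(enum rps_power cur, unsigned int val,
				 unsigned int cur_freq, unsigned int efficient,
				 unsigned int rp0, unsigned int rp1)
{
	switch (cur) {
	case LOW_POWER:
		if (val > efficient + 1 && val > cur_freq)
			return BETWEEN;
		break;
	case BETWEEN:
		if (val <= efficient && val < cur_freq)
			return LOW_POWER;
		else if (val >= rp0 && val > cur_freq)
			return HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < (rp1 + rp0) / 2 && val < cur_freq)
			return BETWEEN;
		break;
	}
	return cur;
}

int main(void)
{
	/* efficient 11, rp0 22, rp1 16: requesting 23 from BETWEEN at 18 */
	printf("%d\n", (int)next_power(BETWEEN, 23, 18, 11, 22, 16)); /* 2 = HIGH */
	return 0;
}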
*/ static struct drm_i915_private *i915_mch_dev; -bool ironlake_set_drps(struct drm_device *dev, u8 val) +bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; assert_spin_locked(&mchdev_lock); @@ -4221,9 +4537,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) return true; } -static void ironlake_enable_drps(struct drm_device *dev) +static void ironlake_enable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl; u8 fmax, fmin, fstart, vstart; @@ -4280,7 +4595,7 @@ static void ironlake_enable_drps(struct drm_device *dev) DRM_ERROR("stuck trying to change perf mode\n"); mdelay(1); - ironlake_set_drps(dev, fstart); + ironlake_set_drps(dev_priv, fstart); dev_priv->ips.last_count1 = I915_READ(DMIEC) + I915_READ(DDREC) + I915_READ(CSIEC); @@ -4291,9 +4606,8 @@ static void ironlake_enable_drps(struct drm_device *dev) spin_unlock_irq(&mchdev_lock); } -static void ironlake_disable_drps(struct drm_device *dev) +static void ironlake_disable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; spin_lock_irq(&mchdev_lock); @@ -4308,7 +4622,7 @@ static void ironlake_disable_drps(struct drm_device *dev) I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->ips.fstart); + ironlake_set_drps(dev_priv, dev_priv->ips.fstart); mdelay(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); @@ -4354,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) new_power = dev_priv->rps.power; switch (dev_priv->rps.power) { case LOW_POWER: - if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) + if (val > dev_priv->rps.efficient_freq + 1 && + val > dev_priv->rps.cur_freq) new_power = BETWEEN; break; case BETWEEN: - if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) + if (val <= dev_priv->rps.efficient_freq && + val < dev_priv->rps.cur_freq) new_power = LOW_POWER; - else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) + else if (val >= dev_priv->rps.rp0_freq && + val > dev_priv->rps.cur_freq) new_power = HIGH_POWER; break; case HIGH_POWER: - if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) + if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && + val < dev_priv->rps.cur_freq) new_power = BETWEEN; break; } @@ -4412,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) } I915_WRITE(GEN6_RP_UP_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_up)); + GT_INTERVAL_FROM_US(dev_priv, ei_up)); I915_WRITE(GEN6_RP_UP_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); + GT_INTERVAL_FROM_US(dev_priv, + ei_up * threshold_up / 100)); I915_WRITE(GEN6_RP_DOWN_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_down)); + GT_INTERVAL_FROM_US(dev_priv, ei_down)); I915_WRITE(GEN6_RP_DOWN_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); + GT_INTERVAL_FROM_US(dev_priv, + ei_down * threshold_down / 100)); - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + 
GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); dev_priv->rps.power = new_power; dev_priv->rps.up_threshold = threshold_up; @@ -4452,12 +4772,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) /* gen6_set_rps is called to update the frequency request, but should also be * called when the range (min_delay and max_delay) is modified so that we can * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ -static void gen6_set_rps(struct drm_device *dev, u8 val) +static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) return; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4470,10 +4788,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_GEN9(dev)) + if (IS_GEN9(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val)); else @@ -4495,15 +4813,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); } -static void valleyview_set_rps(struct drm_device *dev, u8 val) +static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(val > dev_priv->rps.max_freq); WARN_ON(val < dev_priv->rps.min_freq); - if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), + if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), "Odd GPU freq value\n")) val &= ~1; @@ -4536,7 +4852,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) /* Wake up the media well, as that takes a lot less * power than the Render well. */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); - valleyview_set_rps(dev_priv->dev, val); + valleyview_set_rps(dev_priv, val); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); } @@ -4548,20 +4864,33 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) gen6_rps_reset_ei(dev_priv); I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); + + gen6_enable_rps_interrupts(dev_priv); + + /* Ensure we start at the user's desired frequency */ + intel_set_rps(dev_priv, + clamp(dev_priv->rps.cur_freq, + dev_priv->rps.min_freq_softlimit, + dev_priv->rps.max_freq_softlimit)); } mutex_unlock(&dev_priv->rps.hw_lock); } void gen6_rps_idle(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + /* Flush our bottom-half so that it does not race with us + * setting the idle frequency and so that it is bounded by + * our rpm wakeref. And then disable the interrupts to stop any + * further RPS reclocking whilst we are asleep. + */ + gen6_disable_rps_interrupts(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); } @@ -4580,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, /* This is intentionally racy!
We peek at the state here, then * validate inside the RPS worker. */ - if (!(dev_priv->mm.busy && + if (!(dev_priv->gt.awake && dev_priv->rps.enabled && dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) return; @@ -4596,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->rps.interrupts_enabled) { dev_priv->rps.client_boost = true; - queue_work(dev_priv->wq, &dev_priv->rps.work); + schedule_work(&dev_priv->rps.work); } spin_unlock_irq(&dev_priv->irq_lock); @@ -4609,49 +4938,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->rps.client_lock); } -void intel_set_rps(struct drm_device *dev, u8 val) +void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - valleyview_set_rps(dev, val); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + valleyview_set_rps(dev_priv, val); else - gen6_set_rps(dev, val); + gen6_set_rps(dev_priv, val); } -static void gen9_disable_rc6(struct drm_device *dev) +static void gen9_disable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN9_PG_ENABLE, 0); } -static void gen9_disable_rps(struct drm_device *dev) +static void gen9_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 0); } -static void cherryview_disable_rps(struct drm_device *dev) +static void cherryview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_device *dev) +static void valleyview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* we're doing forcewake before Disabling RC6, * This is what the BIOS expects when going into suspend */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4661,34 +4980,45 @@ static void valleyview_disable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void intel_print_rc6_info(struct drm_device *dev, u32 mode) +static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) mode = GEN6_RC_CTL_RC6_ENABLE; else mode = 0; } - if (HAS_RC6p(dev)) - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", - onoff(mode & GEN6_RC_CTL_RC6_ENABLE), - onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), - onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); + if (HAS_RC6p(dev_priv)) + DRM_DEBUG_DRIVER("Enabling RC6 states: " + "RC6 %s RC6p %s RC6pp %s\n", + onoff(mode & GEN6_RC_CTL_RC6_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); else - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", - onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); + DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n", + onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } -static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +static bool
bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; + u32 rc_ctl; + int rc_sw_target; + + rc_ctl = I915_READ(GEN6_RC_CONTROL); + rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> + RC_SW_TARGET_STATE_SHIFT; + DRM_DEBUG_DRIVER("BIOS enabled RC states: " + "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", + onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + rc_sw_target); if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { - DRM_DEBUG_KMS("RC6 Base location not set properly.\n"); + DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); enable_rc6 = false; } @@ -4700,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + ggtt->stolen_reserved_size))) { - DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); + DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); enable_rc6 = false; } @@ -4708,31 +5038,40 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { - DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n"); + DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!I915_READ(GEN8_PUSHBUS_CONTROL) || + !I915_READ(GEN8_PUSHBUS_ENABLE) || + !I915_READ(GEN8_PUSHBUS_SHIFT)) { + DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + enable_rc6 = false; + } + + if (!I915_READ(GEN6_GFXPAUSE)) { + DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); enable_rc6 = false; } - if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_HW_ENABLE)) && - ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) || - !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) { - DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n"); + if (!I915_READ(GEN8_MISC_CTRL0)) { + DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); enable_rc6 = false; } return enable_rc6; } -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. 
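bxt_check_bios_rc6_setup() is a series of sanity checks on the state the BIOS left behind; the RC6 context check in particular requires the context page to sit wholly inside the reserved stolen region. A minimal standalone version of just that range test, with invented addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* The RC6 context page must sit wholly inside the reserved stolen range,
 * mirroring the check in bxt_check_bios_rc6_setup() above. */
static bool rc6_ctx_in_reserved(uint64_t ctx_base, uint64_t resv_base,
				uint64_t resv_size)
{
	return ctx_base >= resv_base &&
	       ctx_base + PAGE_SIZE <= resv_base + resv_size;
}

int main(void)
{
	/* invented addresses: a 1 MiB reservation holding the context */
	printf("%d\n", rc6_ctx_in_reserved(0x7f800000u, 0x7f800000u, 1u << 20));
	return 0;
}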
*/ - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return 0; if (!enable_rc6) return 0; - if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { DRM_INFO("RC6 disabled by BIOS\n"); return 0; } @@ -4741,33 +5080,28 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) if (enable_rc6 >= 0) { int mask; - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE; else mask = INTEL_RC6_ENABLE; if ((enable_rc6 & mask) != enable_rc6) - DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", - enable_rc6 & mask, enable_rc6, mask); + DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d " + "(requested %d, valid %d)\n", + enable_rc6 & mask, enable_rc6, mask); return enable_rc6 & mask; } - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); return INTEL_RC6_ENABLE; } -int intel_enable_rc6(const struct drm_device *dev) +static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { - return i915.enable_rc6; -} - -static void gen6_init_rps_frequencies(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t rp_state_cap; u32 ddcc_status = 0; int ret; @@ -4775,7 +5109,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) /* All of these values are in units of 50MHz */ dev_priv->rps.cur_freq = 0; /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; @@ -4791,8 +5125,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ret = sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status); @@ -4804,7 +5138,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq); } - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is the natural hardware unit for SKL */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; @@ -4821,7 +5155,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; if (dev_priv->rps.min_freq_softlimit == 0) { - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dev_priv->rps.min_freq_softlimit = max_t(int, dev_priv->rps.efficient_freq, intel_freq_opcode(dev_priv, 450)); @@ -4832,16 +5166,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) } /* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_device *dev) +static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, 
BXT_REVID_A1)) { /* * BIOS could leave the Hw Turbo enabled, so need to explicitly * clear out the Control register just to avoid inconsistency * with debugfs interface, which will show Turbo as enabled * only and that is not expected by the userspace. So * if the Turbo is left enabled in the Control register, as the * Up/Down interrupts would remain masked. */ - gen9_disable_rps(dev); + gen9_disable_rps(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return; } @@ -4870,14 +5202,13 @@ static void gen9_enable_rps(struct drm_device *dev) * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen9_enable_rc6(struct drm_device *dev) +static void gen9_enable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4894,7 +5225,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 2b: Program RC6 thresholds.*/ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4903,7 +5234,7 @@ static void gen9_enable_rc6(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC_UCODE(dev)) + if (HAS_GUC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -4913,12 +5244,12 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | @@ -4934,19 +5265,17 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - } -static void gen8_enable_rps(struct drm_device *dev) +static void gen8_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4961,7 +5290,7 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); @@ -4970,16 +5299,16 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - intel_print_rc6_info(dev, rc6_mask); - if (IS_BROADWELL(dev)) + intel_print_rc6_info(dev_priv, rc6_mask); + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | rc6_mask); @@ -5020,14 +5349,13 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen6_enable_rps(struct drm_device *dev) +static void gen6_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; @@ -5054,7 +5382,7 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -5070,7 +5398,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); @@ -5078,12 +5406,12 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ - rc6_mode = intel_enable_rc6(dev_priv->dev); + rc6_mode = intel_enable_rc6(); if (rc6_mode & INTEL_RC6_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; /* We don't use those on Haswell */ - if (!IS_HASWELL(dev)) { + if (!IS_HASWELL(dev_priv)) { if (rc6_mode & INTEL_RC6p_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; @@ -5091,7 +5419,7 @@ static void gen6_enable_rps(struct drm_device *dev) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; } - intel_print_rc6_info(dev, rc6_mask); + intel_print_rc6_info(dev_priv, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -5115,13 +5443,13 @@ static void gen6_enable_rps(struct drm_device *dev) } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, 
dev_priv->rps.idle_freq); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { + if (IS_GEN6(dev_priv) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; @@ -5134,9 +5462,8 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void __gen6_update_ring_freq(struct drm_device *dev) +static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; @@ -5165,7 +5492,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Convert GT frequency to 50 MHz units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -5183,16 +5510,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. */ ring_freq = gpu_freq; - } else if (INTEL_INFO(dev)->gen >= 8) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { /* max(2 * GT, DDR).
NB: GT is 50MHz units */ ring_freq = max(min_ring_freq, gpu_freq); - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ring_freq = mult_frac(gpu_freq, 5, 4); ring_freq = max(min_ring_freq, ring_freq); /* leave ia_freq as the default, chosen by cpufreq */ @@ -5219,26 +5546,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev) } } -void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_CORE_RING_FREQ(dev)) + if (!HAS_CORE_RING_FREQ(dev_priv)) return; mutex_lock(&dev_priv->rps.hw_lock); - __gen6_update_ring_freq(dev); + __gen6_update_ring_freq(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); } static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; u32 val, rp0; val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (INTEL_INFO(dev)->eu_total) { + switch (INTEL_INFO(dev_priv)->eu_total) { case 8: /* (2 * 4) config */ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); @@ -5349,9 +5673,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv) WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); } -static void cherryview_setup_pctx(struct drm_device *dev) +static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long pctx_paddr, paddr; u32 pcbr; @@ -5370,15 +5693,14 @@ static void cherryview_setup_pctx(struct drm_device *dev) DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); } -static void valleyview_setup_pctx(struct drm_device *dev) +static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *pctx; unsigned long pctx_paddr; u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5386,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) int pcbr_offset; pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; - pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, + pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm, pcbr_offset, I915_GTT_OFFSET_NONE, pctx_size); @@ -5403,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. 
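A minimal sketch (illustrative, not part of the patch) of the per-platform GT-to-ring frequency mapping that the __gen6_update_ring_freq() hunks above program through the PCU; the helper name is hypothetical, max() and mult_frac() are the kernel helpers the hunk itself uses, and the gen6/7 IA-frequency branch is elided:

static unsigned int example_ring_freq(struct drm_i915_private *dev_priv,
                                      unsigned int gpu_freq,
                                      unsigned int min_ring_freq)
{
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                return gpu_freq;        /* ring = 2 * GT, no DDR floor on SKL */
        else if (INTEL_INFO(dev_priv)->gen >= 8)
                return max(min_ring_freq, gpu_freq);    /* max(2 * GT, DDR) */
        else if (IS_HASWELL(dev_priv))
                return max(min_ring_freq,
                           mult_frac(gpu_freq, 5, 4));  /* ring = 1.25 * GT */
        return 0;       /* gen6/7 scale ia_freq instead; branch elided */
}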
*/ - pctx = i915_gem_object_create_stolen(dev, pctx_size); + pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; @@ -5415,13 +5737,11 @@ static void valleyview_setup_pctx(struct drm_device *dev) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } -static void valleyview_cleanup_pctx(struct drm_device *dev) +static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (WARN_ON(!dev_priv->vlv_pctx)) return; @@ -5440,12 +5760,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) dev_priv->rps.gpll_ref_freq); } -static void valleyview_init_gt_powersave(struct drm_device *dev) +static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - valleyview_setup_pctx(dev); + valleyview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5499,12 +5818,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void cherryview_init_gt_powersave(struct drm_device *dev) +static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - cherryview_setup_pctx(dev); + cherryview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5564,14 +5882,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void valleyview_cleanup_gt_powersave(struct drm_device *dev) +static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - valleyview_cleanup_pctx(dev); + valleyview_cleanup_pctx(dev_priv); } -static void cherryview_enable_rps(struct drm_device *dev) +static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; @@ -5616,8 +5933,8 @@ static void cherryview_enable_rps(struct drm_device *dev) pcbr = I915_READ(VLV_PCBR); /* 3: Enable RC6 */ - if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && - (pcbr >> VLV_PCBR_ADDR_SHIFT)) + if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5662,14 +5979,13 @@ static void cherryview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void valleyview_enable_rps(struct drm_device *dev) +static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; @@ -5722,10 +6038,10 @@ static void valleyview_enable_rps(struct drm_device *dev) VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; - intel_print_rc6_info(dev, rc6_mode); + intel_print_rc6_info(dev_priv, rc6_mode); I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5752,7 
+6068,7 @@ static void valleyview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5842,10 +6158,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -5885,11 +6200,10 @@ static int _pxvid_to_vd(u8 pxvid) static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { - struct drm_device *dev = dev_priv->dev; const int vd = _pxvid_to_vd(pxvid); const int vm = vd - 1125; - if (INTEL_INFO(dev)->is_mobile) + if (INTEL_INFO(dev_priv)->is_mobile) return vm > 0 ? vm : 0; return vd; @@ -5930,9 +6244,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return; spin_lock_irq(&mchdev_lock); @@ -5981,10 +6293,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -6125,7 +6436,7 @@ bool i915_gpu_turbo_disable(void) dev_priv->ips.max_delay = dev_priv->ips.fstart; - if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) + if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) ret = false; out_unlock: @@ -6173,9 +6484,8 @@ void intel_gpu_ips_teardown(void) spin_unlock_irq(&mchdev_lock); } -static void intel_init_emon(struct drm_device *dev) +static void intel_init_emon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 lcfuse; u8 pxw[16]; int i; @@ -6244,10 +6554,8 @@ static void intel_init_emon(struct drm_device *dev) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_gt_powersave(struct drm_device *dev) +void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. 
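The init/cleanup pair in the hunks below preserves one invariant worth spelling out: runtime PM relies on RC6 to save/restore the GT hardware context, so when the user disables RC6 the driver pins the device awake. A condensed sketch of the pairing, using only names from the patch:

        if (!i915.enable_rc6)
                intel_runtime_pm_get(dev_priv); /* taken at init when RC6 is off;
                                                 * released again in
                                                 * intel_cleanup_gt_powersave() */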
@@ -6257,74 +6565,66 @@ void intel_init_gt_powersave(struct drm_device *dev) intel_runtime_pm_get(dev_priv); } - if (IS_CHERRYVIEW(dev)) - cherryview_init_gt_powersave(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_init_gt_powersave(dev); + if (IS_CHERRYVIEW(dev_priv)) + cherryview_init_gt_powersave(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_init_gt_powersave(dev_priv); } -void intel_cleanup_gt_powersave(struct drm_device *dev) +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return; - else if (IS_VALLEYVIEW(dev)) - valleyview_cleanup_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } -static void gen6_suspend_rps(struct drm_device *dev) +static void gen6_suspend_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - gen6_disable_rps_interrupts(dev); + gen6_disable_rps_interrupts(dev_priv); } /** * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev: drm device + * @dev_priv: i915 device * * We don't want to disable RC6 or other features here, we just want * to make sure any work we've queued has finished and won't bother * us while we're suspended. */ -void intel_suspend_gt_powersave(struct drm_device *dev) +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); /* Force GPU to min freq during suspend */ gen6_rps_idle(dev_priv); } -void intel_disable_gt_powersave(struct drm_device *dev) +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_IRONLAKE_M(dev)) { - ironlake_disable_drps(dev); - } else if (INTEL_INFO(dev)->gen >= 6) { - intel_suspend_gt_powersave(dev); + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_disable_drps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { + intel_suspend_gt_powersave(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); - if (INTEL_INFO(dev)->gen >= 9) { - gen9_disable_rc6(dev); - gen9_disable_rps(dev); - } else if (IS_CHERRYVIEW(dev)) - cherryview_disable_rps(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_disable_rps(dev); + if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_disable_rc6(dev_priv); + gen9_disable_rps(dev_priv); + } else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rps(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rps(dev_priv); else - gen6_disable_rps(dev); + gen6_disable_rps(dev_priv); dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); @@ -6336,27 +6636,26 @@ static void intel_gen6_powersave_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; mutex_lock(&dev_priv->rps.hw_lock); - gen6_reset_rps_interrupts(dev); + gen6_reset_rps_interrupts(dev_priv); - if (IS_CHERRYVIEW(dev)) { - cherryview_enable_rps(dev); - } else if (IS_VALLEYVIEW(dev)) { - valleyview_enable_rps(dev); - } else if (INTEL_INFO(dev)->gen >= 9) { - gen9_enable_rc6(dev); - gen9_enable_rps(dev); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - 
__gen6_update_ring_freq(dev); - } else if (IS_BROADWELL(dev)) { - gen8_enable_rps(dev); - __gen6_update_ring_freq(dev); + if (IS_CHERRYVIEW(dev_priv)) { + cherryview_enable_rps(dev_priv); + } else if (IS_VALLEYVIEW(dev_priv)) { + valleyview_enable_rps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_enable_rc6(dev_priv); + gen9_enable_rps(dev_priv); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + __gen6_update_ring_freq(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + gen8_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } else { - gen6_enable_rps(dev); - __gen6_update_ring_freq(dev); + gen6_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); @@ -6367,27 +6666,25 @@ static void intel_gen6_powersave_work(struct work_struct *work) dev_priv->rps.enabled = true; - gen6_enable_rps_interrupts(dev); + gen6_enable_rps_interrupts(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); } -void intel_enable_gt_powersave(struct drm_device *dev) +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) return; - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - mutex_lock(&dev->struct_mutex); - intel_init_emon(dev); - mutex_unlock(&dev->struct_mutex); - } else if (INTEL_INFO(dev)->gen >= 6) { + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_enable_drps(dev_priv); + mutex_lock(&dev_priv->drm.struct_mutex); + intel_init_emon(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -6406,20 +6703,18 @@ void intel_enable_gt_powersave(struct drm_device *dev) } } -void intel_reset_gt_powersave(struct drm_device *dev) +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); dev_priv->rps.enabled = false; } static void ibx_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -6431,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) static void g4x_disable_trickle_feed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -6446,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) static void ilk_init_lp_watermarks(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); @@ -6460,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev) static void ironlake_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; /* @@ -6534,7 +6829,7 @@ 
static void ironlake_init_clock_gating(struct drm_device *dev) static void cpt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; uint32_t val; @@ -6571,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev) static void gen6_check_mch_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; tmp = I915_READ(MCH_SSKPD); @@ -6582,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev) static void gen6_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); @@ -6697,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) static void lpt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * TODO: this bit should only be enabled when really needed, then @@ -6716,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev) static void lpt_suspend_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_LPT_LP(dev)) { uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); @@ -6726,6 +7021,29 @@ static void lpt_suspend_hw(struct drm_device *dev) } } +static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, + int general_prio_credits, + int high_prio_credits) +{ + u32 misccpctl; + + /* WaTempDisableDOPClkGating:bdw */ + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + + I915_WRITE(GEN8_L3SQCREG1, + L3_GENERAL_PRIO_CREDITS(general_prio_credits) | + L3_HIGH_PRIO_CREDITS(high_prio_credits)); + + /* + * Wait at least 100 clocks before re-enabling clock gating. + * See the definition of L3SQCREG1 in BSpec. + */ + POSTING_READ(GEN8_L3SQCREG1); + udelay(1); + I915_WRITE(GEN7_MISCCPCTL, misccpctl); +} + static void kabylake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -6753,6 +7071,10 @@ static void skylake_init_clock_gating(struct drm_device *dev) gen9_init_clock_gating(dev); + /* WAC6entrylatency:skl */ + I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + FBC_LLC_FULLY_OPEN); + /* WaFbcNukeOnHostModify:skl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); @@ -6760,9 +7082,8 @@ static void skylake_init_clock_gating(struct drm_device *dev) static void broadwell_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; - uint32_t misccpctl; ilk_init_lp_watermarks(dev); @@ -6793,20 +7114,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); - /* - * WaProgramL3SqcReg1Default:bdw - * WaTempDisableDOPClkGating:bdw - */ - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); - /* - * Wait at least 100 clocks before re-enabling clock gating. 
See - * the definition of L3SQCREG1 in BSpec. - */ - POSTING_READ(GEN8_L3SQCREG1); - udelay(1); - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + /* WaProgramL3SqcReg1Default:bdw */ + gen8_set_l3sqc_credits(dev_priv, 30, 2); /* * WaGttCachingOffByDefault:bdw @@ -6815,12 +7124,16 @@ static void broadwell_init_clock_gating(struct drm_device *dev) */ I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); + /* WaKVMNotificationOnConfigChange:bdw */ + I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) + | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); + lpt_init_clock_gating(dev); } static void haswell_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); ilk_init_lp_watermarks(dev); @@ -6876,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev) static void ivybridge_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t snpcr; ilk_init_lp_watermarks(dev); @@ -6974,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) static void valleyview_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, @@ -7056,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev) static void cherryview_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ @@ -7077,6 +7390,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev) GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* + * WaProgramL3SqcReg1Default:chv + * See gfxspecs/Related Documents/Performance Guide/ + * LSQC Setting Recommendations. + */ + gen8_set_l3sqc_credits(dev_priv, 38, 2); + + /* * GTT cache may not work with big pages, so if those * are ever enabled GTT cache may need to be disabled. 
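With the gen8_set_l3sqc_credits() helper introduced above, the previously open-coded Broadwell sequence and the new Cherryview tuning collapse to the two call sites shown in these hunks; for reference:

        gen8_set_l3sqc_credits(dev_priv, 30, 2); /* bdw: WaProgramL3SqcReg1Default */
        gen8_set_l3sqc_credits(dev_priv, 38, 2); /* chv: LSQC Setting Recommendations */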
*/ @@ -7085,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev) static void g4x_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -7112,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev) static void crestline_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); @@ -7128,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev) static void broadwater_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | @@ -7145,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev) static void gen3_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dstate = I915_READ(D_STATE); dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | @@ -7170,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev) static void i85x_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); @@ -7184,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev) static void i830_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); @@ -7195,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev) void intel_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->display.init_clock_gating(dev); } @@ -7263,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_fbc_init(dev_priv); @@ -7277,6 +7597,7 @@ void intel_init_pm(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); dev_priv->display.update_wm = skl_update_wm; + dev_priv->display.compute_global_watermarks = skl_compute_wm; } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); @@ -7340,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + /* GEN6_PCODE_* are outside of the forcewake domain, we can + * use the fw I915_READ variants to reduce the amount of work + * required when reading/writing.
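Both pcode helpers in this hunk follow the same mailbox handshake; a condensed sketch of the write case, using only registers and values from the patch (error logging omitted):

        if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
                return -EAGAIN;         /* mailbox still busy with a prior command */
        I915_WRITE_FW(GEN6_PCODE_DATA, val);                        /* stage payload */
        I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); /* ring doorbell */
        if (intel_wait_for_register_fw(dev_priv, GEN6_PCODE_MAILBOX,
                                       GEN6_PCODE_READY, 0, 500))
                return -ETIMEDOUT;      /* PCU clears READY when done; 500 ms budget */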
+ */ + + if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); return -EAGAIN; } - I915_WRITE(GEN6_PCODE_DATA, *val); - I915_WRITE(GEN6_PCODE_DATA1, 0); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + I915_WRITE_FW(GEN6_PCODE_DATA, *val); + I915_WRITE_FW(GEN6_PCODE_DATA1, 0); + I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { + if (intel_wait_for_register_fw(dev_priv, + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, + 500)) { DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); return -ETIMEDOUT; } - *val = I915_READ(GEN6_PCODE_DATA); - I915_WRITE(GEN6_PCODE_DATA, 0); + *val = I915_READ_FW(GEN6_PCODE_DATA); + I915_WRITE_FW(GEN6_PCODE_DATA, 0); return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, + u32 mbox, u32 val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + /* GEN6_PCODE_* are outside of the forcewake domain, we can + * use the fw I915_READ variants to reduce the amount of work + * required when reading/writing. + */ + + if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); return -EAGAIN; } - I915_WRITE(GEN6_PCODE_DATA, val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + I915_WRITE_FW(GEN6_PCODE_DATA, val); + I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { + if (intel_wait_for_register_fw(dev_priv, + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, + 500)) { DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); return -ETIMEDOUT; } - I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE_FW(GEN6_PCODE_DATA, 0); return 0; } @@ -7449,23 +7783,21 @@ static void __intel_rps_boost_work(struct work_struct *work) struct request_boost *boost = container_of(work, struct request_boost, work); struct drm_i915_gem_request *req = boost->req; - if (!i915_gem_request_completed(req, true)) - gen6_rps_boost(to_i915(req->engine->dev), NULL, - req->emitted_jiffies); + if (!i915_gem_request_completed(req)) + gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); - i915_gem_request_unreference__unlocked(req); + i915_gem_request_unreference(req); kfree(boost); } -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req) +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) { struct request_boost *boost; - if (req == NULL || INTEL_INFO(dev)->gen < 6) + if (req == NULL || INTEL_GEN(req->i915) < 6) return; - if (i915_gem_request_completed(req, true)) + if (i915_gem_request_completed(req)) return; boost = kmalloc(sizeof(*boost), GFP_ATOMIC); @@ -7476,12 +7808,12 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev, boost->req = req; INIT_WORK(&boost->work, __intel_rps_boost_work); - queue_work(to_i915(dev)->wq, &boost->work); + queue_work(req->i915->wq, &boost->work); } void intel_pm_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_init(&dev_priv->rps.hw_lock); spin_lock_init(&dev_priv->rps.client_lock); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index a788d1e9589b..2b0d1baf15b3
100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp) static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t val; val = I915_READ(VLV_PSRSTAT(pipe)) & @@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp, { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); @@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = intel_dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; uint32_t val; @@ -173,10 +173,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t aux_clock_divider; i915_reg_t aux_ctl_reg; - int precharge = 0x3; static const uint8_t aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, @@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) [4] = DP_SET_POWER_D0, }; enum port port = dig_port->port; + u32 aux_ctl; int i; BUILD_BUG_ON(sizeof(aux_msg) > 20); @@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, DP_AUX_FRAME_SYNC_ENABLE); + if (dev_priv->psr.link_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE); + aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); /* Setup AUX registers */ @@ -204,40 +211,16 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); - if (INTEL_INFO(dev)->gen >= 9) { - uint32_t val; - - val = I915_READ(aux_ctl_reg); - val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK; - val |= DP_AUX_CH_CTL_TIME_OUT_1600us; - val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK; - val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - /* Use hardcoded data values for PSR, frame sync and GTC */ - val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL; - I915_WRITE(aux_ctl_reg, val); - } else { - I915_WRITE(aux_ctl_reg, - DP_AUX_CH_CTL_TIME_OUT_400us | - (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); - } - - if (dev_priv->psr.link_standby) - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); - else - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE); + aux_ctl = 
intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_clock_divider); + I915_WRITE(aux_ctl_reg, aux_ctl); } static void vlv_psr_enable_source(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -252,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -269,17 +252,17 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t max_sleep_time = 0x1f; - /* - * Let's respect VBT in case VBT asks a higher idle_frame value. - * Let's use 6 as the minimum to cover all known cases including - * the off-by-one issue that HW has in some cases. Also there are - * cases where sink should be able to train - * with the 5 or 6 idle patterns. + /* Lately it was identified that, depending on the panel, the idle + * frame count calculated at HW can be off by 1. So let's use what + * came from VBT + 1. + * There are also other cases where the panel demands at least 4 + * but VBT is not being set. To cover these 2 cases let's use + * at least 5 when VBT isn't set, to be on the safe side.
*/ - uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1; uint32_t val = EDP_PSR_ENABLE; val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; @@ -341,9 +324,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_display_mode *adjusted_mode = + &intel_crtc->config->base.adjusted_mode; + int psr_setup_time; lockdep_assert_held(&dev_priv->psr.lock); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); @@ -382,11 +368,25 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) } if (IS_HASWELL(dev) && - intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); return false; } + psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); + if (psr_setup_time < 0) { + DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n", + intel_dp->psr_dpcd[1]); + return false; + } + + if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > + adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { + DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n", + psr_setup_time); + return false; + } + dev_priv->psr.source_ok = true; return true; } @@ -395,7 +395,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); @@ -424,7 +424,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); if (!HAS_PSR(dev)) { @@ -511,15 +511,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(intel_dig_port->base.base.crtc); uint32_t val; if (dev_priv->psr.active) { /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. 
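A worked example of the new setup-time check above, with illustrative mode numbers: for a 1920x1080 mode with crtc_vtotal 1125, crtc_vdisplay 1080, htotal 2200 and a 148.5 MHz pixel clock, one scanline lasts 2200 / 148500 kHz, about 14.8 us. A sink reporting a 330 us PSR setup time then needs ceil(330 / 14.8) = 23 scanlines, which fits within the crtc_vtotal - crtc_vdisplay - 1 = 44 lines of vertical blanking, so the condition passes and PSR stays available.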
*/ - if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & - VLV_EDP_PSR_IN_TRANS) == 0, 1)) + if (intel_wait_for_register(dev_priv, + VLV_PSRSTAT(intel_crtc->pipe), + VLV_EDP_PSR_IN_TRANS, + 0, + 1)) WARN(1, "PSR transition took longer than expected\n"); val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); @@ -538,16 +541,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->psr.active) { I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); /* Wait till PSR is idle */ - if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & - EDP_PSR_STATUS_STATE_MASK) == 0, - 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) + if (intel_wait_for_register(dev_priv, + EDP_PSR_STATUS_CTL, + EDP_PSR_STATUS_STATE_MASK, + 0, + 2000)) DRM_ERROR("Timed out waiting for PSR Idle State\n"); dev_priv->psr.active = false; @@ -566,7 +571,7 @@ void intel_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { @@ -603,14 +608,20 @@ static void intel_psr_work(struct work_struct *work) * and be ready for re-enable. */ if (HAS_DDI(dev_priv)) { - if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) & - EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { + if (intel_wait_for_register(dev_priv, + EDP_PSR_STATUS_CTL, + EDP_PSR_STATUS_STATE_MASK, + 0, + 50)) { DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); return; } } else { - if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & - VLV_EDP_PSR_IN_TRANS) == 0, 1)) { + if (intel_wait_for_register(dev_priv, + VLV_PSRSTAT(pipe), + VLV_EDP_PSR_IN_TRANS, + 0, + 1)) { DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); return; } @@ -636,7 +647,7 @@ unlock: static void intel_psr_exit(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dp *intel_dp = dev_priv->psr.enabled; struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -691,7 +702,7 @@ static void intel_psr_exit(struct drm_device *dev) void intel_psr_single_frame_update(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; u32 val; @@ -739,7 +750,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, void intel_psr_invalidate(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -777,7 +788,7 @@ void intel_psr_invalidate(struct drm_device *dev, void intel_psr_flush(struct drm_device *dev, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -813,7 +824,7 @@ void intel_psr_flush(struct drm_device *dev, */ void intel_psr_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
drm_i915_private *dev_priv = to_i915(dev); dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 68c5af079ef8..cca7792f26d5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -34,6 +34,11 @@ #include "i915_trace.h" #include "intel_drv.h" +/* Rough estimate of the typical request size, performing a flush, + * set-context and then emitting the batch. + */ +#define LEGACY_REQUEST_SIZE 200 + int __intel_ring_space(int head, int tail, int size) { int space = head - tail; @@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) ringbuf->tail, ringbuf->size); } -bool intel_engine_stopped(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->dev->dev_private; - return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); -} - static void __intel_ring_advance(struct intel_engine_cs *engine) { struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; - if (intel_engine_stopped(engine)) - return; engine->write_tail(engine, ringbuf->tail); } @@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 flush_domains) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) + (IS_G4X(req->i915) || IS_GEN5(req->i915))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(req, 2); @@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_TAIL(engine, value); } u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u64 acthd; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_INFO(engine->dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); @@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(engine->dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. 
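A pattern repeated throughout this file (and in intel_psr.c above): open-coded register polling is converted to the dedicated wait helper. The shape of the conversion, shown with the sync-flush wait from the hunk below (illustrative, timeout in the last argument):

        /* before */
        if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1000))
                ;       /* timed out */
        /* after: register, mask, expected value and timeout become explicit,
         * with a _fw variant for callers that already hold forcewake */
        if (intel_wait_for_register(dev_priv, reg, INSTPM_SYNC_FLUSH, 0, 1000))
                ;       /* timed out */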
*/ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(dev_priv)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) * arises: do we still need this and if so how should we go about * invalidating the TLB? */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { + if (IS_GEN(dev_priv, 6, 7)) { i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ @@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH)); - if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, + reg, INSTPM_SYNC_FLUSH, 0, + 1000)) DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", engine->name); } @@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + if (intel_wait_for_register(dev_priv, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, + MODE_IDLE, + 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", engine->name); /* Sometimes we observe that the idle flag is not @@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); engine->write_tail(engine, 0); - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) static int init_ring_common(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; @@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } } - if (I915_NEED_GFX_HWS(dev)) + if (I915_NEED_GFX_HWS(dev_priv)) intel_ring_setup_status_page(engine); else ring_setup_phys_status_page(engine); @@ -641,59 +640,42 @@ out: return ret; } -void -intel_fini_pipe_control(struct intel_engine_cs *engine) +void intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - if (engine->scratch.obj == NULL) return; - if (INTEL_INFO(dev)->gen >= 5) { - kunmap(sg_page(engine->scratch.obj->pages->sgl)); - i915_gem_object_ggtt_unpin(engine->scratch.obj); - } - + i915_gem_object_ggtt_unpin(engine->scratch.obj); drm_gem_object_unreference(&engine->scratch.obj->base); engine->scratch.obj = NULL; } -int -intel_init_pipe_control(struct intel_engine_cs *engine) +int intel_init_pipe_control(struct intel_engine_cs *engine, int size) { + struct drm_i915_gem_object *obj; int ret; WARN_ON(engine->scratch.obj); - engine->scratch.obj = 
i915_gem_alloc_object(engine->dev, 4096); - if (engine->scratch.obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; + obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + if (!obj) + obj = i915_gem_object_create(&engine->i915->drm, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + ret = PTR_ERR(obj); goto err; } - ret = i915_gem_object_set_cache_level(engine->scratch.obj, - I915_CACHE_LLC); - if (ret) - goto err_unref; - - ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); + ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH); if (ret) goto err_unref; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); - engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); - if (engine->scratch.cpu_page == NULL) { - ret = -ENOMEM; - goto err_unpin; - } - + engine->scratch.obj = obj; + engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", engine->name, engine->scratch.gtt_offset); return 0; -err_unpin: - i915_gem_object_ggtt_unpin(engine->scratch.obj); err_unref: drm_gem_object_unreference(&engine->scratch.obj->base); err: @@ -702,11 +684,9 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - int ret, i; struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; + int ret, i; if (w->count == 0) return 0; @@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv, static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct i915_workarounds *wa = &dev_priv->workarounds; const uint32_t index = wa->hw_whitelist_count[engine->id]; @@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); @@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine) static int bdw_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (IS_BDW_GT3(dev_priv) ? 
HDC_FENCE_DEST_SLM_DISABLE : 0)); return 0; } static int chv_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -911,8 +888,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ @@ -937,14 +913,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -970,8 +946,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); @@ -1035,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1077,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine) static int skl_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1090,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) * until D0 which is the default case so this is equivalent to * !WaDisablePerCtxtPreemptionGranularityControl:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); } - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1104,30 +1078,30 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes * involving this register should also be added to WA batch as required. 
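The reworked intel_init_pipe_control() earlier in this hunk series sizes the scratch object per caller and prefers stolen memory; its allocation strategy, trimmed to the essentials (error unwinding omitted; the PIN_HIGH rationale is the editor's reading, not stated in the patch):

        obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
        if (!obj)       /* stolen unavailable: fall back to a normal GEM object */
                obj = i915_gem_object_create(&engine->i915->drm, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
        /* PIN_HIGH keeps the scratch page out of the contended low GGTT range */
        ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);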
*/ - if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) /* WaDisableLSQCROPERFforOCL:skl */ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); /* WaEnableGapsTsvCreditFix:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); } /* WaDisablePowerCompilerClockGating:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); /* WaBarrierPerformanceFixDisable:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1135,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* WaDisableGafsUnitClkGating:skl */ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + /* WaInPlaceDecompressionHang:skl */ + if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1145,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) static int bxt_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1155,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaStoreMultiplePTEenable:bxt */ /* This is a requirement according to Hardware specification */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); /* WaSetClckGatingDisableMedia:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); } @@ -1168,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + /* WaDisablePooledEuLoadBalancingFix:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { + WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + /* WaDisableSbeCacheDispatchPortSharing:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1179,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; @@ -1189,17 +1173,27 @@ static 
int bxt_init_workarounds(struct intel_engine_cs *engine) return ret; } + /* WaProgramL3SqcReg1DefaultForPerf:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) + I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + /* WaInsertDummyPushConstPs:bxt */ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + /* WaInPlaceDecompressionHang:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + return 0; } static int kbl_init_workarounds(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen9_init_workarounds(engine); @@ -1241,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaInPlaceDecompressionHang:kbl */ + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + /* WaDisableLSQCROPERFforOCL:kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1251,24 +1249,23 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) return bdw_init_workarounds(engine); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return chv_init_workarounds(engine); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) return skl_init_workarounds(engine); - if (IS_BROXTON(dev)) + if (IS_BROXTON(dev_priv)) return bxt_init_workarounds(engine); if (IS_KABYLAKE(dev_priv)) @@ -1279,14 +1276,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine) static int init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret = init_ring_common(engine); if (ret) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) + if (IS_GEN(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -1295,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev)) + if (IS_GEN7(dev_priv)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * 
policy. [...] This bit must be reset. LRA replacement @@ -1320,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (INTEL_INFO(dev_priv)->gen >= 6) + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); return init_workarounds_ring(engine); } static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (dev_priv->semaphore_obj) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); @@ -1348,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 8 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1363,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_FLUSH_ENABLE); + PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, 0); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); @@ -1390,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 6 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1405,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_ring_emit(signaller, lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, 
MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); @@ -1429,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); #undef MBOX_UPDATE_DWORDS @@ -1448,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; if (i915_mmio_reg_valid(mbox_reg)) { - u32 seqno = i915_gem_request_get_seqno(signaller_req); - intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit_reg(signaller, mbox_reg); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); } } @@ -1488,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(engine, req->seqno); + intel_ring_emit(engine, MI_USER_INTERRUPT); + __intel_ring_advance(engine); + + return 0; +} + +static int +gen8_render_add_request(struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *engine = req->engine; + int ret; + + if (engine->semaphore.signal) + ret = engine->semaphore.signal(req, 8); + else + ret = intel_ring_begin(req, 8); + if (ret) + return ret; + + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); + intel_ring_emit(engine, 0); intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + /* We're thrashing one dword of HWS. 
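Aside: the new gen8_render_add_request above emits a PIPE_CONTROL breadcrumb whose qword write stores 8 bytes at the status-page seqno address, which is why the packet carries an explicit zero for the dword after the seqno slot. A toy model of that emission is sketched below under the assumption of a simple dword ring; the opcode values are stand-ins, not real command encodings.

#include <stdint.h>
#include <stdio.h>

static uint32_t ring[16];
static unsigned tail;

static void ring_emit(uint32_t dw) { ring[tail++ & 15] = dw; }

int main(void)
{
        uint32_t seqno = 42;
        uint64_t hws_seqno_addr = 0x1000; /* hypothetical GGTT offset */

        ring_emit(0xdeadbeef);                       /* GFX_OP_PIPE_CONTROL(6), stand-in */
        ring_emit(0x1);                              /* CS_STALL | QW_WRITE flags, stand-in */
        ring_emit((uint32_t)hws_seqno_addr);         /* seqno address, low bits */
        ring_emit((uint32_t)(hws_seqno_addr >> 32)); /* high bits */
        ring_emit(seqno);                            /* low dword of the qword write */
        ring_emit(0);                                /* high dword: the thrashed HWS slot */
        ring_emit(0x2);                              /* MI_USER_INTERRUPT, stand-in */
        ring_emit(0x0);                              /* MI_NOOP pads to an even dword count */

        printf("emitted %u dwords\n", tail);
        return 0;
}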
*/ + intel_ring_emit(engine, 0); intel_ring_emit(engine, MI_USER_INTERRUPT); + intel_ring_emit(engine, MI_NOOP); __intel_ring_advance(engine); return 0; } -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->last_seqno < seqno; } @@ -1516,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, u32 seqno) { struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter->dev->dev_private; + struct drm_i915_private *dev_priv = waiter_req->i915; + u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id); + struct i915_hw_ppgtt *ppgtt; int ret; ret = intel_ring_begin(waiter_req, 4); @@ -1525,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD); intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, - lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); - intel_ring_emit(waiter, - upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_emit(waiter, lower_32_bits(offset)); + intel_ring_emit(waiter, upper_32_bits(offset)); intel_ring_advance(waiter); + + /* When the !RCS engines idle waiting upon a semaphore, they lose their + * pagetables and we must reload them before executing the batch. + * We do this on the i915_switch_context() following the wait and + * before the dispatch. + */ + ppgtt = waiter_req->ctx->ppgtt; + if (ppgtt && waiter_req->engine->id != RCS) + ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine); return 0; } @@ -1561,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return ret; /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); @@ -1577,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return 0; } -#define PIPE_CONTROL_FLUSH(ring__, addr__) \ -do { \ - intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ - PIPE_CONTROL_DEPTH_STALL); \ - intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ - intel_ring_emit(ring__, 0); \ - intel_ring_emit(ring__, 0); \ -} while (0) - -static int -pc_render_add_request(struct drm_i915_gem_request *req) +static void +gen5_seqno_barrier(struct intel_engine_cs *ring) { - struct intel_engine_cs *engine = req->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; - int ret; - - /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently - * incoherent with writes to memory, i.e. completely fubar, - * so we need to use PIPE_NOTIFY instead. + /* MI_STORE are internally buffered by the GPU and not flushed + * either by MI_FLUSH or SyncFlush or any other combination of + * MI commands. * - * However, we also need to workaround the qword write - * incoherence by flushing the 6 PIPE_NOTIFY buffers out to - * memory before requesting an interrupt. + * "Only the submission of the store operation is guaranteed. + * The write result will be complete (coherent) some time later + * (this is practically a finite period but there is no guaranteed + * latency)." 
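Aside: the gen8_ring_sync hunk a little above adds the "!RCS engines lose their pagetables while idling on a semaphore" fix by marking the waiting engine in a per-ppgtt dirty bitmask, which the next i915_switch_context() consumes. A reduced model of just that bookkeeping, with illustrative engine ids:

#include <stdint.h>
#include <stdio.h>

enum { RCS = 0, VCS, BCS, VECS, VCS2 };

static uint32_t pd_dirty_rings;

static void mark_ppgtt_dirty_after_wait(int engine_id)
{
        if (engine_id != RCS)                 /* render keeps its tables */
                pd_dirty_rings |= 1u << engine_id;
}

int main(void)
{
        mark_ppgtt_dirty_after_wait(VCS);
        mark_ppgtt_dirty_after_wait(RCS);
        printf("dirty mask: %#x\n", pd_dirty_rings); /* 0x2: only VCS needs a reload */
        return 0;
}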
+ * + * Empirically, we observe that we need a delay of at least 75us to + * be sure that the seqno write is visible by the CPU. */ - ret = intel_ring_begin(req, 32); - if (ret) - return ret; - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - __intel_ring_advance(engine); - - return 0; + usleep_range(125, 250); } static void gen6_seqno_barrier(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -1664,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) spin_unlock_irq(&dev_priv->uncore.lock); } -static u32 -ring_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - static void -ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); -} - -static u32 -pc_render_get_seqno(struct intel_engine_cs *engine) +gen5_irq_enable(struct intel_engine_cs *engine) { - return engine->scratch.cpu_page[0]; + gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - engine->scratch.cpu_page[0] = seqno; -} - -static bool -gen5_ring_get_irq(struct intel_engine_cs *engine) +gen5_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -gen5_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); -} - -static bool -i9xx_ring_get_irq(struct intel_engine_cs *engine) 
-{ - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -i9xx_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); } -static bool -i8xx_ring_get_irq(struct intel_engine_cs *engine) +static void +i8xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(RING_IMR(engine->mmio_base)); } static void -i8xx_ring_put_irq(struct intel_engine_cs *engine) +i8xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); } static int @@ -1824,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, req->seqno); intel_ring_emit(engine, MI_USER_INTERRUPT); __intel_ring_advance(engine); return 0; } -static bool -gen6_ring_get_irq(struct intel_engine_cs *engine) +static void +gen6_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, - 
~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev))); - else - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); } static void -gen6_ring_put_irq(struct intel_engine_cs *engine) +gen6_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); - else - I915_WRITE_IMR(engine, ~0); - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); } -static bool -hsw_vebox_get_irq(struct intel_engine_cs *engine) +static void +hsw_vebox_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); } static void -hsw_vebox_put_irq(struct intel_engine_cs *engine) +hsw_vebox_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - I915_WRITE_IMR(engine, ~0); - gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~0); + gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); } -static bool -gen8_ring_get_irq(struct intel_engine_cs *engine) +static void +gen8_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); - } else { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -gen8_ring_put_irq(struct intel_engine_cs *engine) +gen8_irq_disable(struct intel_engine_cs *engine) { - 
struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); - } else { - I915_WRITE_IMR(engine, ~0); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } static int @@ -2066,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) return; - drm_pci_free(engine->dev, dev_priv->status_page_dmah); + drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2097,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_alloc_object(engine->dev, 4096); - if (obj == NULL) { + obj = i915_gem_object_create(&engine->i915->drm, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); @@ -2108,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine) goto err_unref; flags = 0; - if (!HAS_LLC(engine->dev)) + if (!HAS_LLC(engine->i915)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for @@ -2142,11 +1974,11 @@ err_unref: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2159,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine) void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { + GEM_BUG_ON(ringbuf->vma == NULL); + GEM_BUG_ON(ringbuf->virtual_start == NULL); + if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) i915_gem_object_unpin_map(ringbuf->obj); else - iounmap(ringbuf->virtual_start); + i915_vma_unpin_iomap(ringbuf->vma); ringbuf->virtual_start = NULL; - ringbuf->vma = NULL; + i915_gem_object_ggtt_unpin(ringbuf->obj); + ringbuf->vma = NULL; } -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ unsigned flags = PIN_OFFSET_BIAS | 4096; @@ -2206,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, /* Access through the GTT requires the device to be awake. 
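Aside: the pin-and-map hunk here replaces a raw ioremap_wc() of the aperture with i915_vma_pin_iomap(), keeping the LLC shortcut where the ring object is mapped through its CPU pages. A toy model of that policy split, with all names illustrative rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct ringbuf { bool stolen; };

static void *cpu_map(struct ringbuf *rb)   { printf("kmap via obj pages\n"); return rb; }
static void *gtt_iomap(struct ringbuf *rb) { printf("WC iomap via GGTT vma\n"); return rb; }

/* LLC platforms can snoop, so a CPU mapping is coherent and cheap;
 * otherwise the ring must be mapped write-combined through the GTT,
 * which also requires the device to be awake. */
static void *map_ringbuffer(struct ringbuf *rb, bool has_llc)
{
        if (has_llc && !rb->stolen)
                return cpu_map(rb);
        return gtt_iomap(rb);
}

int main(void)
{
        struct ringbuf rb = { .stolen = false };
        map_ringbuffer(&rb, true);   /* LLC: CPU mapping */
        map_ringbuffer(&rb, false);  /* no LLC: GTT iomap */
        return 0;
}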
*/ assert_rpm_wakelock_held(dev_priv); - addr = ioremap_wc(ggtt->mappable_base + - i915_gem_obj_ggtt_offset(obj), ringbuf->size); - if (addr == NULL) { - ret = -ENOMEM; + addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); goto err_unpin; } } @@ -2238,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev, if (!HAS_LLC(dev)) obj = i915_gem_object_create_stolen(dev, ringbuf->size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, ringbuf->size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, ringbuf->size); + if (IS_ERR(obj)) + return PTR_ERR(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; @@ -2272,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) * of the buffer. */ ring->effective_size = size; - if (IS_I830(engine->dev) || IS_845G(engine->dev)) + if (IS_I830(engine->i915) || IS_845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->dev, ring); + ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2298,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) kfree(ring); } +static int intel_ring_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + int ret; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (ce->pin_count++) + return 0; + + if (ce->state) { + ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); + if (ret) + goto error; + } + + /* The kernel context is only used as a placeholder for flushing the + * active context. It is never used for submitting user rendering and + * as such never requires the golden render context, and so we can skip + * emitting it when we switch to the kernel context. This is required + * as during eviction we cannot allocate and pin the renderstate in + * order to initialise the context. + */ + if (ctx == ctx->i915->kernel_context) + ce->initialised = true; + + i915_gem_context_reference(ctx); + return 0; + +error: + ce->pin_count = 0; + return ret; +} + +static void intel_ring_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (--ce->pin_count) + return; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + + i915_gem_context_unreference(ctx); +} + static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf; int ret; WARN_ON(engine->buffer); - engine->dev = dev; + engine->i915 = dev_priv; INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); INIT_LIST_HEAD(&engine->execlist_queue); @@ -2315,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev, memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - init_waitqueue_head(&engine->irq_queue); + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + goto error; + + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. 
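Aside: intel_ring_context_pin/unpin above introduce classic first-pin/last-unpin semantics; the sketch below reduces them to the counting logic under the assumption that pinning only needs GGTT residency and a context reference. Everything else (locking, the golden-render-state special case) is elided.

#include <stdio.h>

struct ctx { int pin_count; int refs; int ggtt_pinned; };

static int context_pin(struct ctx *c)
{
        if (c->pin_count++)
                return 0;          /* already resident, counter only */
        c->ggtt_pinned = 1;        /* would pin ce->state into the GGTT */
        c->refs++;                 /* i915_gem_context_reference() */
        return 0;
}

static void context_unpin(struct ctx *c)
{
        if (--c->pin_count)
                return;            /* still in use elsewhere */
        c->ggtt_pinned = 0;
        c->refs--;
}

int main(void)
{
        struct ctx kernel_ctx = { 0 };
        context_pin(&kernel_ctx);   /* held for the engine's lifetime */
        context_pin(&kernel_ctx);   /* per-request pin: counter only */
        context_unpin(&kernel_ctx);
        printf("pins=%d refs=%d\n", kernel_ctx.pin_count, kernel_ctx.refs);
        context_unpin(&kernel_ctx);
        return 0;
}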
This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = intel_ring_context_pin(dev_priv->kernel_context, engine); + if (ret) + goto error; ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); if (IS_ERR(ringbuf)) { @@ -2324,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, } engine->buffer = ringbuf; - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); if (ret) goto error; @@ -2335,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", engine->name, ret); @@ -2361,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(engine->dev); + dev_priv = engine->i915; if (engine->buffer) { intel_stop_engine(engine); - WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_unpin_ringbuffer_obj(engine->buffer); intel_ringbuffer_free(engine->buffer); @@ -2375,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (I915_NEED_GFX_HWS(engine->dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { cleanup_status_page(engine); } else { WARN_ON(engine->id != RCS); @@ -2384,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - engine->dev = NULL; + intel_engine_fini_breadcrumbs(engine); + + intel_ring_context_unpin(dev_priv->kernel_context, engine); + + engine->i915 = NULL; } int intel_engine_idle(struct intel_engine_cs *engine) @@ -2407,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->engine->buffer; - return 0; -} + int ret; -int intel_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. 
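Aside: the hunk below moves breadcrumb-space reservation from the ringbuffer to the request. A minimal sketch of the accounting, assuming only that ring_begin() must find room for the payload plus any outstanding reservation; LEGACY_REQUEST_SIZE is the kernel's name, the rest is simplified.

#include <stdio.h>

#define LEGACY_REQUEST_SIZE 200

struct request { int reserved_space; };

static int ring_begin(struct request *rq, int bytes)
{
        /* would wait until bytes + rq->reserved_space fit in the ring */
        printf("need %d bytes free\n", bytes + rq->reserved_space);
        return 0;
}

int main(void)
{
        struct request rq = { 0 };

        /* charge the closing breadcrumb up front ... */
        rq.reserved_space += LEGACY_REQUEST_SIZE;
        /* ... flush that much space early, so any waiting happens
         * before request construction starts ... */
        if (ring_begin(&rq, 0))
                return 1;
        /* ... then drop the charge before user payload is emitted */
        rq.reserved_space -= LEGACY_REQUEST_SIZE;
        return 0;
}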
*/ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - - return intel_ring_begin(request, 0); -} + request->reserved_space += LEGACY_REQUEST_SIZE; -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) -{ - GEM_BUG_ON(ringbuf->reserved_size); - ringbuf->reserved_size = size; -} - -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + request->ringbuf = request->engine->buffer; -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + ret = intel_ring_begin(request, 0); + if (ret) + return ret; -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(ringbuf->reserved_size); + request->reserved_space -= LEGACY_REQUEST_SIZE; + return 0; } static int wait_for_space(struct drm_i915_gem_request *req, int bytes) @@ -2468,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) * * See also i915_gem_request_alloc() and i915_add_request(). */ - GEM_BUG_ON(!ringbuf->reserved_size); + GEM_BUG_ON(!req->reserved_space); list_for_each_entry(target, &engine->request_list, list) { unsigned space; @@ -2503,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int total_bytes, wait_bytes; bool need_wrap = false; - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + req->reserved_space; if (unlikely(bytes > remain_usable)) { /* @@ -2519,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) * and only need to effectively wait for the reserved * size space from the start of ringbuffer. */ - wait_bytes = remain_actual + ringbuf->reserved_size; + wait_bytes = remain_actual + req->reserved_space; } else { /* No wrapping required, just waiting. */ wait_bytes = total_bytes; @@ -2576,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; /* Our semaphore implementation is strictly monotonic (i.e. we proceed * so long as the semaphore value in the register/page is greater @@ -2586,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) * the semaphore value, then when the seqno moves backwards all * future waits will complete instantly (causing rendering corruption). */ - if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); if (HAS_VEBOX(dev_priv)) @@ -2603,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - engine->set_seqno(engine, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); engine->last_submitted_seqno = seqno; engine->hangcheck.seqno = seqno; + + /* After manually advancing the seqno, fake the interrupt in case + * there are any waiters for that seqno. 
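Aside: intel_ring_init_seqno now writes the status page directly and then fakes the interrupt so stale waiters re-sample the new value. A reduced model of that path; the HWS index and wakeup call are stand-ins for the kernel's.

#include <stdint.h>
#include <stdio.h>

#define HWS_SEQNO_INDEX 0x30 /* illustrative slot */

static uint32_t status_page[64];

static void engine_wakeup(void) { printf("wake first waiter\n"); }

static void init_seqno(uint32_t seqno)
{
        status_page[HWS_SEQNO_INDEX] = seqno;  /* what waiters poll */
        engine_wakeup();                       /* fake the user interrupt */
}

int main(void)
{
        init_seqno(0xfffffffe); /* e.g. a debugfs-forced pre-wrap value */
        return 0;
}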
+ */ + rcu_read_lock(); + intel_engine_wakeup(engine); + rcu_read_unlock(); } static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; + + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Every tail move must follow the sequence below */ /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ - I915_WRITE64(GEN6_BSD_RNCID, 0x0); + I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ - if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & - GEN6_BSD_SLEEP_INDICATOR) == 0, - 50)) + if (intel_wait_for_register_fw(dev_priv, + GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_INDICATOR, + 0, + 50)) DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ - I915_WRITE_TAIL(engine, value); - POSTING_READ(RING_TAIL(engine->mmio_base)); + I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); + POSTING_READ_FW(RING_TAIL(engine->mmio_base)); /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, @@ -2654,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2676,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2767,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2776,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2797,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2809,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return 0; } +static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + int ret, i; + + if 
(!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { + obj = i915_gem_object_create(&dev_priv->drm, 4096); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + dev_priv->semaphore_obj = obj; + } + } + } + + if (!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8) { + u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); + + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + + for (i = 0; i < I915_NUM_ENGINES; i++) { + u64 ring_offset; + + if (i != engine->id) + ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); + else + ring_offset = MI_SEMAPHORE_SYNC_INVALID; + + engine->semaphore.signal_ggtt[i] = ring_offset; + } + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between RCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between VCS2 and RCS later. + */ + for (i = 0; i < I915_NUM_ENGINES; i++) { + static const struct { + u32 wait_mbox; + i915_reg_t mbox_reg; + } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { + [RCS] = { + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, + }, + [VCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, + }, + [BCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, + }, + [VECS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, + }, + }; + u32 wait_mbox; + i915_reg_t mbox_reg; + + if (i == engine->id || i == VCS2) { + wait_mbox = MI_SEMAPHORE_SYNC_INVALID; + mbox_reg = GEN6_NOSYNC; + } else { + wait_mbox = sem_data[engine->id][i].wait_mbox; + mbox_reg = sem_data[engine->id][i].mbox_reg; + } + + engine->semaphore.mbox.wait[i] = wait_mbox; + engine->semaphore.mbox.signal[i] = mbox_reg; + } + } +} + +static void intel_ring_init_irq(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + if (INTEL_GEN(dev_priv) >= 8) { + engine->irq_enable = gen8_irq_enable; + engine->irq_disable = gen8_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->irq_enable = gen6_irq_enable; + engine->irq_disable = gen6_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 5) { + engine->irq_enable = gen5_irq_enable; + 
engine->irq_disable = gen5_irq_disable; + engine->irq_seqno_barrier = gen5_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 3) { + engine->irq_enable = i9xx_irq_enable; + engine->irq_disable = i9xx_irq_disable; + } else { + engine->irq_enable = i8xx_irq_enable; + engine->irq_disable = i8xx_irq_disable; + } +} + +static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + engine->init_hw = init_ring_common; + engine->write_tail = ring_write_tail; + + engine->add_request = i9xx_add_request; + if (INTEL_GEN(dev_priv) >= 6) + engine->add_request = gen6_add_request; + + if (INTEL_GEN(dev_priv) >= 8) + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 6) + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 4) + engine->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) + engine->dispatch_execbuffer = i830_dispatch_execbuffer; + else + engine->dispatch_execbuffer = i915_dispatch_execbuffer; + + intel_ring_init_irq(dev_priv, engine); + intel_ring_init_semaphores(dev_priv, engine); +} + int intel_init_render_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[RCS]; - struct drm_i915_gem_object *obj; int ret; engine->name = "render ring"; @@ -2822,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 8) { - if (i915_semaphore_is_enabled(dev)) { - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to pin semaphore bo. 
Disabling semaphores\n"); - i915.semaphores = 0; - } else - dev_priv->semaphore_obj = obj; - } - } + intel_ring_default_vfuncs(dev_priv, engine); + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + if (HAS_L3_DPF(dev_priv)) + engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + + if (INTEL_GEN(dev_priv) >= 8) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; + engine->add_request = gen8_render_add_request; engine->flush = gen8_render_ring_flush; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { - WARN_ON(!dev_priv->semaphore_obj); - engine->semaphore.sync_to = gen8_ring_sync; + if (i915_semaphore_is_enabled(dev_priv)) engine->semaphore.signal = gen8_rcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between RCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between VCS2 and RCS later. 
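Aside: the sem_data table in intel_ring_init_semaphores above replaces the open-coded per-engine mailbox assignments with a [signaller][waiter] matrix whose diagonal stays invalid. A compilable miniature of that table-driven lookup, with dummy values in place of the MI_SEMAPHORE_SYNC_*/GEN6_*SYNC constants:

#include <stdio.h>

enum { RCS, VCS, BCS, VECS, NUM_ENGINES };
#define SYNC_INVALID 0u

struct sem { unsigned wait_mbox; unsigned mbox_reg; };

static const struct sem sem_data[NUM_ENGINES][NUM_ENGINES] = {
        [RCS]  = { [VCS] = {1, 10}, [BCS] = {2, 11}, [VECS] = {3, 12} },
        [VCS]  = { [RCS] = {4, 13}, [BCS] = {5, 14}, [VECS] = {6, 15} },
        [BCS]  = { [RCS] = {7, 16}, [VCS] = {8, 17}, [VECS] = {9, 18} },
        [VECS] = { [RCS] = {10,19}, [VCS] = {11,20}, [BCS]  = {12,21} },
};

int main(void)
{
        int self = VCS;
        for (int other = 0; other < NUM_ENGINES; other++) {
                /* an engine never syncs with itself */
                struct sem s = (other == self) ? (struct sem){ SYNC_INVALID, 0 }
                                               : sem_data[self][other];
                printf("engine %d -> %d: wait=%u reg=%u\n", self, other,
                       s.wait_mbox, s.mbox_reg);
        }
        return 0;
}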
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } else if (IS_GEN5(dev)) { - engine->add_request = pc_render_add_request; + } else if (IS_GEN5(dev_priv)) { engine->flush = gen4_render_ring_flush; - engine->get_seqno = pc_render_get_seqno; - engine->set_seqno = pc_render_set_seqno; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | - GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { - engine->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { - engine->irq_get = i8xx_ring_get_irq; - engine->irq_put = i8xx_ring_put_irq; - } else { - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } engine->irq_enable_mask = I915_USER_INTERRUPT; } - engine->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev)) - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 4) - engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) - engine->dispatch_execbuffer = i830_dispatch_execbuffer; - else - engine->dispatch_execbuffer = i915_dispatch_execbuffer; + engine->init_hw = init_render_ring; engine->cleanup = render_ring_cleanup; - /* Workaround batchbuffer to combat CS tlb bug. 
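Aside: the render-ring init below drops its private CS-TLB workaround batch and routes both scratch allocations through intel_init_pipe_control() with a size argument. A sketch of that consolidation; the helper and the I830_WA_SIZE value here are illustrative placeholders.

#include <stdio.h>

#define I830_WA_SIZE (512 * 1024) /* illustrative; the real value is platform-defined */

static int init_pipe_control(int size)
{
        printf("allocate %d-byte scratch in GGTT\n", size);
        return 0;
}

static int finish_render_init(int gen, int has_broken_cs_tlb)
{
        if (gen >= 6)
                return init_pipe_control(4096);      /* one scratch page */
        if (has_broken_cs_tlb)
                return init_pipe_control(I830_WA_SIZE); /* big WA batch */
        return 0;
}

int main(void)
{
        finish_render_init(7, 0);  /* gen7: one page */
        finish_render_init(2, 1);  /* i830: CS-TLB workaround batch */
        return 0;
}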
*/ - if (HAS_BROKEN_CS_TLB(dev)) { - obj = i915_gem_alloc_object(dev, I830_WA_SIZE); - if (obj == NULL) { - DRM_ERROR("Failed to allocate batch bo\n"); - return -ENOMEM; - } - - ret = i915_gem_obj_ggtt_pin(obj, 0, 0); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to ping batch bo\n"); - return ret; - } - - engine->scratch.obj = obj; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); - } - ret = intel_init_ring_buffer(dev, engine); if (ret) return ret; - if (INTEL_INFO(dev)->gen >= 5) { - ret = intel_init_pipe_control(engine); + if (INTEL_GEN(dev_priv) >= 6) { + ret = intel_init_pipe_control(engine, 4096); + if (ret) + return ret; + } else if (HAS_BROKEN_CS_TLB(dev_priv)) { + ret = intel_init_pipe_control(engine, I830_WA_SIZE); if (ret) return ret; } @@ -2965,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_init_bsd_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS]; engine->name = "bsd ring"; @@ -2973,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->exec_id = I915_EXEC_BSD; engine->hw_id = 1; - engine->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) { + intel_ring_default_vfuncs(dev_priv, engine); + + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = - gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } } else { engine->mmio_base = BSD_RING_BASE; engine->flush = bsd_ring_flush; - engine->add_request = i9xx_add_request; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; 
- } else { + else engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } - engine->dispatch_execbuffer = i965_dispatch_execbuffer; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } @@ -3044,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) */ int intel_init_bsd2_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; engine->name = "bsd2 ring"; engine->id = VCS2; engine->exec_id = I915_EXEC_BSD; engine->hw_id = 4; - - engine->write_tail = ring_write_tail; engine->mmio_base = GEN8_BSD2_RING_BASE; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_blt_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[BCS]; engine->name = "blitter ring"; engine->id = BCS; engine->exec_id = I915_EXEC_BLT; engine->hw_id = 2; - engine->mmio_base = BLT_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.signal = gen6_signal; - engine->semaphore.sync_to = gen6_ring_sync; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between BCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between BCS and VCS2 later. 
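Aside: the engine constructors in this patch all shrink to the same shape because intel_ring_default_vfuncs() installs generation-appropriate defaults and each engine then overrides only what differs. A toy model of that pattern, with dummy function names standing in for the driver's vfuncs:

#include <stdio.h>

struct engine {
        const char *name;
        void (*flush)(void);
        void (*write_tail)(void);
        unsigned irq_enable_mask;
};

static void gen6_flush(void)      { puts("gen6 flush"); }
static void gen6_bsd_flush(void)  { puts("gen6 bsd flush"); }
static void ring_write_tail(void) { puts("plain tail write"); }
static void bsd_write_tail(void)  { puts("bsd tail write w/ PSMI dance"); }

static void default_vfuncs(struct engine *e)
{
        e->flush = gen6_flush;
        e->write_tail = ring_write_tail;
}

int main(void)
{
        struct engine bsd = { .name = "bsd" };
        default_vfuncs(&bsd);
        bsd.flush = gen6_bsd_flush;       /* engine-specific overrides only */
        bsd.write_tail = bsd_write_tail;  /* gen6 BSD tail-update workaround */
        bsd.flush();
        bsd.write_tail();
        return 0;
}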
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_vebox_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VECS]; engine->name = "video enhancement ring"; engine->id = VECS; engine->exec_id = I915_EXEC_VEBOX; engine->hw_id = 3; - engine->mmio_base = VEBOX_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } } else { engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; - engine->irq_get = hsw_vebox_get_irq; - engine->irq_put = hsw_vebox_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } + engine->irq_enable = hsw_vebox_irq_enable; + engine->irq_disable = hsw_vebox_irq_disable; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ff126485d398..12cb7ed90014 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -62,18 +62,6 @@ struct intel_hw_status_page { (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) -#define GEN8_RING_SEMAPHORE_INIT(e) do { \ - if (!dev_priv->semaphore_obj) { \ - break; \ - } \ - (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \ - (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \ - (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); 
\ - (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \ - (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \ - (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \ - } while(0) - enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, @@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; + unsigned long user_interrupts; u32 seqno; - unsigned user_interrupts; int score; enum intel_ring_hangcheck_action action; int deadlock; @@ -107,7 +95,6 @@ struct intel_ringbuffer { int space; int size; int effective_size; - int reserved_size; /** We track the position of the requests in the ring buffer, and * when each is retired we increment last_retired_head as the GPU @@ -120,7 +107,7 @@ struct intel_ringbuffer { u32 last_retired_head; }; -struct intel_context; +struct i915_gem_context; struct drm_i915_reg_table; /* @@ -142,7 +129,10 @@ struct i915_ctx_workarounds { struct drm_i915_gem_object *obj; }; -struct intel_engine_cs { +struct drm_i915_gem_request; + +struct intel_engine_cs { + struct drm_i915_private *i915; const char *name; enum intel_engine_id { RCS = 0, @@ -157,10 +147,42 @@ struct intel_engine_cs { unsigned int hw_id; unsigned int guc_id; /* XXX same as hw_id? */ u32 mmio_base; - struct drm_device *dev; struct intel_ringbuffer *buffer; struct list_head buffers; + /* Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user interrupt) to the first client. After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ + struct intel_breadcrumbs { + struct task_struct *irq_seqno_bh; /* bh for user interrupts */ + unsigned long irq_wakeups; + bool irq_posted; + + spinlock_t lock; /* protects the lists of requests */ + struct rb_root waiters; /* sorted by retirement, priority */ + struct rb_root signals; /* sorted by retirement */ + struct intel_wait *first_wait; /* oldest waiter by retirement */ + struct task_struct *signaler; /* used for fence signalling */ + struct drm_i915_gem_request *first_signal; + struct timer_list fake_irq; /* used after a missed interrupt */ + + bool irq_enabled : 1; + bool rpm_wakelock : 1; + } breadcrumbs; + /* * A pool of objects to use as shadow copies of client batch buffers * when the command parser is enabled. 
Prevents the client from @@ -171,11 +193,10 @@ struct intel_engine_cs { struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; - unsigned irq_refcount; /* protected by dev_priv->irq_lock */ - u32 irq_enable_mask; /* bitmask to enable ring interrupt */ - struct drm_i915_gem_request *trace_irq_req; - bool __must_check (*irq_get)(struct intel_engine_cs *ring); - void (*irq_put)(struct intel_engine_cs *ring); + u32 irq_keep_mask; /* always keep these interrupts */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + void (*irq_enable)(struct intel_engine_cs *ring); + void (*irq_disable)(struct intel_engine_cs *ring); int (*init_hw)(struct intel_engine_cs *ring); @@ -194,9 +215,6 @@ struct intel_engine_cs { * monotonic, even if not coherent. */ void (*irq_seqno_barrier)(struct intel_engine_cs *ring); - u32 (*get_seqno)(struct intel_engine_cs *ring); - void (*set_seqno)(struct intel_engine_cs *ring, - u32 seqno); int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, u64 offset, u32 length, unsigned dispatch_flags); @@ -268,13 +286,11 @@ struct intel_engine_cs { struct tasklet_struct irq_tasklet; spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ struct list_head execlist_queue; - struct list_head execlist_retired_req_list; unsigned int fw_domains; unsigned int next_context_status_buffer; unsigned int idle_lite_restore_wa; bool disable_lite_restore_wa; u32 ctx_desc_template; - u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ int (*emit_request)(struct drm_i915_gem_request *request); int (*emit_flush)(struct drm_i915_gem_request *request, u32 invalidate_domains, @@ -306,20 +322,16 @@ struct intel_engine_cs { * inspecting request list. */ u32 last_submitted_seqno; - unsigned user_interrupts; bool gpu_caches_dirty; - wait_queue_head_t irq_queue; - - struct intel_context *last_context; + struct i915_gem_context *last_context; struct intel_ring_hangcheck hangcheck; struct { struct drm_i915_gem_object *obj; u32 gtt_offset; - volatile u32 *cpu_page; } scratch; bool needs_cmd_parser; @@ -350,13 +362,13 @@ struct intel_engine_cs { }; static inline bool -intel_engine_initialized(struct intel_engine_cs *engine) +intel_engine_initialized(const struct intel_engine_cs *engine) { - return engine->dev != NULL; + return engine->i915 != NULL; } static inline unsigned -intel_engine_flag(struct intel_engine_cs *engine) +intel_engine_flag(const struct intel_engine_cs *engine) { return 1 << engine->id; } @@ -427,7 +439,7 @@ intel_write_status_page(struct intel_engine_cs *engine, struct intel_ringbuffer * intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf); void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_ringbuffer_free(struct intel_ringbuffer *ring); @@ -458,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine) } int __intel_ring_space(int head, int tail, int size); void intel_ring_update_space(struct intel_ringbuffer *ringbuf); -bool intel_engine_stopped(struct intel_engine_cs *engine); int __must_check intel_engine_idle(struct intel_engine_cs *engine); void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); +int 
intel_init_pipe_control(struct intel_engine_cs *engine, int size); void intel_fini_pipe_control(struct intel_engine_cs *engine); -int intel_init_pipe_control(struct intel_engine_cs *engine); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); @@ -475,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); int intel_init_vebox_ring_buffer(struct drm_device *dev); u64 intel_ring_get_active_head(struct intel_engine_cs *engine); +static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); +} int init_workarounds_ring(struct intel_engine_cs *engine); @@ -486,26 +501,73 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) /* * Arbitrary size for largest possible 'add request' sequence. The code paths * are complex and variable. Empirical measurement shows that the worst case - * is ILK at 136 words. Reserving too much is better than reserving too little - * as that allows for corner cases that might have been missed. So the figure - * has been rounded up to 160 words. + * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However, + * we need to allocate double the largest single packet within that emission + * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW). */ -#define MIN_SPACE_FOR_ADD_REQUEST 160 +#define MIN_SPACE_FOR_ADD_REQUEST 336 -/* - * Reserve space in the ring to guarantee that the i915_add_request() call - * will always have sufficient room to do its stuff. The request creation - * code calls this automatically. - */ -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size); -/* Cancel the reservation, e.g. because the request is being discarded. */ -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf); -/* Use the reserved space - for use by i915_add_request() only. */ -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf); -/* Finish with the reserved space - for use by i915_add_request() only. 
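For reference, the arithmetic behind the new constant above, with the dword counts taken from the comment (1 dword = 4 bytes):

/* BDW worst case, doubled for tail wraparound:
 *	6 + 6 + 72 = 84 dwords, and 84 * 4 = 336 bytes,
 * hence MIN_SPACE_FOR_ADD_REQUEST is now 336. */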
*/ -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf); - -/* Legacy ringbuffer specific portion of reservation code: */ -int intel_ring_reserve_space(struct drm_i915_gem_request *request); +static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) +{ + return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; +} + +/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ +struct intel_wait { + struct rb_node node; + struct task_struct *tsk; + u32 seqno; +}; + +struct intel_signal_node { + struct rb_node node; + struct intel_wait wait; +}; + +int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); + +static inline void intel_wait_init(struct intel_wait *wait, u32 seqno) +{ + wait->tsk = current; + wait->seqno = seqno; +} + +static inline bool intel_wait_complete(const struct intel_wait *wait) +{ + return RB_EMPTY_NODE(&wait->node); +} + +bool intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait); +void intel_engine_remove_wait(struct intel_engine_cs *engine, + struct intel_wait *wait); +void intel_engine_enable_signaling(struct drm_i915_gem_request *request); + +static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine) +{ + return READ_ONCE(engine->breadcrumbs.irq_seqno_bh); +} + +static inline bool intel_engine_wakeup(struct intel_engine_cs *engine) +{ + bool wakeup = false; + struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + /* Note that for this not to dangerously chase a dangling pointer, + * the caller is responsible for ensure that the task remain valid for + * wake_up_process() i.e. that the RCU grace period cannot expire. + * + * Also note that tsk is likely to be in !TASK_RUNNING state so an + * early test for tsk->state != TASK_RUNNING before wake_up_process() + * is unlikely to be beneficial. 
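As a usage sketch of the wait API declared above: a hypothetical caller blocking on a seqno, assuming i915_seqno_passed() from i915_drv.h; the real consumers (with interruptible sleeps and timeouts) live in the request-wait paths:

static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	intel_wait_init(&wait, seqno);
	/* Queue ourselves; the breadcrumbs core picks the bottom-half. */
	intel_engine_add_wait(engine, &wait);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();	/* woken by the irq bottom-half chain */
	}
	__set_current_state(TASK_RUNNING);

	intel_engine_remove_wait(engine, &wait);
}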
+ */ + if (tsk) + wakeup = wake_up_process(tsk); + return wakeup; +} + +void intel_engine_enable_fake_irq(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); +unsigned int intel_kick_waiters(struct drm_i915_private *i915); +unsigned int intel_kick_signalers(struct drm_i915_private *i915); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7fb1da4e7fc3..1c603bbe5784 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -65,6 +65,9 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, int power_well_id); +static struct i915_power_well * +lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id); + const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { @@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv, power_well->ops->disable(dev_priv, power_well); } +static void intel_power_well_get(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!power_well->count++) + intel_power_well_enable(dev_priv, power_well); +} + +static void intel_power_well_put(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN(!power_well->count, "Use count on power well %s is already zero", + power_well->name); + + if (!--power_well->count) + intel_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -267,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv, */ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -298,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv) static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -345,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, if (!is_enabled) { DRM_DEBUG_KMS("Enabling power well\n"); - if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & - HSW_PWR_WELL_STATE_ENABLED), 20)) + if (intel_wait_for_register(dev_priv, + HSW_PWR_WELL_DRIVER, + HSW_PWR_WELL_STATE_ENABLED, + HSW_PWR_WELL_STATE_ENABLED, + 20)) DRM_ERROR("Timeout enabling power well\n"); hsw_power_well_post_enable(dev_priv); } @@ -419,6 +442,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_MODESET) | \ BIT(POWER_DOMAIN_AUX_A) | \ BIT(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_AUX_A) | \ + BIT(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_AUX_B) | \ + BIT(POWER_DOMAIN_AUX_C) | \ + BIT(POWER_DOMAIN_INIT)) static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { @@ -548,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC9\n"); + 
intel_power_sequencer_reset(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); } @@ -669,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, switch (power_well->data) { case SKL_DISP_PW_1: - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG0_DIST_STATUS), 1)) { + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG0_DIST_STATUS, + SKL_FUSE_PG0_DIST_STATUS, + 1)) { DRM_ERROR("PG0 not enabled\n"); return; } @@ -731,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, if (check_fuse_status) { if (power_well->data == SKL_DISP_PW_1) { - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG1_DIST_STATUS), 1)) + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG1_DIST_STATUS, + SKL_FUSE_PG1_DIST_STATUS, + 1)) DRM_ERROR("PG1 distributing status timeout\n"); } else if (power_well->data == SKL_DISP_PW_2) { - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG2_DIST_STATUS), 1)) + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG2_DIST_STATUS, + SKL_FUSE_PG2_DIST_STATUS, + 1)) DRM_ERROR("PG2 distributing status timeout\n"); } } @@ -800,21 +843,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv, skl_set_power_well(dev_priv, power_well, false); } +static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well) +{ + enum skl_disp_power_wells power_well_id = power_well->data; + + return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0; +} + +static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum skl_disp_power_wells power_well_id = power_well->data; + struct i915_power_well *cmn_a_well; + + if (power_well_id == BXT_DPIO_CMN_BC) { + /* + * We need to copy the GRC calibration value from the eDP PHY, + * so make sure it's powered up. 
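The skl_set_power_well() hunks above show a conversion that recurs throughout this series: open-coded register polling becomes intel_wait_for_register(), which makes the mask/value success condition explicit. Side by side, using the fuse-status wait from above:

/* before: open-coded poll */
if (wait_for((I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG0_DIST_STATUS), 1))
	DRM_ERROR("PG0 not enabled\n");

/* after: register, mask, expected value, timeout in ms */
if (intel_wait_for_register(dev_priv,
			    SKL_FUSE_STATUS,
			    SKL_FUSE_PG0_DIST_STATUS,	/* mask */
			    SKL_FUSE_PG0_DIST_STATUS,	/* value */
			    1))
	DRM_ERROR("PG0 not enabled\n");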
+ */ + cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + intel_power_well_get(dev_priv, cmn_a_well); + } + + bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); + + if (power_well_id == BXT_DPIO_CMN_BC) + intel_power_well_put(dev_priv, cmn_a_well); +} + +static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well)); +} + +static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return bxt_ddi_phy_is_enabled(dev_priv, + bxt_power_well_to_phy(power_well)); +} + +static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (power_well->count > 0) + bxt_dpio_cmn_power_well_enable(dev_priv, power_well); + else + bxt_dpio_cmn_power_well_disable(dev_priv, power_well); +} + + +static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *power_well; + + power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, + bxt_power_well_to_phy(power_well)); + + power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, + bxt_power_well_to_phy(power_well)); +} + static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; } +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u32 tmp = I915_READ(DBUF_CTL); + + WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != + (DBUF_POWER_STATE | DBUF_POWER_REQUEST), + "Unexpected DBuf power power state (0x%08x)\n", tmp); +} + static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - if (IS_BROXTON(dev_priv)) { - broxton_cdclk_verify_state(dev_priv); - broxton_ddi_phy_verify_state(dev_priv); - } + WARN_ON(dev_priv->cdclk_freq != + dev_priv->display.get_display_clock_speed(&dev_priv->drm)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_BROXTON(dev_priv)) + bxt_verify_ddi_phy_power_wells(dev_priv); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, @@ -948,10 +1069,16 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); I915_WRITE(CBR1_VLV, 0); + + WARN_ON(dev_priv->rawclk_freq == 0); + + I915_WRITE(RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) { + struct intel_encoder *encoder; enum pipe pipe; /* @@ -962,7 +1089,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * * CHV DPLL B/C have some issues if VGA mode is enabled. 
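The bxt_dpio_cmn_power_well_enable() hunk above is the template for the new refcount helpers: take a temporary reference on a second well around an operation that depends on it. A condensed sketch, with the hardware access reduced to a comment:

static void example_borrow_cmn_a(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_a =
		lookup_power_well(dev_priv, BXT_DPIO_CMN_A);

	intel_power_well_get(dev_priv, cmn_a);	/* enables on 0 -> 1 */
	/* ... e.g. copy the GRC calibration value while the well is up ... */
	intel_power_well_put(dev_priv, cmn_a);	/* disables on 1 -> 0 */
}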
*/ - for_each_pipe(dev_priv->dev, pipe) { + for_each_pipe(&dev_priv->drm, pipe) { u32 val = I915_READ(DPLL(pipe)); val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -987,7 +1114,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); - i915_redisable_vga_power_on(dev_priv->dev); + /* Re-enable the ADPA, if we have one */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_ANALOG) + intel_crt_reset(&encoder->base); + } + + i915_redisable_vga_power_on(&dev_priv->drm); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) @@ -997,9 +1130,11 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); + + intel_power_sequencer_reset(dev_priv); - vlv_power_sequencer_reset(dev_priv); + intel_hpd_poll_init(dev_priv); } static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, @@ -1092,7 +1227,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) u32 phy_control = dev_priv->chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; - u32 tmp; /* * The BIOS can leave the PHY is some weird state @@ -1180,10 +1314,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. */ - if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10)) - WARN(phy_status != tmp, - "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - tmp, phy_status, dev_priv->chv_phy_control); + if (intel_wait_for_register(dev_priv, + DISPLAY_PHY_STATUS, + phy_status_mask, + phy_status, + 10)) + DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); } #undef BITS_SET @@ -1211,7 +1349,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) + if (intel_wait_for_register(dev_priv, + DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), + PHY_POWERGOOD(phy), + 1)) DRM_ERROR("Display PHY %d is not power up\n", phy); mutex_lock(&dev_priv->sb_lock); @@ -1501,10 +1643,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, struct i915_power_well *power_well; int i; - for_each_power_well(i, power_well, BIT(domain), power_domains) { - if (!power_well->count++) - intel_power_well_enable(dev_priv, power_well); - } + for_each_power_well(i, power_well, BIT(domain), power_domains) + intel_power_well_get(dev_priv, power_well); power_domains->domain_use_count[domain]++; } @@ -1598,14 +1738,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_display_power_domain_str(domain)); power_domains->domain_use_count[domain]--; - for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { - WARN(!power_well->count, - "Use count on power well %s is already zero", - power_well->name); - - if (!--power_well->count) - intel_power_well_disable(dev_priv, power_well); - } + for_each_power_well_rev(i, power_well, BIT(domain), power_domains) + intel_power_well_put(dev_priv, power_well); 
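At the domain level the pairing looks the same to callers; after this hunk, intel_display_power_get()/intel_display_power_put() simply funnel into intel_power_well_get()/intel_power_well_put() for every well backing the domain:

static void example_touch_pipe_a(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
	/* ... MMIO access that requires pipe A to be powered ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}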
mutex_unlock(&power_domains->lock); @@ -1776,6 +1910,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { .is_enabled = gen9_dc_off_power_well_enabled, }; +static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { + .sync_hw = bxt_dpio_cmn_power_well_sync_hw, + .enable = bxt_dpio_cmn_power_well_enable, + .disable = bxt_dpio_cmn_power_well_disable, + .is_enabled = bxt_dpio_cmn_power_well_enabled, +}; + static struct i915_power_well hsw_power_wells[] = { { .name = "always-on", @@ -2012,6 +2153,18 @@ static struct i915_power_well bxt_power_wells[] = { .ops = &skl_power_well_ops, .data = SKL_DISP_PW_2, }, + { + .name = "dpio-common-a", + .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .data = BXT_DPIO_CMN_A, + }, + { + .name = "dpio-common-bc", + .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .data = BXT_DPIO_CMN_BC, + }, }; static int @@ -2131,7 +2284,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_fini(struct drm_i915_private *dev_priv) { - struct device *device = &dev_priv->dev->pdev->dev; + struct device *device = &dev_priv->drm.pdev->dev; /* * The i915.ko module is still not prepared to be loaded when @@ -2171,6 +2324,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) + DRM_ERROR("DBuf power enable timeout\n"); +} + +static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) + DRM_ERROR("DBuf power disable timeout!\n"); +} + static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { @@ -2195,12 +2370,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - if (!resume) - return; - skl_init_cdclk(dev_priv); - if (dev_priv->csr.dmc_payload) + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); } @@ -2211,6 +2385,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_dbuf_disable(dev_priv); + skl_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -2254,11 +2430,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - broxton_init_cdclk(dev_priv); - broxton_ddi_phy_init(dev_priv); + bxt_init_cdclk(dev_priv); - broxton_cdclk_verify_state(dev_priv); - broxton_ddi_phy_verify_state(dev_priv); + gen9_dbuf_enable(dev_priv); if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); @@ -2271,8 +2445,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - broxton_ddi_phy_uninit(dev_priv); - broxton_uninit_cdclk(dev_priv); + gen9_dbuf_disable(dev_priv); + + bxt_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -2403,13 +2578,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) /** * intel_power_domains_init_hw - initialize hardware power domain state * @dev_priv: 
i915 device instance + * @resume: Called from resume code paths or not * * This function initializes the hardware power domain state and enables all * power domains using intel_display_set_init_power(). */ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct i915_power_domains *power_domains = &dev_priv->power_domains; power_domains->initializing = true; @@ -2471,7 +2647,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; pm_runtime_get_sync(device); @@ -2492,7 +2668,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) */ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; if (IS_ENABLED(CONFIG_PM)) { @@ -2534,7 +2710,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; assert_rpm_wakelock_held(dev_priv); @@ -2553,7 +2729,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; assert_rpm_wakelock_held(dev_priv); @@ -2576,7 +2752,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2128fae5687d..e378f35365a2 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 bval = val, cval = val; int i; @@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct drm_display_mode *mode = &crtc->config->base.mode; @@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; u32 tmp; @@ -1353,7 +1353,7 @@ static 
void intel_sdvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_dtd dtd; int encoder_pixel_multiplier = 0; @@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, static void intel_disable_sdvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder) temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder) static void intel_enable_sdvo(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector) static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); return drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, @@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector, { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint16_t temp_value; uint8_t cmd; int ret; @@ -2177,12 +2177,39 @@ done: #undef CHECK_PROPERTY } +static int +intel_sdvo_connector_register(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + return sysfs_create_link(&connector->kdev->kobj, + &sdvo->ddc.dev.kobj, + sdvo->ddc.dev.kobj.name); +} + +static void +intel_sdvo_connector_unregister(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + + sysfs_remove_link(&connector->kdev->kobj, + sdvo->ddc.dev.kobj.name); + intel_connector_unregister(connector); +} + static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = 
intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_sdvo_connector_register, + .early_unregister = intel_sdvo_connector_unregister, .destroy = intel_sdvo_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -2191,7 +2218,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) @@ -2312,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) static u8 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { @@ -2346,20 +2372,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) return 0x72; } -static void -intel_sdvo_connector_unregister(struct intel_connector *intel_connector) -{ - struct drm_connector *drm_connector; - struct intel_sdvo *sdvo_encoder; - - drm_connector = &intel_connector->base; - sdvo_encoder = intel_attached_sdvo(&intel_connector->base); - - sysfs_remove_link(&drm_connector->kdev->kobj, - sdvo_encoder->ddc.dev.kobj.name); - intel_connector_unregister(intel_connector); -} - static int intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) @@ -2382,27 +2394,10 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; - connector->base.unregister = intel_sdvo_connector_unregister; intel_connector_attach_encoder(&connector->base, &encoder->base); - ret = drm_connector_register(drm_connector); - if (ret < 0) - goto err1; - - ret = sysfs_create_link(&drm_connector->kdev->kobj, - &encoder->ddc.dev.kobj, - encoder->ddc.dev.kobj.name); - if (ret < 0) - goto err2; return 0; - -err2: - drm_connector_unregister(drm_connector); -err1: - drm_connector_cleanup(drm_connector); - - return ret; } static void @@ -2529,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2608,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return true; err: - drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2959,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, bool intel_sdvo_init(struct drm_device *dev, i915_reg_t sdvo_reg, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; @@ -2981,7 +2974,7 @@ bool intel_sdvo_init(struct drm_device *dev, intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, - NULL); + "SDVO %c", 
port_name(port)); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index c3998188cf35..1a840bf92eea 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { + if (intel_wait_for_register(dev_priv, + VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, + 5)) { DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", is_read ? "read" : "write"); return -EAGAIN; @@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, I915_WRITE(VLV_IOSF_DATA, *val); I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); - if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { + if (intel_wait_for_register(dev_priv, + VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, + 5)) { DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", is_read ? "read" : "write"); return -ETIMEDOUT; @@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, u32 value = 0; WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, SBI_BUSY, 0, + 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); return 0; } @@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, + SBI_BUSY | SBI_RESPONSE_FAIL, + 0, + 100)) { DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); return 0; } @@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, SBI_BUSY, 0, + 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); return; } @@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, + SBI_BUSY | SBI_RESPONSE_FAIL, + 0, + 100)) { DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); return; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0f3e2303e0e9..7c08e4f29032 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -53,8 +53,8 @@ format_is_yuv(uint32_t format) } } -static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs) +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs) { /* paranoia */ if (!adjusted_mode->crtc_htotal) @@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, */ void intel_pipe_update_start(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; const struct drm_display_mode *adjusted_mode = 
&crtc->config->base.adjusted_mode; - enum pipe pipe = crtc->pipe; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); @@ -93,7 +91,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) vblank_start = DIV_ROUND_UP(vblank_start, 2); /* FIXME needs to be calibrated sensibly */ - min = vblank_start - usecs_to_scanlines(adjusted_mode, 100); + min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100); max = vblank_start - 1; local_irq_disable(); @@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) crtc->debug.scanline_start = scanline; crtc->debug.start_vbl_time = ktime_get(); - crtc->debug.start_vbl_count = - dev->driver->get_vblank_counter(dev, pipe); + crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_i915_pipe_update_vblank_evaded(crtc); } @@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc) * re-enables interrupts and verifies the update was actually completed * before a vblank using the value of @start_vbl_count. */ -void intel_pipe_update_end(struct intel_crtc *crtc) +void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work) { - struct drm_device *dev = crtc->base.dev; enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); - u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); + u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); ktime_t end_vbl_time = ktime_get(); + if (work) { + work->flip_queued_vblank = end_vbl_count; + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); + } + trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); + /* We're still in the vblank-evade critical section, this can't race. + * Would be slightly nice to just grab the vblank count and arm the + * event outside of the critical section - the spinlock might spin for a + * while ... 
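A worked example of the evade-window math above, assuming the usual intel_usecs_to_scanlines() body (this hunk only renames and exports the helper):

/* scanlines = DIV_ROUND_UP(usecs * crtc_clock, 1000 * crtc_htotal)
 * 1080p60: crtc_clock = 148500 (kHz), crtc_htotal = 2200
 *	intel_usecs_to_scanlines(mode, 100)
 *		= DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7
 * so the critical section opens 7 scanlines before vblank_start. */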
*/ + if (crtc->base.state->event) { + WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); + + spin_lock(&crtc->base.dev->event_lock); + drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event); + spin_unlock(&crtc->base.dev->event_lock); + + crtc->base.state->event = NULL; + } + local_irq_enable(); if (crtc->debug.start_vbl_count && @@ -183,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = drm_plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane, uint32_t y = plane_state->src.y1 >> 16; uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; - const struct intel_scaler *scaler = - &crtc_state->scaler_state.scalers[plane_state->scaler_id]; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane, /* program plane scaler */ if (plane_state->scaler_id >= 0) { - uint32_t ps_ctrl = 0; int scaler_id = plane_state->scaler_id; + const struct intel_scaler *scaler; DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, PS_PLANE_SEL(plane)); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; - I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); + + scaler = &crtc_state->scaler_state.scalers[scaler_id]; + + I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), + PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), @@ -286,7 +303,7 @@ static void skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; @@ -300,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) static void chv_update_csc(struct intel_plane *intel_plane, uint32_t format) { - struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); int plane = intel_plane->plane; /* Seems RGB data bypasses the CSC always */ @@ -342,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane, const struct intel_plane_state *plane_state) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -468,7 +485,7 @@ static void vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); int pipe = intel_plane->pipe; int plane = intel_plane->plane; @@ -485,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane, const struct intel_plane_state 
*plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -607,7 +624,7 @@ static void ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; @@ -626,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -736,7 +753,7 @@ static void ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; @@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) possible_crtcs = (1 << pipe); - ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, - &intel_plane_funcs, - plane_formats, num_plane_formats, - DRM_PLANE_TYPE_OVERLAY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "plane %d%c", plane + 2, pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "sprite %c", sprite_name(pipe, plane)); if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 223129d3c765..49136ad5473e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -826,7 +826,7 @@ static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp = I915_READ(TV_CTL); if (!(tmp & TV_ENC_ENABLE)) @@ -841,7 +841,7 @@ static void intel_enable_tv(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_wait_for_vblank(encoder->base.dev, @@ -854,7 +854,7 @@ static void intel_disable_tv(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); } @@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv, static void intel_tv_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + 
struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); @@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_crtc *crtc = connector->state->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; int type; @@ -1501,6 +1501,8 @@ out: static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_tv_detect, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_tv_destroy, .set_property = intel_tv_set_property, .atomic_get_property = intel_connector_atomic_get_property, @@ -1512,7 +1514,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { void intel_tv_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_connector *connector; struct intel_tv *intel_tv; struct intel_encoder *intel_encoder; @@ -1591,7 +1592,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC, NULL); + DRM_MODE_ENCODER_TVDAC, "TV"); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; @@ -1600,7 +1601,6 @@ intel_tv_init(struct drm_device *dev) intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; @@ -1642,5 +1642,4 @@ intel_tv_init(struct drm_device *dev) drm_object_attach_property(&connector->base, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); - drm_connector_register(connector); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f1dfe616856..ff80a81b1a84 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_NORESTART; } -void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore) { - struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; int retry_count = 100; @@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) if (fw) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } @@ -400,43 +400,42 @@ 
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) return false; } -static void __intel_uncore_early_sanitize(struct drm_device *dev, +static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { __raw_i915_write32(dev_priv, GTFIFOCTL, __raw_i915_read32(dev_priv, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | GT_FIFO_CTL_RC6_POLICY_STALL); } - intel_uncore_forcewake_reset(dev, restore_forcewake); + intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } -void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) +void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, + bool restore_forcewake) { - __intel_uncore_early_sanitize(dev, restore_forcewake); - i915_check_and_clear_faults(dev); + __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + i915_check_and_clear_faults(dev_priv); } -void intel_uncore_sanitize(struct drm_device *dev) +void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); } static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, @@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, fw_domain_reset(d); } -static void intel_uncore_fw_domains_init(struct drm_device *dev) +static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev_priv)->gen <= 5) return; - if (IS_GEN9(dev)) { + if (IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, @@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_ACK_BLITTER_GEN9); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - if (!IS_CHERRYVIEW(dev)) + if (!IS_CHERRYVIEW(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else @@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, 
FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev_priv)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1302,11 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - mutex_lock(&dev->struct_mutex); + spin_lock_irq(&dev_priv->uncore.lock); fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); ecobus = __raw_i915_read32(dev_priv, ECOBUS); fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); + spin_unlock_irq(&dev_priv->uncore.lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); @@ -1314,7 +1311,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN6(dev)) { + } else if (IS_GEN6(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = @@ -1327,26 +1324,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) WARN_ON(dev_priv->uncore.fw_domains == 0); } -void intel_uncore_init(struct drm_device *dev) +void intel_uncore_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - i915_check_vgpu(dev); + i915_check_vgpu(dev_priv); intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev); - __intel_uncore_early_sanitize(dev, false); + intel_uncore_fw_domains_init(dev_priv); + __intel_uncore_early_sanitize(dev_priv, false); dev_priv->uncore.unclaimed_mmio_check = 1; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { default: case 9: ASSIGN_WRITE_MMIO_VFUNCS(gen9); ASSIGN_READ_MMIO_VFUNCS(gen9); break; case 8: - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(chv); ASSIGN_READ_MMIO_VFUNCS(chv); @@ -1357,13 +1352,13 @@ void intel_uncore_init(struct drm_device *dev) break; case 7: case 6: - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(hsw); } else { ASSIGN_WRITE_MMIO_VFUNCS(gen6); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv)) { ASSIGN_READ_MMIO_VFUNCS(vlv); } else { ASSIGN_READ_MMIO_VFUNCS(gen6); @@ -1381,24 +1376,24 @@ void intel_uncore_init(struct drm_device *dev) break; } - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(vgpu); ASSIGN_READ_MMIO_VFUNCS(vgpu); } - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); } #undef ASSIGN_WRITE_MMIO_VFUNCS #undef ASSIGN_READ_MMIO_VFUNCS -void intel_uncore_fini(struct drm_device *dev) +void intel_uncore_fini(struct drm_i915_private *dev_priv) { /* Paranoia: make sure we have disabled everything before we exit. 
*/ - intel_uncore_sanitize(dev); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_sanitize(dev_priv); + intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK(h, l) +#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) static const struct register_whitelist { i915_reg_t offset_ldw, offset_udw; @@ -1414,7 +1409,7 @@ static const struct register_whitelist { int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; unsigned size; @@ -1423,7 +1418,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask)) break; } @@ -1467,83 +1462,47 @@ out: return ret; } -int i915_get_reset_stats_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reset_stats *args = data; - struct i915_ctx_hang_stats *hs; - struct intel_context *ctx; - int ret; - - if (args->flags || args->pad) - return -EINVAL; - - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } - hs = &ctx->hang_stats; - - if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); - else - args->reset_count = 0; - - args->batch_active = hs->batch_active; - args->batch_pending = hs->batch_pending; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static int i915_reset_complete(struct drm_device *dev) +static int i915_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { + struct pci_dev *pdev = dev_priv->drm.pdev; + /* assert reset for at least 20 usec */ - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); udelay(20); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); - return wait_for(i915_reset_complete(dev), 500); + return wait_for(i915_reset_complete(pdev), 500); } -static int g4x_reset_complete(struct drm_device *dev) +static int g4x_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(dev), 500); + struct pci_dev *pdev = dev_priv->drm.pdev; + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(pdev), 500); } -static 
int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1551,9 +1510,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1561,27 +1520,29 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); return 0; } -static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); - ret = wait_for((I915_READ(ILK_GDSR) & - ILK_GRDOM_RESET_ENABLE) == 0, 500); + ret = intel_wait_for_register(dev_priv, + ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, + 500); if (ret) return ret; I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); - ret = wait_for((I915_READ(ILK_GDSR) & - ILK_GRDOM_RESET_ENABLE) == 0, 500); + ret = intel_wait_for_register(dev_priv, + ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, + 500); if (ret) return ret; @@ -1594,25 +1555,21 @@ static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, u32 hw_domain_mask) { - int ret; - /* GEN6_GDRST is not in the gt power well, no need to check * for fifo space for the write or forcewake the chip for * the read */ __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); -#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0) /* Spin waiting for the device to ack the reset requests */ - ret = wait_for(ACKED, 500); -#undef ACKED - - return ret; + return intel_wait_for_register_fw(dev_priv, + GEN6_GDRST, hw_domain_mask, 0, + 500); } /** * gen6_reset_engines - reset individual engines - * @dev: DRM device + * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset * * This function will reset the individual engines that are set in engine_mask. @@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * * Returns 0 on success, nonzero on error. 
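 * * A hedged usage sketch (names taken from the @engine_mask description above, not from extra code in this patch): gen6_reset_engines(dev_priv, ALL_ENGINES) requests a full reset, while gen6_reset_engines(dev_priv, intel_ring_flag(engine)) resets only that engine.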
*/ -static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen6_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { [RCS] = GEN6_GRDOM_RENDER, @@ -1647,33 +1604,94 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) ret = gen6_hw_domain_reset(dev_priv, hw_mask); - intel_uncore_forcewake_reset(dev, true); + intel_uncore_forcewake_reset(dev_priv, true); return ret; } -static int wait_for_register_fw(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const u32 mask, - const u32 value, - const unsigned long timeout_ms) +/** + * intel_wait_for_register_fw - wait until register matches expected state + * @dev_priv: the i915 device + * @reg: the register to read + * @mask: mask to apply to register value + * @value: expected value + * @timeout_ms: timeout in milliseconds + * + * This routine waits until the target register @reg contains the expected + * @value after applying the @mask, i.e. it waits until + * (I915_READ_FW(@reg) & @mask) == @value + * Otherwise, the wait will time out after @timeout_ms milliseconds. + * + * Note that this routine assumes the caller holds forcewake asserted; it is + * not suitable for very long waits. See intel_wait_for_register() if you + * wish to wait without holding forcewake for the duration (i.e. you expect + * the wait to be slow). + * + * Returns 0 if the register matches the desired condition, or -ETIMEDOUT. + */ +int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms) +{ +#define done ((I915_READ_FW(reg) & mask) == value) + int ret = wait_for_us(done, 2); + if (ret) + ret = wait_for(done, timeout_ms); + return ret; +#undef done +} + +/** + * intel_wait_for_register - wait until register matches expected state + * @dev_priv: the i915 device + * @reg: the register to read + * @mask: mask to apply to register value + * @value: expected value + * @timeout_ms: timeout in milliseconds + * + * This routine waits until the target register @reg contains the expected + * @value after applying the @mask, i.e. it waits until + * (I915_READ(@reg) & @mask) == @value + * Otherwise, the wait will time out after @timeout_ms milliseconds. + * + * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
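+ * + * A hedged usage sketch, mirroring the ironlake_do_reset() conversion earlier in this patch (register and bit names come from that hunk): ret = intel_wait_for_register(dev_priv, ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, 500);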
+ */ +int intel_wait_for_register(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms) { - return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms); + + unsigned fw = + intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + int ret; + + intel_uncore_forcewake_get(dev_priv, fw); + ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2); + intel_uncore_forcewake_put(dev_priv, fw); + if (ret) + ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value, + timeout_ms); + + return ret; } static int gen8_request_engine_reset(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_i915_private *dev_priv = engine->dev->dev_private; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - ret = wait_for_register_fw(dev_priv, - RING_RESET_CTL(engine->mmio_base), - RESET_CTL_READY_TO_RESET, - RESET_CTL_READY_TO_RESET, - 700); + ret = intel_wait_for_register_fw(dev_priv, + RING_RESET_CTL(engine->mmio_base), + RESET_CTL_READY_TO_RESET, + RESET_CTL_READY_TO_RESET, + 700); if (ret) DRM_ERROR("%s: reset request timeout\n", engine->name); @@ -1682,22 +1700,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine) static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen8_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; for_each_engine_masked(engine, dev_priv, engine_mask) if (gen8_request_engine_reset(engine)) goto not_ready; - return gen6_reset_engines(dev, engine_mask); + return gen6_reset_engines(dev_priv, engine_mask); not_ready: for_each_engine_masked(engine, dev_priv, engine_mask) @@ -1706,35 +1724,35 @@ not_ready: return -EIO; } -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, - unsigned engine_mask) +typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { if (!i915.reset) return NULL; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_INFO(dev_priv)->gen >= 8) return gen8_reset_engines; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_INFO(dev_priv)->gen >= 6) return gen6_reset_engines; - else if (IS_GEN5(dev)) + else if (IS_GEN5(dev_priv)) return ironlake_do_reset; - else if (IS_G4X(dev)) + else if (IS_G4X(dev_priv)) return g4x_do_reset; - else if (IS_G33(dev)) + else if (IS_G33(dev_priv)) return g33_do_reset; - else if (INTEL_INFO(dev)->gen >= 3) + else if (INTEL_INFO(dev_priv)->gen >= 3) return i915_do_reset; else return NULL; } -int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = to_i915(dev); - int (*reset)(struct drm_device *, unsigned); + reset_func reset; int ret; - reset = intel_get_gpu_reset(dev); + reset = intel_get_gpu_reset(dev_priv); if (reset == NULL) return -ENODEV; @@ -1742,15 +1760,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) * request may be dropped and never completes (causing 
-EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = reset(dev, engine_mask); + ret = reset(dev_priv, engine_mask); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } -bool intel_has_gpu_reset(struct drm_device *dev) +bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) { - return intel_get_gpu_reset(dev) != NULL; + return intel_get_gpu_reset(dev_priv) != NULL; } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1758,7 +1776,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv) int ret; unsigned long irqflags; - if (!i915.enable_guc_submission) + if (!HAS_GUC(dev_priv)) return -EINVAL; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -1802,10 +1820,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); break; @@ -1842,10 +1860,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); break; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 44fb0b35eed3..68db9621f1f0 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -447,10 +447,16 @@ struct bdb_lfp_backlight_data_entry { u8 obsolete3; } __packed; +struct bdb_lfp_backlight_control_method { + u8 type:4; + u8 controller:4; +} __packed; + struct bdb_lfp_backlight_data { u8 entry_size; struct bdb_lfp_backlight_data_entry data[16]; u8 level[16]; + struct bdb_lfp_backlight_control_method backlight_control[16]; } __packed; struct aimdb_header { diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index a1844b50546c..f2c9ae822149 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -1,7 +1,6 @@ config DRM_IMX tristate "DRM Support for Freescale i.MX" select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index a24631fdf4ad..359cd2765552 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -28,6 +28,11 @@ struct imx_hdmi { struct regmap *regmap; }; +static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e) +{ + return container_of(e, struct imx_hdmi, encoder); +} + static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = { { 45250000, { @@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder) { } -static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) +static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder) { -} - -static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder) -{ - struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); + struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder); int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, @@ -125,16 +124,23 @@ static void 
dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder) mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT); } -static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder) +static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24); + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); + + imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; + imx_crtc_state->di_hsync_pin = 2; + imx_crtc_state->di_vsync_pin = 3; + + return 0; } static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { - .mode_set = dw_hdmi_imx_encoder_mode_set, - .prepare = dw_hdmi_imx_encoder_prepare, - .commit = dw_hdmi_imx_encoder_commit, + .enable = dw_hdmi_imx_encoder_enable, .disable = dw_hdmi_imx_encoder_disable, + .atomic_check = dw_hdmi_imx_atomic_check, }; static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 82656654fb21..9f7dafce3a4c 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -15,10 +15,14 @@ */ #include <linux/component.h> #include <linux/device.h> +#include <linux/dma-buf.h> #include <linux/fb.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/reservation.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -41,6 +45,7 @@ struct imx_drm_device { struct imx_drm_crtc *crtc[MAX_CRTC]; unsigned int pipes; struct drm_fbdev_cma *fbhelper; + struct drm_atomic_state *state; }; struct imx_drm_crtc { @@ -85,45 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm) return 0; } -static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) -{ - struct imx_drm_device *imxdrm = crtc->dev->dev_private; - unsigned i; - - for (i = 0; i < MAX_CRTC; i++) - if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc) - return imxdrm->crtc[i]; - - return NULL; -} - -int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, - int hsync_pin, int vsync_pin, u32 bus_flags) -{ - struct imx_drm_crtc_helper_funcs *helper; - struct imx_drm_crtc *imx_crtc; - - imx_crtc = imx_drm_find_crtc(encoder->crtc); - if (!imx_crtc) - return -EINVAL; - - helper = &imx_crtc->imx_drm_helper_funcs; - if (helper->set_interface_pix_fmt) - return helper->set_interface_pix_fmt(encoder->crtc, - bus_format, hsync_pin, vsync_pin, - bus_flags); - return 0; -} -EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); - -int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) -{ - return imx_drm_set_bus_config(encoder, bus_format, 2, 3, - DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_NEGEDGE); -} -EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); - int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) { return drm_crtc_vblank_get(imx_drm_crtc->crtc); @@ -208,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm) static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = imx_drm_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_crtc *crtc; + struct 
drm_crtc_state *crtc_state; + struct drm_plane_state *plane_state; + struct drm_gem_cma_object *cma_obj; + struct fence *excl; + unsigned shared_count; + struct fence **shared; + unsigned int i, j; + int ret; + + /* Wait for fences. */ + for_each_crtc_in_state(state, crtc, crtc_state, i) { + plane_state = crtc->primary->state; + if (plane_state->fb) { + cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0); + if (cma_obj->base.dma_buf) { + ret = reservation_object_get_fences_rcu( + cma_obj->base.dma_buf->resv, &excl, + &shared_count, &shared); + if (unlikely(ret)) + DRM_ERROR("failed to get fences " "for buffer\n"); + + if (excl) { + fence_wait(excl, false); + fence_put(excl); + } + for (j = 0; j < shared_count; j++) { + fence_wait(shared[j], false); + fence_put(shared[j]); + } + } + } + } + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, true); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { + .atomic_commit_tail = imx_drm_atomic_commit_tail, }; /* @@ -249,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &imx_drm_mode_config_funcs; + drm->mode_config.helper_private = &imx_drm_mode_config_helpers; drm_mode_config_init(drm); @@ -279,6 +303,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) } } + drm_mode_config_reset(drm); + /* * All components are now initialised, so setup the fb helper. * The fb helper takes copies of key hardware information, so the @@ -289,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) dev_warn(drm->dev, "Invalid legacyfb_depth.
Defaulting to 16bpp\n"); legacyfb_depth = 16; } - drm_helper_disable_unused_functions(drm); imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, drm->mode_config.num_crtc, MAX_CRTC); if (IS_ERR(imxdrm->fbhelper)) { @@ -403,11 +428,11 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = { }; static struct drm_driver imx_drm_driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | + DRIVER_ATOMIC, .load = imx_drm_driver_load, .unload = imx_drm_driver_unload, .lastclose = imx_drm_driver_lastclose, - .set_busid = drm_platform_set_busid, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, @@ -492,6 +517,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev) static int imx_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct imx_drm_device *imxdrm; /* The drm_dev is NULL before .load hook is called */ if (drm_dev == NULL) @@ -499,17 +525,26 @@ static int imx_drm_suspend(struct device *dev) drm_kms_helper_poll_disable(drm_dev); + imxdrm = drm_dev->dev_private; + imxdrm->state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(imxdrm->state)) { + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(imxdrm->state); + } + return 0; } static int imx_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct imx_drm_device *imx_drm; if (drm_dev == NULL) return 0; - drm_helper_resume_force_mode(drm_dev); + imx_drm = drm_dev->dev_private; + drm_atomic_helper_resume(drm_dev, imx_drm->state); drm_kms_helper_poll_enable(drm_dev); return 0; diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 74320a1723b7..07d33e45f90f 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -15,12 +15,22 @@ struct platform_device; unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc); +struct imx_crtc_state { + struct drm_crtc_state base; + u32 bus_format; + u32 bus_flags; + int di_hsync_pin; + int di_vsync_pin; +}; + +static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s) +{ + return container_of(s, struct imx_crtc_state, base); +} + struct imx_drm_crtc_helper_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); - int (*set_interface_pix_fmt)(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin, - u32 bus_flags); const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; }; @@ -42,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); -int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, - int hsync_pin, int vsync_pin, u32 bus_flags); -int imx_drm_set_bus_format(struct drm_encoder *encoder, - u32 bus_format); - int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index beff793bb717..b03919ed60ba 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -17,6 +17,8 @@ #include <linux/clk.h> #include <linux/component.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_of.h> @@ -49,9 +51,6 @@ #define 
LDB_DI1_VS_POL_ACT_LOW (1 << 10) #define LDB_BGREF_RMODE_INT (1 << 15) -#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector) -#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder) - struct imx_ldb; struct imx_ldb_channel { @@ -66,9 +65,19 @@ struct imx_ldb_channel { int edid_len; struct drm_display_mode mode; int mode_valid; - int bus_format; + u32 bus_format; }; +static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) +{ + return container_of(c, struct imx_ldb_channel, connector); +} + +static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) +{ + return container_of(e, struct imx_ldb_channel, encoder); +} + struct bus_mux { int reg; int shift; @@ -93,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect( return connector_status_connected; } +static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch, + u32 bus_format) +{ + struct imx_ldb *ldb = imx_ldb_ch->ldb; + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; + + switch (bus_format) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + if (imx_ldb_ch->chno == 0 || dual) + ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24; + if (imx_ldb_ch->chno == 1 || dual) + ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + if (imx_ldb_ch->chno == 0 || dual) + ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | + LDB_BIT_MAP_CH0_JEIDA; + if (imx_ldb_ch->chno == 1 || dual) + ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | + LDB_BIT_MAP_CH1_JEIDA; + break; + } +} + static int imx_ldb_connector_get_modes(struct drm_connector *connector) { struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); @@ -100,11 +135,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs && imx_ldb_ch->panel->funcs->get_modes) { - struct drm_display_info *di = &connector->display_info; - num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel); - if (!imx_ldb_ch->bus_format && di->num_bus_formats) - imx_ldb_ch->bus_format = di->bus_formats[0]; if (num_modes > 0) return num_modes; } @@ -141,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder( return &imx_ldb_ch->encoder; } -static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode) -{ -} - static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { @@ -173,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, chno); } -static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) -{ - struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); - struct imx_ldb *ldb = imx_ldb_ch->ldb; - int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; - u32 bus_format; - - switch (imx_ldb_ch->bus_format) { - default: - dev_warn(ldb->dev, - "could not determine data mapping, default to 18-bit \"spwg\"\n"); - /* fallthrough */ - case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: - bus_format = MEDIA_BUS_FMT_RGB666_1X18; - break; - case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: - bus_format = MEDIA_BUS_FMT_RGB888_1X24; - if (imx_ldb_ch->chno == 0 || dual) - ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24; - if (imx_ldb_ch->chno == 1 || dual) - ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24; - break; - case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: - bus_format = MEDIA_BUS_FMT_RGB888_1X24; - if (imx_ldb_ch->chno == 0 || dual) - ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | - LDB_BIT_MAP_CH0_JEIDA; - if 
(imx_ldb_ch->chno == 1 || dual) - ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | - LDB_BIT_MAP_CH1_JEIDA; - break; - } - - imx_drm_set_bus_format(encoder, bus_format); -} - -static void imx_ldb_encoder_commit(struct drm_encoder *encoder) +static void imx_ldb_encoder_enable(struct drm_encoder *encoder) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; @@ -219,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder) drm_panel_prepare(imx_ldb_ch->panel); if (dual) { + clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]); + clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]); + clk_prepare_enable(ldb->clk[0]); clk_prepare_enable(ldb->clk[1]); + } else { + clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]); } if (imx_ldb_ch == &ldb->channel[0] || dual) { @@ -265,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, unsigned long serial_clk; unsigned long di_clk = mode->clock * 1000; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + u32 bus_format = imx_ldb_ch->bus_format; if (mode->clock > 170000) { dev_warn(ldb->dev, @@ -286,18 +283,33 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, } /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ - if (imx_ldb_ch == &ldb->channel[0]) { + if (imx_ldb_ch == &ldb->channel[0] || dual) { if (mode->flags & DRM_MODE_FLAG_NVSYNC) ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW; else if (mode->flags & DRM_MODE_FLAG_PVSYNC) ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW; } - if (imx_ldb_ch == &ldb->channel[1]) { + if (imx_ldb_ch == &ldb->channel[1] || dual) { if (mode->flags & DRM_MODE_FLAG_NVSYNC) ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW; else if (mode->flags & DRM_MODE_FLAG_PVSYNC) ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW; } + + if (!bus_format) { + struct drm_connector *connector; + + drm_for_each_connector(connector, encoder->dev) { + struct drm_display_info *di = &connector->display_info; + + if (connector->encoder == encoder && + di->num_bus_formats) { + bus_format = di->bus_formats[0]; + break; + } + } + } + imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format); } static void imx_ldb_encoder_disable(struct drm_encoder *encoder) @@ -357,11 +369,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) drm_panel_unprepare(imx_ldb_ch->panel); } +static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); + struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); + struct drm_display_info *di = &conn_state->connector->display_info; + u32 bus_format = imx_ldb_ch->bus_format; + + /* Bus format description in DT overrides connector display info. 
*/ + if (!bus_format && di->num_bus_formats) + bus_format = di->bus_formats[0]; + switch (bus_format) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; + break; + default: + return -EINVAL; + } + + imx_crtc_state->di_hsync_pin = 2; + imx_crtc_state->di_vsync_pin = 3; + + return 0; +} + + static const struct drm_connector_funcs imx_ldb_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_ldb_connector_detect, .destroy = imx_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { @@ -374,11 +420,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { }; static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { - .dpms = imx_ldb_encoder_dpms, - .prepare = imx_ldb_encoder_prepare, - .commit = imx_ldb_encoder_commit, .mode_set = imx_ldb_encoder_mode_set, + .enable = imx_ldb_encoder_enable, .disable = imx_ldb_encoder_disable, + .atomic_check = imx_ldb_encoder_atomic_check, }; static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno) @@ -400,10 +445,10 @@ static int imx_ldb_register(struct drm_device *drm, struct imx_ldb_channel *imx_ldb_ch) { struct imx_ldb *ldb = imx_ldb_ch->ldb; + struct drm_encoder *encoder = &imx_ldb_ch->encoder; int ret; - ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder, - imx_ldb_ch->child); + ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); if (ret) return ret; @@ -417,9 +462,8 @@ static int imx_ldb_register(struct drm_device *drm, return ret; } - drm_encoder_helper_add(&imx_ldb_ch->encoder, - &imx_ldb_encoder_helper_funcs); - drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs, + drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); + drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs, DRM_MODE_ENCODER_LVDS, NULL); drm_connector_helper_add(&imx_ldb_ch->connector, @@ -427,11 +471,14 @@ static int imx_ldb_register(struct drm_device *drm, drm_connector_init(drm, &imx_ldb_ch->connector, &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - if (imx_ldb_ch->panel) - drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector); + if (imx_ldb_ch->panel) { + ret = drm_panel_attach(imx_ldb_ch->panel, + &imx_ldb_ch->connector); + if (ret) + return ret; + } - drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, - &imx_ldb_ch->encoder); + drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder); return 0; } @@ -560,6 +607,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) struct imx_ldb_channel *channel; struct device_node *ddc_node; struct device_node *ep; + int bus_format; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) @@ -632,21 +680,22 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) } } - channel->bus_format = of_get_bus_format(dev, child); - if (channel->bus_format == -EINVAL) { + bus_format = of_get_bus_format(dev, child); + if (bus_format == -EINVAL) { /* * If no bus format was specified in the device tree, * we can still get it from the 
connected panel later. */ if (channel->panel && channel->panel->funcs && channel->panel->funcs->get_modes) - channel->bus_format = 0; + bus_format = 0; } - if (channel->bus_format < 0) { + if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", - channel->bus_format); - return channel->bus_format; + bus_format); + return bus_format; } + channel->bus_format = bus_format; ret = imx_ldb_register(drm, channel); if (ret) diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index baf788121287..5e875944ffa2 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -23,6 +23,7 @@ #include <linux/spinlock.h> #include <linux/videodev2.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <video/imx-ipu-v3.h> @@ -97,9 +98,6 @@ /* TVE_TST_MODE_REG */ #define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0) -#define con_to_tve(x) container_of(x, struct imx_tve, connector) -#define enc_to_tve(x) container_of(x, struct imx_tve, encoder) - enum { TVE_MODE_TVOUT, TVE_MODE_VGA, @@ -112,6 +110,8 @@ struct imx_tve { spinlock_t lock; /* register lock */ bool enabled; int mode; + int di_hsync_pin; + int di_vsync_pin; struct regmap *regmap; struct regulator *dac_reg; @@ -120,10 +120,18 @@ struct imx_tve { struct clk *di_sel_clk; struct clk_hw clk_hw_di; struct clk *di_clk; - int vsync_pin; - int hsync_pin; }; +static inline struct imx_tve *con_to_tve(struct drm_connector *c) +{ + return container_of(c, struct imx_tve, connector); +} + +static inline struct imx_tve *enc_to_tve(struct drm_encoder *e) +{ + return container_of(e, struct imx_tve, encoder); +} + static void tve_lock(void *__tve) __acquires(&tve->lock) { @@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve) tve->enabled = true; clk_prepare_enable(tve->clk); ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, - TVE_IPU_CLK_EN | TVE_EN, - TVE_IPU_CLK_EN | TVE_EN); + TVE_EN, TVE_EN); } /* clear interrupt status register */ @@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve) if (tve->enabled) { tve->enabled = false; ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, - TVE_IPU_CLK_EN | TVE_EN, 0); + TVE_EN, 0); clk_disable_unprepare(tve->clk); } } @@ -275,36 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder( return &tve->encoder; } -static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct imx_tve *tve = enc_to_tve(encoder); - int ret; - - ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, - TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE); - if (ret < 0) - dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret); -} - -static void imx_tve_encoder_prepare(struct drm_encoder *encoder) -{ - struct imx_tve *tve = enc_to_tve(encoder); - - tve_disable(tve); - - switch (tve->mode) { - case TVE_MODE_VGA: - imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, - tve->hsync_pin, tve->vsync_pin, - DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_NEGEDGE); - break; - case TVE_MODE_TVOUT: - imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); - break; - } -} - static void imx_tve_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *orig_mode, struct drm_display_mode *mode) @@ -333,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder, ret); } + regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, + TVE_IPU_CLK_EN, TVE_IPU_CLK_EN); + if (tve->mode == TVE_MODE_VGA) ret = tve_setup_vga(tve); else @@ -341,7 +321,7 @@ static 
void imx_tve_encoder_mode_set(struct drm_encoder *encoder, dev_err(tve->dev, "failed to set configuration: %d\n", ret); } -static void imx_tve_encoder_commit(struct drm_encoder *encoder) +static void imx_tve_encoder_enable(struct drm_encoder *encoder) { struct imx_tve *tve = enc_to_tve(encoder); @@ -355,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder) tve_disable(tve); } +static int imx_tve_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); + struct imx_tve *tve = enc_to_tve(encoder); + + imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24; + imx_crtc_state->di_hsync_pin = tve->di_hsync_pin; + imx_crtc_state->di_vsync_pin = tve->di_vsync_pin; + + return 0; +} + static const struct drm_connector_funcs imx_tve_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_tve_connector_detect, .destroy = imx_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { @@ -373,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = { }; static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { - .dpms = imx_tve_encoder_dpms, - .prepare = imx_tve_encoder_prepare, .mode_set = imx_tve_encoder_mode_set, - .commit = imx_tve_encoder_commit, + .enable = imx_tve_encoder_enable, .disable = imx_tve_encoder_disable, + .atomic_check = imx_tve_atomic_check, }; static irqreturn_t imx_tve_irq_handler(int irq, void *data) @@ -495,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) encoder_type = tve->mode == TVE_MODE_VGA ? 
DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC; - ret = imx_drm_encoder_parse_of(drm, &tve->encoder, - tve->dev->of_node); + ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node); if (ret) return ret; @@ -587,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) if (tve->mode == TVE_MODE_VGA) { ret = of_property_read_u32(np, "fsl,hsync-pin", - &tve->hsync_pin); + &tve->di_hsync_pin); if (ret < 0) { - dev_err(dev, "failed to get vsync pin\n"); + dev_err(dev, "failed to get hsync pin\n"); return ret; } - ret |= of_property_read_u32(np, "fsl,vsync-pin", - &tve->vsync_pin); + ret = of_property_read_u32(np, "fsl,vsync-pin", + &tve->di_vsync_pin); if (ret < 0) { dev_err(dev, "failed to get vsync pin\n"); @@ -633,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) tve->dac_reg = devm_regulator_get(dev, "dac"); if (!IS_ERR(tve->dac_reg)) { - regulator_set_voltage(tve->dac_reg, 2750000, 2750000); + ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000); + if (ret) + return ret; ret = regulator_enable(tve->dac_reg); if (ret) return ret; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index fc040417e1e8..08e188bc10fc 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -18,12 +18,12 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/errno.h> -#include <linux/reservation.h> -#include <linux/dma-buf.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -33,23 +33,6 @@ #define DRIVER_DESC "i.MX IPUv3 Graphics" -enum ipu_flip_status { - IPU_FLIP_NONE, - IPU_FLIP_PENDING, - IPU_FLIP_SUBMITTED, -}; - -struct ipu_flip_work { - struct work_struct unref_work; - struct drm_gem_object *bo; - struct drm_pending_vblank_event *page_flip_event; - struct work_struct fence_work; - struct ipu_crtc *crtc; - struct fence *excl; - unsigned shared_count; - struct fence **shared; -}; - struct ipu_crtc { struct device *dev; struct drm_crtc base; @@ -60,201 +43,166 @@ struct ipu_crtc { struct ipu_dc *dc; struct ipu_di *di; - int enabled; - enum ipu_flip_status flip_state; - struct workqueue_struct *flip_queue; - struct ipu_flip_work *flip_work; int irq; - u32 bus_format; - u32 bus_flags; - int di_hsync_pin; - int di_vsync_pin; }; -#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base) +static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc) +{ + return container_of(crtc, struct ipu_crtc, base); +} -static void ipu_fb_enable(struct ipu_crtc *ipu_crtc) +static void ipu_crtc_enable(struct drm_crtc *crtc) { + struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); - if (ipu_crtc->enabled) - return; - ipu_dc_enable(ipu); - ipu_plane_enable(ipu_crtc->plane[0]); - /* Start DC channel and DI after IDMAC */ ipu_dc_enable_channel(ipu_crtc->dc); ipu_di_enable(ipu_crtc->di); - drm_crtc_vblank_on(&ipu_crtc->base); - - ipu_crtc->enabled = 1; } -static void ipu_fb_disable(struct ipu_crtc *ipu_crtc) +static void ipu_crtc_disable(struct drm_crtc *crtc) { + struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); - if (!ipu_crtc->enabled) - return; - - /* Stop DC channel and DI before IDMAC */ ipu_dc_disable_channel(ipu_crtc->dc); 
ipu_di_disable(ipu_crtc->di); - ipu_plane_disable(ipu_crtc->plane[0]); ipu_dc_disable(ipu); - drm_crtc_vblank_off(&ipu_crtc->base); - ipu_crtc->enabled = 0; + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); } -static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) +static void imx_drm_crtc_reset(struct drm_crtc *crtc) { - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); + struct imx_crtc_state *state; - dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode); - - switch (mode) { - case DRM_MODE_DPMS_ON: - ipu_fb_enable(ipu_crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - ipu_fb_disable(ipu_crtc); - break; + if (crtc->state) { + if (crtc->state->mode_blob) + drm_property_unreference_blob(crtc->state->mode_blob); + + state = to_imx_crtc_state(crtc->state); + memset(state, 0, sizeof(*state)); + } else { + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + crtc->state = &state->base; } + + state->base.crtc = crtc; } -static void ipu_flip_unref_work_func(struct work_struct *__work) +static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc) { - struct ipu_flip_work *work = - container_of(__work, struct ipu_flip_work, unref_work); + struct imx_crtc_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; - drm_gem_object_unreference_unlocked(work->bo); - kfree(work); + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + + WARN_ON(state->base.crtc != crtc); + state->base.crtc = crtc; + + return &state->base; } -static void ipu_flip_fence_work_func(struct work_struct *__work) +static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - struct ipu_flip_work *work = - container_of(__work, struct ipu_flip_work, fence_work); - int i; - - /* wait for all fences attached to the FB obj to signal */ - if (work->excl) { - fence_wait(work->excl, false); - fence_put(work->excl); - } - for (i = 0; i < work->shared_count; i++) { - fence_wait(work->shared[i], false); - fence_put(work->shared[i]); - } + __drm_atomic_helper_crtc_destroy_state(state); + kfree(to_imx_crtc_state(state)); +} + +static const struct drm_crtc_funcs ipu_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = imx_drm_crtc_reset, + .atomic_duplicate_state = imx_drm_crtc_duplicate_state, + .atomic_destroy_state = imx_drm_crtc_destroy_state, +}; - work->crtc->flip_state = IPU_FLIP_SUBMITTED; +static irqreturn_t ipu_irq_handler(int irq, void *dev_id) +{ + struct ipu_crtc *ipu_crtc = dev_id; + + imx_drm_handle_vblank(ipu_crtc->imx_crtc); + + return IRQ_HANDLED; } -static int ipu_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - struct ipu_flip_work *flip_work; + struct videomode vm; int ret; - if (ipu_crtc->flip_state != IPU_FLIP_NONE) - return -EBUSY; - - ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); - if (ret) { - dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n"); - 
list_del(&event->base.link); - - return ret; - } + drm_display_mode_to_videomode(adjusted_mode, &vm); - flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); - if (!flip_work) { - ret = -ENOMEM; - goto put_vblank; - } - INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func); - flip_work->page_flip_event = event; + ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm); + if (ret) + return false; - /* get BO backing the old framebuffer and take a reference */ - flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; - drm_gem_object_reference(flip_work->bo); + if ((vm.vsync_len == 0) || (vm.hsync_len == 0)) + return false; - ipu_crtc->flip_work = flip_work; - /* - * If the object has a DMABUF attached, we need to wait on its fences - * if there are any. - */ - if (cma_obj->base.dma_buf) { - INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func); - flip_work->crtc = ipu_crtc; + drm_display_mode_from_videomode(&vm, adjusted_mode); - ret = reservation_object_get_fences_rcu( - cma_obj->base.dma_buf->resv, &flip_work->excl, - &flip_work->shared_count, &flip_work->shared); + return true; +} - if (unlikely(ret)) { - DRM_ERROR("failed to get fences for buffer\n"); - goto free_flip_work; - } +static int ipu_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary); - /* No need to queue the worker if the are no fences */ - if (!flip_work->excl && !flip_work->shared_count) { - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; - } else { - ipu_crtc->flip_state = IPU_FLIP_PENDING; - queue_work(ipu_crtc->flip_queue, - &flip_work->fence_work); - } - } else { - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; - } + if (state->active && (primary_plane_mask & state->plane_mask) == 0) + return -EINVAL; return 0; - -free_flip_work: - drm_gem_object_unreference_unlocked(flip_work->bo); - kfree(flip_work); - ipu_crtc->flip_work = NULL; -put_vblank: - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); - - return ret; } -static const struct drm_crtc_funcs ipu_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, - .destroy = drm_crtc_cleanup, - .page_flip = ipu_page_flip, -}; +static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); +} -static int ipu_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *orig_mode, - struct drm_display_mode *mode, - int x, int y, - struct drm_framebuffer *old_fb) +static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state); struct ipu_di_signal_cfg sig_cfg = {}; unsigned long encoder_types = 0; - int ret; dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__, mode->hdisplay); dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__, mode->vdisplay); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) encoder_types |= BIT(encoder->encoder_type); + } dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n", __func__, encoder_types); @@ -272,114 
+220,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, else sig_cfg.clkflags = 0; - sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); + sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW); /* Default to driving pixel data on negative clock edges */ - sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & + sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE); - sig_cfg.bus_format = ipu_crtc->bus_format; + sig_cfg.bus_format = imx_crtc_state->bus_format; sig_cfg.v_to_h_sync = 0; - sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; - sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin; + sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin; + sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin; drm_display_mode_to_videomode(mode, &sig_cfg.mode); - ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, - mode->flags & DRM_MODE_FLAG_INTERLACE, - ipu_crtc->bus_format, mode->hdisplay); - if (ret) { - dev_err(ipu_crtc->dev, - "initializing display controller failed with %d\n", - ret); - return ret; - } - - ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg); - if (ret) { - dev_err(ipu_crtc->dev, - "initializing panel failed with %d\n", ret); - return ret; - } - - return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, - crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x, y, mode->hdisplay, mode->vdisplay, - mode->flags & DRM_MODE_FLAG_INTERLACE); -} - -static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) -{ - unsigned long flags; - struct drm_device *drm = ipu_crtc->base.dev; - struct ipu_flip_work *work = ipu_crtc->flip_work; - - spin_lock_irqsave(&drm->event_lock, flags); - if (work->page_flip_event) - drm_crtc_send_vblank_event(&ipu_crtc->base, - work->page_flip_event); - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); - spin_unlock_irqrestore(&drm->event_lock, flags); -} - -static irqreturn_t ipu_irq_handler(int irq, void *dev_id) -{ - struct ipu_crtc *ipu_crtc = dev_id; - - imx_drm_handle_vblank(ipu_crtc->imx_crtc); - - if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) { - struct ipu_plane *plane = ipu_crtc->plane[0]; - - ipu_plane_set_base(plane, ipu_crtc->base.primary->fb, - plane->x, plane->y); - ipu_crtc_handle_pageflip(ipu_crtc); - queue_work(ipu_crtc->flip_queue, - &ipu_crtc->flip_work->unref_work); - ipu_crtc->flip_state = IPU_FLIP_NONE; - } - - return IRQ_HANDLED; -} - -static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - struct videomode vm; - int ret; - - drm_display_mode_to_videomode(adjusted_mode, &vm); - - ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm); - if (ret) - return false; - - drm_display_mode_from_videomode(&vm, adjusted_mode); - - return true; -} - -static void ipu_crtc_prepare(struct drm_crtc *crtc) -{ - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - - ipu_fb_disable(ipu_crtc); -} - -static void ipu_crtc_commit(struct drm_crtc *crtc) -{ - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - - ipu_fb_enable(ipu_crtc); + ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, + mode->flags & DRM_MODE_FLAG_INTERLACE, + imx_crtc_state->bus_format, mode->hdisplay); + ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg); } static const struct drm_crtc_helper_funcs ipu_helper_funcs = { - .dpms = ipu_crtc_dpms, .mode_fixup = ipu_crtc_mode_fixup, - .mode_set = ipu_crtc_mode_set, - .prepare = ipu_crtc_prepare, - .commit = ipu_crtc_commit, + .mode_set_nofb = ipu_crtc_mode_set_nofb, + .atomic_check = 
ipu_crtc_atomic_check, + .atomic_begin = ipu_crtc_atomic_begin, + .disable = ipu_crtc_disable, + .enable = ipu_crtc_enable, }; static int ipu_enable_vblank(struct drm_crtc *crtc) @@ -398,23 +262,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) disable_irq_nosync(ipu_crtc->irq); } -static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) -{ - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - - ipu_crtc->bus_format = bus_format; - ipu_crtc->bus_flags = bus_flags; - ipu_crtc->di_hsync_pin = hsync_pin; - ipu_crtc->di_vsync_pin = vsync_pin; - - return 0; -} - static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = { .enable_vblank = ipu_enable_vblank, .disable_vblank = ipu_disable_vblank, - .set_interface_pix_fmt = ipu_set_interface_pix_fmt, .crtc_funcs = &ipu_crtc_funcs, .crtc_helper_funcs = &ipu_helper_funcs, }; @@ -496,8 +346,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, IPU_DP_FLOW_SYNC_FG, drm_crtc_mask(&ipu_crtc->base), DRM_PLANE_TYPE_OVERLAY); - if (IS_ERR(ipu_crtc->plane[1])) + if (IS_ERR(ipu_crtc->plane[1])) { ipu_crtc->plane[1] = NULL; + } else { + ret = ipu_plane_get_resources(ipu_crtc->plane[1]); + if (ret) { + dev_err(ipu_crtc->dev, "getting plane 1 " + "resources failed with %d.\n", ret); + goto err_put_plane0_res; + } + } } ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); @@ -505,16 +363,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); - goto err_put_plane_res; + goto err_put_plane1_res; } /* Only enable IRQ when we actually need it to trigger work. */ disable_irq(ipu_crtc->irq); - ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip"); - return 0; -err_put_plane_res: +err_put_plane1_res: + if (ipu_crtc->plane[1]) + ipu_plane_put_resources(ipu_crtc->plane[1]); +err_put_plane0_res: ipu_plane_put_resources(ipu_crtc->plane[0]); err_remove_crtc: imx_drm_remove_crtc(ipu_crtc->imx_crtc); @@ -553,9 +412,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master, imx_drm_remove_crtc(ipu_crtc->imx_crtc); - destroy_workqueue(ipu_crtc->flip_queue); - ipu_plane_put_resources(ipu_crtc->plane[0]); ipu_put_resources(ipu_crtc); + if (ipu_crtc->plane[1]) + ipu_plane_put_resources(ipu_crtc->plane[1]); + ipu_plane_put_resources(ipu_crtc->plane[0]); } static const struct component_ops ipu_crtc_ops = { diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index a4bb44118d33..4ad67d015ec7 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -14,13 +14,19 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> #include "video/imx-ipu-v3.h" #include "ipuv3-plane.h" -#define to_ipu_plane(x) container_of(x, struct ipu_plane, base) +static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p) +{ + return container_of(p, struct ipu_plane, base); +} static const uint32_t ipu_plane_formats[] = { DRM_FORMAT_ARGB1555, @@ -53,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane) IPU_IRQ_EOF); } -static int calc_vref(struct drm_display_mode *mode) +static inline unsigned long +drm_plane_state_to_eba(struct drm_plane_state *state) { - unsigned long htotal, vtotal; + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *cma_obj; - htotal = 
mode->htotal; - vtotal = mode->vtotal; + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + BUG_ON(!cma_obj); - if (!htotal || !vtotal) - return 60; - - return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal); + return cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * (state->src_y >> 16) + + (fb->bits_per_pixel >> 3) * (state->src_x >> 16); } -static inline int calc_bandwidth(int width, int height, unsigned int vref) +static inline unsigned long +drm_plane_state_to_ubo(struct drm_plane_state *state) { - return width * height * vref; -} + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *cma_obj; + unsigned long eba = drm_plane_state_to_eba(state); -int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, - int x, int y) -{ - struct drm_gem_cma_object *cma_obj[3]; - unsigned long eba, ubo, vbo; - int active, i; + cma_obj = drm_fb_cma_get_gem_obj(fb, 1); + BUG_ON(!cma_obj); - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { - cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i); - if (!cma_obj[i]) { - DRM_DEBUG_KMS("plane %d entry is null.\n", i); - return -EFAULT; - } - } + return cma_obj->paddr + fb->offsets[1] + + fb->pitches[1] * (state->src_y >> 16) / 2 + + (state->src_x >> 16) / 2 - eba; +} - eba = cma_obj[0]->paddr + fb->offsets[0] + - fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; +static inline unsigned long +drm_plane_state_to_vbo(struct drm_plane_state *state) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *cma_obj; + unsigned long eba = drm_plane_state_to_eba(state); - if (eba & 0x7) { - DRM_DEBUG_KMS("base address must be a multiple of 8.\n"); - return -EINVAL; - } + cma_obj = drm_fb_cma_get_gem_obj(fb, 2); + BUG_ON(!cma_obj); - if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) { - DRM_DEBUG_KMS("pitches out of range.\n"); - return -EINVAL; - } + return cma_obj->paddr + fb->offsets[2] + + fb->pitches[2] * (state->src_y >> 16) / 2 + + (state->src_x >> 16) / 2 - eba; +} - if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) { - DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n"); - return -EINVAL; - } +static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, + struct drm_plane_state *old_state) +{ + struct drm_plane *plane = &ipu_plane->base; + struct drm_plane_state *state = plane->state; + struct drm_framebuffer *fb = state->fb; + unsigned long eba, ubo, vbo; + int active; - ipu_plane->stride[0] = fb->pitches[0]; + eba = drm_plane_state_to_eba(state); switch (fb->pixel_format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: + if (old_state->fb) + break; + /* * Multiplanar formats have to meet the following restrictions: * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO @@ -117,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, * - Only EBA may be changed while scanout is active * - The strides of U and V planes must be identical. 
*/ - ubo = cma_obj[1]->paddr + fb->offsets[1] + - fb->pitches[1] * y / 2 + x / 2 - eba; - vbo = cma_obj[2]->paddr + fb->offsets[2] + - fb->pitches[2] * y / 2 + x / 2 - eba; + ubo = drm_plane_state_to_ubo(state); + vbo = drm_plane_state_to_vbo(state); - if ((ubo & 0x7) || (vbo & 0x7)) { - DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n"); - return -EINVAL; - } - - if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) { - DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n"); - return -EINVAL; - } - - if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) || - (ipu_plane->v_offset != vbo))) { - DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n"); - return -EINVAL; - } - - if (fb->pitches[1] != fb->pitches[2]) { - DRM_DEBUG_KMS("U/V pitches must be identical.\n"); - return -EINVAL; - } - - if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) { - DRM_DEBUG_KMS("U/V pitches out of range.\n"); - return -EINVAL; - } - - if (ipu_plane->enabled && - (ipu_plane->stride[1] != fb->pitches[1])) { - DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n"); - return -EINVAL; - } - - ipu_plane->u_offset = ubo; - ipu_plane->v_offset = vbo; - ipu_plane->stride[1] = fb->pitches[1]; + if (fb->pixel_format == DRM_FORMAT_YUV420) + ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, + fb->pitches[1], ubo, vbo); + else + ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, + fb->pitches[1], vbo, ubo); dev_dbg(ipu_plane->base.dev->dev, - "phys = %pad %pad %pad, x = %d, y = %d", - &cma_obj[0]->paddr, &cma_obj[1]->paddr, - &cma_obj[2]->paddr, x, y); + "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo, + state->src_x >> 16, state->src_y >> 16); break; default: - dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", - &cma_obj[0]->paddr, x, y); + dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d", + eba, state->src_x >> 16, state->src_y >> 16); + break; } - if (ipu_plane->enabled) { + if (old_state->fb) { active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); @@ -177,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); } - - /* cache offsets for subsequent pageflips */ - ipu_plane->x = x; - ipu_plane->y = y; - - return 0; -} - -int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, bool interlaced) -{ - struct device *dev = ipu_plane->base.dev->dev; - int ret; - - /* no scaling */ - if (src_w != crtc_w || src_h != crtc_h) - return -EINVAL; - - /* clip to crtc bounds */ - if (crtc_x < 0) { - if (-crtc_x > crtc_w) - return -EINVAL; - src_x += -crtc_x; - src_w -= -crtc_x; - crtc_w -= -crtc_x; - crtc_x = 0; - } - if (crtc_y < 0) { - if (-crtc_y > crtc_h) - return -EINVAL; - src_y += -crtc_y; - src_h -= -crtc_y; - crtc_h -= -crtc_y; - crtc_y = 0; - } - if (crtc_x + crtc_w > mode->hdisplay) { - if (crtc_x > mode->hdisplay) - return -EINVAL; - crtc_w = mode->hdisplay - crtc_x; - src_w = crtc_w; - } - if (crtc_y + crtc_h > mode->vdisplay) { - if (crtc_y > mode->vdisplay) - return -EINVAL; - crtc_h = mode->vdisplay - crtc_y; - src_h = crtc_h; - } - /* full plane minimum width is 13 
pixels */ - if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG)) - return -EINVAL; - if (crtc_h < 2) - return -EINVAL; - - /* - * since we cannot touch active IDMAC channels, we do not support - * resizing the enabled plane or changing its format - */ - if (ipu_plane->enabled) { - if (src_w != ipu_plane->w || src_h != ipu_plane->h || - fb->pixel_format != ipu_plane->base.fb->pixel_format) - return -EINVAL; - - return ipu_plane_set_base(ipu_plane, fb, src_x, src_y); - } - - switch (ipu_plane->dp_flow) { - case IPU_DP_FLOW_SYNC_BG: - ret = ipu_dp_setup_channel(ipu_plane->dp, - IPUV3_COLORSPACE_RGB, - IPUV3_COLORSPACE_RGB); - if (ret) { - dev_err(dev, - "initializing display processor failed with %d\n", - ret); - return ret; - } - ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); - break; - case IPU_DP_FLOW_SYNC_FG: - ipu_dp_setup_channel(ipu_plane->dp, - ipu_drm_fourcc_to_colorspace(fb->pixel_format), - IPUV3_COLORSPACE_UNKNOWN); - ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y); - /* Enable local alpha on partial plane */ - switch (fb->pixel_format) { - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); - break; - default: - break; - } - } - - ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc, - calc_bandwidth(crtc_w, crtc_h, - calc_vref(mode)), 64); - if (ret) { - dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret); - return ret; - } - - ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w); - - ipu_cpmem_zero(ipu_plane->ipu_ch); - ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h); - ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format); - if (ret < 0) { - dev_err(dev, "unsupported pixel format 0x%08x\n", - fb->pixel_format); - return ret; - } - ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); - ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); - ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); - - ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y); - if (ret < 0) - return ret; - if (interlaced) - ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]); - - if (fb->pixel_format == DRM_FORMAT_YUV420) { - ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, - ipu_plane->stride[1], - ipu_plane->u_offset, - ipu_plane->v_offset); - } else if (fb->pixel_format == DRM_FORMAT_YVU420) { - ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, - ipu_plane->stride[1], - ipu_plane->v_offset, - ipu_plane->u_offset); - } - - ipu_plane->w = src_w; - ipu_plane->h = src_h; - - return 0; } void ipu_plane_put_resources(struct ipu_plane *ipu_plane) @@ -372,7 +203,7 @@ err_out: return ret; } -void ipu_plane_enable(struct ipu_plane *ipu_plane) +static void ipu_plane_enable(struct ipu_plane *ipu_plane) { if (ipu_plane->dp) ipu_dp_enable(ipu_plane->ipu); @@ -380,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane) ipu_idmac_enable_channel(ipu_plane->ipu_ch); if (ipu_plane->dp) ipu_dp_enable_channel(ipu_plane->dp); - - ipu_plane->enabled = true; } -void ipu_plane_disable(struct ipu_plane *ipu_plane) +static void ipu_plane_disable(struct ipu_plane *ipu_plane) { - ipu_plane->enabled = false; - ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50); if (ipu_plane->dp) @@ -398,74 +225,225 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane) ipu_dp_disable(ipu_plane->ipu); } -/* - * drm_plane API - */ - -static int 
ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +static int ipu_disable_plane(struct drm_plane *plane) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); - int ret = 0; - - DRM_DEBUG_KMS("plane - %p\n", plane); - - if (!ipu_plane->enabled) - ret = ipu_plane_get_resources(ipu_plane); - if (ret < 0) - return ret; - - ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16, - false); - if (ret < 0) { - ipu_plane_put_resources(ipu_plane); - return ret; - } - if (crtc != plane->crtc) - dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", - plane->crtc, crtc); + DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - if (!ipu_plane->enabled) - ipu_plane_enable(ipu_plane); + ipu_plane_disable(ipu_plane); return 0; } -static int ipu_disable_plane(struct drm_plane *plane) +static void ipu_plane_destroy(struct drm_plane *plane) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - if (ipu_plane->enabled) - ipu_plane_disable(ipu_plane); + ipu_disable_plane(plane); + drm_plane_cleanup(plane); + kfree(ipu_plane); +} - ipu_plane_put_resources(ipu_plane); +static const struct drm_plane_funcs ipu_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = ipu_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int ipu_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_plane_state *old_state = plane->state; + struct drm_crtc_state *crtc_state; + struct device *dev = plane->dev->dev; + struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *old_fb = old_state->fb; + unsigned long eba, ubo, vbo, old_ubo, old_vbo; + + /* Ok to disable */ + if (!fb) + return 0; + + if (!state->crtc) + return -EINVAL; + + crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + /* CRTC should be enabled */ + if (!crtc_state->enable) + return -EINVAL; + + /* no scaling */ + if (state->src_w >> 16 != state->crtc_w || + state->src_h >> 16 != state->crtc_h) + return -EINVAL; + + switch (plane->type) { + case DRM_PLANE_TYPE_PRIMARY: + /* full plane doesn't support partial off screen */ + if (state->crtc_x || state->crtc_y || + state->crtc_w != crtc_state->adjusted_mode.hdisplay || + state->crtc_h != crtc_state->adjusted_mode.vdisplay) + return -EINVAL; + + /* full plane minimum width is 13 pixels */ + if (state->crtc_w < 13) + return -EINVAL; + break; + case DRM_PLANE_TYPE_OVERLAY: + if (state->crtc_x < 0 || state->crtc_y < 0) + return -EINVAL; + + if (state->crtc_x + state->crtc_w > + crtc_state->adjusted_mode.hdisplay) + return -EINVAL; + if (state->crtc_y + state->crtc_h > + crtc_state->adjusted_mode.vdisplay) + return -EINVAL; + break; + default: + dev_warn(dev, "Unsupported plane type\n"); + return -EINVAL; + } + + if (state->crtc_h < 2) + return -EINVAL; + + /* + * since we cannot touch active IDMAC channels, we do not support + * resizing the enabled plane or changing its format + */ + if (old_fb && (state->src_w != old_state->src_w || + state->src_h != old_state->src_h || 
+ fb->pixel_format != old_fb->pixel_format)) + return -EINVAL; + + eba = drm_plane_state_to_eba(state); + + if (eba & 0x7) + return -EINVAL; + + if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) + return -EINVAL; + + if (old_fb && fb->pitches[0] != old_fb->pitches[0]) + return -EINVAL; + + switch (fb->pixel_format) { + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + /* + * Multiplanar formats have to meet the following restrictions: + * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO + * - EBA, UBO and VBO are a multiple of 8 + * - UBO and VBO are unsigned and not larger than 0xfffff8 + * - Only EBA may be changed while scanout is active + * - The strides of U and V planes must be identical. + */ + ubo = drm_plane_state_to_ubo(state); + vbo = drm_plane_state_to_vbo(state); + + if ((ubo & 0x7) || (vbo & 0x7)) + return -EINVAL; + + if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) + return -EINVAL; + + if (old_fb) { + old_ubo = drm_plane_state_to_ubo(old_state); + old_vbo = drm_plane_state_to_vbo(old_state); + if (ubo != old_ubo || vbo != old_vbo) + return -EINVAL; + } + + if (fb->pitches[1] != fb->pitches[2]) + return -EINVAL; + + if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) + return -EINVAL; + + if (old_fb && old_fb->pitches[1] != fb->pitches[1]) + return -EINVAL; + } return 0; } -static void ipu_plane_destroy(struct drm_plane *plane) +static void ipu_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + ipu_disable_plane(plane); +} + +static void ipu_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); + struct drm_plane_state *state = plane->state; + enum ipu_color_space ics; - DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); + if (old_state->fb) { + ipu_plane_atomic_set_base(ipu_plane, old_state); + return; + } - ipu_disable_plane(plane); - drm_plane_cleanup(plane); - kfree(ipu_plane); + switch (ipu_plane->dp_flow) { + case IPU_DP_FLOW_SYNC_BG: + ipu_dp_setup_channel(ipu_plane->dp, + IPUV3_COLORSPACE_RGB, + IPUV3_COLORSPACE_RGB); + ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); + break; + case IPU_DP_FLOW_SYNC_FG: + ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format); + ipu_dp_setup_channel(ipu_plane->dp, ics, + IPUV3_COLORSPACE_UNKNOWN); + ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x, + state->crtc_y); + /* Enable local alpha on partial plane */ + switch (state->fb->pixel_format) { + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); + break; + default: + break; + } + } + + ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w); + + ipu_cpmem_zero(ipu_plane->ipu_ch); + ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16, + state->src_h >> 16); + ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format); + ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); + ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); + ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); + ipu_plane_atomic_set_base(ipu_plane, old_state); + ipu_plane_enable(ipu_plane); } -static const struct drm_plane_funcs ipu_plane_funcs = { - .update_plane = ipu_update_plane, - .disable_plane = ipu_disable_plane, - .destroy = ipu_plane_destroy, +static const struct drm_plane_helper_funcs 
ipu_plane_helper_funcs = { + .atomic_check = ipu_plane_atomic_check, + .atomic_disable = ipu_plane_atomic_disable, + .atomic_update = ipu_plane_atomic_update, }; struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, @@ -498,5 +476,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, return ERR_PTR(ret); } + drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs); + return ipu_plane; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 4448fd4ad4eb..338b88a74eb6 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -23,17 +23,6 @@ struct ipu_plane { int dma; int dp_flow; - - int x; - int y; - int w; - int h; - - unsigned int u_offset; - unsigned int v_offset; - unsigned int stride[2]; - - bool enabled; }; struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, @@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, bool interlaced); -void ipu_plane_enable(struct ipu_plane *plane); -void ipu_plane_disable(struct ipu_plane *plane); -int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb, - int x, int y); - int ipu_plane_get_resources(struct ipu_plane *plane); void ipu_plane_put_resources(struct ipu_plane *plane); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2d1fd02cd3d6..1dad297b01fd 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -16,6 +16,7 @@ #include <linux/component.h> #include <linux/module.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_panel.h> @@ -25,9 +26,6 @@ #include "imx-drm.h" -#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector) -#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder) - struct imx_parallel_display { struct drm_connector connector; struct drm_encoder encoder; @@ -37,8 +35,19 @@ struct imx_parallel_display { u32 bus_format; struct drm_display_mode mode; struct drm_panel *panel; + struct drm_bridge *bridge; }; +static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) +{ + return container_of(c, struct imx_parallel_display, connector); +} + +static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e) +{ + return container_of(e, struct imx_parallel_display, encoder); +} + static enum drm_connector_status imx_pd_connector_detect( struct drm_connector *connector, bool force) { @@ -53,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) if (imxpd->panel && imxpd->panel->funcs && imxpd->panel->funcs->get_modes) { - struct drm_display_info *di = &connector->display_info; - num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); - if (!imxpd->bus_format && di->num_bus_formats) - imxpd->bus_format = di->bus_formats[0]; if (num_modes > 0) return num_modes; } @@ -69,10 +74,16 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) if (np) { struct drm_display_mode *mode = drm_mode_create(connector->dev); + int ret; if (!mode) return -EINVAL; - of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE); + + ret = of_get_drm_display_mode(np, &imxpd->mode, + OF_USE_NATIVE_MODE); + if (ret) + return ret; + drm_mode_copy(mode, &imxpd->mode); mode->type |= 
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); @@ -90,24 +101,7 @@ static struct drm_encoder *imx_pd_connector_best_encoder( return &imxpd->encoder; } -static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - - if (mode != DRM_MODE_DPMS_ON) - drm_panel_disable(imxpd->panel); - else - drm_panel_enable(imxpd->panel); -} - -static void imx_pd_encoder_prepare(struct drm_encoder *encoder) -{ - struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, - imxpd->connector.display_info.bus_flags); -} - -static void imx_pd_encoder_commit(struct drm_encoder *encoder) +static void imx_pd_encoder_enable(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); @@ -115,12 +109,6 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder) drm_panel_enable(imxpd->panel); } -static void imx_pd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *orig_mode, - struct drm_display_mode *mode) -{ -} - static void imx_pd_encoder_disable(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); @@ -129,11 +117,33 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder) drm_panel_unprepare(imxpd->panel); } +static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); + struct drm_display_info *di = &conn_state->connector->display_info; + struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); + + imx_crtc_state->bus_flags = di->bus_flags; + if (!imxpd->bus_format && di->num_bus_formats) + imx_crtc_state->bus_format = di->bus_formats[0]; + else + imx_crtc_state->bus_format = imxpd->bus_format; + imx_crtc_state->di_hsync_pin = 2; + imx_crtc_state->di_vsync_pin = 3; + + return 0; +} + static const struct drm_connector_funcs imx_pd_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = imx_pd_connector_detect, .destroy = imx_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { @@ -146,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = { }; static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { - .dpms = imx_pd_encoder_dpms, - .prepare = imx_pd_encoder_prepare, - .commit = imx_pd_encoder_commit, - .mode_set = imx_pd_encoder_mode_set, + .enable = imx_pd_encoder_enable, .disable = imx_pd_encoder_disable, + .atomic_check = imx_pd_encoder_atomic_check, }; static int imx_pd_register(struct drm_device *drm, struct imx_parallel_display *imxpd) { + struct drm_encoder *encoder = &imxpd->encoder; int ret; - ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder, - imxpd->dev->of_node); + ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node); if (ret) return ret; @@ -170,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm, */ imxpd->connector.dpms = DRM_MODE_DPMS_OFF; - drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); - drm_encoder_init(drm, 
&imxpd->encoder, &imx_pd_encoder_funcs, + drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs); + drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); - drm_connector_helper_add(&imxpd->connector, - &imx_pd_connector_helper_funcs); - drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + if (!imxpd->bridge) { + drm_connector_helper_add(&imxpd->connector, + &imx_pd_connector_helper_funcs); + drm_connector_init(drm, &imxpd->connector, + &imx_pd_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + } if (imxpd->panel) drm_panel_attach(imxpd->panel, &imxpd->connector); - drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder); + if (imxpd->bridge) { + imxpd->bridge->encoder = encoder; + encoder->bridge = imxpd->bridge; + ret = drm_bridge_attach(drm, imxpd->bridge); + if (ret < 0) { + dev_err(imxpd->dev, "failed to attach bridge: %d\n", + ret); + return ret; + } + } else { + drm_mode_connector_attach_encoder(&imxpd->connector, encoder); + } return 0; } @@ -195,6 +217,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) const u8 *edidp; struct imx_parallel_display *imxpd; int ret; + u32 bus_format = 0; const char *fmt; imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); @@ -208,14 +231,15 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { if (!strcmp(fmt, "rgb24")) - imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24; + bus_format = MEDIA_BUS_FMT_RGB888_1X24; else if (!strcmp(fmt, "rgb565")) - imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16; + bus_format = MEDIA_BUS_FMT_RGB565_1X16; else if (!strcmp(fmt, "bgr666")) - imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18; + bus_format = MEDIA_BUS_FMT_RGB666_1X18; else if (!strcmp(fmt, "lvds666")) - imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; + bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; } + imxpd->bus_format = bus_format; /* port@1 is the output port */ ep = of_graph_get_endpoint_by_regs(np, 1, -1); @@ -223,13 +247,30 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) struct device_node *remote; remote = of_graph_get_remote_port_parent(ep); + if (!remote) { + dev_warn(dev, "endpoint %s not connected\n", + ep->full_name); + of_node_put(ep); + return -ENODEV; + } of_node_put(ep); - if (remote) { - imxpd->panel = of_drm_find_panel(remote); - of_node_put(remote); + + imxpd->panel = of_drm_find_panel(remote); + if (imxpd->panel) { + dev_dbg(dev, "found panel %s\n", remote->full_name); + } else { + imxpd->bridge = of_drm_find_bridge(remote); + if (imxpd->bridge) + dev_dbg(dev, "found bridge %s\n", + remote->full_name); } - if (!imxpd->panel) + if (!imxpd->panel && !imxpd->bridge) { + dev_dbg(dev, "waiting for panel or bridge %s\n", + remote->full_name); + of_node_put(remote); return -EPROBE_DEFER; + } + of_node_put(remote); } imxpd->dev = dev; diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index eeefc971801a..23ac8041c562 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -6,7 +6,6 @@ config DRM_MEDIATEK select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL - select IOMMU_DMA select MEMORY select MTK_SMI help @@ -14,3 +13,11 @@ config DRM_MEDIATEK The module will be called mediatek-drm This driver provides kernel mode setting and buffer management to userspace. 
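Aside (not part of the patch): the drm_plane_state_to_eba/_ubo/_vbo helpers added to ipuv3-plane.c above derive the IDMAC scanout addresses from an atomic plane state. src_x/src_y are 16.16 fixed point, luma costs one byte per sample, and the U/V offsets are stored relative to EBA so a pageflip only has to reprogram EBA. A minimal standalone C sketch with hypothetical framebuffer values, checking the same alignment and range limits that ipu_plane_atomic_check enforces:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 1920x1080 YUV420 framebuffer in one CMA buffer */
	uint32_t paddr = 0x80000000;			/* cma_obj->paddr (assumed) */
	uint32_t offsets[3] = { 0, 1920 * 1080, 1920 * 1080 + 960 * 540 };
	uint32_t pitches[3] = { 1920, 960, 960 };	/* U/V pitches must match */
	uint32_t src_x = 16 << 16, src_y = 8 << 16;	/* 16.16 fixed point */

	/* Y plane base: pitch * y + 1 byte per luma sample * x */
	uint32_t eba = paddr + offsets[0]
		     + pitches[0] * (src_y >> 16) + (src_x >> 16);
	/* U/V offsets are EBA-relative; chroma coordinates are halved */
	uint32_t ubo = paddr + offsets[1]
		     + pitches[1] * (src_y >> 16) / 2 + (src_x >> 16) / 2 - eba;
	uint32_t vbo = paddr + offsets[2]
		     + pitches[2] * (src_y >> 16) / 2 + (src_x >> 16) / 2 - eba;

	/* ipu_plane_atomic_check rejects the state unless all three are
	 * multiples of 8 and ubo/vbo do not exceed 0xfffff8 */
	printf("eba=%#x ubo=%#x vbo=%#x ok=%d\n", eba, ubo, vbo,
	       !((eba | ubo | vbo) & 0x7) && ubo <= 0xfffff8 && vbo <= 0xfffff8);
	return 0;
}

Because only EBA may change while scanout is active, the atomic update path programs the inactive double buffer with the new EBA and the EBA-relative UBO/VBO stay valid unchanged.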
+ +config DRM_MEDIATEK_HDMI + tristate "DRM HDMI Support for Mediatek SoCs" + depends on DRM_MEDIATEK + select SND_SOC_HDMI_CODEC if SND_SOC + select GENERIC_PHY + help + DRM/KMS HDMI driver for Mediatek SoCs diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 5fcf58e87786..bf2e5be1ab30 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \ mtk_dpi.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o + +mediatek-drm-hdmi-objs := mtk_cec.o \ + mtk_hdmi.o \ + mtk_hdmi_ddc.o \ + mtk_mt8173_hdmi_phy.o + +obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c new file mode 100644 index 000000000000..7a3eb8c17ef9 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_cec.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> + +#include "mtk_cec.h" + +#define TR_CONFIG 0x00 +#define CLEAR_CEC_IRQ BIT(15) + +#define CEC_CKGEN 0x04 +#define CEC_32K_PDN BIT(19) +#define PDN BIT(16) + +#define RX_EVENT 0x54 +#define HDMI_PORD BIT(25) +#define HDMI_HTPLG BIT(24) +#define HDMI_PORD_INT_EN BIT(9) +#define HDMI_HTPLG_INT_EN BIT(8) + +#define RX_GEN_WD 0x58 +#define HDMI_PORD_INT_32K_STATUS BIT(26) +#define RX_RISC_INT_32K_STATUS BIT(25) +#define HDMI_HTPLG_INT_32K_STATUS BIT(24) +#define HDMI_PORD_INT_32K_CLR BIT(18) +#define RX_INT_32K_CLR BIT(17) +#define HDMI_HTPLG_INT_32K_CLR BIT(16) +#define HDMI_PORD_INT_32K_STA_MASK BIT(10) +#define RX_RISC_INT_32K_STA_MASK BIT(9) +#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8) +#define HDMI_PORD_INT_32K_EN BIT(2) +#define RX_INT_32K_EN BIT(1) +#define HDMI_HTPLG_INT_32K_EN BIT(0) + +#define NORMAL_INT_CTRL 0x5C +#define HDMI_HTPLG_INT_STA BIT(0) +#define HDMI_PORD_INT_STA BIT(1) +#define HDMI_HTPLG_INT_CLR BIT(16) +#define HDMI_PORD_INT_CLR BIT(17) +#define HDMI_FULL_INT_CLR BIT(20) + +struct mtk_cec { + void __iomem *regs; + struct clk *clk; + int irq; + bool hpd; + void (*hpd_event)(bool hpd, struct device *dev); + struct device *hdmi_dev; + spinlock_t lock; +}; + +static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset, + unsigned int bits) +{ + void __iomem *reg = cec->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp &= ~bits; + writel(tmp, reg); +} + +static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset, + unsigned int bits) +{ + void __iomem *reg = cec->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp |= bits; + writel(tmp, reg); +} + +static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset, + unsigned int val, unsigned int mask) +{ + u32 tmp = readl(cec->regs + offset) & ~mask; + + tmp |= val & mask; + writel(tmp, cec->regs + offset); +} + +void mtk_cec_set_hpd_event(struct device *dev, + void (*hpd_event)(bool hpd, struct device *dev), + struct device *hdmi_dev) +{ + struct mtk_cec *cec 
= dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&cec->lock, flags); + cec->hdmi_dev = hdmi_dev; + cec->hpd_event = hpd_event; + spin_unlock_irqrestore(&cec->lock, flags); +} + +bool mtk_cec_hpd_high(struct device *dev) +{ + struct mtk_cec *cec = dev_get_drvdata(dev); + unsigned int status; + + status = readl(cec->regs + RX_EVENT); + + return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG); +} + +static void mtk_cec_htplg_irq_init(struct mtk_cec *cec) +{ + mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN); + mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); + mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR | + HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN | + RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN); +} + +static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec) +{ + mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN); +} + +static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec) +{ + mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN); +} + +static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec) +{ + mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ); + mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR | + HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR); + mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); + usleep_range(5, 10); + mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR | + HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR); + mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ); + mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); +} + +static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd) +{ + void (*hpd_event)(bool hpd, struct device *dev); + struct device *hdmi_dev; + unsigned long flags; + + spin_lock_irqsave(&cec->lock, flags); + hpd_event = cec->hpd_event; + hdmi_dev = cec->hdmi_dev; + spin_unlock_irqrestore(&cec->lock, flags); + + if (hpd_event) + hpd_event(hpd, hdmi_dev); +} + +static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg) +{ + struct device *dev = arg; + struct mtk_cec *cec = dev_get_drvdata(dev); + bool hpd; + + mtk_cec_clear_htplg_irq(cec); + hpd = mtk_cec_hpd_high(dev); + + if (cec->hpd != hpd) { + dev_dbg(dev, "hotplug event! 
cur hpd = %d, hpd = %d\n", + cec->hpd, hpd); + cec->hpd = hpd; + mtk_cec_hpd_event(cec, hpd); + } + return IRQ_HANDLED; +} + +static int mtk_cec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_cec *cec; + struct resource *res; + int ret; + + cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + platform_set_drvdata(pdev, cec); + spin_lock_init(&cec->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cec->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(cec->regs)) { + ret = PTR_ERR(cec->regs); + dev_err(dev, "Failed to ioremap cec: %d\n", ret); + return ret; + } + + cec->clk = devm_clk_get(dev, NULL); + if (IS_ERR(cec->clk)) { + ret = PTR_ERR(cec->clk); + dev_err(dev, "Failed to get cec clock: %d\n", ret); + return ret; + } + + cec->irq = platform_get_irq(pdev, 0); + if (cec->irq < 0) { + dev_err(dev, "Failed to get cec irq: %d\n", cec->irq); + return cec->irq; + } + + ret = devm_request_threaded_irq(dev, cec->irq, NULL, + mtk_cec_htplg_isr_thread, + IRQF_SHARED | IRQF_TRIGGER_LOW | + IRQF_ONESHOT, "hdmi hpd", dev); + if (ret) { + dev_err(dev, "Failed to register cec irq: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(cec->clk); + if (ret) { + dev_err(dev, "Failed to enable cec clock: %d\n", ret); + return ret; + } + + mtk_cec_htplg_irq_init(cec); + mtk_cec_htplg_irq_enable(cec); + + return 0; +} + +static int mtk_cec_remove(struct platform_device *pdev) +{ + struct mtk_cec *cec = platform_get_drvdata(pdev); + + mtk_cec_htplg_irq_disable(cec); + clk_disable_unprepare(cec->clk); + return 0; +} + +static const struct of_device_id mtk_cec_of_ids[] = { + { .compatible = "mediatek,mt8173-cec", }, + {} +}; + +struct platform_driver mtk_cec_driver = { + .probe = mtk_cec_probe, + .remove = mtk_cec_remove, + .driver = { + .name = "mediatek-cec", + .of_match_table = mtk_cec_of_ids, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h new file mode 100644 index 000000000000..10057b7eabec --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_cec.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _MTK_CEC_H +#define _MTK_CEC_H + +#include <linux/types.h> + +struct device; + +void mtk_cec_set_hpd_event(struct device *dev, + void (*hotplug_event)(bool hpd, struct device *dev), + struct device *hdmi_dev); +bool mtk_cec_hpd_high(struct device *dev); + +#endif /* _MTK_CEC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b1223d54d0ab..eebb7d881c2b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm, mutex_lock(&private->commit.lock); flush_work(&private->commit.work); - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (async) mtk_atomic_schedule(private, state); @@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = { .enable_vblank = mtk_drm_crtc_enable_vblank, .disable_vblank = mtk_drm_crtc_disable_vblank, - .gem_free_object = mtk_drm_gem_free_object, + .gem_free_object_unlocked = mtk_drm_gem_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = mtk_drm_gem_dumb_create, .dumb_map_offset = mtk_drm_gem_dumb_map_offset, @@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev) if (!drm) return -ENOMEM; - drm_dev_set_unique(drm, dev_name(dev)); - drm->dev_private = private; private->drm = drm; @@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; - ret = drm_connector_register_all(drm); - if (ret < 0) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(drm); err_deinit: mtk_drm_kms_deinit(drm); err_free: @@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev) struct drm_device *drm = private->drm; int i; - drm_connector_unregister_all(drm); drm_dev_unregister(drm); mtk_drm_kms_deinit(drm); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 51bc8988fc26..3995765a90dc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, return drm_plane_helper_check_update(plane, state->crtc, fb, &src, &dest, &clip, + state->rotation, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true, &visible); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 769559124562..28b2044ed9f2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(dsi->panel); } -static struct drm_encoder *mtk_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct mtk_dsi *dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { .mode_fixup = mtk_dsi_encoder_mode_fixup, .mode_set = mtk_dsi_encoder_mode_set, @@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = { static const struct drm_connector_helper_funcs mtk_dsi_connector_helper_funcs = { .get_modes = mtk_dsi_connector_get_modes, - .best_encoder = mtk_dsi_connector_best_encoder, }; static int mtk_drm_attach_bridge(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c new file mode 100644 index 000000000000..334562d06731 --- /dev/null +++ 
b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -0,0 +1,1828 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <linux/arm-smccc.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_graph.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <sound/hdmi-codec.h> +#include "mtk_cec.h" +#include "mtk_hdmi.h" +#include "mtk_hdmi_regs.h" + +#define NCTS_BYTES 7 + +enum mtk_hdmi_clk_id { + MTK_HDMI_CLK_HDMI_PIXEL, + MTK_HDMI_CLK_HDMI_PLL, + MTK_HDMI_CLK_AUD_BCLK, + MTK_HDMI_CLK_AUD_SPDIF, + MTK_HDMI_CLK_COUNT +}; + +enum hdmi_aud_input_type { + HDMI_AUD_INPUT_I2S = 0, + HDMI_AUD_INPUT_SPDIF, +}; + +enum hdmi_aud_i2s_fmt { + HDMI_I2S_MODE_RJT_24BIT = 0, + HDMI_I2S_MODE_RJT_16BIT, + HDMI_I2S_MODE_LJT_24BIT, + HDMI_I2S_MODE_LJT_16BIT, + HDMI_I2S_MODE_I2S_24BIT, + HDMI_I2S_MODE_I2S_16BIT +}; + +enum hdmi_aud_mclk { + HDMI_AUD_MCLK_128FS, + HDMI_AUD_MCLK_192FS, + HDMI_AUD_MCLK_256FS, + HDMI_AUD_MCLK_384FS, + HDMI_AUD_MCLK_512FS, + HDMI_AUD_MCLK_768FS, + HDMI_AUD_MCLK_1152FS, +}; + +enum hdmi_aud_channel_type { + HDMI_AUD_CHAN_TYPE_1_0 = 0, + HDMI_AUD_CHAN_TYPE_1_1, + HDMI_AUD_CHAN_TYPE_2_0, + HDMI_AUD_CHAN_TYPE_2_1, + HDMI_AUD_CHAN_TYPE_3_0, + HDMI_AUD_CHAN_TYPE_3_1, + HDMI_AUD_CHAN_TYPE_4_0, + HDMI_AUD_CHAN_TYPE_4_1, + HDMI_AUD_CHAN_TYPE_5_0, + HDMI_AUD_CHAN_TYPE_5_1, + HDMI_AUD_CHAN_TYPE_6_0, + HDMI_AUD_CHAN_TYPE_6_1, + HDMI_AUD_CHAN_TYPE_7_0, + HDMI_AUD_CHAN_TYPE_7_1, + HDMI_AUD_CHAN_TYPE_3_0_LRS, + HDMI_AUD_CHAN_TYPE_3_1_LRS, + HDMI_AUD_CHAN_TYPE_4_0_CLRS, + HDMI_AUD_CHAN_TYPE_4_1_CLRS, + HDMI_AUD_CHAN_TYPE_6_1_CS, + HDMI_AUD_CHAN_TYPE_6_1_CH, + HDMI_AUD_CHAN_TYPE_6_1_OH, + HDMI_AUD_CHAN_TYPE_6_1_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LH_RH, + HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_1_LC_RC, + HDMI_AUD_CHAN_TYPE_7_1_LW_RW, + HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_1_CS_CH, + HDMI_AUD_CHAN_TYPE_7_1_CS_OH, + HDMI_AUD_CHAN_TYPE_7_1_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_1_CH_OH, + HDMI_AUD_CHAN_TYPE_7_1_CH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_6_0_CS, + HDMI_AUD_CHAN_TYPE_6_0_CH, + HDMI_AUD_CHAN_TYPE_6_0_OH, + HDMI_AUD_CHAN_TYPE_6_0_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LH_RH, + HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_0_LC_RC, + HDMI_AUD_CHAN_TYPE_7_0_LW_RW, + HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_0_CS_CH, + HDMI_AUD_CHAN_TYPE_7_0_CS_OH, + HDMI_AUD_CHAN_TYPE_7_0_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_0_CH_OH, + HDMI_AUD_CHAN_TYPE_7_0_CH_CHR, + 
HDMI_AUD_CHAN_TYPE_7_0_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS, + HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF +}; + +enum hdmi_aud_channel_swap_type { + HDMI_AUD_SWAP_LR, + HDMI_AUD_SWAP_LFE_CC, + HDMI_AUD_SWAP_LSRS, + HDMI_AUD_SWAP_RLS_RRS, + HDMI_AUD_SWAP_LR_STATUS, +}; + +struct hdmi_audio_param { + enum hdmi_audio_coding_type aud_codec; + enum hdmi_audio_sample_size aud_sampe_size; + enum hdmi_aud_input_type aud_input_type; + enum hdmi_aud_i2s_fmt aud_i2s_fmt; + enum hdmi_aud_mclk aud_mclk; + enum hdmi_aud_channel_type aud_input_chan_type; + struct hdmi_codec_params codec_params; +}; + +struct mtk_hdmi { + struct drm_bridge bridge; + struct drm_connector conn; + struct device *dev; + struct phy *phy; + struct device *cec_dev; + struct i2c_adapter *ddc_adpt; + struct clk *clk[MTK_HDMI_CLK_COUNT]; + struct drm_display_mode mode; + bool dvi_mode; + u32 min_clock; + u32 max_clock; + u32 max_hdisplay; + u32 max_vdisplay; + u32 ibias; + u32 ibias_up; + struct regmap *sys_regmap; + unsigned int sys_offset; + void __iomem *regs; + enum hdmi_colorspace csp; + struct hdmi_audio_param aud_param; + bool audio_enable; + bool powered; + bool enabled; +}; + +static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b) +{ + return container_of(b, struct mtk_hdmi, bridge); +} + +static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c) +{ + return container_of(c, struct mtk_hdmi, conn); +} + +static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset) +{ + return readl(hdmi->regs + offset); +} + +static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val) +{ + writel(val, hdmi->regs + offset); +} + +static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits) +{ + void __iomem *reg = hdmi->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp &= ~bits; + writel(tmp, reg); +} + +static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits) +{ + void __iomem *reg = hdmi->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp |= bits; + writel(tmp, reg); +} + +static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask) +{ + void __iomem *reg = hdmi->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp = (tmp & ~mask) | (val & mask); + writel(tmp, reg); +} + +static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black) +{ + mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH, + VIDEO_SOURCE_SEL); +} + +static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) +{ + struct arm_smccc_res res; + + /* + * MT8173 HDMI hardware has an output control bit to enable/disable HDMI + * output. This bit can only be controlled in ARM supervisor mode. + * The ARM trusted firmware provides an API for the HDMI driver to set + * this control bit to enable HDMI output in supervisor mode. + */ + arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000, + 0, 0, 0, 0, 0, &res); + + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0); +} + +static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI2P0_EN, enable ? 
0 : HDMI2P0_EN); +} + +static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO); +} + +static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO); +} + +static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_RST, HDMI_RST); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_RST, 0); + mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + ANLG_ON, ANLG_ON); +} + +static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice) +{ + mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0, + CFG2_NOTICE_EN); +} + +static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask) +{ + mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask); +} + +static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable) +{ + mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI); +} + +static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer, + u8 len) +{ + u32 ctrl_reg = GRL_CTRL; + int i; + u8 *frame_data; + enum hdmi_infoframe_type frame_type; + u8 frame_ver; + u8 frame_len; + u8 checksum; + int ctrl_frame_en = 0; + + frame_type = *buffer; + buffer += 1; + frame_ver = *buffer; + buffer += 1; + frame_len = *buffer; + buffer += 1; + checksum = *buffer; + buffer += 1; + frame_data = buffer; + + dev_dbg(hdmi->dev, + "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n", + frame_type, frame_ver, frame_len, checksum); + + switch (frame_type) { + case HDMI_INFOFRAME_TYPE_AVI: + ctrl_frame_en = CTRL_AVI_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_SPD: + ctrl_frame_en = CTRL_SPD_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + ctrl_frame_en = CTRL_AUDIO_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + ctrl_frame_en = VS_EN; + ctrl_reg = GRL_ACP_ISRC_CTRL; + break; + } + mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en); + mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type); + mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver); + mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len); + + mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum); + for (i = 0; i < frame_len; i++) + mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]); + + mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en); +} + +static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable) +{ + mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 
0 : AUDIO_PACKET_OFF, + AUDIO_PACKET_OFF); +} + +static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0); + usleep_range(2000, 4000); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN); +} + +static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN, + COLOR_8BIT_MODE); +} + +static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE); + usleep_range(2000, 4000); + mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE); +} + +static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN, + CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET); + usleep_range(2000, 4000); + mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET, + CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET); +} + +static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on) +{ + mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT, + CTS_CTRL_SOFT); +} + +static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi, + bool enable) +{ + mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0, + NCTS_WRI_ANYTIME); +} + +static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE && + mode->clock == 74250 && + mode->vdisplay == 1080) + mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL); + else + mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL); +} + +static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi, + enum hdmi_aud_channel_swap_type swap) +{ + u8 swap_bit; + + switch (swap) { + case HDMI_AUD_SWAP_LR: + swap_bit = LR_SWAP; + break; + case HDMI_AUD_SWAP_LFE_CC: + swap_bit = LFE_CC_SWAP; + break; + case HDMI_AUD_SWAP_LSRS: + swap_bit = LSRS_SWAP; + break; + case HDMI_AUD_SWAP_RLS_RRS: + swap_bit = RLS_RRS_SWAP; + break; + case HDMI_AUD_SWAP_LR_STATUS: + swap_bit = LR_STATUS_SWAP; + break; + default: + swap_bit = LFE_CC_SWAP; + break; + } + mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff); +} + +static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi, + enum hdmi_audio_sample_size bit_num) +{ + u32 val; + + switch (bit_num) { + case HDMI_AUDIO_SAMPLE_SIZE_16: + val = AOUT_16BIT; + break; + case HDMI_AUDIO_SAMPLE_SIZE_20: + val = AOUT_20BIT; + break; + case HDMI_AUDIO_SAMPLE_SIZE_24: + case HDMI_AUDIO_SAMPLE_SIZE_STREAM: + val = AOUT_24BIT; + break; + } + + mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK); +} + +static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi, + enum hdmi_aud_i2s_fmt i2s_fmt) +{ + u32 val; + + val = mtk_hdmi_read(hdmi, GRL_CFG0); + val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK); + + switch (i2s_fmt) { + case HDMI_I2S_MODE_RJT_24BIT: + val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT; + break; + case HDMI_I2S_MODE_RJT_16BIT: + val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT; + break; + case HDMI_I2S_MODE_LJT_24BIT: + default: + val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT; + break; + case HDMI_I2S_MODE_LJT_16BIT: + val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT; + break; + case HDMI_I2S_MODE_I2S_24BIT: + val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT; + break; + case HDMI_I2S_MODE_I2S_16BIT: + val |= CFG0_I2S_MODE_I2S | 
CFG0_W_LENGTH_16BIT; + break; + } + mtk_hdmi_write(hdmi, GRL_CFG0, val); +} + +static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst) +{ + const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL; + u8 val; + + /* Disable high bitrate, set DST packet normal/double */ + mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN); + + if (dst) + val = DST_NORMAL_DOUBLE | SACD_DST; + else + val = 0; + + mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask); +} + +static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi, + enum hdmi_aud_channel_type channel_type, + u8 channel_count) +{ + unsigned int ch_switch; + u8 i2s_uv; + + ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) | + CH_SWITCH(5, 5) | CH_SWITCH(4, 4) | + CH_SWITCH(3, 3) | CH_SWITCH(1, 2) | + CH_SWITCH(2, 1) | CH_SWITCH(0, 0); + + if (channel_count == 2) { + i2s_uv = I2S_UV_CH_EN(0); + } else if (channel_count == 3 || channel_count == 4) { + if (channel_count == 4 && + (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS || + channel_type == HDMI_AUD_CHAN_TYPE_4_0)) + i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0); + else + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2); + } else if (channel_count == 6 || channel_count == 5) { + if (channel_count == 6 && + channel_type != HDMI_AUD_CHAN_TYPE_5_1 && + channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) { + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) | + I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0); + } else { + i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) | + I2S_UV_CH_EN(0); + } + } else if (channel_count == 8 || channel_count == 7) { + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) | + I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0); + } else { + i2s_uv = I2S_UV_CH_EN(0); + } + + mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff); + mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff); + mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff); + mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv); +} + +static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi, + enum hdmi_aud_input_type input_type) +{ + u32 val; + + val = mtk_hdmi_read(hdmi, GRL_CFG1); + if (input_type == HDMI_AUD_INPUT_I2S && + (val & CFG1_SPDIF) == CFG1_SPDIF) { + val &= ~CFG1_SPDIF; + } else if (input_type == HDMI_AUD_INPUT_SPDIF && + (val & CFG1_SPDIF) == 0) { + val |= CFG1_SPDIF; + } + mtk_hdmi_write(hdmi, GRL_CFG1, val); +} + +static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi, + u8 *channel_status) +{ + int i; + + for (i = 0; i < 5; i++) { + mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]); + mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]); + mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]); + } + for (; i < 24; i++) { + mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0); + mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0); + } +} + +static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi) +{ + u32 val; + + val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL); + if (val & MIX_CTRL_SRC_EN) { + val &= ~MIX_CTRL_SRC_EN; + mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val); + usleep_range(255, 512); + val |= MIX_CTRL_SRC_EN; + mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val); + } +} + +static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi) +{ + u32 val; + + val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL); + val &= ~MIX_CTRL_SRC_EN; + mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val); + mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00); +} + +static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi, + enum hdmi_aud_mclk mclk) +{ + u32 val; + + val = mtk_hdmi_read(hdmi, GRL_CFG5); + val &= CFG5_CD_RATIO_MASK; + + 
switch (mclk) { + case HDMI_AUD_MCLK_128FS: + val |= CFG5_FS128; + break; + case HDMI_AUD_MCLK_256FS: + val |= CFG5_FS256; + break; + case HDMI_AUD_MCLK_384FS: + val |= CFG5_FS384; + break; + case HDMI_AUD_MCLK_512FS: + val |= CFG5_FS512; + break; + case HDMI_AUD_MCLK_768FS: + val |= CFG5_FS768; + break; + default: + val |= CFG5_FS256; + break; + } + mtk_hdmi_write(hdmi, GRL_CFG5, val); +} + +struct hdmi_acr_n { + unsigned int clock; + unsigned int n[3]; +}; + +/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */ +static const struct hdmi_acr_n hdmi_rec_n_table[] = { + /* Clock, N: 32kHz 44.1kHz 48kHz */ + { 25175, { 4576, 7007, 6864 } }, + { 74176, { 11648, 17836, 11648 } }, + { 148352, { 11648, 8918, 5824 } }, + { 296703, { 5824, 4459, 5824 } }, + { 297000, { 3072, 4704, 5120 } }, + { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */ +}; + +/** + * hdmi_recommended_n() - Return N value recommended by HDMI specification + * @freq: audio sample rate in Hz + * @clock: rounded TMDS clock in kHz + */ +static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock) +{ + const struct hdmi_acr_n *recommended; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) { + if (clock == hdmi_rec_n_table[i].clock) + break; + } + recommended = hdmi_rec_n_table + i; + + switch (freq) { + case 32000: + return recommended->n[0]; + case 44100: + return recommended->n[1]; + case 48000: + return recommended->n[2]; + case 88200: + return recommended->n[1] * 2; + case 96000: + return recommended->n[2] * 2; + case 176400: + return recommended->n[1] * 4; + case 192000: + return recommended->n[2] * 4; + default: + return (128 * freq) / 1000; + } +} + +static unsigned int hdmi_mode_clock_to_hz(unsigned int clock) +{ + switch (clock) { + case 25175: + return 25174825; /* 25.2/1.001 MHz */ + case 74176: + return 74175824; /* 74.25/1.001 MHz */ + case 148352: + return 148351648; /* 148.5/1.001 MHz */ + case 296703: + return 296703297; /* 297/1.001 MHz */ + default: + return clock * 1000; + } +} + +static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate, + unsigned int tmds_clock, unsigned int n) +{ + return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n, + 128 * audio_sample_rate); +} + +static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n, + unsigned int cts) +{ + unsigned char val[NCTS_BYTES]; + int i; + + mtk_hdmi_write(hdmi, GRL_NCTS, 0); + mtk_hdmi_write(hdmi, GRL_NCTS, 0); + mtk_hdmi_write(hdmi, GRL_NCTS, 0); + memset(val, 0, sizeof(val)); + + val[0] = (cts >> 24) & 0xff; + val[1] = (cts >> 16) & 0xff; + val[2] = (cts >> 8) & 0xff; + val[3] = cts & 0xff; + + val[4] = (n >> 16) & 0xff; + val[5] = (n >> 8) & 0xff; + val[6] = n & 0xff; + + for (i = 0; i < NCTS_BYTES; i++) + mtk_hdmi_write(hdmi, GRL_NCTS, val[i]); +} + +static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, + unsigned int sample_rate, + unsigned int clock) +{ + unsigned int n, cts; + + n = hdmi_recommended_n(sample_rate, clock); + cts = hdmi_expected_cts(sample_rate, clock, n); + + dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n", + __func__, sample_rate, clock, n, cts); + + mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64, + AUDIO_I2S_NCTS_SEL); + do_hdmi_hw_aud_set_ncts(hdmi, n, cts); +} + +static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type) +{ + switch (channel_type) { + case HDMI_AUD_CHAN_TYPE_1_0: + case HDMI_AUD_CHAN_TYPE_1_1: + case HDMI_AUD_CHAN_TYPE_2_0: + return 2; + case 
HDMI_AUD_CHAN_TYPE_2_1: + case HDMI_AUD_CHAN_TYPE_3_0: + return 3; + case HDMI_AUD_CHAN_TYPE_3_1: + case HDMI_AUD_CHAN_TYPE_4_0: + case HDMI_AUD_CHAN_TYPE_3_0_LRS: + return 4; + case HDMI_AUD_CHAN_TYPE_4_1: + case HDMI_AUD_CHAN_TYPE_5_0: + case HDMI_AUD_CHAN_TYPE_3_1_LRS: + case HDMI_AUD_CHAN_TYPE_4_0_CLRS: + return 5; + case HDMI_AUD_CHAN_TYPE_5_1: + case HDMI_AUD_CHAN_TYPE_6_0: + case HDMI_AUD_CHAN_TYPE_4_1_CLRS: + case HDMI_AUD_CHAN_TYPE_6_0_CS: + case HDMI_AUD_CHAN_TYPE_6_0_CH: + case HDMI_AUD_CHAN_TYPE_6_0_OH: + case HDMI_AUD_CHAN_TYPE_6_0_CHR: + return 6; + case HDMI_AUD_CHAN_TYPE_6_1: + case HDMI_AUD_CHAN_TYPE_6_1_CS: + case HDMI_AUD_CHAN_TYPE_6_1_CH: + case HDMI_AUD_CHAN_TYPE_6_1_OH: + case HDMI_AUD_CHAN_TYPE_6_1_CHR: + case HDMI_AUD_CHAN_TYPE_7_0: + case HDMI_AUD_CHAN_TYPE_7_0_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_0_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_0_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS: + return 7; + case HDMI_AUD_CHAN_TYPE_7_1: + case HDMI_AUD_CHAN_TYPE_7_1_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_1_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_1_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR: + return 8; + default: + return 2; + } +} + +static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock) +{ + unsigned long rate; + int ret; + + /* The DPI driver already should have set TVDPLL to the correct rate */ + ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock); + if (ret) { + dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock, + ret); + return ret; + } + + rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + + if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000)) + dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, + rate); + else + dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate); + + mtk_hdmi_hw_config_sys(hdmi); + mtk_hdmi_hw_set_deep_color_mode(hdmi); + return 0; +} + +static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + mtk_hdmi_hw_reset(hdmi); + mtk_hdmi_hw_enable_notice(hdmi, true); + mtk_hdmi_hw_write_int_mask(hdmi, 0xff); + mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode); + mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true); + + mtk_hdmi_hw_msic_setting(hdmi, mode); +} + +static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable) +{ + mtk_hdmi_hw_send_aud_packet(hdmi, enable); + return 0; +} + +static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on) +{ + mtk_hdmi_hw_ncts_enable(hdmi, on); + return 0; +} + +static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi) +{ + enum hdmi_aud_channel_type chan_type; + u8 chan_count; + bool dst; + + mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC); + mtk_hdmi_set_bits(hdmi, 
GRL_MIX_CTRL, MIX_CTRL_FLAT); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF && + hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) { + mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24); + } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) { + hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT; + } + + mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt); + mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24); + + dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) && + (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST)); + mtk_hdmi_hw_audio_config(hdmi, dst); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) + chan_type = HDMI_AUD_CHAN_TYPE_2_0; + else + chan_type = hdmi->aud_param.aud_input_chan_type; + chan_count = mtk_hdmi_aud_get_chnl_count(chan_type); + mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count); + mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type); + + return 0; +} + +static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate; + + mtk_hdmi_aud_on_off_hw_ncts(hdmi, false); + mtk_hdmi_hw_aud_src_disable(hdmi); + mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) { + switch (sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + break; + default: + return -EINVAL; + } + mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk); + } else { + switch (sample_rate) { + case 32000: + case 44100: + case 48000: + break; + default: + return -EINVAL; + } + mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS); + } + + mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock); + + mtk_hdmi_hw_aud_src_reenable(hdmi); + return 0; +} + +static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + mtk_hdmi_hw_aud_mute(hdmi); + mtk_hdmi_aud_enable_packet(hdmi, false); + + mtk_hdmi_aud_set_input(hdmi); + mtk_hdmi_aud_set_src(hdmi, display_mode); + mtk_hdmi_hw_aud_set_channel_status(hdmi, + hdmi->aud_param.codec_params.iec.status); + + usleep_range(50, 100); + + mtk_hdmi_aud_on_off_hw_ncts(hdmi, true); + mtk_hdmi_aud_enable_packet(hdmi, true); + mtk_hdmi_hw_aud_unmute(hdmi); + return 0; +} + +static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + struct hdmi_avi_infoframe frame; + u8 buffer[17]; + ssize_t err; + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + dev_err(hdmi->dev, + "Failed to get AVI infoframe from mode: %zd\n", err); + return err; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi, + const char *vendor, + const char *product) +{ + struct hdmi_spd_infoframe frame; + u8 buffer[29]; + ssize_t err; + + err = hdmi_spd_infoframe_init(&frame, vendor, product); + if (err < 0) { + dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n", + err); + return err; + } + + err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack SDP infoframe: %zd\n", err); + return err; + } + + 
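[Editor's note on the infoframe buffer sizes in these helpers: a packed infoframe is a 4-byte header (type, version, length, checksum) plus a fixed payload, which is where u8 buffer[17] for the AVI frame above, buffer[29] for the SPD frame here, and buffer[14] for the audio frame below come from. The defines in this sketch mirror the payload lengths in linux/hdmi.h; the arithmetic is purely illustrative:]

#include <stdio.h>

#define INFOFRAME_HEADER	4	/* type, version, length, checksum */
#define AVI_PAYLOAD		13	/* -> u8 buffer[17] */
#define SPD_PAYLOAD		25	/* -> u8 buffer[29] */
#define AUDIO_PAYLOAD		10	/* -> u8 buffer[14] */

int main(void)
{
	printf("AVI=%d SPD=%d AUDIO=%d\n",
	       INFOFRAME_HEADER + AVI_PAYLOAD,
	       INFOFRAME_HEADER + SPD_PAYLOAD,
	       INFOFRAME_HEADER + AUDIO_PAYLOAD);
	return 0;
}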
mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi) +{ + struct hdmi_audio_infoframe frame; + u8 buffer[14]; + ssize_t err; + + err = hdmi_audio_infoframe_init(&frame); + if (err < 0) { + dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n", + err); + return err; + } + + frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; + frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; + frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; + frame.channels = mtk_hdmi_aud_get_chnl_count( + hdmi->aud_param.aud_input_chan_type); + + err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n", + err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + struct hdmi_vendor_infoframe frame; + u8 buffer[10]; + ssize_t err; + + err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode); + if (err) { + dev_err(hdmi->dev, + "Failed to get vendor infoframe from mode: %zd\n", err); + return err; + } + + err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err) { + dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", + err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi) +{ + struct hdmi_audio_param *aud_param = &hdmi->aud_param; + + hdmi->csp = HDMI_COLORSPACE_RGB; + aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; + aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; + aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + + return 0; +} + +void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_aud_enable_packet(hdmi, true); + hdmi->audio_enable = true; +} + +void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_aud_enable_packet(hdmi, false); + hdmi->audio_enable = false; +} + +int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi, + struct hdmi_audio_param *param) +{ + if (!hdmi->audio_enable) { + dev_err(hdmi->dev, "hdmi audio is in disable state!\n"); + return -EINVAL; + } + dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n", + param->aud_codec, param->aud_input_type, + param->aud_input_chan_type, param->codec_params.sample_rate); + memcpy(&hdmi->aud_param, param, sizeof(*param)); + return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode); +} + +static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + int ret; + + mtk_hdmi_hw_vid_black(hdmi, true); + mtk_hdmi_hw_aud_mute(hdmi); + mtk_hdmi_hw_send_av_mute(hdmi); + phy_power_off(hdmi->phy); + + ret = mtk_hdmi_video_change_vpll(hdmi, + mode->clock * 1000); + if (ret) { + dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret); + return ret; + } + mtk_hdmi_video_set_display_mode(hdmi, mode); + + phy_power_on(hdmi->phy); + mtk_hdmi_aud_output_config(hdmi, mode); + + mtk_hdmi_setup_audio_infoframe(hdmi); + mtk_hdmi_setup_avi_infoframe(hdmi, mode); + mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); + if (mode->flags & DRM_MODE_FLAG_3D_MASK) + mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); + + mtk_hdmi_hw_vid_black(hdmi, false); + 
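[Editor's note on the audio clock regeneration configured by mtk_hdmi_aud_output_config() above: the sink rebuilds the audio clock from 128*fs = f_TMDS * N / CTS, so hdmi_recommended_n() earlier in this file picks the N value recommended by the HDMI specification and hdmi_expected_cts() solves for CTS with round-to-nearest division. A self-contained sketch of that arithmetic (the helper name is illustrative; the formula is the one in the patch):]

#include <stdio.h>
#include <stdint.h>

/* CTS = f_TMDS * N / (128 * fs), rounded to nearest */
static uint64_t expected_cts(uint64_t tmds_hz, uint64_t n, uint64_t fs)
{
	return (tmds_hz * n + 64 * fs) / (128 * fs);
}

int main(void)
{
	/* 148.5 MHz TMDS clock, 48 kHz audio, recommended N = 6144 */
	printf("CTS = %llu\n",
	       (unsigned long long)expected_cts(148500000, 6144, 48000));
	return 0;	/* prints CTS = 148500, i.e. f_TMDS in kHz */
}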
mtk_hdmi_hw_aud_unmute(hdmi); + mtk_hdmi_hw_send_av_unmute(hdmi); + + return 0; +} + +static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = { + [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel", + [MTK_HDMI_CLK_HDMI_PLL] = "pll", + [MTK_HDMI_CLK_AUD_BCLK] = "bclk", + [MTK_HDMI_CLK_AUD_SPDIF] = "spdif", +}; + +static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi, + struct device_node *np) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) { + hdmi->clk[i] = of_clk_get_by_name(np, + mtk_hdmi_clk_names[i]); + if (IS_ERR(hdmi->clk[i])) + return PTR_ERR(hdmi->clk[i]); + } + return 0; +} + +static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi) +{ + int ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + if (ret) + return ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]); + if (ret) + goto err; + + return 0; +err: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + return ret; +} + +static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi) +{ + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]); +} + +static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn, + bool force) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); + + return mtk_cec_hpd_high(hdmi->cec_dev) ? + connector_status_connected : connector_status_disconnected; +} + +static void hdmi_conn_destroy(struct drm_connector *conn) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); + + mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL); + + drm_connector_cleanup(conn); +} + +static int mtk_hdmi_conn_get_modes(struct drm_connector *conn) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); + struct edid *edid; + int ret; + + if (!hdmi->ddc_adpt) + return -ENODEV; + + edid = drm_get_edid(conn, hdmi->ddc_adpt); + if (!edid) + return -ENODEV; + + hdmi->dvi_mode = !drm_detect_monitor_audio(edid); + + drm_mode_connector_update_edid_property(conn, edid); + + ret = drm_add_edid_modes(conn, edid); + drm_edid_to_eld(conn, edid); + kfree(edid); + return ret; +} + +static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn, + struct drm_display_mode *mode) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); + + dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", + mode->hdisplay, mode->vdisplay, mode->vrefresh, + !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000); + + if (hdmi->bridge.next) { + struct drm_display_mode adjusted_mode; + + drm_mode_copy(&adjusted_mode, mode); + if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode, + &adjusted_mode)) + return MODE_BAD; + } + + if (mode->clock < 27000) + return MODE_CLOCK_LOW; + if (mode->clock > 297000) + return MODE_CLOCK_HIGH; + + return drm_mode_validate_size(mode, 0x1fff, 0x1fff); +} + +static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn); + + return hdmi->bridge.encoder; +} + +static const struct drm_connector_funcs mtk_hdmi_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = hdmi_conn_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = hdmi_conn_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs + mtk_hdmi_connector_helper_funcs = { + .get_modes = mtk_hdmi_conn_get_modes, + 
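/*
 * Editor's note, not part of the patch: these helper callbacks feed the
 * DRM probe helpers. get_modes parses the EDID fetched over the DDC
 * adapter (and derives dvi_mode from the monitor-audio flag), while
 * mode_valid rejects modes outside the 27-297 MHz TMDS range this
 * encoder supports, plus anything the downstream bridge cannot fix up.
 */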
.mode_valid = mtk_hdmi_conn_mode_valid, + .best_encoder = mtk_hdmi_conn_best_enc, +}; + +static void mtk_hdmi_hpd_event(bool hpd, struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) + drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev); +} + +/* + * Bridge callbacks + */ + +static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn, + &mtk_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret); + return ret; + } + drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs); + + hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD; + hdmi->conn.interlace_allowed = true; + hdmi->conn.doublescan_allowed = false; + + ret = drm_mode_connector_attach_encoder(&hdmi->conn, + bridge->encoder); + if (ret) { + dev_err(hdmi->dev, + "Failed to attach connector to encoder: %d\n", ret); + return ret; + } + + if (bridge->next) { + bridge->next->encoder = bridge->encoder; + ret = drm_bridge_attach(bridge->encoder->dev, bridge->next); + if (ret) { + dev_err(hdmi->dev, + "Failed to attach external bridge: %d\n", ret); + return ret; + } + } + + mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev); + + return 0; +} + +static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->enabled) + return; + + phy_power_off(hdmi->phy); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + + hdmi->enabled = false; +} + +static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->powered) + return; + + mtk_hdmi_hw_1p4_version_enable(hdmi, true); + mtk_hdmi_hw_make_reg_writable(hdmi, false); + + hdmi->powered = false; +} + +static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n", + adjusted_mode->name, adjusted_mode->hdisplay); + dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d", + adjusted_mode->hsync_start, adjusted_mode->hsync_end, + adjusted_mode->htotal); + dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n", + adjusted_mode->hskew, adjusted_mode->vdisplay); + dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d", + adjusted_mode->vsync_start, adjusted_mode->vsync_end, + adjusted_mode->vtotal); + dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n", + adjusted_mode->vscan, adjusted_mode->flags); + + drm_mode_copy(&hdmi->mode, adjusted_mode); +} + +static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + mtk_hdmi_hw_make_reg_writable(hdmi, true); + mtk_hdmi_hw_1p4_version_enable(hdmi, true); + + hdmi->powered = true; +} + +static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode); + clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + 
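[Editor's note: the bridge callbacks above keep powered/enabled flags so the disable paths return early if the pipeline was never brought up, and pre_enable/enable set them on the way up. A toy user-space model of that flag discipline (hypothetical demo, not driver code):]

#include <stdio.h>
#include <stdbool.h>

struct bridge_state { bool powered, enabled; };

static void pre_enable(struct bridge_state *s)	{ s->powered = true; }
static void enable(struct bridge_state *s)	{ s->enabled = true; }

static void disable(struct bridge_state *s)
{
	if (!s->enabled)	/* mirrors the early return in the patch */
		return;
	s->enabled = false;
}

static void post_disable(struct bridge_state *s)
{
	if (!s->powered)
		return;
	s->powered = false;
}

int main(void)
{
	struct bridge_state s = { false, false };

	disable(&s);		/* harmless before bring-up */
	pre_enable(&s); enable(&s);
	disable(&s); post_disable(&s);
	printf("powered=%d enabled=%d\n", s.powered, s.enabled);
	return 0;
}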
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); + phy_power_on(hdmi->phy); + + hdmi->enabled = true; +} + +static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = { + .attach = mtk_hdmi_bridge_attach, + .mode_fixup = mtk_hdmi_bridge_mode_fixup, + .disable = mtk_hdmi_bridge_disable, + .post_disable = mtk_hdmi_bridge_post_disable, + .mode_set = mtk_hdmi_bridge_mode_set, + .pre_enable = mtk_hdmi_bridge_pre_enable, + .enable = mtk_hdmi_bridge_enable, +}; + +static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *cec_np, *port, *ep, *remote, *i2c_np; + struct platform_device *cec_pdev; + struct regmap *regmap; + struct resource *mem; + int ret; + + ret = mtk_hdmi_get_all_clk(hdmi, np); + if (ret) { + dev_err(dev, "Failed to get clocks: %d\n", ret); + return ret; + } + + /* The CEC module handles HDMI hotplug detection */ + cec_np = of_find_compatible_node(np->parent, NULL, + "mediatek,mt8173-cec"); + if (!cec_np) { + dev_err(dev, "Failed to find CEC node\n"); + return -EINVAL; + } + + cec_pdev = of_find_device_by_node(cec_np); + if (!cec_pdev) { + dev_err(hdmi->dev, "Waiting for CEC device %s\n", + cec_np->full_name); + return -EPROBE_DEFER; + } + hdmi->cec_dev = &cec_pdev->dev; + + /* + * The mediatek,syscon-hdmi property contains a phandle link to the + * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG + * registers it contains. + */ + regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi"); + ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1, + &hdmi->sys_offset); + if (IS_ERR(regmap)) + ret = PTR_ERR(regmap); + if (ret) { + ret = PTR_ERR(regmap); + dev_err(dev, + "Failed to get system configuration registers: %d\n", + ret); + return ret; + } + hdmi->sys_regmap = regmap; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdmi->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + port = of_graph_get_port_by_id(np, 1); + if (!port) { + dev_err(dev, "Missing output port node\n"); + return -EINVAL; + } + + ep = of_get_child_by_name(port, "endpoint"); + if (!ep) { + dev_err(dev, "Missing endpoint node in port %s\n", + port->full_name); + of_node_put(port); + return -EINVAL; + } + of_node_put(port); + + remote = of_graph_get_remote_port_parent(ep); + if (!remote) { + dev_err(dev, "Missing connector/bridge node for endpoint %s\n", + ep->full_name); + of_node_put(ep); + return -EINVAL; + } + of_node_put(ep); + + if (!of_device_is_compatible(remote, "hdmi-connector")) { + hdmi->bridge.next = of_drm_find_bridge(remote); + if (!hdmi->bridge.next) { + dev_err(dev, "Waiting for external bridge\n"); + of_node_put(remote); + return -EPROBE_DEFER; + } + } + + i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0); + if (!i2c_np) { + dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n", + remote->full_name); + of_node_put(remote); + return -EINVAL; + } + of_node_put(remote); + + hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + if (!hdmi->ddc_adpt) { + dev_err(dev, "Failed to get ddc i2c adapter by node\n"); + return -EINVAL; + } + + return 0; +} + +/* + * HDMI audio codec callbacks + */ + +static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + struct hdmi_audio_param hdmi_params; + unsigned int chan = params->cea.channels; + 
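[Editor's note on the syscon lookup in mtk_hdmi_dt_parse_pdata() above: if of_property_read_u32_index() fails while syscon_regmap_lookup_by_phandle() succeeded, the second `ret = PTR_ERR(regmap)` inside the `if (ret)` branch converts a valid pointer into a meaningless "error" code; checking IS_ERR(regmap) first and only then the u32 read would avoid that. The snippet below only demonstrates why PTR_ERR() on a valid pointer is garbage, using stand-in macros rather than the kernel's:]

#include <stdio.h>

/* Stand-ins for the kernel's ERR_PTR machinery (illustrative only) */
#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))

int main(void)
{
	void *regmap = (void *)0x1000;	/* a perfectly valid pointer */

	if (!IS_ERR(regmap))
		printf("PTR_ERR(valid) = %ld, not an errno\n",
		       PTR_ERR(regmap));
	return 0;
}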
+ dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__, + params->sample_rate, params->sample_width, chan); + + if (!hdmi->bridge.encoder) + return -ENODEV; + + switch (chan) { + case 2: + hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + break; + case 4: + hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0; + break; + case 6: + hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1; + break; + case 8: + hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1; + break; + default: + dev_err(hdmi->dev, "channel[%d] not supported!\n", chan); + return -EINVAL; + } + + switch (params->sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + case 176400: + case 192000: + break; + default: + dev_err(hdmi->dev, "rate[%d] not supported!\n", + params->sample_rate); + return -EINVAL; + } + + switch (daifmt->fmt) { + case HDMI_I2S: + hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S; + hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS; + break; + default: + dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__, + daifmt->fmt); + return -EINVAL; + } + + memcpy(&hdmi_params.codec_params, params, + sizeof(hdmi_params.codec_params)); + + mtk_hdmi_audio_set_param(hdmi, &hdmi_params); + + return 0; +} + +static int mtk_hdmi_audio_startup(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + dev_dbg(dev, "%s\n", __func__); + + mtk_hdmi_audio_enable(hdmi); + + return 0; +} + +static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + dev_dbg(dev, "%s\n", __func__); + + mtk_hdmi_audio_disable(hdmi); +} + +int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + dev_dbg(dev, "%s(%d)\n", __func__, enable); + + if (enable) + mtk_hdmi_hw_aud_mute(hdmi); + else + mtk_hdmi_hw_aud_unmute(hdmi); + + return 0; +} + +static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + dev_dbg(dev, "%s\n", __func__); + + memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { + .hw_params = mtk_hdmi_audio_hw_params, + .audio_startup = mtk_hdmi_audio_startup, + .audio_shutdown = mtk_hdmi_audio_shutdown, + .digital_mute = mtk_hdmi_audio_digital_mute, + .get_eld = mtk_hdmi_audio_get_eld, +}; + +static void mtk_hdmi_register_audio_driver(struct device *dev) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &mtk_hdmi_audio_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + }; + struct platform_device *pdev; + + pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, &codec_data, + sizeof(codec_data)); + if (IS_ERR(pdev)) + return; + + DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); +} + +static int mtk_drm_hdmi_probe(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi; + struct device *dev = &pdev->dev; + int ret; + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->dev = dev; + + ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev); + if (ret) + return ret; + + hdmi->phy = devm_phy_get(dev, "hdmi"); + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + dev_err(dev, "Failed to get HDMI PHY: 
%d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, hdmi); + + ret = mtk_hdmi_output_init(hdmi); + if (ret) { + dev_err(dev, "Failed to initialize hdmi output\n"); + return ret; + } + + mtk_hdmi_register_audio_driver(dev); + + hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs; + hdmi->bridge.of_node = pdev->dev.of_node; + ret = drm_bridge_add(&hdmi->bridge); + if (ret) { + dev_err(dev, "failed to add bridge, ret = %d\n", ret); + return ret; + } + + ret = mtk_hdmi_clk_enable_audio(hdmi); + if (ret) { + dev_err(dev, "Failed to enable audio clocks: %d\n", ret); + goto err_bridge_remove; + } + + dev_dbg(dev, "mediatek hdmi probe success\n"); + return 0; + +err_bridge_remove: + drm_bridge_remove(&hdmi->bridge); + return ret; +} + +static int mtk_drm_hdmi_remove(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi = platform_get_drvdata(pdev); + + drm_bridge_remove(&hdmi->bridge); + mtk_hdmi_clk_disable_audio(hdmi); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_hdmi_suspend(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_clk_disable_audio(hdmi); + dev_dbg(dev, "hdmi suspend success!\n"); + return 0; +} + +static int mtk_hdmi_resume(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + int ret = 0; + + ret = mtk_hdmi_clk_enable_audio(hdmi); + if (ret) { + dev_err(dev, "hdmi resume failed!\n"); + return ret; + } + + dev_dbg(dev, "hdmi resume success!\n"); + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, + mtk_hdmi_suspend, mtk_hdmi_resume); + +static const struct of_device_id mtk_drm_hdmi_of_ids[] = { + { .compatible = "mediatek,mt8173-hdmi", }, + {} +}; + +static struct platform_driver mtk_hdmi_driver = { + .probe = mtk_drm_hdmi_probe, + .remove = mtk_drm_hdmi_remove, + .driver = { + .name = "mediatek-drm-hdmi", + .of_match_table = mtk_drm_hdmi_of_ids, + .pm = &mtk_hdmi_pm_ops, + }, +}; + +static struct platform_driver * const mtk_hdmi_drivers[] = { + &mtk_hdmi_phy_driver, + &mtk_hdmi_ddc_driver, + &mtk_cec_driver, + &mtk_hdmi_driver, +}; + +static int __init mtk_hdmitx_init(void) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) { + ret = platform_driver_register(mtk_hdmi_drivers[i]); + if (ret < 0) { + pr_err("Failed to register %s driver: %d\n", + mtk_hdmi_drivers[i]->driver.name, ret); + goto err; + } + } + + return 0; + +err: + while (--i >= 0) + platform_driver_unregister(mtk_hdmi_drivers[i]); + + return ret; +} + +static void __exit mtk_hdmitx_exit(void) +{ + int i; + + for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--) + platform_driver_unregister(mtk_hdmi_drivers[i]); +} + +module_init(mtk_hdmitx_init); +module_exit(mtk_hdmitx_exit); + +MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h new file mode 100644 index 000000000000..6371b3de1ff6 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#ifndef _MTK_HDMI_CTRL_H +#define _MTK_HDMI_CTRL_H + +struct platform_driver; + +extern struct platform_driver mtk_cec_driver; +extern struct platform_driver mtk_hdmi_ddc_driver; +extern struct platform_driver mtk_hdmi_phy_driver; + +#endif /* _MTK_HDMI_CTRL_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c new file mode 100644 index 000000000000..33c9e1bdb114 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#define SIF1_CLOK (288) +#define DDC_DDCMCTL0 (0x0) +#define DDCM_ODRAIN BIT(31) +#define DDCM_CLK_DIV_OFFSET (16) +#define DDCM_CLK_DIV_MASK (0xfff << 16) +#define DDCM_CS_STATUS BIT(4) +#define DDCM_SCL_STATE BIT(3) +#define DDCM_SDA_STATE BIT(2) +#define DDCM_SM0EN BIT(1) +#define DDCM_SCL_STRECH BIT(0) +#define DDC_DDCMCTL1 (0x4) +#define DDCM_ACK_OFFSET (16) +#define DDCM_ACK_MASK (0xff << 16) +#define DDCM_PGLEN_OFFSET (8) +#define DDCM_PGLEN_MASK (0x7 << 8) +#define DDCM_SIF_MODE_OFFSET (4) +#define DDCM_SIF_MODE_MASK (0x7 << 4) +#define DDCM_START (0x1) +#define DDCM_WRITE_DATA (0x2) +#define DDCM_STOP (0x3) +#define DDCM_READ_DATA_NO_ACK (0x4) +#define DDCM_READ_DATA_ACK (0x5) +#define DDCM_TRI BIT(0) +#define DDC_DDCMD0 (0x8) +#define DDCM_DATA3 (0xff << 24) +#define DDCM_DATA2 (0xff << 16) +#define DDCM_DATA1 (0xff << 8) +#define DDCM_DATA0 (0xff << 0) +#define DDC_DDCMD1 (0xc) +#define DDCM_DATA7 (0xff << 24) +#define DDCM_DATA6 (0xff << 16) +#define DDCM_DATA5 (0xff << 8) +#define DDCM_DATA4 (0xff << 0) + +struct mtk_hdmi_ddc { + struct i2c_adapter adap; + struct clk *clk; + void __iomem *regs; +}; + +static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + writel(readl(ddc->regs + offset) | val, ddc->regs + offset); +} + +static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset); +} + +static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + return (readl(ddc->regs + offset) & val) == val; +} + +static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int mask, unsigned int shift, + unsigned int val) +{ + unsigned int tmp; + + tmp = readl(ddc->regs + offset); + tmp &= ~mask; + tmp |= (val << shift) & mask; + writel(tmp, ddc->regs + offset); +} + +static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc, + unsigned int offset, unsigned int mask, + unsigned int shift) 
+{ + return (readl(ddc->regs + offset) & mask) >> shift; +} + +static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode) +{ + u32 val; + + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK, + DDCM_SIF_MODE_OFFSET, mode); + sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI); + readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val, + (val & DDCM_TRI) != DDCM_TRI, 4, 20000); +} + +static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg) +{ + struct device *dev = ddc->adap.dev.parent; + u32 remain_count, ack_count, ack_final, read_count, temp_count; + u32 index = 0; + u32 ack; + int i; + + ddcm_trigger_mode(ddc, DDCM_START); + sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01); + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET, + 0x00); + ddcm_trigger_mode(ddc, DDCM_WRITE_DATA); + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET); + dev_dbg(dev, "ack = 0x%x\n", ack); + if (ack != 0x01) { + dev_err(dev, "i2c ack err!\n"); + return -ENXIO; + } + + remain_count = msg->len; + ack_count = (msg->len - 1) / 8; + ack_final = 0; + + while (remain_count > 0) { + if (ack_count > 0) { + read_count = 8; + ack_final = 0; + ack_count--; + } else { + read_count = remain_count; + ack_final = 1; + } + + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, + DDCM_PGLEN_OFFSET, read_count - 1); + ddcm_trigger_mode(ddc, (ack_final == 1) ? + DDCM_READ_DATA_NO_ACK : + DDCM_READ_DATA_ACK); + + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, + DDCM_ACK_OFFSET); + temp_count = 0; + while (((ack & (1 << temp_count)) != 0) && (temp_count < 8)) + temp_count++; + if (((ack_final == 1) && (temp_count != (read_count - 1))) || + ((ack_final == 0) && (temp_count != read_count))) { + dev_err(dev, "Address NACK! 
ACK(0x%x)\n", ack); + break; + } + + for (i = read_count; i >= 1; i--) { + int shift; + int offset; + + if (i > 4) { + offset = DDC_DDCMD1; + shift = (i - 5) * 8; + } else { + offset = DDC_DDCMD0; + shift = (i - 1) * 8; + } + + msg->buf[index + i - 1] = sif_read_mask(ddc, offset, + 0xff << shift, + shift); + } + + remain_count -= read_count; + index += read_count; + } + + return 0; +} + +static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg) +{ + struct device *dev = ddc->adap.dev.parent; + u32 ack; + + ddcm_trigger_mode(ddc, DDCM_START); + sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1); + sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]); + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET, + 0x1); + ddcm_trigger_mode(ddc, DDCM_WRITE_DATA); + + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET); + dev_dbg(dev, "ack = %d\n", ack); + + if (ack != 0x03) { + dev_err(dev, "i2c ack err!\n"); + return -EIO; + } + + return 0; +} + +static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + struct mtk_hdmi_ddc *ddc = adapter->algo_data; + struct device *dev = adapter->dev.parent; + int ret; + int i; + + if (!ddc) { + dev_err(dev, "invalid arguments\n"); + return -EINVAL; + } + + sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH); + sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN); + sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN); + + if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) { + dev_err(dev, "ddc line is busy!\n"); + return -EBUSY; + } + + sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK, + DDCM_CLK_DIV_OFFSET, SIF1_CLOK); + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + + dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n", + msg->addr, msg->flags, msg->len); + + if (msg->flags & I2C_M_RD) + ret = mtk_hdmi_ddc_read_msg(ddc, msg); + else + ret = mtk_hdmi_ddc_write_msg(ddc, msg); + if (ret < 0) + goto xfer_end; + } + + ddcm_trigger_mode(ddc, DDCM_STOP); + + return i; + +xfer_end: + ddcm_trigger_mode(ddc, DDCM_STOP); + dev_err(dev, "ddc failed!\n"); + return ret; +} + +static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = { + .master_xfer = mtk_hdmi_ddc_xfer, + .functionality = mtk_hdmi_ddc_func, +}; + +static int mtk_hdmi_ddc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_hdmi_ddc *ddc; + struct resource *mem; + int ret; + + ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL); + if (!ddc) + return -ENOMEM; + + ddc->clk = devm_clk_get(dev, "ddc-i2c"); + if (IS_ERR(ddc->clk)) { + dev_err(dev, "get ddc_clk failed: %p ,\n", ddc->clk); + return PTR_ERR(ddc->clk); + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ddc->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(ddc->regs)) + return PTR_ERR(ddc->regs); + + ret = clk_prepare_enable(ddc->clk); + if (ret) { + dev_err(dev, "enable ddc clk failed!\n"); + return ret; + } + + strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); + ddc->adap.owner = THIS_MODULE; + ddc->adap.class = I2C_CLASS_DDC; + ddc->adap.algo = &mtk_hdmi_ddc_algorithm; + ddc->adap.retries = 3; + ddc->adap.dev.of_node = dev->of_node; + ddc->adap.algo_data = ddc; + ddc->adap.dev.parent = &pdev->dev; + + ret = i2c_add_adapter(&ddc->adap); + if (ret < 0) { + dev_err(dev, "failed to add bus to i2c core\n"); + goto err_clk_disable; + } + + 
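[Editor's note on the EDID read loop in mtk_hdmi_ddc_read_msg() above: a transfer of len bytes is issued as (len - 1) / 8 full eight-byte bursts terminated with ACK, plus one final burst terminated with NO-ACK to end the I2C read, and the returned ACK bits are counted to detect a NACKing slave. A standalone model of just the burst planning (illustrative only):]

#include <stdio.h>

int main(void)
{
	unsigned int len = 19;		/* e.g. a partial EDID read */
	unsigned int remain = len, acks = (len - 1) / 8;

	while (remain) {
		unsigned int burst = acks ? 8 : remain;

		printf("read %u byte(s), %s\n",
		       burst, acks ? "ACK" : "NO-ACK (final)");
		if (acks)
			acks--;
		remain -= burst;
	}
	return 0;	/* 8 + 8 + 3 bytes, last burst unacknowledged */
}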
platform_set_drvdata(pdev, ddc); + + dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap); + dev_dbg(dev, "ddc->clk: %p\n", ddc->clk); + dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start, + &mem->end); + + return 0; + +err_clk_disable: + clk_disable_unprepare(ddc->clk); + return ret; +} + +static int mtk_hdmi_ddc_remove(struct platform_device *pdev) +{ + struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev); + + i2c_del_adapter(&ddc->adap); + clk_disable_unprepare(ddc->clk); + + return 0; +} + +static const struct of_device_id mtk_hdmi_ddc_match[] = { + { .compatible = "mediatek,mt8173-hdmi-ddc", }, + {}, +}; + +struct platform_driver mtk_hdmi_ddc_driver = { + .probe = mtk_hdmi_ddc_probe, + .remove = mtk_hdmi_ddc_remove, + .driver = { + .name = "mediatek-hdmi-ddc", + .of_match_table = mtk_hdmi_ddc_match, + }, +}; + +MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMI DDC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h new file mode 100644 index 000000000000..a5cb07d12c9c --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MTK_HDMI_REGS_H +#define _MTK_HDMI_REGS_H + +#define GRL_INT_MASK 0x18 +#define GRL_IFM_PORT 0x188 +#define GRL_CH_SWAP 0x198 +#define LR_SWAP BIT(0) +#define LFE_CC_SWAP BIT(1) +#define LSRS_SWAP BIT(2) +#define RLS_RRS_SWAP BIT(3) +#define LR_STATUS_SWAP BIT(4) +#define GRL_I2S_C_STA0 0x140 +#define GRL_I2S_C_STA1 0x144 +#define GRL_I2S_C_STA2 0x148 +#define GRL_I2S_C_STA3 0x14C +#define GRL_I2S_C_STA4 0x150 +#define GRL_I2S_UV 0x154 +#define I2S_UV_V BIT(0) +#define I2S_UV_U BIT(1) +#define I2S_UV_CH_EN_MASK 0x3c +#define I2S_UV_CH_EN(x) BIT((x) + 2) +#define I2S_UV_TMDS_DEBUG BIT(6) +#define I2S_UV_NORMAL_INFO_INV BIT(7) +#define GRL_ACP_ISRC_CTRL 0x158 +#define VS_EN BIT(0) +#define ACP_EN BIT(1) +#define ISRC1_EN BIT(2) +#define ISRC2_EN BIT(3) +#define GAMUT_EN BIT(4) +#define GRL_CTS_CTRL 0x160 +#define CTS_CTRL_SOFT BIT(0) +#define GRL_INT 0x14 +#define INT_MDI BIT(0) +#define INT_HDCP BIT(1) +#define INT_FIFO_O BIT(2) +#define INT_FIFO_U BIT(3) +#define INT_IFM_ERR BIT(4) +#define INT_INF_DONE BIT(5) +#define INT_NCTS_DONE BIT(6) +#define INT_CTRL_PKT_DONE BIT(7) +#define GRL_INT_MASK 0x18 +#define GRL_CTRL 0x1C +#define CTRL_GEN_EN BIT(2) +#define CTRL_SPD_EN BIT(3) +#define CTRL_MPEG_EN BIT(4) +#define CTRL_AUDIO_EN BIT(5) +#define CTRL_AVI_EN BIT(6) +#define CTRL_AVMUTE BIT(7) +#define GRL_STATUS 0x20 +#define STATUS_HTPLG BIT(0) +#define STATUS_PORD BIT(1) +#define GRL_DIVN 0x170 +#define NCTS_WRI_ANYTIME BIT(6) +#define GRL_AUDIO_CFG 0x17C +#define AUDIO_ZERO BIT(0) +#define HIGH_BIT_RATE BIT(1) +#define SACD_DST BIT(2) +#define DST_NORMAL_DOUBLE BIT(3) +#define DSD_INV BIT(4) +#define LR_INV BIT(5) +#define LR_MIX BIT(6) +#define DSD_SEL BIT(7) +#define GRL_NCTS 0x184 +#define GRL_CH_SW0 0x18C +#define GRL_CH_SW1 0x190 +#define GRL_CH_SW2 0x194 +#define CH_SWITCH(from, to) 
((from) << ((to) * 3)) +#define GRL_INFOFRM_VER 0x19C +#define GRL_INFOFRM_TYPE 0x1A0 +#define GRL_INFOFRM_LNG 0x1A4 +#define GRL_MIX_CTRL 0x1B4 +#define MIX_CTRL_SRC_EN BIT(0) +#define BYPASS_VOLUME BIT(1) +#define MIX_CTRL_FLAT BIT(7) +#define GRL_AOUT_CFG 0x1C4 +#define AOUT_BNUM_SEL_MASK 0x03 +#define AOUT_24BIT 0x00 +#define AOUT_20BIT 0x02 +#define AOUT_16BIT 0x03 +#define AOUT_FIFO_ADAP_CTRL BIT(6) +#define AOUT_BURST_PREAMBLE_EN BIT(7) +#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \ + AOUT_FIFO_ADAP_CTRL) +#define GRL_SHIFT_L1 0x1C0 +#define GRL_SHIFT_R2 0x1B0 +#define AUDIO_PACKET_OFF BIT(6) +#define GRL_CFG0 0x24 +#define CFG0_I2S_MODE_MASK 0x3 +#define CFG0_I2S_MODE_RTJ 0x1 +#define CFG0_I2S_MODE_LTJ 0x0 +#define CFG0_I2S_MODE_I2S 0x2 +#define CFG0_W_LENGTH_MASK 0x30 +#define CFG0_W_LENGTH_24BIT 0x00 +#define CFG0_W_LENGTH_16BIT 0x10 +#define GRL_CFG1 0x28 +#define CFG1_EDG_SEL BIT(0) +#define CFG1_SPDIF BIT(1) +#define CFG1_DVI BIT(2) +#define CFG1_HDCP_DEBUG BIT(3) +#define GRL_CFG2 0x2c +#define CFG2_MHL_DE_SEL BIT(3) +#define CFG2_MHL_FAKE_DE_SEL BIT(4) +#define CFG2_MHL_DATA_REMAP BIT(5) +#define CFG2_NOTICE_EN BIT(6) +#define CFG2_ACLK_INV BIT(7) +#define GRL_CFG3 0x30 +#define CFG3_AES_KEY_INDEX_MASK 0x3f +#define CFG3_CONTROL_PACKET_DELAY BIT(6) +#define CFG3_KSV_LOAD_START BIT(7) +#define GRL_CFG4 0x34 +#define CFG4_AES_KEY_LOAD BIT(4) +#define CFG4_AV_UNMUTE_EN BIT(5) +#define CFG4_AV_UNMUTE_SET BIT(6) +#define CFG4_MHL_MODE BIT(7) +#define GRL_CFG5 0x38 +#define CFG5_CD_RATIO_MASK 0x8F +#define CFG5_FS128 (0x1 << 4) +#define CFG5_FS256 (0x2 << 4) +#define CFG5_FS384 (0x3 << 4) +#define CFG5_FS512 (0x4 << 4) +#define CFG5_FS768 (0x6 << 4) +#define DUMMY_304 0x304 +#define CHMO_SEL (0x3 << 2) +#define CHM1_SEL (0x3 << 4) +#define CHM2_SEL (0x3 << 6) +#define AUDIO_I2S_NCTS_SEL BIT(1) +#define AUDIO_I2S_NCTS_SEL_64 (1 << 1) +#define AUDIO_I2S_NCTS_SEL_128 (0 << 1) +#define NEW_GCP_CTRL BIT(0) +#define NEW_GCP_CTRL_MERGE BIT(0) +#define GRL_L_STATUS_0 0x200 +#define GRL_L_STATUS_1 0x204 +#define GRL_L_STATUS_2 0x208 +#define GRL_L_STATUS_3 0x20c +#define GRL_L_STATUS_4 0x210 +#define GRL_L_STATUS_5 0x214 +#define GRL_L_STATUS_6 0x218 +#define GRL_L_STATUS_7 0x21c +#define GRL_L_STATUS_8 0x220 +#define GRL_L_STATUS_9 0x224 +#define GRL_L_STATUS_10 0x228 +#define GRL_L_STATUS_11 0x22c +#define GRL_L_STATUS_12 0x230 +#define GRL_L_STATUS_13 0x234 +#define GRL_L_STATUS_14 0x238 +#define GRL_L_STATUS_15 0x23c +#define GRL_L_STATUS_16 0x240 +#define GRL_L_STATUS_17 0x244 +#define GRL_L_STATUS_18 0x248 +#define GRL_L_STATUS_19 0x24c +#define GRL_L_STATUS_20 0x250 +#define GRL_L_STATUS_21 0x254 +#define GRL_L_STATUS_22 0x258 +#define GRL_L_STATUS_23 0x25c +#define GRL_R_STATUS_0 0x260 +#define GRL_R_STATUS_1 0x264 +#define GRL_R_STATUS_2 0x268 +#define GRL_R_STATUS_3 0x26c +#define GRL_R_STATUS_4 0x270 +#define GRL_R_STATUS_5 0x274 +#define GRL_R_STATUS_6 0x278 +#define GRL_R_STATUS_7 0x27c +#define GRL_R_STATUS_8 0x280 +#define GRL_R_STATUS_9 0x284 +#define GRL_R_STATUS_10 0x288 +#define GRL_R_STATUS_11 0x28c +#define GRL_R_STATUS_12 0x290 +#define GRL_R_STATUS_13 0x294 +#define GRL_R_STATUS_14 0x298 +#define GRL_R_STATUS_15 0x29c +#define GRL_R_STATUS_16 0x2a0 +#define GRL_R_STATUS_17 0x2a4 +#define GRL_R_STATUS_18 0x2a8 +#define GRL_R_STATUS_19 0x2ac +#define GRL_R_STATUS_20 0x2b0 +#define GRL_R_STATUS_21 0x2b4 +#define GRL_R_STATUS_22 0x2b8 +#define GRL_R_STATUS_23 0x2bc +#define GRL_ABIST_CTRL0 0x2D4 +#define GRL_ABIST_CTRL1 0x2D8 +#define ABIST_EN BIT(7) 
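[Editor's note: the CH_SWITCH(from, to) macro above packs a 3-bit source-channel index per destination channel; mtk_hdmi_hw_aud_set_i2s_chan_num() earlier in this patch builds a 24-bit map that is the identity routing except channels 1 and 2 are swapped, then splits it across GRL_CH_SW0/1/2. Incidentally, GRL_INT_MASK 0x18 is defined twice in this header with the same value, which the compiler accepts but leaves one copy redundant. Reproducing the routing computation stand-alone:]

#include <stdio.h>

#define CH_SWITCH(from, to)	((from) << ((to) * 3))

int main(void)
{
	unsigned int map = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
			   CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
			   CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
			   CH_SWITCH(2, 1) | CH_SWITCH(0, 0);

	/* the driver writes these three bytes to GRL_CH_SW0/1/2 */
	printf("SW0=0x%02x SW1=0x%02x SW2=0x%02x\n",
	       map & 0xff, (map >> 8) & 0xff, (map >> 16) & 0xff);
	return 0;	/* SW0=0x50 SW1=0xc6 SW2=0xfa */
}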
+#define ABIST_DATA_FMT (0x7 << 0) +#define VIDEO_CFG_0 0x380 +#define VIDEO_CFG_1 0x384 +#define VIDEO_CFG_2 0x388 +#define VIDEO_CFG_3 0x38c +#define VIDEO_CFG_4 0x390 +#define VIDEO_SOURCE_SEL BIT(7) +#define NORMAL_PATH (1 << 7) +#define GEN_RGB (0 << 7) + +#define HDMI_SYS_CFG1C 0x000 +#define HDMI_ON BIT(0) +#define HDMI_RST BIT(1) +#define ANLG_ON BIT(2) +#define CFG10_DVI BIT(3) +#define HDMI_TST BIT(3) +#define SYS_KEYMASK1 (0xff << 8) +#define SYS_KEYMASK2 (0xff << 16) +#define AUD_OUTSYNC_EN BIT(24) +#define AUD_OUTSYNC_PRE_EN BIT(25) +#define I2CM_ON BIT(26) +#define E2PROM_TYPE_8BIT BIT(27) +#define MCM_E2PROM_ON BIT(28) +#define EXT_E2PROM_ON BIT(29) +#define HTPLG_PIN_SEL_OFF BIT(30) +#define AES_EFUSE_ENABLE BIT(31) +#define HDMI_SYS_CFG20 0x004 +#define DEEP_COLOR_MODE_MASK (3 << 1) +#define COLOR_8BIT_MODE (0 << 1) +#define COLOR_10BIT_MODE (1 << 1) +#define COLOR_12BIT_MODE (2 << 1) +#define COLOR_16BIT_MODE (3 << 1) +#define DEEP_COLOR_EN BIT(0) +#define HDMI_AUDIO_TEST_SEL BIT(8) +#define HDMI2P0_EN BIT(11) +#define HDMI_OUT_FIFO_EN BIT(16) +#define HDMI_OUT_FIFO_CLK_INV BIT(17) +#define MHL_MODE_ON BIT(28) +#define MHL_PP_MODE BIT(29) +#define MHL_SYNC_AUTO_EN BIT(30) +#define HDMI_PCLK_FREE_RUN BIT(31) + +#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001 +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index cf8f38d39e10..1c366f8cb2d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) phy_set_drvdata(phy, mipi_tx); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy)) { + if (IS_ERR(phy_provider)) { ret = PTR_ERR(phy_provider); return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c new file mode 100644 index 000000000000..8a24754b440f --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -0,0 +1,515 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#define HDMI_CON0 0x00 +#define RG_HDMITX_PLL_EN BIT(31) +#define RG_HDMITX_PLL_FBKDIV (0x7f << 24) +#define PLL_FBKDIV_SHIFT 24 +#define RG_HDMITX_PLL_FBKSEL (0x3 << 22) +#define PLL_FBKSEL_SHIFT 22 +#define RG_HDMITX_PLL_PREDIV (0x3 << 20) +#define PREDIV_SHIFT 20 +#define RG_HDMITX_PLL_POSDIV (0x3 << 18) +#define POSDIV_SHIFT 18 +#define RG_HDMITX_PLL_RST_DLY (0x3 << 16) +#define RG_HDMITX_PLL_IR (0xf << 12) +#define PLL_IR_SHIFT 12 +#define RG_HDMITX_PLL_IC (0xf << 8) +#define PLL_IC_SHIFT 8 +#define RG_HDMITX_PLL_BP (0xf << 4) +#define PLL_BP_SHIFT 4 +#define RG_HDMITX_PLL_BR (0x3 << 2) +#define PLL_BR_SHIFT 2 +#define RG_HDMITX_PLL_BC (0x3 << 0) +#define PLL_BC_SHIFT 0 +#define HDMI_CON1 0x04 +#define RG_HDMITX_PLL_DIVEN (0x7 << 29) +#define PLL_DIVEN_SHIFT 29 +#define RG_HDMITX_PLL_AUTOK_EN BIT(28) +#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26) +#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24) +#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23) +#define RG_HDMITX_PLL_BAND (0x3f << 16) +#define RG_HDMITX_PLL_REF_SEL BIT(15) +#define RG_HDMITX_PLL_BIAS_EN BIT(14) +#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13) +#define RG_HDMITX_PLL_TXDIV_EN BIT(12) +#define RG_HDMITX_PLL_TXDIV (0x3 << 10) +#define PLL_TXDIV_SHIFT 10 +#define RG_HDMITX_PLL_LVROD_EN BIT(9) +#define RG_HDMITX_PLL_MONVC_EN BIT(8) +#define RG_HDMITX_PLL_MONCK_EN BIT(7) +#define RG_HDMITX_PLL_MONREF_EN BIT(6) +#define RG_HDMITX_PLL_TST_EN BIT(5) +#define RG_HDMITX_PLL_TST_CK_EN BIT(4) +#define RG_HDMITX_PLL_TST_SEL (0xf << 0) +#define HDMI_CON2 0x08 +#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8) +#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1) +#define RG_HDMITX_EN_TX_CKLDO BIT(0) +#define HDMI_CON3 0x0c +#define RG_HDMITX_SER_EN (0xf << 28) +#define RG_HDMITX_PRD_EN (0xf << 24) +#define RG_HDMITX_PRD_IMP_EN (0xf << 20) +#define RG_HDMITX_DRV_EN (0xf << 16) +#define RG_HDMITX_DRV_IMP_EN (0xf << 12) +#define DRV_IMP_EN_SHIFT 12 +#define RG_HDMITX_MHLCK_FORCE BIT(10) +#define RG_HDMITX_MHLCK_PPIX_EN BIT(9) +#define RG_HDMITX_MHLCK_EN BIT(8) +#define RG_HDMITX_SER_DIN_SEL (0xf << 4) +#define RG_HDMITX_SER_5T1_BIST_EN BIT(3) +#define RG_HDMITX_SER_BIST_TOG BIT(2) +#define RG_HDMITX_SER_DIN_TOG BIT(1) +#define RG_HDMITX_SER_CLKDIG_INV BIT(0) +#define HDMI_CON4 0x10 +#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24) +#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16) +#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8) +#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0) +#define PRD_IBIAS_CLK_SHIFT 24 +#define PRD_IBIAS_D2_SHIFT 16 +#define PRD_IBIAS_D1_SHIFT 8 +#define PRD_IBIAS_D0_SHIFT 0 +#define HDMI_CON5 0x14 +#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24) +#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16) +#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8) +#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0) +#define DRV_IBIAS_CLK_SHIFT 24 +#define DRV_IBIAS_D2_SHIFT 16 +#define DRV_IBIAS_D1_SHIFT 8 +#define DRV_IBIAS_D0_SHIFT 0 +#define HDMI_CON6 0x18 +#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24) +#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16) +#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8) +#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0) +#define DRV_IMP_CLK_SHIFT 24 +#define DRV_IMP_D2_SHIFT 16 +#define DRV_IMP_D1_SHIFT 8 +#define DRV_IMP_D0_SHIFT 0 +#define HDMI_CON7 0x1c +#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27) +#define RG_HDMITX_SER_DIN (0x3ff << 16) 
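[Editor's note: each multi-bit RG_HDMITX_* field above is paired with a *_SHIFT constant, and mtk_hdmi_phy_mask() later in this file installs (value << SHIFT) under the field mask. A minimal user-space model of that update, reusing the feedback-divider field from this patch (mtk_hdmi_pll_set_rate() below programs a fixed divider of 19; everything else here is an arbitrary example):]

#include <stdio.h>
#include <stdint.h>

#define RG_HDMITX_PLL_FBKDIV	(0x7f << 24)
#define PLL_FBKDIV_SHIFT	24

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);	/* read-modify-write */
}

int main(void)
{
	uint32_t con0 = 0;

	con0 = field_set(con0, RG_HDMITX_PLL_FBKDIV,
			 19 << PLL_FBKDIV_SHIFT);	/* fbkdiv = 19 */
	printf("HDMI_CON0 = 0x%08x\n", con0);		/* 0x13000000 */
	return 0;
}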
+#define RG_HDMITX_CHLDC_TST (0xf << 12) +#define RG_HDMITX_CHLCK_TST (0xf << 8) +#define RG_HDMITX_RESERVE (0xff << 0) +#define HDMI_CON8 0x20 +#define RGS_HDMITX_2T1_LEV (0xf << 16) +#define RGS_HDMITX_2T1_EDG (0xf << 12) +#define RGS_HDMITX_5T1_LEV (0xf << 8) +#define RGS_HDMITX_5T1_EDG (0xf << 4) +#define RGS_HDMITX_PLUG_TST BIT(0) + +struct mtk_hdmi_phy { + void __iomem *regs; + struct device *dev; + struct clk *pll; + struct clk_hw pll_hw; + unsigned long pll_rate; + u8 drv_imp_clk; + u8 drv_imp_d2; + u8 drv_imp_d1; + u8 drv_imp_d0; + u32 ibias; + u32 ibias_up; +}; + +static const u8 PREDIV[3][4] = { + {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */ + {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */ + {0x1, 0x1, 0x1, 0x1} /* 148Mhz */ +}; + +static const u8 TXDIV[3][4] = { + {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */ + {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */ + {0x1, 0x0, 0x0, 0x0} /* 148Mhz */ +}; + +static const u8 FBKSEL[3][4] = { + {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */ + {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */ + {0x1, 0x0, 0x1, 0x1} /* 148Mhz */ +}; + +static const u8 FBKDIV[3][4] = { + {19, 24, 29, 19}, /* 27Mhz */ + {19, 24, 14, 19}, /* 74Mhz */ + {19, 24, 14, 19} /* 148Mhz */ +}; + +static const u8 DIVEN[3][4] = { + {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */ + {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */ + {0x2, 0x2, 0x2, 0x2} /* 148Mhz */ +}; + +static const u8 HTPLLBP[3][4] = { + {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */ + {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */ + {0xc, 0xf, 0xf, 0xc} /* 148Mhz */ +}; + +static const u8 HTPLLBC[3][4] = { + {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */ + {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */ + {0x2, 0x3, 0x3, 0x2} /* 148Mhz */ +}; + +static const u8 HTPLLBR[3][4] = { + {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */ + {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */ + {0x1, 0x2, 0x2, 0x1} /* 148Mhz */ +}; + +static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, + u32 bits) +{ + void __iomem *reg = hdmi_phy->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp &= ~bits; + writel(tmp, reg); +} + +static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, + u32 bits) +{ + void __iomem *reg = hdmi_phy->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp |= bits; + writel(tmp, reg); +} + +static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, + u32 val, u32 mask) +{ + void __iomem *reg = hdmi_phy->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp = (tmp & ~mask) | (val & mask); + writel(tmp, reg); +} + +static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw) +{ + return container_of(hw, struct mtk_hdmi_phy, pll_hw); +} + +static int mtk_hdmi_pll_prepare(struct clk_hw *hw) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + dev_dbg(hdmi_phy->dev, "%s\n", __func__); + + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN); + usleep_range(100, 150); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN); + usleep_range(100, 150); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); + + return 0; +} + +static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + dev_dbg(hdmi_phy->dev, "%s\n", __func__); + + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, 
RG_HDMITX_PLL_BIAS_LPF_EN); + usleep_range(100, 150); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN); + usleep_range(100, 150); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); + usleep_range(100, 150); +} + +static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + unsigned int pre_div; + unsigned int div; + + dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, + rate, parent_rate); + + if (rate <= 27000000) { + pre_div = 0; + div = 3; + } else if (rate <= 74250000) { + pre_div = 1; + div = 2; + } else { + pre_div = 1; + div = 1; + } + + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, + (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, + (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT), + RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, + (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, + (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT), + RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, + (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, + (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) | + (0x1 << PLL_BR_SHIFT), + RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | + RG_HDMITX_PLL_BR); + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, + (0x3 << PRD_IBIAS_CLK_SHIFT) | + (0x3 << PRD_IBIAS_D2_SHIFT) | + (0x3 << PRD_IBIAS_D1_SHIFT) | + (0x3 << PRD_IBIAS_D0_SHIFT), + RG_HDMITX_PRD_IBIAS_CLK | + RG_HDMITX_PRD_IBIAS_D2 | + RG_HDMITX_PRD_IBIAS_D1 | + RG_HDMITX_PRD_IBIAS_D0); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, + (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, + (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | + (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | + (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) | + (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT), + RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | + RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); + mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, + (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | + (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | + (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | + (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), + RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | + RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); + return 0; +} + +static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + hdmi_phy->pll_rate = rate; + if (rate <= 74250000) + *parent_rate = rate; + else + *parent_rate = rate / 2; + + return rate; +} + +static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + return hdmi_phy->pll_rate; +} + +static const struct clk_ops mtk_hdmi_pll_ops = { + .prepare = mtk_hdmi_pll_prepare, + .unprepare = mtk_hdmi_pll_unprepare, + .set_rate = mtk_hdmi_pll_set_rate, + .round_rate = mtk_hdmi_pll_round_rate, + .recalc_rate = mtk_hdmi_pll_recalc_rate, +}; + +static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) +{ + 
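/*
 * Editor's note, not part of the patch: the RG_HDMITX_SER_EN,
 * RG_HDMITX_PRD_EN and RG_HDMITX_DRV_EN fields set just below are
 * 4-bit masks in HDMI_CON3, presumably one bit per TMDS lane (three
 * data channels plus clock), so a single write enables the serializer,
 * pre-driver and output driver on all four lanes at once.
 */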
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, + RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN | + RG_HDMITX_DRV_EN); + usleep_range(100, 150); +} + +static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) +{ + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, + RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN | + RG_HDMITX_SER_EN); +} + +static int mtk_hdmi_phy_power_on(struct phy *phy) +{ + struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy); + int ret; + + ret = clk_prepare_enable(hdmi_phy->pll); + if (ret < 0) + return ret; + + mtk_hdmi_phy_enable_tmds(hdmi_phy); + + return 0; +} + +static int mtk_hdmi_phy_power_off(struct phy *phy) +{ + struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy); + + mtk_hdmi_phy_disable_tmds(hdmi_phy); + clk_disable_unprepare(hdmi_phy->pll); + + return 0; +} + +static const struct phy_ops mtk_hdmi_phy_ops = { + .power_on = mtk_hdmi_phy_power_on, + .power_off = mtk_hdmi_phy_power_off, + .owner = THIS_MODULE, +}; + +static int mtk_hdmi_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_hdmi_phy *hdmi_phy; + struct resource *mem; + struct clk *ref_clk; + const char *ref_clk_name; + struct clk_init_data clk_init = { + .ops = &mtk_hdmi_pll_ops, + .num_parents = 1, + .parent_names = (const char * const *)&ref_clk_name, + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, + }; + struct phy *phy; + struct phy_provider *phy_provider; + int ret; + + hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL); + if (!hdmi_phy) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdmi_phy->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(hdmi_phy->regs)) { + ret = PTR_ERR(hdmi_phy->regs); + dev_err(dev, "Failed to get memory resource: %d\n", ret); + return ret; + } + + ref_clk = devm_clk_get(dev, "pll_ref"); + if (IS_ERR(ref_clk)) { + ret = PTR_ERR(ref_clk); + dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n", + ret); + return ret; + } + ref_clk_name = __clk_get_name(ref_clk); + + ret = of_property_read_string(dev->of_node, "clock-output-names", + &clk_init.name); + if (ret < 0) { + dev_err(dev, "Failed to read clock-output-names: %d\n", ret); + return ret; + } + + hdmi_phy->pll_hw.init = &clk_init; + hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); + if (IS_ERR(hdmi_phy->pll)) { + ret = PTR_ERR(hdmi_phy->pll); + dev_err(dev, "Failed to register PLL: %d\n", ret); + return ret; + } + + ret = of_property_read_u32(dev->of_node, "mediatek,ibias", + &hdmi_phy->ibias); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret); + return ret; + } + + ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up", + &hdmi_phy->ibias_up); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret); + return ret; + } + + dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n"); + hdmi_phy->drv_imp_clk = 0x30; + hdmi_phy->drv_imp_d2 = 0x30; + hdmi_phy->drv_imp_d1 = 0x30; + hdmi_phy->drv_imp_d0 = 0x30; + + phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "Failed to create HDMI PHY\n"); + return PTR_ERR(phy); + } + phy_set_drvdata(phy, hdmi_phy); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + hdmi_phy->dev = dev; + return of_clk_add_provider(dev->of_node, of_clk_src_simple_get, + hdmi_phy->pll); +} + +static int mtk_hdmi_phy_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id mtk_hdmi_phy_match[] = { + { 
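Because probe() above registers the PLL with devm_clk_register() and exposes it through of_clk_add_provider(), downstream blocks drive it with the generic clk API. A minimal consumer-side sketch, assuming the consumer's DT node points at this provider (the function name is ours):

static int example_enable_tmds_pll(struct device *dev, unsigned long rate)
{
	struct clk *pll;
	int ret;

	pll = devm_clk_get(dev, NULL);	/* assumes a single, unnamed clock */
	if (IS_ERR(pll))
		return PTR_ERR(pll);

	/* CLK_SET_RATE_PARENT in clk_init above propagates this to "pll_ref" */
	ret = clk_set_rate(pll, rate);
	if (ret)
		return ret;

	/* ends up in mtk_hdmi_pll_prepare() via mtk_hdmi_pll_ops */
	return clk_prepare_enable(pll);
}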
.compatible = "mediatek,mt8173-hdmi-phy", }, + {}, +}; + +struct platform_driver mtk_hdmi_phy_driver = { + .probe = mtk_hdmi_phy_probe, + .remove = mtk_hdmi_phy_remove, + .driver = { + .name = "mediatek-hdmi-phy", + .of_match_table = mtk_hdmi_phy_match, + }, +}; + +MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index 3a1c5fbae54a..520e5e668d6c 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig @@ -1,11 +1,7 @@ config DRM_MGAG200 tristate "Kernel modesetting driver for MGA G200 server engines" depends on DRM && PCI - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM help This is a KMS driver for the MGA G200 server chips, it diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index ebb470ff7200..2b4b125eebc3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -101,7 +101,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = mgag200_gem_free_object, + .gem_free_object_unlocked = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 615cbb08ba29..13798b3e6beb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -17,8 +17,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb); - if (mga_fb->obj) - drm_gem_object_unreference_unlocked(mga_fb->obj); + + drm_gem_object_unreference_unlocked(mga_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index d347dca17267..6b21cb27e1cc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct mga_crtc *mga_crtc = to_mga_crtc(crtc); - int end = (start + size > MGAG200_LUT_SIZE) ? 
MGAG200_LUT_SIZE : start + size; int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { mga_crtc->lut_r[i] = red[i] >> 8; mga_crtc->lut_g[i] = green[i] >> 8; mga_crtc->lut_b[i] = blue[i] >> 8; } mga_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 9d5083d0f1ee..68268e55d595 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r { } -static int mgag200_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void mgag200_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = { .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, .init_mem_type = mgag200_bo_init_mem_type, .evict_flags = mgag200_bo_evict_flags, - .move = mgag200_bo_move, + .move = NULL, .verify_access = mgag200_bo_verify_access, .io_mem_reserve = &mgag200_ttm_io_mem_reserve, .io_mem_free = &mgag200_ttm_io_mem_free, diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 167a4971f47c..7c7a0314a756 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -10,6 +10,7 @@ config DRM_MSM select SHMEM select TMPFS select QCOM_SCM + select SND_SOC_HDMI_CODEC if SND_SOC default y help DRM/KMS driver for MSM/snapdragon. diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 60cb02624dc0..4e2806cf778c 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -35,6 +35,7 @@ msm-y := \ mdp/mdp5/mdp5_crtc.o \ mdp/mdp5/mdp5_encoder.o \ mdp/mdp5/mdp5_irq.o \ + mdp/mdp5/mdp5_mdss.o \ mdp/mdp5/mdp5_kms.o \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ @@ -45,6 +46,7 @@ msm-y := \ msm_fence.o \ msm_gem.o \ msm_gem_prime.o \ + msm_gem_shrinker.o \ msm_gem_submit.o \ msm_gpu.o \ msm_iommu.o \ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2aec27dbb5bb..f386f463278d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = gpu->rb; - unsigned i, ibs = 0; + unsigned i; for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { @@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); OUT_RING(ring, submit->cmd[i].iova); OUT_RING(ring, submit->cmd[i].size); - ibs++; + OUT_PKT2(ring); break; } } - /* on a320, at least, we seem to need to pad things out to an - * even number of qwords to avoid issue w/ CP hanging on wrap- - * around: - */ - if (ibs % 2) - OUT_PKT2(ring); - OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); OUT_RING(ring, submit->fence->seqno); @@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); + adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo); if (IS_ERR(adreno_gpu->memptrs)) { dev_err(drm->dev, "could not vmap 
memptrs\n"); return -ENOMEM; @@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *gpu) { if (gpu->memptrs_bo) { + if (gpu->memptrs) + msm_gem_put_vaddr(gpu->memptrs_bo); + if (gpu->memptrs_iova) msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); } release_firmware(gpu->pm4); diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 6edcd6f57e70..ec572f8389ed 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) struct platform_device *phy_pdev; struct device_node *phy_node; - phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0); + phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); if (!phy_node) { dev_err(&pdev->dev, "cannot find phy device\n"); return -ENXIO; diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 93c1ee094eac..63436d8ee470 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = { }, .bus_clk_names = dsi_v2_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), + .io_start = { 0x4700000, 0x5800000 }, + .num_dsi = 2, }; static const char * const dsi_6g_bus_clk_names[] = { @@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { }, .bus_clk_names = dsi_6g_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { 0xfd922800, 0xfd922b00 }, + .num_dsi = 2, }; static const char * const dsi_8916_bus_clk_names[] = { @@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { }, .bus_clk_names = dsi_8916_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names), + .io_start = { 0x1a98000 }, + .num_dsi = 1, }; static const struct msm_dsi_config msm8994_dsi_cfg = { @@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { }, .bus_clk_names = dsi_6g_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { 0xfd998000, 0xfd9a0000 }, + .num_dsi = 2, }; static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index a68c836744a3..eeacc3232494 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -34,6 +34,8 @@ struct msm_dsi_config { struct dsi_reg_config reg_cfg; const char * const *bus_clk_names; const int num_bus_clks; + const resource_size_t io_start[DSI_MAX]; + const int num_dsi; }; struct msm_dsi_cfg_handler { diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index a3e47ad83eb3..f05ed0e1f3d6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, } if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { - data = msm_gem_vaddr(msm_host->tx_gem_obj); + data = msm_gem_get_vaddr(msm_host->tx_gem_obj); if (IS_ERR(data)) { ret = PTR_ERR(data); pr_err("%s: get vaddr failed, %d\n", __func__, ret); @@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, if (packet.size < len) memset(data + packet.size, 0xff, len - packet.size); + if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) + msm_gem_put_vaddr(msm_host->tx_gem_obj); + return len; } @@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct 
msm_dsi_host *msm_host, u32 lane_map[4]; int ret, i, len, num_lanes; - prop = of_find_property(ep, "qcom,data-lane-map", &len); + prop = of_find_property(ep, "data-lanes", &len); if (!prop) { dev_dbg(dev, "failed to find data lane mapping\n"); return -EINVAL; @@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, msm_host->num_data_lanes = num_lanes; - ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map, + ret = of_property_read_u32_array(ep, "data-lanes", lane_map, num_lanes); if (ret) { dev_err(dev, "failed to read lane data\n"); @@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, const int *swap = supported_data_lane_swaps[i]; int j; + /* + * the data-lanes array we get from DT has a logical->physical + * mapping. The "data lane swap" register field represents + * supported configurations in a physical->logical mapping. + * Translate the DT mapping to what we understand and find a + * configuration that works. + */ for (j = 0; j < num_lanes; j++) { - if (swap[j] != lane_map[j]) + if (lane_map[j] < 0 || lane_map[j] > 3) + dev_err(dev, "bad physical lane entry %u\n", + lane_map[j]); + + if (swap[lane_map[j]] != j) break; } @@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) struct device_node *endpoint, *device_node; int ret; - ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id); - if (ret) { - dev_err(dev, "%s: host index not specified, ret=%d\n", - __func__, ret); - return ret; - } - /* - * Get the first endpoint node. In our case, dsi has one output port - * to which the panel is connected. Don't return an error if a port - * isn't defined. It's possible that there is nothing connected to - * the dsi output. + * Get the endpoint of the output port of the DSI host. In our case, + * this is mapped to port number with reg = 1. Don't return an error if + * the remote endpoint isn't defined. It's possible that there is + * nothing connected to the dsi output. 
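To make the translation above concrete: data-lanes = <3 0 1 2> in DT means logical lane j is wired to physical lane lane_map[j], while each entry of supported_data_lane_swaps[] lists, per physical lane, the logical lane the hardware would route there. The loop accepts a swap entry only when the two describe the same wiring, i.e. swap[lane_map[j]] == j for every lane. A self-contained sketch of that check (the helper name is ours):

static bool lane_maps_equivalent(const int *swap, const u32 *lane_map,
				 int num_lanes)
{
	int j;

	for (j = 0; j < num_lanes; j++) {
		/* swap[] is physical->logical, lane_map[] is logical->physical */
		if (swap[lane_map[j]] != j)
			return false;
	}

	return true;
}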
*/ - endpoint = of_graph_get_next_endpoint(np, NULL); + endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); if (!endpoint) { dev_dbg(dev, "%s: no endpoint\n", __func__); return 0; @@ -1648,6 +1655,25 @@ err: return ret; } +static int dsi_host_get_id(struct msm_dsi_host *msm_host) +{ + struct platform_device *pdev = msm_host->pdev; + const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; + struct resource *res; + int i; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl"); + if (!res) + return -EINVAL; + + for (i = 0; i < cfg->num_dsi; i++) { + if (cfg->io_start[i] == res->start) + return i; + } + + return -EINVAL; +} + int msm_dsi_host_init(struct msm_dsi *msm_dsi) { struct msm_dsi_host *msm_host = NULL; @@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } + msm_host->id = dsi_host_get_id(msm_host); + if (msm_host->id < 0) { + ret = msm_host->id; + pr_err("%s: unable to identify DSI host index\n", __func__); + goto fail; + } + /* fixup base address by io offset */ msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; @@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, } msm_host->mode = drm_mode_duplicate(msm_host->dev, mode); - if (IS_ERR(msm_host->mode)) { + if (!msm_host->mode) { pr_err("%s: cannot duplicate mode\n", __func__); - return PTR_ERR(msm_host->mode); + return -ENOMEM; } return 0; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index e2f42d8ea294..f39386ed75e4 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = { {} }; +/* + * Currently, we only support one SoC for each PHY type. When we have multiple + * SoCs for the same PHY, we can try to make the index searching a bit more + * clever. + */ +static int dsi_phy_get_id(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + const struct msm_dsi_phy_cfg *cfg = phy->cfg; + struct resource *res; + int i; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy"); + if (!res) + return -EINVAL; + + for (i = 0; i < cfg->num_dsi_phy; i++) { + if (cfg->io_start[i] == res->start) + return i; + } + + return -EINVAL; +} + static int dsi_phy_driver_probe(struct platform_device *pdev) { struct msm_dsi_phy *phy; @@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->cfg = match->data; phy->pdev = pdev; - ret = of_property_read_u32(dev->of_node, - "qcom,dsi-phy-index", &phy->id); - if (ret) { - dev_err(dev, "%s: PHY index not specified, %d\n", + phy->id = dsi_phy_get_id(phy); + if (phy->id < 0) { + ret = phy->id; + dev_err(dev, "%s: couldn't identify PHY index, %d\n", __func__, ret); goto fail; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 0d54ed00386d..f24a85439b94 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg { * Fill default H/W values in illegal cells, eg. cell {0, 1}. 
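dsi_host_get_id() and dsi_phy_get_id() above replace the removed "qcom,dsi-host-index" and "qcom,dsi-phy-index" DT properties with the same idiom: look up the instance index by matching the start address of the block's register resource against a per-SoC table. A generic sketch of the pattern (the function name is ours):

static int index_from_io_start(struct platform_device *pdev,
			       const char *res_name,
			       const resource_size_t *io_start, int count)
{
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	if (!res)
		return -EINVAL;

	for (i = 0; i < count; i++)
		if (io_start[i] == res->start)
			return i;	/* instance index on this SoC */

	return -EINVAL;			/* unknown base for this SoC */
}

This is why each msm_dsi_config and msm_dsi_phy_cfg in the hunks that follow grows io_start[] and num_dsi/num_dsi_phy fields.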
*/ bool src_pll_truthtable[DSI_MAX][DSI_MAX]; + const resource_size_t io_start[DSI_MAX]; + const int num_dsi_phy; }; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index f4bc11af849a..c757e2070cac 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .ops = { .enable = dsi_20nm_phy_enable, .disable = dsi_20nm_phy_disable, - } + }, + .io_start = { 0xfd998300, 0xfd9a0300 }, + .num_dsi_phy = 2, }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 96d1852af418..63d7fba31380 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, }, + .io_start = { 0xfd922b00, 0xfd923100 }, + .num_dsi_phy = 2, }; const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { @@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, }, + .io_start = { 0x1a98500 }, + .num_dsi_phy = 1, }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 213355a3e767..7bdb9de54968 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, }, + .io_start = { 0x4700300, 0x5800300 }, + .num_dsi_phy = 2, }; diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index 72360cd038c0..5960628ceb93 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -edp_connector_best_encoder(struct drm_connector *connector) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - - DBG(""); - return edp_connector->edp->encoder; -} - static const struct drm_connector_funcs edp_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = edp_connector_detect, @@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = { static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { .get_modes = edp_connector_get_modes, .mode_valid = edp_connector_mode_valid, - .best_encoder = edp_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 51b9ea552f97..973720792236 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -19,6 +19,7 @@ #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <sound/hdmi-codec.h> #include "hdmi.h" void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) @@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name) return gpio; } +/* + * HDMI audio codec callbacks + */ +static int msm_hdmi_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct hdmi *hdmi = dev_get_drvdata(dev); + unsigned int chan; + unsigned int 
channel_allocation = 0; + unsigned int rate; + unsigned int level_shift = 0; /* 0dB */ + bool down_mix = false; + + dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate, + params->sample_width, params->cea.channels); + + switch (params->cea.channels) { + case 2: + /* FR and FL speakers */ + channel_allocation = 0; + chan = MSM_HDMI_AUDIO_CHANNEL_2; + break; + case 4: + /* FC, LFE, FR and FL speakers */ + channel_allocation = 0x3; + chan = MSM_HDMI_AUDIO_CHANNEL_4; + break; + case 6: + /* RR, RL, FC, LFE, FR and FL speakers */ + channel_allocation = 0x0B; + chan = MSM_HDMI_AUDIO_CHANNEL_6; + break; + case 8: + /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */ + channel_allocation = 0x1F; + chan = MSM_HDMI_AUDIO_CHANNEL_8; + break; + default: + return -EINVAL; + } + + switch (params->sample_rate) { + case 32000: + rate = HDMI_SAMPLE_RATE_32KHZ; + break; + case 44100: + rate = HDMI_SAMPLE_RATE_44_1KHZ; + break; + case 48000: + rate = HDMI_SAMPLE_RATE_48KHZ; + break; + case 88200: + rate = HDMI_SAMPLE_RATE_88_2KHZ; + break; + case 96000: + rate = HDMI_SAMPLE_RATE_96KHZ; + break; + case 176400: + rate = HDMI_SAMPLE_RATE_176_4KHZ; + break; + case 192000: + rate = HDMI_SAMPLE_RATE_192KHZ; + break; + default: + dev_err(dev, "rate[%d] not supported!\n", + params->sample_rate); + return -EINVAL; + } + + msm_hdmi_audio_set_sample_rate(hdmi, rate); + msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation, + level_shift, down_mix); + + return 0; +} + +static void msm_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct hdmi *hdmi = dev_get_drvdata(dev); + + msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0); +} + +static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = { + .hw_params = msm_hdmi_audio_hw_params, + .audio_shutdown = msm_hdmi_audio_shutdown, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &msm_hdmi_audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, +}; + +static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev) +{ + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(hdmi->audio_pdev); +} + static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); @@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) static struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; struct device_node *of_node = dev->of_node; - int i; + int i, err; hdmi_cfg = (struct hdmi_platform_config *) of_device_get_match_data(dev); @@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(hdmi); priv->hdmi = hdmi; + err = msm_hdmi_register_audio_driver(hdmi, dev); + if (err) { + DRM_ERROR("Failed to attach an audio codec %d\n", err); + hdmi->audio_pdev = NULL; + } + return 0; } @@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master, struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; if (priv->hdmi) { + if (priv->hdmi->audio_pdev) + platform_device_unregister(priv->hdmi->audio_pdev); + msm_hdmi_destroy(priv->hdmi); priv->hdmi = NULL; } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index bc7ba0bdee07..accc9a61611d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl; struct hdmi { 
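The two switch statements in msm_hdmi_audio_hw_params() above are lookup tables in disguise. For reference, the CEA-861 channel-allocation codes chosen there, plus a sketch of the sample-rate mapping as a table (the array is ours; the HDMI_SAMPLE_RATE_* values are the driver's own, defined in hdmi.h just below):

/*
 * channels  CA    speakers (per CEA-861)
 *    2      0x00  FL FR
 *    4      0x03  FL FR LFE FC
 *    6      0x0B  FL FR LFE FC RL RR
 *    8      0x1F  FL FR LFE FC RL RR FLC FRC
 */
static const struct {
	unsigned int hz;
	unsigned int hw;
} msm_hdmi_rate_map[] = {
	{  32000, HDMI_SAMPLE_RATE_32KHZ },
	{  44100, HDMI_SAMPLE_RATE_44_1KHZ },
	{  48000, HDMI_SAMPLE_RATE_48KHZ },
	{  88200, HDMI_SAMPLE_RATE_88_2KHZ },
	{  96000, HDMI_SAMPLE_RATE_96KHZ },
	{ 176400, HDMI_SAMPLE_RATE_176_4KHZ },
	{ 192000, HDMI_SAMPLE_RATE_192KHZ },
};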
struct drm_device *dev; struct platform_device *pdev; + struct platform_device *audio_pdev; const struct hdmi_platform_config *config; @@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev) /* * audio: */ +/* Supported HDMI Audio channels and rates */ +#define MSM_HDMI_AUDIO_CHANNEL_2 0 +#define MSM_HDMI_AUDIO_CHANNEL_4 1 +#define MSM_HDMI_AUDIO_CHANNEL_6 2 +#define MSM_HDMI_AUDIO_CHANNEL_8 3 + +#define HDMI_SAMPLE_RATE_32KHZ 0 +#define HDMI_SAMPLE_RATE_44_1KHZ 1 +#define HDMI_SAMPLE_RATE_48KHZ 2 +#define HDMI_SAMPLE_RATE_88_2KHZ 3 +#define HDMI_SAMPLE_RATE_96KHZ 4 +#define HDMI_SAMPLE_RATE_176_4KHZ 5 +#define HDMI_SAMPLE_RATE_192KHZ 6 int msm_hdmi_audio_update(struct hdmi *hdmi); int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b15d72683112..a2515b466ce5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, return 0; } -static struct drm_encoder * -msm_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - return hdmi_connector->hdmi->encoder; -} - static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = hdmi_connector_detect, @@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { .get_modes = msm_hdmi_connector_get_modes, .mode_valid = msm_hdmi_connector_mode_valid, - .best_encoder = msm_hdmi_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c index 0baaaaabd002..6e767979aab3 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi) void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) { - if (hdmi && hdmi->hdcp_ctrl) { + if (hdmi) { kfree(hdmi->hdcp_ctrl); hdmi->hdcp_ctrl = NULL; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index 35ad78a1dc1c..24258e3025e3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -23,7 +23,6 @@ struct mdp4_dtv_encoder { struct drm_encoder base; - struct clk *src_clk; struct clk *hdmi_clk; struct clk *mdp_clk; unsigned long int pixclock; @@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) */ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); - clk_disable_unprepare(mdp4_dtv_encoder->src_clk); clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); @@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) bs_set(mdp4_dtv_encoder, 1); - DBG("setting src_clk=%lu", pc); + DBG("setting mdp_clk=%lu", pc); - ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); + ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); if (ret) - dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); - clk_prepare_enable(mdp4_dtv_encoder->src_clk); - ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); - if (ret) - dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + dev_err(dev->dev, 
"failed to set mdp_clk to %lu: %d\n", + pc, ret); + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); if (ret) dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); mdp4_dtv_encoder->enabled = true; @@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) { struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - return clk_round_rate(mdp4_dtv_encoder->src_clk, rate); + return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate); } /* initialize encoder */ @@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); - mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk"); - if (IS_ERR(mdp4_dtv_encoder->src_clk)) { - dev_err(dev->dev, "failed to get src_clk\n"); - ret = PTR_ERR(mdp4_dtv_encoder->src_clk); - goto fail; - } - mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { dev_err(dev->dev, "failed to get hdmi_clk\n"); @@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) goto fail; } - mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk"); + mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { - dev_err(dev->dev, "failed to get mdp_clk\n"); + dev_err(dev->dev, "failed to get tv_clk\n"); ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); goto fail; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 67442d50a6c2..7b39e89fbc2b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -106,31 +106,27 @@ out: static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; mdp4_enable(mdp4_kms); /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_get(crtc); - } } static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_put(crtc); - } mdp4_disable(mdp4_kms); } @@ -162,6 +158,7 @@ static const char * const iommu_ports[] = { static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct device *dev = mdp4_kms->dev->dev; struct msm_mmu *mmu = mdp4_kms->mmu; if (mmu) { @@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); - if (mdp4_kms->blank_cursor_bo) - drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); + 
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); + + if (mdp4_kms->rpm_enabled) + pm_runtime_disable(dev); + kfree(mdp4_kms); } @@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct mdp4_kms *mdp4_kms; struct msm_kms *kms = NULL; struct msm_mmu *mmu; - int ret; + int irq, ret; mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); if (!mdp4_kms) { @@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + dev_err(dev->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + kms->irq = irq; + /* NOTE: driver for this regulator still missing upstream.. use * _get_exclusive() and ignore the error if it does not exist * (and hope that the bootloader left it on for us) @@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } - mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk"); + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); if (IS_ERR(mdp4_kms->axi_clk)) { dev_err(dev->dev, "failed to get axi_clk\n"); ret = PTR_ERR(mdp4_kms->axi_clk); @@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) clk_set_rate(mdp4_kms->clk, config->max_clk); clk_set_rate(mdp4_kms->lut_clk, config->max_clk); + pm_runtime_enable(dev->dev); + mdp4_kms->rpm_enabled = true; + /* make sure things are off before attaching iommu (bootloader could * have left things on, in which case we'll start getting faults if * we don't disable): diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index c5d045d5680d..25fb83997119 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -47,6 +47,8 @@ struct mdp4_kms { struct mdp_irq error_handler; + bool rpm_enabled; + /* empty/blank cursor bo to use when cursor is "disabled" */ struct drm_gem_object *blank_cursor_bo; uint32_t blank_cursor_iova; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index 2648cd7631ef..353429b05733 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -mdp4_lvds_connector_best_encoder(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - return mdp4_lvds_connector->encoder; -} - static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = mdp4_lvds_connector_detect, @@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { .get_modes = mdp4_lvds_connector_get_modes, .mode_valid = mdp4_lvds_connector_mode_valid, - .best_encoder = mdp4_lvds_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index b275ce11b24b..ca6ca30650a0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 
2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2015 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) +- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) + +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) @@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) #define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 #define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 -static inline uint32_t __offset_MDP(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->mdp.base[0]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); } - -static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); } -#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff -#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val) +#define REG_MDP5_HW_VERSION 0x00000000 +#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff +#define MDP5_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val) { - return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK; + return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK; } -#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000 -#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val) +#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDP5_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val) { - return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK; + return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK; } -#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000 -#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val) +#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDP5_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val) { - return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK; + return ((val) << 
MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK; } -static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); } -#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff -#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0 -static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) +#define REG_MDP5_DISP_INTF_SEL 0x00000004 +#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff +#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) { - return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; + return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; } -#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 -#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8 -static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) +#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 +#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) { - return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; + return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; } -#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 -#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16 -static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) +#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 +#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) { - return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; + return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; } -#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000 -#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24 -static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) +#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 +#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) { - return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; + return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; } -static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); } +#define REG_MDP5_INTR_EN 0x00000010 -static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); } +#define REG_MDP5_INTR_STATUS 0x00000014 -static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); } +#define REG_MDP5_INTR_CLEAR 0x00000018 -static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); } +#define REG_MDP5_HIST_INTR_EN 0x0000001c -static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); } +#define REG_MDP5_HIST_INTR_STATUS 0x00000020 -static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); } +#define REG_MDP5_HIST_INTR_CLEAR 0x00000024 -static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); } -#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 +#define REG_MDP5_SPARE_0 0x00000028 +#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 -static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t 
i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } +static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; } -static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) +static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; } +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; } -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; } -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; } -static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } +static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; } -static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) +static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; } +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK; + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; } -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK; + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & 
MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; } -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) { - return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK; + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; } static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) @@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) default: return INVALID_IDX(idx); } } -static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); } +static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } -static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } +static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } -static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } -#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff -#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0 -static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val) +static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } +#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff +#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 +static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) { - return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK; + return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; } -#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000 -#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 -#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 -#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 +#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 -static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); } +#define REG_MDP5_SPLIT_DPL_EN 0x000002f4 -static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); } -#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 -#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 -#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 +#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 +#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 -static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); } -#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 -#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 -#define 
MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 -#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 +#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 +#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 static inline uint32_t __offset_CTL(uint32_t idx) { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 57f73f0c120d..ac9e4cde1380 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = { .name = "msm8x74v1", .mdp = { .count = 1, - .base = { 0x00100 }, .caps = MDP_CAP_SMP | 0, }, @@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = { }, .ctl = { .count = 5, - .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, .flush_hw_mask = 0x0003ffff, }, .pipe_vig = { .count = 3, - .base = { 0x01200, 0x01600, 0x01a00 }, + .base = { 0x01100, 0x01500, 0x01900 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | @@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = { }, .pipe_rgb = { .count = 3, - .base = { 0x01e00, 0x02200, 0x02600 }, + .base = { 0x01d00, 0x02100, 0x02500 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | @@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = { }, .pipe_dma = { .count = 2, - .base = { 0x02a00, 0x02e00 }, + .base = { 0x02900, 0x02d00 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 0, }, .lm = { .count = 5, - .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, .nb_stages = 5, }, .dspp = { .count = 3, - .base = { 0x04600, 0x04a00, 0x04e00 }, + .base = { 0x04500, 0x04900, 0x04d00 }, }, .pp = { .count = 3, - .base = { 0x21b00, 0x21c00, 0x21d00 }, + .base = { 0x21a00, 0x21b00, 0x21c00 }, }, .intf = { - .base = { 0x21100, 0x21300, 0x21500, 0x21700 }, + .base = { 0x21000, 0x21200, 0x21400, 0x21600 }, .connect = { [0] = INTF_eDP, [1] = INTF_DSI, @@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = { .name = "msm8x74", .mdp = { .count = 1, - .base = { 0x00100 }, .caps = MDP_CAP_SMP | 0, }, @@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = { }, .ctl = { .count = 5, - .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, .flush_hw_mask = 0x0003ffff, }, .pipe_vig = { .count = 3, - .base = { 0x01200, 0x01600, 0x01a00 }, + .base = { 0x01100, 0x01500, 0x01900 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 3, - .base = { 0x01e00, 0x02200, 0x02600 }, + .base = { 0x01d00, 0x02100, 0x02500 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, - .base = { 0x02a00, 0x02e00 }, + .base = { 0x02900, 0x02d00 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 5, - .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, .nb_stages = 5, .max_width = 2048, .max_height = 0xFFFF, }, .dspp = { .count = 3, - .base = { 0x04600, 0x04a00, 0x04e00 }, + .base = { 0x04500, 0x04900, 0x04d00 }, }, .ad = { .count = 2, - .base = { 0x13100, 0x13300 }, + 
.base = { 0x13000, 0x13200 }, }, .pp = { .count = 3, - .base = { 0x12d00, 0x12e00, 0x12f00 }, + .base = { 0x12c00, 0x12d00, 0x12e00 }, }, .intf = { - .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, + .base = { 0x12400, 0x12600, 0x12800, 0x12a00 }, .connect = { [0] = INTF_eDP, [1] = INTF_DSI, @@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = { .name = "apq8084", .mdp = { .count = 1, - .base = { 0x00100 }, .caps = MDP_CAP_SMP | 0, }, @@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = { }, .ctl = { .count = 5, - .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, .flush_hw_mask = 0x003fffff, }, .pipe_vig = { .count = 4, - .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, + .base = { 0x01100, 0x01500, 0x01900, 0x01d00 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 4, - .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, + .base = { 0x02100, 0x02500, 0x02900, 0x02d00 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, - .base = { 0x03200, 0x03600 }, + .base = { 0x03100, 0x03500 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 6, - .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, + .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, .nb_stages = 5, .max_width = 2048, .max_height = 0xFFFF, }, .dspp = { .count = 4, - .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 }, + .base = { 0x05100, 0x05500, 0x05900, 0x05d00 }, }, .ad = { .count = 3, - .base = { 0x13500, 0x13700, 0x13900 }, + .base = { 0x13400, 0x13600, 0x13800 }, }, .pp = { .count = 4, - .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, + .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 }, }, .intf = { - .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, + .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 }, .connect = { [0] = INTF_eDP, [1] = INTF_DSI, @@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .name = "msm8x16", .mdp = { .count = 1, - .base = { 0x01000 }, + .base = { 0x0 }, .caps = MDP_CAP_SMP | 0, }, @@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = { }, .ctl = { .count = 5, - .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, .flush_hw_mask = 0x4003ffff, }, .pipe_vig = { .count = 1, - .base = { 0x05000 }, + .base = { 0x04000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 2, - .base = { 0x15000, 0x17000 }, + .base = { 0x14000, 0x16000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 1, - .base = { 0x25000 }, + .base = { 0x24000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 2, /* LM0 and LM3 */ - .base = { 0x45000, 0x48000 }, + .base = { 0x44000, 0x47000 }, .nb_stages = 5, .max_width = 2048, .max_height = 0xFFFF, }, .dspp = { .count = 1, - .base = { 0x55000 }, + .base = { 0x54000 }, }, .intf = { - .base = { 0x00000, 0x6b800 }, + .base = { 0x00000, 0x6a800 }, .connect = { [0] = INTF_DISABLED, [1] = INTF_DSI, @@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = { .name = "msm8x94", .mdp = { .count = 1, - .base = { 0x01000 }, .caps = MDP_CAP_SMP | 0, }, @@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = { }, .ctl = { .count 
= 5, - .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, .flush_hw_mask = 0xf0ffffff, }, .pipe_vig = { .count = 4, - .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | MDP_PIPE_CAP_DECIMATION, }, .pipe_rgb = { .count = 4, - .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 2, - .base = { 0x25000, 0x27000 }, + .base = { 0x24000, 0x26000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, }, .lm = { .count = 6, - .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, .nb_stages = 8, .max_width = 2048, .max_height = 0xFFFF, }, .dspp = { .count = 4, - .base = { 0x55000, 0x57000, 0x59000, 0x5b000 }, + .base = { 0x54000, 0x56000, 0x58000, 0x5a000 }, }, .ad = { .count = 3, - .base = { 0x79000, 0x79800, 0x7a000 }, + .base = { 0x78000, 0x78800, 0x79000 }, }, .pp = { .count = 4, - .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, }, .intf = { - .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, .connect = { [0] = INTF_DISABLED, [1] = INTF_DSI, @@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = { .name = "msm8x96", .mdp = { .count = 1, - .base = { 0x01000 }, .caps = MDP_CAP_DSC | MDP_CAP_CDM | 0, }, .ctl = { .count = 5, - .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, .flush_hw_mask = 0xf4ffffff, }, .pipe_vig = { .count = 4, - .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | @@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = { }, .pipe_rgb = { .count = 4, - .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SCALE | @@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = { }, .pipe_dma = { .count = 2, - .base = { 0x25000, 0x27000 }, + .base = { 0x24000, 0x26000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | MDP_PIPE_CAP_SW_PIX_EXT | @@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = { }, .lm = { .count = 6, - .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, .nb_stages = 8, .max_width = 2560, .max_height = 0xFFFF, }, .dspp = { .count = 2, - .base = { 0x55000, 0x57000 }, + .base = { 0x54000, 0x56000 }, }, .ad = { .count = 3, - .base = { 0x79000, 0x79800, 0x7a000 }, + .base = { 0x78000, 0x78800, 0x79000 }, }, .pp = { .count = 4, - .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, }, .cdm = { .count = 1, - .base = { 0x7a200 }, + .base = { 0x79200 }, }, .dsc = { .count = 2, - .base = { 0x81000, 0x81400 }, + .base = { 0x80000, 0x80400 }, }, .intf = { - .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, .connect = { [0] = INTF_DISABLED, [1] = INTF_DSI, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c 
b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index 69094cb28103..c627ab6d0061 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, * start signal for the slave encoder */ if (intf_num == 1) - data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; + data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; else if (intf_num == 2) - data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; + data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; else return -EINVAL; /* Smart Panel, Sync mode */ - data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL; + data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; /* Make sure clocks are on when connectors calling this function. */ mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), - MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, + MDP5_SPLIT_DPL_LOWER_SMART_PANEL); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); mdp5_disable(mdp5_kms); return 0; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 88fe256c1931..fa2be7ce9468 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; + const struct drm_plane_state *pstate; int cnt = 0, i; DBG("%s: check", mdp5_crtc->name); @@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, * and that we don't have conflicting mixer stages: */ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - drm_atomic_crtc_state_for_each_plane(plane, state) { - struct drm_plane_state *pstate; + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { if (cnt >= (hw_cfg->lm.nb_stages)) { dev_err(dev->dev, "too many planes!\n"); return -EINVAL; } - pstate = state->state->plane_states[drm_plane_index(plane)]; - /* plane might not have changed, in which case take - * current state: - */ - if (!pstate) - pstate = plane->state; pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); @@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_gem_object *cursor_bo, *old_bo = NULL; uint32_t blendcfg, cursor_addr, stride; - int ret, bpp, lm; - unsigned int depth; + int ret, lm; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); uint32_t roi_w, roi_h; @@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; lm = mdp5_crtc->lm; - drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); - stride = width * (bpp >> 3); + stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); old_bo = mdp5_crtc->cursor.scanout_bo; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index 4e81ca4f964a..d021edc3b307 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms, u32 intf_sel; 
spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0)); + intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); switch (intf->num) { case 0: - intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; - intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type); + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type); break; case 1: - intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; - intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type); + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type); break; case 2: - intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; - intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type); + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type); break; case 3: - intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; - intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type); + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type); break; default: BUG(); break; } - mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel); + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); } @@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) if (!enable) { ctlx->pair = NULL; ctly->pair = NULL; - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); return 0; } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); @@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) ctlx->pair = ctly; ctly->pair = ctlx; - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), - MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, + MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 1d95f9fd9dc7..fe0c22230883 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, * to use the master's enable signal for the slave encoder. */ if (intf_num == 1) - data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC; + data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; else if (intf_num == 2) - data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC; + data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; else return -EINVAL; /* Make sure clocks are on when connectors calling this function. */ mdp5_enable(mdp5_kms); /* Dumb Panel, Sync mode */ - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 73bc3e312fd4..d53e5510fd7c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -15,7 +15,6 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
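A note on the register renames running through these hunks: the MDP5 mmio region now maps the MDP block itself rather than the whole MDSS wrapper, so the per-instance REG_MDP5_MDP_*(0) accessors collapse into flat REG_MDP5_* offsets, and every .base table above shifts down by the old MDP sub-block offset. The read-modify-write discipline around DISP_INTF_SEL is unchanged; a minimal sketch, where mdp5_update_intf_sel() is a hypothetical helper and not anything in the tree:

static void mdp5_update_intf_sel(struct mdp5_kms *mdp5_kms, u32 mask, u32 val)
{
	unsigned long flags;
	u32 intf_sel;

	/* resource_lock guards global registers such as DISP_INTF_SEL */
	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
	intf_sel = (intf_sel & ~mask) | (val & mask);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}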
*/ -#include <linux/irqdomain.h> #include <linux/irq.h> #include "msm_drv.h" @@ -24,9 +23,9 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, uint32_t old_irqmask) { - mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), - irqmask ^ (irqmask & old_irqmask)); - mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); } static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) @@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff); - mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); mdp5_disable(mdp5_kms); } @@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) MDP5_IRQ_INTF2_UNDER_RUN | MDP5_IRQ_INTF3_UNDER_RUN; + mdp5_enable(mdp5_kms); mdp_irq_register(mdp_kms, error_handler); + mdp5_disable(mdp5_kms); return 0; } @@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); mdp5_disable(mdp5_kms); } -static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) +irqreturn_t mdp5_irq(struct msm_kms *kms) { + struct mdp_kms *mdp_kms = to_mdp_kms(kms); struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; unsigned int id; uint32_t status, enable; - enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); - status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; - mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); + enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN); + status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable; + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); VERB("status=%08x", status); @@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) for (id = 0; id < priv->num_crtcs; id++) if (status & mdp5_crtc_vblank(priv->crtcs[id])) drm_handle_vblank(dev, id); -} - -irqreturn_t mdp5_irq(struct msm_kms *kms) -{ - struct mdp_kms *mdp_kms = to_mdp_kms(kms); - struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); - uint32_t intr; - - intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS); - - VERB("intr=%08x", intr); - - if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) { - mdp5_irq_mdp(mdp_kms); - intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP; - } - - while (intr) { - irq_hw_number_t hwirq = fls(intr) - 1; - generic_handle_irq(irq_find_mapping( - mdp5_kms->irqcontroller.domain, hwirq)); - intr &= ~(1 << hwirq); - } return IRQ_HANDLED; } @@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) mdp5_crtc_vblank(crtc), false); mdp5_disable(mdp5_kms); } - -/* - * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc) - * can register to get their irq's delivered - */ - -#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \ - MDSS_HW_INTR_STATUS_INTR_DSI1 | \ - MDSS_HW_INTR_STATUS_INTR_HDMI | \ - MDSS_HW_INTR_STATUS_INTR_EDP) - -static void mdp5_hw_mask_irq(struct irq_data *irqd) -{ - struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd); - smp_mb__before_atomic(); - 
clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask); - smp_mb__after_atomic(); -} - -static void mdp5_hw_unmask_irq(struct irq_data *irqd) -{ - struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd); - smp_mb__before_atomic(); - set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask); - smp_mb__after_atomic(); -} - -static struct irq_chip mdp5_hw_irq_chip = { - .name = "mdp5", - .irq_mask = mdp5_hw_mask_irq, - .irq_unmask = mdp5_hw_unmask_irq, -}; - -static int mdp5_hw_irqdomain_map(struct irq_domain *d, - unsigned int irq, irq_hw_number_t hwirq) -{ - struct mdp5_kms *mdp5_kms = d->host_data; - - if (!(VALID_IRQS & (1 << hwirq))) - return -EPERM; - - irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); - irq_set_chip_data(irq, mdp5_kms); - - return 0; -} - -static struct irq_domain_ops mdp5_hw_irqdomain_ops = { - .map = mdp5_hw_irqdomain_map, - .xlate = irq_domain_xlate_onecell, -}; - - -int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms) -{ - struct device *dev = mdp5_kms->dev->dev; - struct irq_domain *d; - - d = irq_domain_add_linear(dev->of_node, 32, - &mdp5_hw_irqdomain_ops, mdp5_kms); - if (!d) { - dev_err(dev, "mdp5 irq domain add failed\n"); - return -ENXIO; - } - - mdp5_kms->irqcontroller.enabled_mask = 0; - mdp5_kms->irqcontroller.domain = d; - - return 0; -} - -void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms) -{ - if (mdp5_kms->irqcontroller.domain) { - irq_domain_remove(mdp5_kms->irqcontroller.domain); - mdp5_kms->irqcontroller.domain = NULL; - } -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 484b4d15e71d..ed7143d35b25 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -16,6 +16,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
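With the MDSS-level interrupt demultiplexer moving out of this file (it reappears in the new mdp5_mdss.c further down), mdp5_irq() services only MDP interrupts, and the postinstall hook now brackets mdp_irq_register() with mdp5_enable()/mdp5_disable(): MDP registers may only be touched while the MDP clocks run. mdp5_hw_init() below follows the same discipline, adding runtime PM around the clock enable. A distilled, illustrative-only sketch of that bracketing (not the full init sequence):

static void mdp5_reset_intf_sel(struct mdp5_kms *mdp5_kms)
{
	struct platform_device *pdev = mdp5_kms->pdev;

	pm_runtime_get_sync(&pdev->dev);	/* power up the MDSS GDSC */
	mdp5_enable(mdp5_kms);			/* MDP clocks on */

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);

	mdp5_disable(mdp5_kms);			/* clocks off again */
	pm_runtime_put_sync(&pdev->dev);
}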
*/ +#include <linux/of_irq.h> #include "msm_drv.h" #include "msm_mmu.h" @@ -28,10 +29,11 @@ static const char *iommu_ports[] = { static int mdp5_hw_init(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_device *dev = mdp5_kms->dev; + struct platform_device *pdev = mdp5_kms->pdev; unsigned long flags; - pm_runtime_get_sync(dev->dev); + pm_runtime_get_sync(&pdev->dev); + mdp5_enable(mdp5_kms); /* Magic unknown register writes: * @@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms) */ spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0); + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); mdp5_ctlm_hw_reset(mdp5_kms->ctlm); - pm_runtime_put_sync(dev->dev); + mdp5_disable(mdp5_kms); + pm_runtime_put_sync(&pdev->dev); return 0; } @@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s { int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - int nplanes = mdp5_kms->dev->mode_config.num_total_plane; - - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + for_each_plane_in_state(state, plane, plane_state, i) mdp5_plane_complete_commit(plane, plane_state); - } mdp5_disable(mdp5_kms); } @@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms, return mdp5_encoder_set_split_display(encoder, slave_encoder); } -static void mdp5_destroy(struct msm_kms *kms) +static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct msm_mmu *mmu = mdp5_kms->mmu; - mdp5_irq_domain_fini(mdp5_kms); - if (mmu) { mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); mmu->funcs->destroy(mmu); } - - if (mdp5_kms->ctlm) - mdp5_ctlm_destroy(mdp5_kms->ctlm); - if (mdp5_kms->smp) - mdp5_smp_destroy(mdp5_kms->smp); - if (mdp5_kms->cfg) - mdp5_cfg_destroy(mdp5_kms->cfg); - - kfree(mdp5_kms); } static const struct mdp_kms_funcs kms_funcs = { @@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = { .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, - .destroy = mdp5_destroy, + .destroy = mdp5_kms_destroy, }, .set_irqmask = mdp5_set_irqmask, }; @@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* register our interrupt-controller for hdmi/eDP/dsi/etc - * to use for irqs routed through mdp: - */ - ret = mdp5_irq_domain_init(mdp5_kms); - if (ret) - goto fail; - /* construct CRTCs and their private planes: */ for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { struct drm_plane *plane; @@ -425,17 +404,17 @@ fail: return ret; } -static void read_hw_revision(struct mdp5_kms *mdp5_kms, - uint32_t *major, uint32_t *minor) +static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, + u32 *major, u32 *minor) { - uint32_t version; + u32 version; mdp5_enable(mdp5_kms); - version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION); + version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); mdp5_disable(mdp5_kms); - *major = FIELD(version, MDSS_HW_VERSION_MAJOR); - *minor = FIELD(version, MDSS_HW_VERSION_MINOR); + *major = FIELD(version, MDP5_HW_VERSION_MAJOR); + *minor = FIELD(version, MDP5_HW_VERSION_MINOR); DBG("MDP5 version v%d.%d", 
*major, *minor); } @@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) struct msm_kms *mdp5_kms_init(struct drm_device *dev) { - struct platform_device *pdev = dev->platformdev; - struct mdp5_cfg *config; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev; struct mdp5_kms *mdp5_kms; - struct msm_kms *kms = NULL; + struct mdp5_cfg *config; + struct msm_kms *kms; struct msm_mmu *mmu; - uint32_t major, minor; - int i, ret; + int irq, i, ret; - mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); - if (!mdp5_kms) { - dev_err(dev->dev, "failed to allocate kms\n"); - ret = -ENOMEM; + /* priv->kms would have been populated by the MDP5 driver */ + kms = priv->kms; + if (!kms) + return NULL; + + mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + + mdp_kms_init(&mdp5_kms->base, &kms_funcs); + + pdev = mdp5_kms->pdev; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (irq < 0) { + ret = irq; + dev_err(&pdev->dev, "failed to get irq: %d\n", ret); goto fail; } - spin_lock_init(&mdp5_kms->resource_lock); + kms->irq = irq; - mdp_kms_init(&mdp5_kms->base, &kms_funcs); + config = mdp5_cfg_get_config(mdp5_kms->cfg); - kms = &mdp5_kms->base.base; + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + mdp5_enable(mdp5_kms); + for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { + if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || + !config->hw->intf.base[i]) + continue; + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); - mdp5_kms->dev = dev; + mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); + } + mdp5_disable(mdp5_kms); + mdelay(16); - /* mdp5_kms->mmio actually represents the MDSS base address */ - mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); - if (IS_ERR(mdp5_kms->mmio)) { - ret = PTR_ERR(mdp5_kms->mmio); + if (config->platform.iommu) { + mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); + if (IS_ERR(mmu)) { + ret = PTR_ERR(mmu); + dev_err(&pdev->dev, "failed to init iommu: %d\n", ret); + iommu_domain_free(config->platform.iommu); + goto fail; + } + + ret = mmu->funcs->attach(mmu, iommu_ports, + ARRAY_SIZE(iommu_ports)); + if (ret) { + dev_err(&pdev->dev, "failed to attach iommu: %d\n", + ret); + mmu->funcs->destroy(mmu); + goto fail; + } + } else { + dev_info(&pdev->dev, + "no iommu, fallback to phys contig buffers for scanout\n"); + mmu = NULL; + } + mdp5_kms->mmu = mmu; + + mdp5_kms->id = msm_register_mmu(dev, mmu); + if (mdp5_kms->id < 0) { + ret = mdp5_kms->id; + dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret); goto fail; } - mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); - if (IS_ERR(mdp5_kms->vbif)) { - ret = PTR_ERR(mdp5_kms->vbif); + ret = modeset_init(mdp5_kms); + if (ret) { + dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); goto fail; } - mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); - if (IS_ERR(mdp5_kms->vdd)) { - ret = PTR_ERR(mdp5_kms->vdd); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = config->hw->lm.max_width; + dev->mode_config.max_height = config->hw->lm.max_height; + + dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; + dev->driver->get_scanout_position = mdp5_get_scanoutpos; + dev->driver->get_vblank_counter = mdp5_get_vblank_counter; + dev->max_vblank_count = 0xffffffff; + dev->vblank_disable_immediate = true; + + return kms; +fail: + if (kms) 
+ mdp5_kms_destroy(kms); + return ERR_PTR(ret); +} + +static void mdp5_destroy(struct platform_device *pdev) +{ + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + + if (mdp5_kms->ctlm) + mdp5_ctlm_destroy(mdp5_kms->ctlm); + if (mdp5_kms->smp) + mdp5_smp_destroy(mdp5_kms->smp); + if (mdp5_kms->cfg) + mdp5_cfg_destroy(mdp5_kms->cfg); + + if (mdp5_kms->rpm_enabled) + pm_runtime_disable(&pdev->dev); +} + +static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms; + struct mdp5_cfg *config; + u32 major, minor; + int ret; + + mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL); + if (!mdp5_kms) { + ret = -ENOMEM; goto fail; } - ret = regulator_enable(mdp5_kms->vdd); - if (ret) { - dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); + platform_set_drvdata(pdev, mdp5_kms); + + spin_lock_init(&mdp5_kms->resource_lock); + + mdp5_kms->dev = dev; + mdp5_kms->pdev = pdev; + + mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); + if (IS_ERR(mdp5_kms->mmio)) { + ret = PTR_ERR(mdp5_kms->mmio); goto fail; } @@ -635,9 +709,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true); - if (ret) - goto fail; ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); if (ret) goto fail; @@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) * rate first, then figure out hw revision, and then set a * more optimal rate: */ - clk_set_rate(mdp5_kms->src_clk, 200000000); + clk_set_rate(mdp5_kms->core_clk, 200000000); - read_hw_revision(mdp5_kms, &major, &minor); + pm_runtime_enable(&pdev->dev); + mdp5_kms->rpm_enabled = true; + + read_mdp_hw_revision(mdp5_kms, &major, &minor); mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); if (IS_ERR(mdp5_kms->cfg)) { @@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdp5_kms->caps = config->hw->mdp.caps; /* TODO: compute core clock rate at runtime */ - clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); + clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); /* * Some chipsets have a Shared Memory Pool (SMP), while others @@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } - /* make sure things are off before attaching iommu (bootloader could - * have left things on, in which case we'll start getting faults if - * we don't disable): - */ - mdp5_enable(mdp5_kms); - for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { - if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || - !config->hw->intf.base[i]) - continue; - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); + /* set uninit-ed kms */ + priv->kms = &mdp5_kms->base.base; - mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); - } - mdp5_disable(mdp5_kms); - mdelay(16); + return 0; +fail: + mdp5_destroy(pdev); + return ret; +} - if (config->platform.iommu) { - mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); - if (IS_ERR(mmu)) { - ret = PTR_ERR(mmu); - dev_err(dev->dev, "failed to init iommu: %d\n", ret); - iommu_domain_free(config->platform.iommu); - goto fail; - } +static int mdp5_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *ddev = dev_get_drvdata(master); + struct platform_device *pdev = to_platform_device(dev); - ret = mmu->funcs->attach(mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) { 
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret); - mmu->funcs->destroy(mmu); - goto fail; - } - } else { - dev_info(dev->dev, "no iommu, fallback to phys " - "contig buffers for scanout\n"); - mmu = NULL; - } - mdp5_kms->mmu = mmu; + DBG(""); - mdp5_kms->id = msm_register_mmu(dev, mmu); - if (mdp5_kms->id < 0) { - ret = mdp5_kms->id; - dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret); - goto fail; - } + return mdp5_init(pdev, ddev); +} - ret = modeset_init(mdp5_kms); - if (ret) { - dev_err(dev->dev, "modeset_init failed: %d\n", ret); - goto fail; - } +static void mdp5_unbind(struct device *dev, struct device *master, + void *data) +{ + struct platform_device *pdev = to_platform_device(dev); - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - dev->mode_config.max_width = config->hw->lm.max_width; - dev->mode_config.max_height = config->hw->lm.max_height; + mdp5_destroy(pdev); +} - dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; - dev->driver->get_scanout_position = mdp5_get_scanoutpos; - dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; - dev->vblank_disable_immediate = true; +static const struct component_ops mdp5_ops = { + .bind = mdp5_bind, + .unbind = mdp5_unbind, +}; - return kms; +static int mdp5_dev_probe(struct platform_device *pdev) +{ + DBG(""); + return component_add(&pdev->dev, &mdp5_ops); +} -fail: - if (kms) - mdp5_destroy(kms); - return ERR_PTR(ret); +static int mdp5_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_del(&pdev->dev, &mdp5_ops); + return 0; +} + +static const struct of_device_id mdp5_dt_match[] = { + { .compatible = "qcom,mdp5", }, + /* to support downstream DT files */ + { .compatible = "qcom,mdss_mdp", }, + {} +}; +MODULE_DEVICE_TABLE(of, mdp5_dt_match); + +static struct platform_driver mdp5_driver = { + .probe = mdp5_dev_probe, + .remove = mdp5_dev_remove, + .driver = { + .name = "msm_mdp", + .of_match_table = mdp5_dt_match, + }, +}; + +void __init msm_mdp_register(void) +{ + DBG(""); + platform_driver_register(&mdp5_driver); +} + +void __exit msm_mdp_unregister(void) +{ + DBG(""); + platform_driver_unregister(&mdp5_driver); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 9a25898239d3..03738927be10 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -31,6 +31,8 @@ struct mdp5_kms { struct drm_device *dev; + struct platform_device *pdev; + struct mdp5_cfg_handler *cfg; uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ @@ -43,29 +45,23 @@ struct mdp5_kms { struct mdp5_ctl_manager *ctlm; /* io/register spaces: */ - void __iomem *mmio, *vbif; - - struct regulator *vdd; + void __iomem *mmio; struct clk *axi_clk; struct clk *ahb_clk; - struct clk *src_clk; struct clk *core_clk; struct clk *lut_clk; struct clk *vsync_clk; /* * lock to protect access to global resources: ie., following register: - * - REG_MDP5_MDP_DISP_INTF_SEL + * - REG_MDP5_DISP_INTF_SEL */ spinlock_t resource_lock; - struct mdp_irq error_handler; + bool rpm_enabled; - struct { - volatile unsigned long enabled_mask; - struct irq_domain *domain; - } irqcontroller; + struct mdp_irq error_handler; }; #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c new file mode 100644 index 000000000000..d444a6901fff --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c @@ 
-0,0 +1,235 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/irqdomain.h> +#include <linux/irq.h> + +#include "msm_drv.h" +#include "mdp5_kms.h" + +/* + * If needed, this can become more specific: something like struct mdp5_mdss, + * which contains a 'struct msm_mdss base' member. + */ +struct msm_mdss { + struct drm_device *dev; + + void __iomem *mmio, *vbif; + + struct regulator *vdd; + + struct { + volatile unsigned long enabled_mask; + struct irq_domain *domain; + } irqcontroller; +}; + +static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data) +{ + msm_writel(data, mdss->mmio + reg); +} + +static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg) +{ + return msm_readl(mdss->mmio + reg); +} + +static irqreturn_t mdss_irq(int irq, void *arg) +{ + struct msm_mdss *mdss = arg; + u32 intr; + + intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS); + + VERB("intr=%08x", intr); + + while (intr) { + irq_hw_number_t hwirq = fls(intr) - 1; + + generic_handle_irq(irq_find_mapping( + mdss->irqcontroller.domain, hwirq)); + intr &= ~(1 << hwirq); + } + + return IRQ_HANDLED; +} + +/* + * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc) + * can register to get their irq's delivered + */ + +#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \ + MDSS_HW_INTR_STATUS_INTR_DSI0 | \ + MDSS_HW_INTR_STATUS_INTR_DSI1 | \ + MDSS_HW_INTR_STATUS_INTR_HDMI | \ + MDSS_HW_INTR_STATUS_INTR_EDP) + +static void mdss_hw_mask_irq(struct irq_data *irqd) +{ + struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static void mdss_hw_unmask_irq(struct irq_data *irqd) +{ + struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd); + + smp_mb__before_atomic(); + set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static struct irq_chip mdss_hw_irq_chip = { + .name = "mdss", + .irq_mask = mdss_hw_mask_irq, + .irq_unmask = mdss_hw_unmask_irq, +}; + +static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct msm_mdss *mdss = d->host_data; + + if (!(VALID_IRQS & (1 << hwirq))) + return -EPERM; + + irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq); + irq_set_chip_data(irq, mdss); + + return 0; +} + +static struct irq_domain_ops mdss_hw_irqdomain_ops = { + .map = mdss_hw_irqdomain_map, + .xlate = irq_domain_xlate_onecell, +}; + + +static int mdss_irq_domain_init(struct msm_mdss *mdss) +{ + struct device *dev = mdss->dev->dev; + struct irq_domain *d; + + d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops, + mdss); + if (!d) { + dev_err(dev, "mdss irq domain add failed\n"); + return -ENXIO; + } + + mdss->irqcontroller.enabled_mask = 0; + mdss->irqcontroller.domain = d; + + return 0; +} + +void msm_mdss_destroy(struct drm_device 
*dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_mdss *mdss = priv->mdss; + + if (!mdss) + return; + + irq_domain_remove(mdss->irqcontroller.domain); + mdss->irqcontroller.domain = NULL; + + regulator_disable(mdss->vdd); + + pm_runtime_put_sync(dev->dev); + + pm_runtime_disable(dev->dev); +} + +int msm_mdss_init(struct drm_device *dev) +{ + struct platform_device *pdev = dev->platformdev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_mdss *mdss; + int ret; + + DBG(""); + + if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss")) + return 0; + + mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL); + if (!mdss) { + ret = -ENOMEM; + goto fail; + } + + mdss->dev = dev; + + mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS"); + if (IS_ERR(mdss->mmio)) { + ret = PTR_ERR(mdss->mmio); + goto fail; + } + + mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); + if (IS_ERR(mdss->vbif)) { + ret = PTR_ERR(mdss->vbif); + goto fail; + } + + /* Regulator to enable GDSCs in downstream kernels */ + mdss->vdd = devm_regulator_get(dev->dev, "vdd"); + if (IS_ERR(mdss->vdd)) { + ret = PTR_ERR(mdss->vdd); + goto fail; + } + + ret = regulator_enable(mdss->vdd); + if (ret) { + dev_err(dev->dev, "failed to enable regulator vdd: %d\n", + ret); + goto fail; + } + + ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), + mdss_irq, 0, "mdss_isr", mdss); + if (ret) { + dev_err(dev->dev, "failed to init irq: %d\n", ret); + goto fail_irq; + } + + ret = mdss_irq_domain_init(mdss); + if (ret) { + dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret); + goto fail_irq; + } + + priv->mdss = mdss; + + pm_runtime_enable(dev->dev); + + /* + * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power + * domain. Remove this once runtime PM is adapted for all the devices. + */ + pm_runtime_get_sync(dev->dev); + + return 0; +fail_irq: + regulator_disable(mdss->vdd); +fail: + return ret; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 6f425c25d9fe..27d7b55b52c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -42,7 +42,7 @@ * * configured: * The block is allocated to some client, and assigned to that - * client in MDP5_MDP_SMP_ALLOC registers. + * client in MDP5_SMP_ALLOC registers. * * inuse: * The block is being actively used by a client. @@ -59,7 +59,7 @@ * mdp5_smp_commit. * * 2) mdp5_smp_configure(): - * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers + * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers * are configured for the union(pending, inuse) * Current pending is copied to configured. 
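For concreteness: three client IDs pack into each SMP ALLOC register, so block blk programs register blk / 3, client field blk % 3 (block 7, say, lands in ALLOC_W_REG(2), field 1). A compressed sketch of the update performed by update_smp_state() below, where SMP_ALLOC_CLIENT_MASK() and SMP_ALLOC_CLIENT() are hypothetical stand-ins for the explicit CLIENT0/1/2 mask macros used in the real switch:

static void smp_assign_block(struct mdp5_kms *mdp5_kms, u32 blk, u32 cid)
{
	int idx = blk / 3;	/* which ALLOC register */
	int fld = blk % 3;	/* which client slot inside it */
	u32 val;

	val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
	val &= ~SMP_ALLOC_CLIENT_MASK(fld);	/* hypothetical macros */
	val |= SMP_ALLOC_CLIENT(fld, cid);
	mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
	mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
}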
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run @@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp, int idx = blk / 3; int fld = blk % 3; - val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx)); + val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx)); switch (fld) { case 0: - val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; - val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid); + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); break; case 1: - val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; - val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid); + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); break; case 2: - val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; - val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid); + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); break; } - mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val); - mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val); + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); } } diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index e3892c263f27..4a8a6f1f1151 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, struct drm_atomic_state *old_state) { struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; struct msm_drm_private *priv = old_state->dev->dev_private; struct msm_kms *kms = priv->kms; - int ncrtcs = old_state->dev->mode_config.num_crtc; int i; - for (i = 0; i < ncrtcs; i++) { - crtc = old_state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { if (!crtc->state->enable) continue; @@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock) { struct msm_drm_private *priv = dev->dev_private; - int nplanes = dev->mode_config.num_total_plane; - int ncrtcs = dev->mode_config.num_crtc; struct msm_commit *c; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); @@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev, /* * Figure out what crtcs we have: */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; - c->crtc_mask |= (1 << drm_crtc_index(crtc)); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + c->crtc_mask |= drm_crtc_mask(crtc); /* * Figure out what fence to wait for: */ - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - - if ((plane->state->fb != new_state->fb) && new_state->fb) { - struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0); + for_each_plane_in_state(state, plane, plane_state, i) { + if ((plane->state->fb != plane_state->fb) && plane_state->fb) { + struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0); struct msm_gem_object *msm_obj = to_msm_bo(obj); - new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); + plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); } } @@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev, * the software 
side now. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9c654092ef78..26f859ec24b3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -21,6 +21,16 @@ #include "msm_gpu.h" #include "msm_kms.h" + +/* + * MSM driver version: + * - 1.0.0 - initial interface + * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers + */ +#define MSM_VERSION_MAJOR 1 +#define MSM_VERSION_MINOR 1 +#define MSM_VERSION_PATCHLEVEL 0 + static void msm_fb_output_poll_changed(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; @@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev) kfree(vbl_ev); } - drm_kms_helper_poll_fini(ddev); + msm_gem_shrinker_cleanup(ddev); - drm_connector_unregister_all(ddev); + drm_kms_helper_poll_fini(ddev); drm_dev_unregister(ddev); @@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev) flush_workqueue(priv->atomic_wq); destroy_workqueue(priv->atomic_wq); - if (kms) { - pm_runtime_disable(dev); + if (kms) kms->funcs->destroy(kms); - } if (gpu) { mutex_lock(&ddev->struct_mutex); @@ -239,6 +247,8 @@ static int msm_drm_uninit(struct device *dev) component_unbind_all(dev, ddev); + msm_mdss_destroy(ddev); + ddev->dev_private = NULL; drm_dev_unref(ddev); @@ -284,6 +294,7 @@ static int msm_init_vram(struct drm_device *dev) if (node) { struct resource r; ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; size = r.end - r.start; @@ -352,6 +363,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) } ddev->dev_private = priv; + priv->dev = ddev; + + ret = msm_mdss_init(ddev); + if (ret) { + kfree(priv); + drm_dev_unref(ddev); + return ret; + } priv->wq = alloc_ordered_workqueue("msm", 0); priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); @@ -367,6 +386,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) /* Bind all our sub-components: */ ret = component_bind_all(dev, ddev); if (ret) { + msm_mdss_destroy(ddev); kfree(priv); drm_dev_unref(ddev); return ret; @@ -376,9 +396,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto fail; + msm_gem_shrinker_init(ddev); + switch (get_mdp_ver(pdev)) { case 4: kms = mdp4_kms_init(ddev); + priv->kms = kms; break; case 5: kms = mdp5_kms_init(ddev); @@ -400,10 +423,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto fail; } - priv->kms = kms; - if (kms) { - pm_runtime_enable(dev); ret = kms->funcs->hw_init(kms); if (ret) { dev_err(dev, "kms hw init failed: %d\n", ret); @@ -419,24 +439,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto fail; } - pm_runtime_get_sync(dev); - ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); - pm_runtime_put_sync(dev); - if (ret < 0) { - dev_err(dev, "failed to install IRQ handler\n"); - goto fail; + if (kms) { + pm_runtime_get_sync(dev); + ret = drm_irq_install(ddev, kms->irq); + pm_runtime_put_sync(dev); + if (ret < 0) { + dev_err(dev, "failed to install IRQ handler\n"); + goto fail; + } } ret = drm_dev_register(ddev, 0); if (ret) goto fail; - ret = drm_connector_register_all(ddev); - if (ret) { - dev_err(dev, "failed to register connectors\n"); - goto fail; - } - drm_mode_config_reset(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION @@ -690,6 +706,44 @@ static int 
msm_ioctl_wait_fence(struct drm_device *dev, void *data, return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); } +static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_gem_madvise *args = data; + struct drm_gem_object *obj; + int ret; + + switch (args->madv) { + case MSM_MADV_DONTNEED: + case MSM_MADV_WILLNEED: + break; + default: + return -EINVAL; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) { + ret = -ENOENT; + goto unlock; + } + + ret = msm_gem_madvise(obj, args->madv); + if (ret >= 0) { + args->retained = ret; + ret = 0; + } + + drm_gem_object_unreference(obj); + +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), @@ -698,6 +752,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct vm_operations_struct vm_ops = { @@ -730,7 +785,6 @@ static struct drm_driver msm_driver = { .open = msm_open, .preclose = msm_preclose, .lastclose = msm_lastclose, - .set_busid = drm_platform_set_busid, .irq_handler = msm_irq, .irq_preinstall = msm_irq_preinstall, .irq_postinstall = msm_irq_postinstall, @@ -764,8 +818,9 @@ static struct drm_driver msm_driver = { .name = "msm", .desc = "MSM Snapdragon DRM", .date = "20130625", - .major = 1, - .minor = 0, + .major = MSM_VERSION_MAJOR, + .minor = MSM_VERSION_MINOR, + .patchlevel = MSM_VERSION_PATCHLEVEL, }; #ifdef CONFIG_PM_SLEEP @@ -805,22 +860,146 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } -static int add_components(struct device *dev, struct component_match **matchptr, - const char *name) +/* + * Identify what components need to be added by parsing what remote-endpoints + * our MDP output ports are connected to. In the case of LVDS on MDP4, there + * is no external component that we need to add since LVDS is within MDP4 + * itself. + */ +static int add_components_mdp(struct device *mdp_dev, + struct component_match **matchptr) { - struct device_node *np = dev->of_node; - unsigned i; + struct device_node *np = mdp_dev->of_node; + struct device_node *ep_node; + struct device *master_dev; + + /* + * on MDP4 based platforms, the MDP platform device is the component + * master that adds other display interface components to itself. + * + * on MDP5 based platforms, the MDSS platform device is the component + * master that adds MDP5 and other display interface components to + * itself. 
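The matches collected against that master only resolve once every component has shown up: the drm device binds when the last matched device registers itself. The consumer side of the contract, for a hypothetical interface driver (all names here are illustrative, not from this series), is just:

static int msm_intf_bind(struct device *dev, struct device *master, void *data)
{
	return 0;	/* wire the interface into the drm device here */
}

static void msm_intf_unbind(struct device *dev, struct device *master,
		void *data)
{
}

static const struct component_ops msm_intf_ops = {
	.bind = msm_intf_bind,
	.unbind = msm_intf_unbind,
};

static int msm_intf_probe(struct platform_device *pdev)
{
	/* defers drm bring-up until all matched components are present */
	return component_add(&pdev->dev, &msm_intf_ops);
}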
+ */ + if (of_device_is_compatible(np, "qcom,mdp4")) + master_dev = mdp_dev; + else + master_dev = mdp_dev->parent; - for (i = 0; ; i++) { - struct device_node *node; + for_each_endpoint_of_node(np, ep_node) { + struct device_node *intf; + struct of_endpoint ep; + int ret; - node = of_parse_phandle(np, name, i); - if (!node) - break; + ret = of_graph_parse_endpoint(ep_node, &ep); + if (ret) { + dev_err(mdp_dev, "unable to parse port endpoint\n"); + of_node_put(ep_node); + return ret; + } + + /* + * The LCDC/LVDS port on MDP4 is a special case where the + * remote-endpoint isn't a component that we need to add + */ + if (of_device_is_compatible(np, "qcom,mdp4") && + ep.port == 0) { + of_node_put(ep_node); + continue; + } + + /* + * It's okay if some of the ports don't have a remote endpoint + * specified. It just means that the port isn't connected to + * any external interface. + */ + intf = of_graph_get_remote_port_parent(ep_node); + if (!intf) { + of_node_put(ep_node); + continue; + } + + component_match_add(master_dev, matchptr, compare_of, intf); + + of_node_put(intf); + of_node_put(ep_node); + } + + return 0; +} + +static int compare_name_mdp(struct device *dev, void *data) +{ + return (strstr(dev_name(dev), "mdp") != NULL); +} + +static int add_display_components(struct device *dev, + struct component_match **matchptr) +{ + struct device *mdp_dev; + int ret; + + /* + * MDP5 based devices don't have a flat hierarchy. There is a top level + * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the + * children devices, find the MDP5 node, and then add the interfaces + * to our components list. + */ + if (of_device_is_compatible(dev->of_node, "qcom,mdss")) { + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) { + dev_err(dev, "failed to populate children devices\n"); + return ret; + } + + mdp_dev = device_find_child(dev, NULL, compare_name_mdp); + if (!mdp_dev) { + dev_err(dev, "failed to find MDSS MDP node\n"); + of_platform_depopulate(dev); + return -ENODEV; + } + + put_device(mdp_dev); - /* add the MDP component itself */ + component_match_add(dev, matchptr, compare_of, + mdp_dev->of_node); + } else { + /* MDP4 */ + mdp_dev = dev; } + ret = add_components_mdp(mdp_dev, matchptr); + if (ret) + of_platform_depopulate(dev); + + return ret; +} + +/* + * We don't know what's the best binding to link the gpu with the drm device. + * For now, we just hunt for all the possible gpus that we support, and add them + * as components.
+ */ +static const struct of_device_id msm_gpu_match[] = { + { .compatible = "qcom,adreno-3xx" }, + { .compatible = "qcom,kgsl-3d0" }, + { }, +}; + +static int add_gpu_components(struct device *dev, + struct component_match **matchptr) +{ + struct device_node *np; + + np = of_find_matching_node(NULL, msm_gpu_match); + if (!np) + return 0; + + component_match_add(dev, matchptr, compare_of, np); + + of_node_put(np); + return 0; } @@ -846,9 +1025,15 @@ static const struct component_master_ops msm_drm_ops = { static int msm_pdev_probe(struct platform_device *pdev) { struct component_match *match = NULL; + int ret; - add_components(&pdev->dev, &match, "connectors"); - add_components(&pdev->dev, &match, "gpus"); + ret = add_display_components(&pdev->dev, &match); + if (ret) + return ret; + + ret = add_gpu_components(&pdev->dev, &match); + if (ret) + return ret; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); @@ -857,20 +1042,14 @@ static int msm_pdev_probe(struct platform_device *pdev) static int msm_pdev_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &msm_drm_ops); + of_platform_depopulate(&pdev->dev); return 0; } -static const struct platform_device_id msm_id[] = { - { "mdp", 0 }, - { } -}; - static const struct of_device_id dt_match[] = { - { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */ - { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */ - /* to support downstream DT files */ - { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */ + { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */ + { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */ {} }; MODULE_DEVICE_TABLE(of, dt_match); @@ -883,12 +1062,12 @@ static struct platform_driver msm_platform_driver = { .of_match_table = dt_match, .pm = &msm_pm_ops, }, - .id_table = msm_id, }; static int __init msm_drm_register(void) { DBG("init"); + msm_mdp_register(); msm_dsi_register(); msm_edp_register(); msm_hdmi_register(); @@ -904,6 +1083,7 @@ static void __exit msm_drm_unregister(void) adreno_unregister(); msm_edp_unregister(); msm_dsi_unregister(); + msm_mdp_unregister(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 5b2963f32291..b4bc7f1ef717 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -46,6 +46,7 @@ struct msm_kms; struct msm_gpu; struct msm_mmu; +struct msm_mdss; struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; @@ -77,11 +78,16 @@ struct msm_vblank_ctrl { struct msm_drm_private { + struct drm_device *dev; + struct msm_kms *kms; /* subordinate devices, if present: */ struct platform_device *gpu_pdev; + /* top level MDSS wrapper device (for MDP5 only) */ + struct msm_mdss *mdss; + /* possibly this should be in the kms component, but it is * shared by both mdp4 and mdp5.. 
*/ @@ -147,6 +153,9 @@ struct msm_drm_private { struct drm_mm mm; } vram; + struct notifier_block vmap_notifier; + struct shrinker shrinker; + struct msm_vblank_ctrl vblank_ctrl; }; @@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); +void msm_gem_shrinker_init(struct drm_device *dev); +void msm_gem_shrinker_cleanup(struct drm_device *dev); + int msm_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); @@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj); -void *msm_gem_vaddr_locked(struct drm_gem_object *obj); -void *msm_gem_vaddr(struct drm_gem_object *obj); +void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); +void *msm_gem_get_vaddr(struct drm_gem_object *obj); +void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); +void msm_gem_put_vaddr(struct drm_gem_object *obj); +int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); +void msm_gem_purge(struct drm_gem_object *obj); +void msm_gem_vunmap(struct drm_gem_object *obj); int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive); void msm_gem_move_to_active(struct drm_gem_object *obj, @@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, } #endif +void __init msm_mdp_register(void); +void __exit msm_mdp_unregister(void); + #ifdef CONFIG_DEBUG_FS void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 461dc8b873f0..95cf8fe72ee5 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) for (i = 0; i < n; i++) { struct drm_gem_object *bo = msm_fb->planes[i]; - if (bo) - drm_gem_object_unreference_unlocked(bo); + + drm_gem_object_unreference_unlocked(bo); } kfree(msm_fb); } -static int msm_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { .create_handle = msm_framebuffer_create_handle, .destroy = msm_framebuffer_destroy, - .dirty = msm_framebuffer_dirty, }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index c6cf837c5193..ffd4a338ca12 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; - fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo); if (IS_ERR(fbi->screen_base)) { ret = PTR_ERR(fbi->screen_base); goto fail_unlock; @@ -188,21 +188,7 @@ fail: return ret; } -static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc, - u16 red, u16 green, u16 blue, int regno) -{ - DBG("fbdev: set gamma"); -} - -static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, int regno) -{ - DBG("fbdev: get gamma"); -} - static const 
struct drm_fb_helper_funcs msm_fb_helper_funcs = { - .gamma_set = msm_crtc_fb_gamma_set, - .gamma_get = msm_crtc_fb_gamma_get, .fb_probe = msm_fbdev_create, }; @@ -265,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev) /* this will free the backing object */ if (fbdev->fb) { + msm_gem_put_vaddr(fbdev->bo); drm_framebuffer_unregister_private(fbdev->fb); drm_framebuffer_remove(fbdev->fb); } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 69836f5685b1..6cd4af443139 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) return offset; } +static void +put_iova(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = obj->dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + int id; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { + struct msm_mmu *mmu = priv->mmus[id]; + if (mmu && msm_obj->domain[id].iova) { + uint32_t offset = msm_obj->domain[id].iova; + mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); + msm_obj->domain[id].iova = 0; + } + } +} + /* should be called under struct_mutex.. although it can be called * from atomic context without struct_mutex to acquire an extra * iova ref if you know one is already held. @@ -388,7 +408,7 @@ fail: return ret; } -void *msm_gem_vaddr_locked(struct drm_gem_object *obj) +void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); @@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) if (msm_obj->vaddr == NULL) return ERR_PTR(-ENOMEM); } + msm_obj->vmap_count++; return msm_obj->vaddr; } -void *msm_gem_vaddr(struct drm_gem_object *obj) +void *msm_gem_get_vaddr(struct drm_gem_object *obj) { void *ret; mutex_lock(&obj->dev->struct_mutex); - ret = msm_gem_vaddr_locked(obj); + ret = msm_gem_get_vaddr_locked(obj); mutex_unlock(&obj->dev->struct_mutex); return ret; } +void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + WARN_ON(msm_obj->vmap_count < 1); + msm_obj->vmap_count--; +} + +void msm_gem_put_vaddr(struct drm_gem_object *obj) +{ + mutex_lock(&obj->dev->struct_mutex); + msm_gem_put_vaddr_locked(obj); + mutex_unlock(&obj->dev->struct_mutex); +} + +/* Update madvise status, returns true if not purged, else + * false or -errno. + */ +int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + + if (msm_obj->madv != __MSM_MADV_PURGED) + msm_obj->madv = madv; + + return (msm_obj->madv != __MSM_MADV_PURGED); +} + +void msm_gem_purge(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!is_purgeable(msm_obj)); + WARN_ON(obj->import_attach); + + put_iova(obj); + + msm_gem_vunmap(obj); + + put_pages(obj); + + msm_obj->madv = __MSM_MADV_PURGED; + + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); + drm_gem_free_mmap_offset(obj); + + /* Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. 
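msm_gem_purge() is the reclaim half of the new MSM_GEM_MADVISE ioctl added earlier: userspace marks idle buffers DONTNEED, and before reuse must flip them back to WILLNEED and check retained to learn whether the contents survived. A hedged userspace sketch using libdrm; fd and bo_handle are assumed to exist:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int mark_purgeable(int fd, uint32_t bo_handle)
{
	struct drm_msm_gem_madvise req = {
		.handle = bo_handle,
		.madv = MSM_MADV_DONTNEED,	/* kernel may purge under pressure */
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
}

/* Before reusing the BO: issue MSM_MADV_WILLNEED the same way and, if
 * req.retained == 0, treat the contents as lost and reinitialize them. */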
+ * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*. + */ + shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); + + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, + 0, (loff_t)-1); +} + +void msm_gem_vunmap(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) + return; + + vunmap(msm_obj->vaddr); + msm_obj->vaddr = NULL; +} + /* must be called before _move_to_active().. */ int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive) @@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, struct msm_gpu *gpu, bool exclusive, struct fence *fence) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); msm_obj->gpu = gpu; if (exclusive) reservation_object_add_excl_fence(msm_obj->resv, fence); @@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct reservation_object_list *fobj; struct fence *fence; uint64_t off = drm_vma_node_start(&obj->vma_node); + const char *madv; WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); - seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n", + switch (msm_obj->madv) { + case __MSM_MADV_PURGED: + madv = " purged"; + break; + case MSM_MADV_DONTNEED: + madv = " purgeable"; + break; + case MSM_MADV_WILLNEED: + default: + madv = ""; + break; + } + + seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n", msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', obj->name, obj->refcount.refcount.counter, - off, msm_obj->vaddr, obj->size); + off, msm_obj->vaddr, obj->size, madv); rcu_read_lock(); fobj = rcu_dereference(robj->fence); @@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) void msm_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); - int id; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) list_del(&msm_obj->mm_list); - for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - struct msm_mmu *mmu = priv->mmus[id]; - if (mmu && msm_obj->domain[id].iova) { - uint32_t offset = msm_obj->domain[id].iova; - mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); - } - } + put_iova(obj); if (obj->import_attach) { if (msm_obj->vaddr) @@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, msm_obj->sgt); } else { - vunmap(msm_obj->vaddr); + msm_gem_vunmap(obj); put_pages(obj); } @@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->vram_node = (void *)&msm_obj[1]; msm_obj->flags = flags; + msm_obj->madv = MSM_MADV_WILLNEED; if (resv) { msm_obj->resv = resv; @@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, return obj; fail: - if (obj) - drm_gem_object_unreference(obj); - + drm_gem_object_unreference(obj); return ERR_PTR(ret); } @@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, return obj; fail: - if (obj) - drm_gem_object_unreference_unlocked(obj); - + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 9facd4b6ffd9..b2f13cfe945e 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -29,6 +29,16 @@ 
struct msm_gem_object { uint32_t flags; + /** + * Advice: are the backing pages purgeable? + */ + uint8_t madv; + + /** + * count of active vmap'ing + */ + uint8_t vmap_count; + /* An object is either: * inactive - on priv->inactive_list * active - on one of the gpu's active_list.. well, at @@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj) return msm_obj->gpu != NULL; } -#define MAX_CMDS 4 +static inline bool is_purgeable(struct msm_gem_object *msm_obj) +{ + return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && + !msm_obj->base.dma_buf && !msm_obj->base.import_attach; +} + +static inline bool is_vunmapable(struct msm_gem_object *msm_obj) +{ + return (msm_obj->vmap_count == 0) && msm_obj->vaddr; +} /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, * associated with the cmdstream submission for synchronization (and @@ -95,7 +114,7 @@ struct msm_gem_submit { uint32_t size; /* in dwords */ uint32_t iova; uint32_t idx; /* cmdstream buffer idx in bos[] */ - } cmd[MAX_CMDS]; + } *cmd; /* array of size nr_cmds */ struct { uint32_t flags; struct msm_gem_object *obj; diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 6b90890faffe..60bb290700ce 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) void *msm_gem_prime_vmap(struct drm_gem_object *obj) { - return msm_gem_vaddr(obj); + return msm_gem_get_vaddr(obj); } void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { - /* TODO msm_gem_vunmap() */ + msm_gem_put_vaddr(obj); } int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c new file mode 100644 index 000000000000..283d2841ba58 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
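Taken together, madv and vmap_count encode a small state machine. A sketch of the intended lifecycle (the MSM_MADV_* values come from the msm uapi header; the ioctl that would call msm_gem_madvise() is not part of this diff and is assumed):

/* all transitions happen under dev->struct_mutex */
/* creation: msm_gem_new_impl() starts every object at MSM_MADV_WILLNEED */
msm_gem_madvise(obj, MSM_MADV_DONTNEED);   /* userspace: contents are expendable */

/* under memory pressure, the shrinker's scan callback does: */
if (is_purgeable(to_msm_bo(obj)))
	msm_gem_purge(obj);                /* pages freed, madv = __MSM_MADV_PURGED */

/* when userspace wants the buffer again: */
if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
	; /* returns 0 once purged: contents must be regenerated */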
+ */ + +#include "msm_drv.h" +#include "msm_gem.h" + +static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +{ + if (!mutex_is_locked(mutex)) + return false; + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) + return mutex->owner == task; +#else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ + return false; +#endif +} + +static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) +{ + if (!mutex_trylock(&dev->struct_mutex)) { + if (!mutex_is_locked_by(&dev->struct_mutex, current)) + return false; + *unlock = false; + } else { + *unlock = true; + } + + return true; +} + + +static unsigned long +msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct msm_drm_private *priv = + container_of(shrinker, struct msm_drm_private, shrinker); + struct drm_device *dev = priv->dev; + struct msm_gem_object *msm_obj; + unsigned long count = 0; + bool unlock; + + if (!msm_gem_shrinker_lock(dev, &unlock)) + return 0; + + list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { + if (is_purgeable(msm_obj)) + count += msm_obj->base.size >> PAGE_SHIFT; + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static unsigned long +msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct msm_drm_private *priv = + container_of(shrinker, struct msm_drm_private, shrinker); + struct drm_device *dev = priv->dev; + struct msm_gem_object *msm_obj; + unsigned long freed = 0; + bool unlock; + + if (!msm_gem_shrinker_lock(dev, &unlock)) + return SHRINK_STOP; + + list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { + if (freed >= sc->nr_to_scan) + break; + if (is_purgeable(msm_obj)) { + msm_gem_purge(&msm_obj->base); + freed += msm_obj->base.size >> PAGE_SHIFT; + } + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + if (freed > 0) + pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); + + return freed; +} + +static int +msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct msm_drm_private *priv = + container_of(nb, struct msm_drm_private, vmap_notifier); + struct drm_device *dev = priv->dev; + struct msm_gem_object *msm_obj; + unsigned unmapped = 0; + bool unlock; + + if (!msm_gem_shrinker_lock(dev, &unlock)) + return NOTIFY_DONE; + + list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { + if (is_vunmapable(msm_obj)) { + msm_gem_vunmap(&msm_obj->base); + /* since we don't know any better, let's bail after a few + * and if necessary the shrinker will be invoked again. + * Seems better than unmapping *everything* + */ + if (++unmapped >= 15) + break; + } + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + *(unsigned long *)ptr += unmapped; + + if (unmapped > 0) + pr_info_ratelimited("Purging %u vmaps\n", unmapped); + + return NOTIFY_DONE; +} + +/** + * msm_gem_shrinker_init - Initialize msm shrinker + * @dev: msm drm device + * + * This function registers and sets up the msm shrinker.
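The count/scan pair above follows the standard shrinker contract: count_objects reports how many pages could be freed, and scan_objects actually purges, returning the number freed or SHRINK_STOP when the lock cannot be taken. The registration below is wired up from the driver's load/unload paths; the msm_drv.c call sites are outside this patch, so the ordering here is only a sketch:

/* sketch of the expected call sites in msm_drv.c (not shown in this diff) */
priv->dev = ddev;              /* callbacks reach the device via msm_drm_private */
INIT_LIST_HEAD(&priv->inactive_list);
msm_gem_shrinker_init(ddev);   /* only after inactive_list is ready to walk */
/* ... device lifetime ... */
msm_gem_shrinker_cleanup(ddev);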
+ */ +void msm_gem_shrinker_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + priv->shrinker.count_objects = msm_gem_shrinker_count; + priv->shrinker.scan_objects = msm_gem_shrinker_scan; + priv->shrinker.seeks = DEFAULT_SEEKS; + WARN_ON(register_shrinker(&priv->shrinker)); + + priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap; + WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier)); +} + +/** + * msm_gem_shrinker_cleanup - Clean up msm shrinker + * @dev: msm drm device + * + * This function unregisters the msm shrinker. + */ +void msm_gem_shrinker_cleanup(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); + unregister_shrinker(&priv->shrinker); +} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index eb4bb8b2f3a5..9766f9ae4b7d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -29,10 +29,11 @@ #define BO_PINNED 0x2000 static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, int nr) + struct msm_gpu *gpu, int nr_bos, int nr_cmds) { struct msm_gem_submit *submit; - int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); + int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + + (nr_cmds * sizeof(*submit->cmd)); submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (!submit) @@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->gpu = gpu; submit->fence = NULL; submit->pid = get_pid(task_pid(current)); + submit->cmd = (void *)&submit->bos[nr_bos]; /* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; @@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob /* For now, just map the entire thing. Eventually we probably want * to do it page-by-page, w/ kmap() if not vmap()d..
*/ - ptr = msm_gem_vaddr_locked(&obj->base); + ptr = msm_gem_get_vaddr_locked(&obj->base); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob last_offset = off; } + msm_gem_put_vaddr_locked(&obj->base); + return 0; } @@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->pipe != MSM_PIPE_3D0) return -EINVAL; - if (args->nr_cmds > MAX_CMDS) - return -EINVAL; - - submit = submit_create(dev, gpu, args->nr_bos); - if (!submit) - return -ENOMEM; + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); + submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); + if (!submit) { + ret = -ENOMEM; + goto out_unlock; + } ret = submit_lookup_objects(submit, args, file); if (ret) @@ -462,6 +467,7 @@ out: submit_cleanup(submit); if (ret) msm_gem_submit_free(submit); +out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index a7a0b6d9b057..3a294d0da3a0 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, return -EINVAL; for_each_sg(sgt->sgl, sg, sgt->nents, i) { - u32 pa = sg_phys(sg) - sg->offset; + dma_addr_t pa = sg_phys(sg) - sg->offset; size_t bytes = sg->length + sg->offset; - VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes); ret = iommu_map(domain, da, pa, bytes, prot); if (ret) @@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, if (unmapped < bytes) return unmapped; - VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); + VERB("unmap[%d]: %08x(%zx)", i, da, bytes); BUG_ON(!PAGE_ALIGNED(bytes)); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e32222c3d44f..40e41e5cdbc6 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -61,10 +61,8 @@ struct msm_kms_funcs { struct msm_kms { const struct msm_kms_funcs *funcs; - /* irq handling: */ - bool in_irq; - struct list_head irq_list; /* list of mdp4_irq */ - uint32_t vblank_mask; /* irq bits set for userspace vblank */ + /* irq number to be passed on to drm_irq_install */ + int irq; }; static inline void msm_kms_init(struct msm_kms *kms, @@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms, struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev); +int msm_mdss_init(struct drm_device *dev); +void msm_mdss_destroy(struct drm_device *dev); #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c index 830857c47c86..17fe4e53e0d1 100644 --- a/drivers/gpu/drm/msm/msm_perf.c +++ b/drivers/gpu/drm/msm/msm_perf.c @@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t sz, loff_t *ppos) { struct msm_perf_state *perf = file->private_data; - int n = 0, ret; + int n = 0, ret = 0; mutex_lock(&perf->read_lock); @@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf, } n = min((int)sz, perf->buftot - perf->bufpos); - ret = copy_to_user(buf, &perf->buf[perf->bufpos], n); - if (ret) + if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) { + ret = -EFAULT; goto out; + } perf->bufpos += n; *ppos += n; diff --git a/drivers/gpu/drm/msm/msm_rd.c 
b/drivers/gpu/drm/msm/msm_rd.c index 0857710c2ff2..3a5fdfcd67ae 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -27,6 +27,11 @@ * This bypasses drm_debugfs_create_files() mainly because we need to use * our own fops for a bit more control. In particular, we don't want to * do anything if userspace doesn't have the debugfs file open. + * + * The module-param "rd_full", which defaults to false, enables snapshotting + * all (non-written) buffers in the submit, rather than just cmdstream bo's. + * This is useful to capture the contents of (for example) vbo's or textures, + * or shader programs (if not emitted inline in cmdstream). */ #ifdef CONFIG_DEBUG_FS @@ -40,6 +45,10 @@ #include "msm_gpu.h" #include "msm_gem.h" +static bool rd_full = false; +MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); +module_param_named(rd_full, rd_full, bool, 0600); + enum rd_sect_type { RD_NONE, RD_TEST, /* ascii text */ @@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf, goto out; n = min_t(int, sz, circ_count_to_end(&rd->fifo)); - ret = copy_to_user(buf, fptr, n); - if (ret) + if (copy_to_user(buf, fptr, n)) { + ret = -EFAULT; goto out; + } fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); *ppos += n; @@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor) kfree(rd); } +static void snapshot_buf(struct msm_rd_state *rd, + struct msm_gem_submit *submit, int idx, + uint32_t iova, uint32_t size) +{ + struct msm_gem_object *obj = submit->bos[idx].obj; + const char *buf; + + buf = msm_gem_get_vaddr_locked(&obj->base); + if (IS_ERR(buf)) + return; + + if (iova) { + buf += iova - submit->bos[idx].iova; + } else { + iova = submit->bos[idx].iova; + size = obj->base.size; + } + + rd_write_section(rd, RD_GPUADDR, + (uint32_t[2]){ iova, size }, 8); + rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); + + msm_gem_put_vaddr_locked(&obj->base); +} + /* called under struct_mutex */ void msm_rd_dump_submit(struct msm_gem_submit *submit) { @@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); - /* could be nice to have an option (module-param?) to snapshot - * all the bo's associated with the submit. Handy to see vtx - * buffers, etc. For now just the cmdstream bo's is enough. 
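Since module_param_named() registers rd_full with mode 0600, it can also be flipped at runtime through /sys/module/msm/parameters/rd_full, not just at load time. Separately, the rd_read() change above (and the matching one in msm_perf.c) fixes a classic mistake: copy_to_user() returns the number of bytes it could not copy, not an errno, so its result must never be passed back as a return code. The corrected pattern in general form:

if (copy_to_user(buf, src, n))  /* non-zero means some bytes were left uncopied */
	return -EFAULT;         /* report the fault; never return the raw count */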
- */ + if (rd_full) { + for (i = 0; i < submit->nr_bos; i++) { + /* buffers that are written to probably don't start out + * with anything interesting: + */ + if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) + continue; + + snapshot_buf(rd, submit, i, 0, 0); + } + } for (i = 0; i < submit->nr_cmds; i++) { - uint32_t idx = submit->cmd[i].idx; uint32_t iova = submit->cmd[i].iova; uint32_t szd = submit->cmd[i].size; /* in dwords */ - struct msm_gem_object *obj = submit->bos[idx].obj; - const char *buf = msm_gem_vaddr_locked(&obj->base); - - if (IS_ERR(buf)) - continue; - buf += iova - submit->bos[idx].iova; - - rd_write_section(rd, RD_GPUADDR, - (uint32_t[2]){ iova, szd * 4 }, 8); - rd_write_section(rd, RD_BUFFER_CONTENTS, - buf, szd * 4); + /* snapshot cmdstream bo's (if we haven't already): */ + if (!rd_full) { + snapshot_buf(rd, submit, submit->cmd[i].idx, + submit->cmd[i].iova, szd * 4); + } switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 42f5359cf988..f326cf6a32e6 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) goto fail; } - ring->start = msm_gem_vaddr_locked(ring->bo); + ring->start = msm_gem_get_vaddr_locked(ring->bo); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); goto fail; @@ -59,7 +59,9 @@ fail: void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) { - if (ring->bo) + if (ring->bo) { + msm_gem_put_vaddr(ring->bo); drm_gem_object_unreference_unlocked(ring->bo); + } kfree(ring); } diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 5ab13e7939db..2922a82cba8e 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -3,13 +3,7 @@ config DRM_NOUVEAU depends on DRM && PCI select FW_LOADER select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - select FB - select FRAMEBUFFER_CONSOLE if !EXPERT select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT select X86_PLATFORM_DEVICES if ACPI && X86 diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6f318c54da33..0cb7a18cde26 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc) nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); } -static void -nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, +static int +nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size) { - int end = (start + size > 256) ? 
256 : start + size, i; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; @@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, */ if (!nv_crtc->base.primary->fb) { nv_crtc->lut.depth = 0; - return; + return 0; } nv_crtc_gamma_load(crtc); + + return 0; } static int diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index aea81a547e85..34c0f2f67548 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev) struct nv04_display *disp = nv04_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_encoder *encoder; - struct drm_crtc *crtc; struct nouveau_crtc *nv_crtc; - /* Turn every CRTC off. */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } - /* Restore state */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head) encoder->enc_restore(&encoder->base.base); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index a665b78b2af5..434d1e29f279 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder, /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. */ - if (crtc) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } + if (crtc) + drm_crtc_force_disable(crtc); } return 0; diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 331620a52afa..287a7d6fa480 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -29,6 +29,7 @@ struct nv_device_info_v0 { #define NV_DEVICE_INFO_V0_FERMI 0x07 #define NV_DEVICE_INFO_V0_KEPLER 0x08 #define NV_DEVICE_INFO_V0_MAXWELL 0x09 +#define NV_DEVICE_INFO_V0_PASCAL 0x0a __u8 family; __u8 pad06[2]; __u64 ram_size; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 982aad8fa645..e6e9537537cf 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -39,6 +39,7 @@ #define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f #define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f #define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f +#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f #define NV50_DISP /* cl5070.h */ 0x00005070 #define G82_DISP /* cl5070.h */ 0x00008270 @@ -50,6 +51,8 @@ #define GK110_DISP /* cl5070.h */ 0x00009270 #define GM107_DISP /* cl5070.h */ 0x00009470 #define GM200_DISP /* cl5070.h */ 0x00009570 +#define GP100_DISP /* cl5070.h */ 0x00009770 +#define GP104_DISP /* cl5070.h */ 0x00009870 #define NV31_MPEG 0x00003174 #define G82_MPEG 0x00008274 @@ -86,6 +89,8 @@ #define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d #define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d #define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d +#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d +#define GP104_DISP_CORE_CHANNEL_DMA /* 
cl507d.h */ 0x0000987d #define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e #define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e @@ -105,6 +110,8 @@ #define MAXWELL_A /* cl9097.h */ 0x0000b097 #define MAXWELL_B /* cl9097.h */ 0x0000b197 +#define PASCAL_A /* cl9097.h */ 0x0000c097 + #define NV74_BSP 0x000074b0 #define GT212_MSVLD 0x000085b1 @@ -128,6 +135,8 @@ #define FERMI_DMA 0x000090b5 #define KEPLER_DMA_COPY_A 0x0000a0b5 #define MAXWELL_DMA_COPY_A 0x0000b0b5 +#define PASCAL_DMA_COPY_A 0x0000c0b5 +#define PASCAL_DMA_COPY_B 0x0000c1b5 #define FERMI_DECOMPRESS 0x000090b8 @@ -137,6 +146,7 @@ #define KEPLER_COMPUTE_B 0x0000a1c0 #define MAXWELL_COMPUTE_A 0x0000b0c0 #define MAXWELL_COMPUTE_B 0x0000b1c0 +#define PASCAL_COMPUTE_A 0x0000c0c0 #define NV74_CIPHER 0x000074c1 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 126a85cc81bc..7ea8aa7ca408 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -33,7 +33,10 @@ enum nvkm_devidx { NVKM_ENGINE_CE0, NVKM_ENGINE_CE1, NVKM_ENGINE_CE2, - NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2, + NVKM_ENGINE_CE3, + NVKM_ENGINE_CE4, + NVKM_ENGINE_CE5, + NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5, NVKM_ENGINE_CIPHER, NVKM_ENGINE_DISP, @@ -50,7 +53,8 @@ enum nvkm_devidx { NVKM_ENGINE_NVENC0, NVKM_ENGINE_NVENC1, - NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1, + NVKM_ENGINE_NVENC2, + NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2, NVKM_ENGINE_NVDEC, NVKM_ENGINE_PM, @@ -102,6 +106,7 @@ struct nvkm_device { NV_C0 = 0xc0, NV_E0 = 0xe0, GM100 = 0x110, + GP100 = 0x130, } card_type; u32 chipset; u8 chiprev; @@ -136,7 +141,7 @@ struct nvkm_device { struct nvkm_volt *volt; struct nvkm_engine *bsp; - struct nvkm_engine *ce[3]; + struct nvkm_engine *ce[6]; struct nvkm_engine *cipher; struct nvkm_disp *disp; struct nvkm_dma *dma; @@ -149,7 +154,7 @@ struct nvkm_device { struct nvkm_engine *mspdec; struct nvkm_engine *msppp; struct nvkm_engine *msvld; - struct nvkm_engine *nvenc[2]; + struct nvkm_engine *nvenc[3]; struct nvkm_engine *nvdec; struct nvkm_pm *pm; struct nvkm_engine *sec; @@ -170,7 +175,6 @@ struct nvkm_device_func { void (*fini)(struct nvkm_device *, bool suspend); resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar); resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar); - bool cpu_coherent; }; struct nvkm_device_quirk { @@ -206,7 +210,7 @@ struct nvkm_device_chip { int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **); int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **); int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **); @@ -219,7 +223,7 @@ struct nvkm_device_chip { int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **); int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); int (*sec )(struct 
nvkm_device *, int idx, struct nvkm_engine **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index b5370cb56e3c..e5c9b6268dcc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -28,6 +28,7 @@ struct nvkm_device_tegra { } iommu; int gpu_speedo; + int gpu_speedo_id; }; struct nvkm_device_tegra_func { diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h index 594d719ba41e..d3d26a1e215d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h @@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h index d4fdce27b297..e82049667ce4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h @@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 15ddfcf5e8db..ed92fec5292c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); +int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 6515f5810a26..89cf99307828 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h index e39a1fea930b..a72f3290528a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h @@ -7,6 +7,9 @@ struct nvkm_bios { u32 size; u8 *data; + u32 image0_size; + u32 imaged_addr; + u32 bmp_offset; u32 bit_offset; @@ -22,10 +25,9 @@ struct nvkm_bios { u8 nvbios_checksum(const u8 
*data, int size); u16 nvbios_findstr(const u8 *data, int size, const char *str, int len); int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len); - -#define nvbios_rd08(b,o) (b)->data[(o)] -#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)]) -#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)]) +u8 nvbios_rd08(struct nvkm_bios *, u32 addr); +u16 nvbios_rd16(struct nvkm_bios *, u32 addr); +u32 nvbios_rd32(struct nvkm_bios *, u32 addr); int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 0a734fd06acf..3a410275fa71 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -56,6 +56,8 @@ struct nvkm_fb { int regions; } tile; + u8 page; + struct nvkm_memory *mmu_rd; struct nvkm_memory *mmu_wr; }; @@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); #include <subdev/bios.h> #include <subdev/bios/ramcfg.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h index c6b90b6543b3..cd755baf9cab 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h @@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); +int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index 2e80682b2da1..27d25b18d85c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h @@ -7,11 +7,14 @@ struct nvkm_mc { struct nvkm_subdev subdev; }; -void nvkm_mc_intr(struct nvkm_mc *, bool *handled); -void nvkm_mc_intr_unarm(struct nvkm_mc *); -void nvkm_mc_intr_rearm(struct nvkm_mc *); -void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx); -void nvkm_mc_unk260(struct nvkm_mc *, u32 data); +void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_intr(struct nvkm_device *, bool *handled); +void nvkm_mc_intr_unarm(struct nvkm_device *); +void nvkm_mc_intr_rearm(struct nvkm_device *); +void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable); +void nvkm_mc_unk260(struct nvkm_device *, u32 data); int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **); @@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **); +int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); #endif diff --git 
a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index ddb913889d7e..e6523e2cea9f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h @@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **); +int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); /* pcie functions */ int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h index c6edd95a5b69..b04c38c07761 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h @@ -43,9 +43,8 @@ struct nvkm_secboot { const struct nvkm_secboot_func *func; struct nvkm_subdev subdev; + enum nvkm_devidx devidx; u32 base; - u32 irq_mask; - u32 enable_mask; }; #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h index 8fb575a92c48..71ebbfd4484f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h @@ -8,10 +8,11 @@ struct nvkm_top { struct list_head device; }; -u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx); -u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs); -enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault); -enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn); +u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); +u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); +u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); +enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); +enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h index feff55cff05b..b765f4ffcde6 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h @@ -12,6 +12,9 @@ struct nvkm_volt { u32 uv; u8 vid; } vid[256]; + + u32 max_uv; + u32 min_uv; }; int nvkm_volt_get(struct nvkm_volt *); diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index eb7de487a2b3..7bd4683216d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm) case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: case NV_DEVICE_INFO_V0_MAXWELL: + case NV_DEVICE_INFO_V0_PASCAL: return NVIF_CLASS_SW_GF100; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index db76b94e6e26..f2ad17aa33f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -45,6 +45,8 @@ static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; + bool optimus_flags_detected; + bool optimus_skip_dsm; acpi_handle dhandle; acpi_handle rom_handle; } nouveau_dsm_priv; @@ -57,9 
+59,6 @@ bool nouveau_is_v1_dsm(void) { return nouveau_dsm_priv.dsm_detected; } -#define NOUVEAU_DSM_HAS_MUX 0x1 -#define NOUVEAU_DSM_HAS_OPT 0x2 - #ifdef CONFIG_VGA_SWITCHEROO static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, @@ -110,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * * requirements on the fourth parameter, so a private implementation * instead of using acpi_check_dsm(). */ -static int nouveau_check_optimus_dsm(acpi_handle handle) +static int nouveau_dsm_get_optimus_functions(acpi_handle handle) { int result; @@ -125,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle) * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. * If the n-th bit is enabled, function n is supported */ - return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); + if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS)) + return result; + return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg) @@ -212,26 +213,55 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = { .get_client_id = nouveau_dsm_get_client_id, }; -static int nouveau_dsm_pci_probe(struct pci_dev *pdev) +/* + * Firmware supporting Windows 8 or later does not use _DSM to put the device into + * D3cold; it instead relies on disabling power resources on the parent. + */ +static bool nouveau_pr3_present(struct pci_dev *pdev) +{ + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + struct acpi_device *parent_adev; + + if (!parent_pdev) + return false; + + parent_adev = ACPI_COMPANION(&parent_pdev->dev); + if (!parent_adev) + return false; + + return acpi_has_method(parent_adev->handle, "_PR3"); +} + +static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, + bool *has_mux, bool *has_opt, + bool *has_opt_flags, bool *has_pr3) +{ acpi_handle dhandle; - int retval = 0; + bool supports_mux; + int optimus_funcs; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) - return false; + return; if (!acpi_has_method(dhandle, "_DSM")) - return false; + return; + + supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, + 1 << NOUVEAU_DSM_POWER); + optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle); - if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, - 1 << NOUVEAU_DSM_POWER)) - retval |= NOUVEAU_DSM_HAS_MUX; + /* Does not look like a Nvidia device. */ + if (!supports_mux && !optimus_funcs) + return; - if (nouveau_check_optimus_dsm(dhandle)) - retval |= NOUVEAU_DSM_HAS_OPT; + *dhandle_out = dhandle; + *has_mux = supports_mux; + *has_opt = !!optimus_funcs; + *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS); + *has_pr3 = false; - if (retval & NOUVEAU_DSM_HAS_OPT) { + if (optimus_funcs) { uint32_t result; nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, &result); @@ -239,11 +269,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", (result & OPTIMUS_HDA_CODEC_MASK) ?
"hda bios codec supported" : ""); - } - if (retval) - nouveau_dsm_priv.dhandle = dhandle; - return retval; + *has_pr3 = nouveau_pr3_present(pdev); + } } static bool nouveau_dsm_detect(void) @@ -251,11 +279,13 @@ static bool nouveau_dsm_detect(void) char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; - int has_dsm = 0; - int has_optimus = 0; + acpi_handle dhandle = NULL; + bool has_mux = false; + bool has_optimus = false; + bool has_optimus_flags = false; + bool has_power_resources = false; int vga_count = 0; bool guid_valid; - int retval; bool ret = false; /* lookup the MXM GUID */ @@ -268,32 +298,32 @@ static bool nouveau_dsm_detect(void) while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags, &has_power_resources); } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags, &has_power_resources); } /* find the optimus DSM or the old v1 DSM */ - if (has_optimus == 1) { + if (has_optimus) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); + if (has_power_resources) + pr_info("nouveau: detected PR support, will not use DSM\n"); nouveau_dsm_priv.optimus_detected = true; + nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags; + nouveau_dsm_priv.optimus_skip_dsm = has_power_resources; ret = true; - } else if (vga_count == 2 && has_dsm && guid_valid) { + } else if (vga_count == 2 && has_mux && guid_valid) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", @@ -321,11 +351,12 @@ void nouveau_register_dsm_handler(void) void nouveau_switcheroo_optimus_dsm(void) { u32 result = 0; - if (!nouveau_dsm_priv.optimus_detected) + if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm) return; - nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, - 0x3, &result); + if (nouveau_dsm_priv.optimus_flags_detected) + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, + 0x3, &result); nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 5e3f3e826476..528bdeffb339 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &drm->ttm.bdev; - if (!nvxx_device(&drm->device)->func->cpu_coherent) - nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; + nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; nvbo->page_shift = 12; if (drm->client.vm) { @@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo) if (ret) return ret; - /* - * TTM buffers 
allocated using the DMA API already have a mapping, let's - * use it instead. - */ - if (!nvbo->force_coherent) - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, - &nvbo->kmap); + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); ttm_bo_unreserve(&nvbo->bo); return ret; @@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) if (!nvbo) return; - /* - * TTM buffers allocated using the DMA API already had a coherent - * mapping which we used, no need to unmap. - */ - if (!nvbo->force_coherent) - ttm_bo_kunmap(&nvbo->kmap); + ttm_bo_kunmap(&nvbo->kmap); } void @@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, return 0; } -static inline void * -_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz) -{ - struct ttm_dma_tt *dma_tt; - u8 *m = mem; - - index *= sz; - - if (m) { - /* kmap'd address, return the corresponding offset */ - m += index; - } else { - /* DMA-API mapping, lookup the right address */ - dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; - m = dma_tt->cpu_address[index / PAGE_SIZE]; - m += index % PAGE_SIZE; - } - - return m; -} -#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m)) - void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) { bool is_iomem; u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) iowrite16_native(val, (void __force __iomem *)mem); @@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) bool is_iomem; u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) return ioread32_native((void __force __iomem *)mem); @@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) bool is_iomem; u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) iowrite32_native(val, (void __force __iomem *)mem); @@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, - no_wait_gpu, new_mem); nouveau_fence_unref(&fence); } @@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_mem_reg *, struct ttm_mem_reg *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init }, + { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, @@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + ret = ttm_bo_wait(bo, intr, no_wait_gpu); + if (ret) + return ret; + if (nvbo->pin_refcnt) NV_WARN(drm, "Moving pinned object %p!\n", nvbo); @@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. 
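The new 0xc1b5/0xc0b5 rows rely on _methods[] being ordered newest-first: nouveau_bo_move_init() walks the table and keeps the first copy class the hardware accepts, so adding Pascal's copy-engine classes at the top is the whole enablement. A condensed sketch of that selection loop (the real code also allocates a channel and calls mthd->init(); example_try_class() is a hypothetical stand-in for that probing):

for (i = 0; i < ARRAY_SIZE(_methods); i++) {
	/* hypothetical probe: does this device expose the class? */
	if (example_try_class(drm, _methods[i].oclass) == 0) {
		drm->ttm.move = _methods[i].exec;  /* e.g. nve0_bo_move_copy */
		break;
	}
}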
*/ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem); out: if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { @@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) dev = drm->dev; pdev = device->dev; - /* - * Objects matching this condition have been marked as force_coherent, - * so use the DMA API for them. - */ - if (!nvxx_device(&drm->device)->func->cpu_coherent && - ttm->caching_state == tt_uncached) - return ttm_dma_populate(ttm_dma, dev->dev); - #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { return ttm_agp_tt_populate(ttm); @@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) dev = drm->dev; pdev = device->dev; - /* - * Objects matching this condition have been marked as force_coherent, - * so use the DMA API for them. - */ - if (!nvxx_device(&drm->device)->func->cpu_coherent && - ttm->caching_state == tt_uncached) { - ttm_dma_unpopulate(ttm_dma, dev->dev); - return; - } - #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { ttm_agp_tt_unpopulate(ttm); diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index b1d2527c5625..f9b3c811187e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -191,7 +191,8 @@ static int nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, u32 engine, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A, + static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A, + MAXWELL_CHANNEL_GPFIFO_A, KEPLER_CHANNEL_GPFIFO_B, KEPLER_CHANNEL_GPFIFO_A, FERMI_CHANNEL_GPFIFO, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7c77f960c8b8..afbf557b23d4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify) { struct nouveau_crtc *nv_crtc = container_of(notify, typeof(*nv_crtc), vblank); - drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index); + drm_crtc_handle_vblank(&nv_crtc->base); return NVIF_NOTIFY_KEEP; } @@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev) if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { static const u16 oclass[] = { + GP104_DISP, + GP100_DISP, GM200_DISP, GM107_DISP, GK110_DISP, @@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev) nouveau_display_vblank_fini(dev); drm_kms_helper_poll_fini(dev); + drm_crtc_force_disable_all(dev); drm_mode_config_cleanup(dev); if (disp->dtor) @@ -760,12 +763,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Initialize a page flip struct */ *s = (struct nouveau_page_flip_state) - { { }, event, nouveau_crtc(crtc)->index, - fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, + { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0], new_bo->bo.offset }; /* Keep vblanks on during flip, for the target crtc of this flip */ - drm_vblank_get(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_get(crtc); /* Emit a page flip */ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { @@ -810,7 +812,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, return 0; fail_unreserve: - drm_vblank_put(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_put(crtc); ttm_bo_unreserve(&old_bo->bo); fail_unpin: mutex_unlock(&cli->mutex); @@ -842,17 +844,17 @@ 
nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - drm_arm_vblank_event(dev, s->crtc, s->event); + drm_crtc_arm_vblank_event(s->crtc, s->event); } else { - drm_send_vblank_event(dev, s->crtc, s->event); + drm_crtc_send_vblank_event(s->crtc, s->event); /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } } else { /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } list_del(&s->head); @@ -873,9 +875,10 @@ nouveau_flip_complete(struct nvif_notify *notify) if (!nouveau_finish_page_flip(chan, &state)) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - nv_set_crtc_base(drm->dev, state.crtc, state.offset + - state.y * state.pitch + - state.x * state.bpp / 8); + nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc), + state.offset + state.crtc->y * + state.pitch + state.crtc->x * + state.bpp / 8); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 24273bacd885..0420ee861ea4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *, struct nouveau_page_flip_state { struct list_head head; struct drm_pending_vblank_event *event; - int crtc, bpp, pitch, x, y; + struct drm_crtc *crtc; + int bpp, pitch; u64 offset; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 11f8dd9c0edb..66c1280c0f1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,13 +22,11 @@ * Authors: Ben Skeggs */ -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include "drmP.h" @@ -200,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case KEPLER_CHANNEL_GPFIFO_A: case KEPLER_CHANNEL_GPFIFO_B: case MAXWELL_CHANNEL_GPFIFO_A: + case PASCAL_CHANNEL_GPFIFO_A: ret = nvc0_fence_create(drm); break; default: @@ -315,16 +314,19 @@ static int nouveau_drm_probe(struct pci_dev *pdev, bool boot = false; int ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; - /* remove conflicting drivers (vesafb, efifb etc) */ + /* We need to check that the chipset is supported before booting + * fbdev off the hardware, as there's no way to put it back. + */ + ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device); + if (ret) + return ret; + + nvkm_device_del(&device); + + /* Remove conflicting drivers (vesafb, efifb etc). 
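vga_switcheroo_client_probe_defer() folds the deleted apple-gmux/vga_default_device() test into a helper that any hybrid-graphics driver can reuse. The probe-time pattern it enables, as a sketch:

static int example_gpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* let vga_switcheroo decide whether probing must wait, e.g. for
	 * apple-gmux to register its handler before we touch the panel */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;  /* the driver core will retry later */

	/* ... normal probe continues ... */
	return 0;
}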
*/ aper = alloc_apertures(3); if (!aper) return -ENOMEM; @@ -438,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) nouveau_vga_init(drm); if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + if (!nvxx_device(&drm->device)->mmu) { + ret = -ENOSYS; + goto fail_device; + } + ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 0x1000, NULL, &drm->client.vm); if (ret) @@ -498,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - pm_runtime_get_sync(dev->dev); + if (nouveau_runtime_pm != 0) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } + nouveau_fbcon_fini(dev); nouveau_accel_fini(drm); nouveau_hwmon_fini(dev); @@ -970,7 +981,7 @@ driver_stub = { .gem_prime_vmap = nouveau_gem_prime_vmap, .gem_prime_vunmap = nouveau_gem_prime_vunmap, - .gem_free_object = nouveau_gem_object_del, + .gem_free_object_unlocked = nouveau_gem_object_del, .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, @@ -1078,7 +1089,6 @@ nouveau_drm_init(void) driver_pci = driver_stub; driver_pci.set_busid = drm_pci_set_busid; driver_platform = driver_stub; - driver_platform.set_busid = drm_platform_set_busid; nouveau_display_options(); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 2e3a62d38fe9..64c4ce7115ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -57,7 +57,8 @@ struct nouveau_fence_priv { int (*context_new)(struct nouveau_channel *); void (*context_del)(struct nouveau_channel *); - u32 contexts, context_base; + u32 contexts; + u64 context_base; bool uevent; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 1ff4166af26e..71f764bf4cc6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -535,6 +535,40 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, nouveau_hwmon_get_in0_input, NULL, 0); static ssize_t +nouveau_hwmon_get_in0_min(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvkm_volt *volt = nvxx_volt(&drm->device); + + if (!volt || !volt->min_uv) + return -ENODEV; + + return sprintf(buf, "%i\n", volt->min_uv / 1000); +} + +static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO, + nouveau_hwmon_get_in0_min, NULL, 0); + +static ssize_t +nouveau_hwmon_get_in0_max(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvkm_volt *volt = nvxx_volt(&drm->device); + + if (!volt || !volt->max_uv) + return -ENODEV; + + return sprintf(buf, "%i\n", volt->max_uv / 1000); +} + +static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO, + nouveau_hwmon_get_in0_max, NULL, 0); + +static ssize_t nouveau_hwmon_get_in0_label(struct device *d, struct device_attribute *a, char *buf) { @@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { static struct attribute *hwmon_in0_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_min.dev_attr.attr, + &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_label.dev_attr.attr, NULL }; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index bcee91497eb9..1825dbc33192 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: case NV_DEVICE_INFO_V0_MAXWELL: + case NV_DEVICE_INFO_V0_PASCAL: node->memtype = (nvbo->tile_flags & 0xff00) >> 8; break; default: diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 675e9e077a95..08f9c6fa0f7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) ntfy->p->base.event = &ntfy->p->e.base; ntfy->p->base.file_priv = f; ntfy->p->base.pid = current->pid; - ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree; ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 7d9248b8c664..da8fd5ff9d0f 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); - dsize = ALIGN(image->width * image->height, 32) >> 5; + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 3ffc2b0057bf..7d0edcbcfca7 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp, .pushbuf = 0xb0007d00, }; static const s32 oclass[] = { + GP104_DISP_CORE_CHANNEL_DMA, + GP100_DISP_CORE_CHANNEL_DMA, GM200_DISP_CORE_CHANNEL_DMA, GM107_DISP_CORE_CHANNEL_DMA, GK110_DISP_CORE_CHANNEL_DMA, @@ -1346,21 +1348,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -static void +static int nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) + uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = min_t(u32, start + size, 256); u32 i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; } nv50_crtc_lut_load(crtc); + + return 0; } static void diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 1aeb698e9707..af3d3c49411a 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 839f4c8c1805..054b6a056d99 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index b18557858f19..19044aba265e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_ENGINE_CE0 ] = "ce0", [NVKM_ENGINE_CE1 ] = "ce1", [NVKM_ENGINE_CE2 ] = "ce2", + [NVKM_ENGINE_CE3 ] = "ce3", + [NVKM_ENGINE_CE4 ] = "ce4", + [NVKM_ENGINE_CE5 ] = "ce5", [NVKM_ENGINE_CIPHER ] = "cipher", [NVKM_ENGINE_DISP ] = "disp", [NVKM_ENGINE_DMAOBJ ] = "dma", @@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_ENGINE_MSVLD ] = "msvld", [NVKM_ENGINE_NVENC0 ] = "nvenc0", [NVKM_ENGINE_NVENC1 ] = "nvenc1", + [NVKM_ENGINE_NVENC2 ] = "nvenc2", [NVKM_ENGINE_NVDEC ] = "nvdec", [NVKM_ENGINE_PM ] = "pm", [NVKM_ENGINE_SEC ] = "sec", @@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) } } - nvkm_mc_reset(device->mc, subdev->index); + nvkm_mc_reset(device, subdev->index); time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "%s completed in %lldus\n", action, time); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild index 9c19d59b47df..a4458a8eb30a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild @@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o nvkm-y += nvkm/engine/ce/gk104.o nvkm-y += nvkm/engine/ce/gm107.o nvkm-y += nvkm/engine/ce/gm200.o +nvkm-y += nvkm/engine/ce/gp100.o +nvkm-y += nvkm/engine/ce/gp104.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c new file mode 100644 index 000000000000..c7710456bc30 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c @@ -0,0 +1,102 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
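The imageblit changes above (nv04, nv50, and nvc0 alike) all fix the same arithmetic: fb_image bitmaps pad every scanline to a whole byte, i.e. to a multiple of 8 pixels at 1bpp, so the number of push-buffer dwords has to be computed from the padded width, not the raw one, or the GPU is handed too little data. A minimal sketch of that calculation, assuming only the kernel's ALIGN() round-up macro; the helper name is hypothetical, not from the patch:

#include <linux/kernel.h>	/* ALIGN() */

static inline u32 imageblit_dwords(u32 width, u32 height)
{
	u32 bits = ALIGN(width, 8) * height;	/* byte-padded scanlines */
	return ALIGN(bits, 32) >> 5;		/* round up to whole 32-bit words */
}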
+ * + * Authors: Ben Skeggs + */ +#include "priv.h" +#include <core/enum.h> + +#include <nvif/class.h> + +static const struct nvkm_enum +gp100_ce_launcherr_report[] = { + { 0x0, "NO_ERR" }, + { 0x1, "2D_LAYER_EXCEEDS_DEPTH" }, + { 0x2, "INVALID_ALIGNMENT" }, + { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" }, + { 0x4, "SRC_LINE_EXCEEDS_PITCH" }, + { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" }, + { 0x6, "DST_LINE_EXCEEDS_PITCH" }, + { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" }, + { 0x8, "BAD_SRC_PIXEL_COMP_REF" }, + { 0x9, "INVALID_VALUE" }, + { 0xa, "UNUSED_FIELD" }, + { 0xb, "INVALID_OPERATION" }, + { 0xc, "NO_RESOURCES" }, + { 0xd, "INVALID_CONFIG" }, + {} +}; + +static void +gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base) +{ + struct nvkm_subdev *subdev = &ce->subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x104418 + base); + const struct nvkm_enum *en = + nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f); + nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : ""); +} + +void +gp100_ce_intr(struct nvkm_engine *ce) +{ + const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80; + struct nvkm_subdev *subdev = &ce->subdev; + struct nvkm_device *device = subdev->device; + u32 mask = nvkm_rd32(device, 0x10440c + base); + u32 intr = nvkm_rd32(device, 0x104410 + base) & mask; + if (intr & 0x00000001) { //XXX: guess + nvkm_warn(subdev, "BLOCKPIPE\n"); + nvkm_wr32(device, 0x104410 + base, 0x00000001); + intr &= ~0x00000001; + } + if (intr & 0x00000002) { //XXX: guess + nvkm_warn(subdev, "NONBLOCKPIPE\n"); + nvkm_wr32(device, 0x104410 + base, 0x00000002); + intr &= ~0x00000002; + } + if (intr & 0x00000004) { + gp100_ce_intr_launcherr(ce, base); + nvkm_wr32(device, 0x104410 + base, 0x00000004); + intr &= ~0x00000004; + } + if (intr) { + nvkm_warn(subdev, "intr %08x\n", intr); + nvkm_wr32(device, 0x104410 + base, intr); + } +} + +static const struct nvkm_engine_func +gp100_ce = { + .intr = gp100_ce_intr, + .sclass = { + { -1, -1, PASCAL_DMA_COPY_A }, + {} + } +}; + +int +gp100_ce_new(struct nvkm_device *device, int index, + struct nvkm_engine **pengine) +{ + return nvkm_engine_new_(&gp100_ce, device, index, true, pengine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c new file mode 100644 index 000000000000..20e019788a53 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c @@ -0,0 +1,44 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "priv.h" +#include <core/enum.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +gp104_ce = { + .intr = gp100_ce_intr, + .sclass = { + { -1, -1, PASCAL_DMA_COPY_B }, + { -1, -1, PASCAL_DMA_COPY_A }, + {} + } +}; + +int +gp104_ce_new(struct nvkm_device *device, int index, + struct nvkm_engine **pengine) +{ + return nvkm_engine_new_(&gp104_ce, device, index, true, pengine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h index e2fa8b161943..2dce405976ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h @@ -4,4 +4,5 @@ void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *); void gk104_ce_intr(struct nvkm_engine *); +void gp100_ce_intr(struct nvkm_engine *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 4572debcb0c9..7218a067a6c5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2148,6 +2148,67 @@ nv12b_chipset = { .sw = gf100_sw_new, }; +static const struct nvkm_device_chip +nv130_chipset = { + .name = "GP100", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp100_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .secboot = gm200_secboot_new, + .pci = gp100_pci_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp100_ce_new, + .ce[1] = gp100_ce_new, + .ce[2] = gp100_ce_new, + .ce[3] = gp100_ce_new, + .ce[4] = gp100_ce_new, + .ce[5] = gp100_ce_new, + .dma = gf119_dma_new, + .disp = gp100_disp_new, + .fifo = gp100_fifo_new, + .gr = gp100_gr_new, + .sw = gf100_sw_new, +}; + +static const struct nvkm_device_chip +nv134_chipset = { + .name = "GP104", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp104_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .pci = gp100_pci_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp104_ce_new, + .ce[1] = gp104_ce_new, + .ce[2] = gp104_ce_new, + .ce[3] = gp104_ce_new, + .disp = gp104_disp_new, + .dma = gf119_dma_new, + .fifo = gp100_fifo_new, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index) _(CE0 , device->ce[0] , device->ce[0]); _(CE1 , device->ce[1] , device->ce[1]); _(CE2 , device->ce[2] , device->ce[2]); + _(CE3 , device->ce[3] , device->ce[3]); + _(CE4 , device->ce[4] , device->ce[4]); + _(CE5 , device->ce[5] , device->ce[5]); _(CIPHER , device->cipher , device->cipher); _(DISP , device->disp , &device->disp->engine); _(DMAOBJ , device->dma , &device->dma->engine); @@ -2235,6 +2299,7 @@ nvkm_device_engine(struct 
nvkm_device *device, int index) _(MSVLD , device->msvld , device->msvld); _(NVENC0 , device->nvenc[0], device->nvenc[0]); _(NVENC1 , device->nvenc[1], device->nvenc[1]); + _(NVENC2 , device->nvenc[2], device->nvenc[2]); _(NVDEC , device->nvdec , device->nvdec); _(PM , device->pm , &device->pm->engine); _(SEC , device->sec , device->sec); @@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x100: device->card_type = NV_E0; break; case 0x110: case 0x120: device->card_type = GM100; break; + case 0x130: device->card_type = GP100; break; default: break; } @@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x124: device->chip = &nv124_chipset; break; case 0x126: device->chip = &nv126_chipset; break; case 0x12b: device->chip = &nv12b_chipset; break; + case 0x130: device->chip = &nv130_chipset; break; + case 0x134: device->chip = &nv134_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", boot0); goto done; @@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func, _(NVKM_ENGINE_CE0 , ce[0]); _(NVKM_ENGINE_CE1 , ce[1]); _(NVKM_ENGINE_CE2 , ce[2]); + _(NVKM_ENGINE_CE3 , ce[3]); + _(NVKM_ENGINE_CE4 , ce[4]); + _(NVKM_ENGINE_CE5 , ce[5]); _(NVKM_ENGINE_CIPHER , cipher); _(NVKM_ENGINE_DISP , disp); _(NVKM_ENGINE_DMAOBJ , dma); @@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, _(NVKM_ENGINE_MSVLD , msvld); _(NVKM_ENGINE_NVENC0 , nvenc[0]); _(NVKM_ENGINE_NVENC1 , nvenc[1]); + _(NVKM_ENGINE_NVENC2 , nvenc[2]); _(NVKM_ENGINE_NVDEC , nvdec); _(NVKM_ENGINE_PM , pm); _(NVKM_ENGINE_SEC , sec); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 62ad0300cfa5..b1b693219db3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -1614,7 +1614,6 @@ nvkm_device_pci_func = { .fini = nvkm_device_pci_fini, .resource_addr = nvkm_device_pci_resource_addr, .resource_size = nvkm_device_pci_resource_size, - .cpu_coherent = !IS_ENABLED(CONFIG_ARM), }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index ec12efb4689a..939682f18788 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -191,13 +191,11 @@ static irqreturn_t nvkm_device_tegra_intr(int irq, void *arg) { struct nvkm_device_tegra *tdev = arg; - struct nvkm_mc *mc = tdev->device.mc; + struct nvkm_device *device = &tdev->device; bool handled = false; - if (likely(mc)) { - nvkm_mc_intr_unarm(mc); - nvkm_mc_intr(mc, &handled); - nvkm_mc_intr_rearm(mc); - } + nvkm_mc_intr_unarm(device); + nvkm_mc_intr(device, &handled); + nvkm_mc_intr_rearm(device); return handled ? 
IRQ_HANDLED : IRQ_NONE; } @@ -247,7 +245,6 @@ nvkm_device_tegra_func = { .fini = nvkm_device_tegra_fini, .resource_addr = nvkm_device_tegra_resource_addr, .resource_size = nvkm_device_tegra_resource_size, - .cpu_coherent = false, }; int @@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, goto remove; tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; + tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id; ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, NVKM_DEVICE_TEGRA, pdev->id, NULL, cfg, dbg, detect, mmio, subdev_mask, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 137066426ed7..79a8f71cf788 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break; case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; + case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break; default: args->v0.family = 0; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index e2a64ed14b22..77a52b54a31e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild @@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o nvkm-y += nvkm/engine/disp/gk110.o nvkm-y += nvkm/engine/disp/gm107.o nvkm-y += nvkm/engine/disp/gm200.o +nvkm-y += nvkm/engine/disp/gp100.o +nvkm-y += nvkm/engine/disp/gp104.o nvkm-y += nvkm/engine/disp/outp.o nvkm-y += nvkm/engine/disp/outpdp.o @@ -45,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o nvkm-y += nvkm/engine/disp/rootgk110.o nvkm-y += nvkm/engine/disp/rootgm107.o nvkm-y += nvkm/engine/disp/rootgm200.o +nvkm-y += nvkm/engine/disp/rootgp100.o +nvkm-y += nvkm/engine/disp/rootgp104.o nvkm-y += nvkm/engine/disp/channv50.o nvkm-y += nvkm/engine/disp/changf119.o nvkm-y += nvkm/engine/disp/dmacnv50.o nvkm-y += nvkm/engine/disp/dmacgf119.o +nvkm-y += nvkm/engine/disp/dmacgp104.o nvkm-y += nvkm/engine/disp/basenv50.o nvkm-y += nvkm/engine/disp/baseg84.o @@ -59,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o nvkm-y += nvkm/engine/disp/basegf119.o nvkm-y += nvkm/engine/disp/basegk104.o nvkm-y += nvkm/engine/disp/basegk110.o +nvkm-y += nvkm/engine/disp/basegp104.o nvkm-y += nvkm/engine/disp/corenv50.o nvkm-y += nvkm/engine/disp/coreg84.o @@ -70,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o nvkm-y += nvkm/engine/disp/coregk110.o nvkm-y += nvkm/engine/disp/coregm107.o nvkm-y += nvkm/engine/disp/coregm200.o +nvkm-y += nvkm/engine/disp/coregp100.o +nvkm-y += nvkm/engine/disp/coregp104.o nvkm-y += nvkm/engine/disp/ovlynv50.o nvkm-y += nvkm/engine/disp/ovlyg84.o @@ -77,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o nvkm-y += nvkm/engine/disp/ovlygt215.o nvkm-y += nvkm/engine/disp/ovlygf119.o nvkm-y += nvkm/engine/disp/ovlygk104.o +nvkm-y += nvkm/engine/disp/ovlygp104.o nvkm-y += nvkm/engine/disp/piocnv50.o nvkm-y += nvkm/engine/disp/piocgf119.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c new file mode 100644 index 000000000000..51688e37c54e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Red Hat Inc. 
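The Tegra interrupt handler above keeps its unarm/dispatch/rearm shape but now calls the MC helpers with the device rather than a cached mc pointer, which lets it drop the NULL check. A condensed sketch of the idiom, using only the helpers visible in that hunk; the handler-data wiring is an assumption for illustration:

#include <linux/interrupt.h>

static irqreturn_t gpu_intr(int irq, void *arg)
{
	struct nvkm_device *device = arg;	/* assumed handler data */
	bool handled = false;

	nvkm_mc_intr_unarm(device);		/* mask further interrupts */
	nvkm_mc_intr(device, &handled);		/* dispatch pending sources */
	nvkm_mc_intr_rearm(device);		/* unmask again */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}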
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp104_disp_base_oclass = { + .base.oclass = GK110_DISP_BASE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_base_new, + .func = &gp104_disp_dmac_func, + .mthd = &gf119_disp_base_chan_mthd, + .chid = 1, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h index aee374884c96..f5f683d9fd20 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h @@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior; extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd; extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd; +extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd; struct nv50_disp_pioc_oclass { int (*ctor)(const struct nv50_disp_chan_func *, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c index 6b1dc703dac7..21fbf89b6319 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c @@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = { } }; -static void +void gf119_disp_core_fini(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c new file mode 100644 index 000000000000..d5dff6619d4d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c @@ -0,0 +1,38 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp100_disp_core_oclass = { + .base.oclass = GP100_DISP_CORE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_core_new, + .func = &gf119_disp_core_func, + .mthd = &gk104_disp_core_chan_mthd, + .chid = 0, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c new file mode 100644 index 000000000000..6922f4007b61 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c @@ -0,0 +1,78 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <subdev/timer.h> + +#include <nvif/class.h> + +static int +gp104_disp_core_init(struct nv50_disp_dmac *chan) +{ + struct nv50_disp *disp = chan->base.root->disp; + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + + /* enable error reporting */ + nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001); + + /* initialise channel for dma command submission */ + nvkm_wr32(device, 0x611494, chan->push); + nvkm_wr32(device, 0x611498, 0x00010000); + nvkm_wr32(device, 0x61149c, 0x00000001); + nvkm_mask(device, 0x610490, 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000, 0x00000000); + nvkm_wr32(device, 0x610490, 0x01000013); + + /* wait for it to go inactive */ + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x610490) & 0x80000000)) + break; + ) < 0) { + nvkm_error(subdev, "core init: %08x\n", + nvkm_rd32(device, 0x610490)); + return -EBUSY; + } + + return 0; +} + +const struct nv50_disp_dmac_func +gp104_disp_core_func = { + .init = gp104_disp_core_init, + .fini = gf119_disp_core_fini, + .bind = gf119_disp_dmac_bind, +}; + +const struct nv50_disp_dmac_oclass +gp104_disp_core_oclass = { + .base.oclass = GP104_DISP_CORE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_core_new, + .func = &gp104_disp_core_func, + .mthd = &gk104_disp_core_chan_mthd, + .chid = 0, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c index 876b14549a58..a57f7cef307a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c @@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, chan->base.chid << 27 | 0x00000001); } -static void +void gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c new file mode 100644 index 000000000000..ad24c2c57696 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c @@ -0,0 +1,66 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
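gp104_disp_core_init() above leans on nouveau's nvkm_msec() statement-expression macro, which polls until the embedded condition executes break or the timeout (in milliseconds) expires, evaluating negative on timeout. A stripped-down sketch of the pattern, reusing the register offset and busy bit from the hunk; the wrapper function itself is hypothetical:

/* Wait up to two seconds for the core channel to go idle. */
static int wait_core_idle(struct nvkm_device *device)
{
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
			break;
	) < 0)
		return -EBUSY;	/* timed out, channel still busy */
	return 0;
}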
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <subdev/timer.h> + +static int +gp104_disp_dmac_init(struct nv50_disp_dmac *chan) +{ + struct nv50_disp *disp = chan->base.root->disp; + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + int chid = chan->base.chid; + + /* enable error reporting */ + nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + + /* initialise channel for dma command submission */ + nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push); + nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000); + nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001); + nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); + nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); + + /* wait for it to go inactive */ + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) + break; + ) < 0) { + nvkm_error(subdev, "ch %d init: %08x\n", chid, + nvkm_rd32(device, 0x610490 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +const struct nv50_disp_dmac_func +gp104_disp_dmac_func = { + .init = gp104_disp_dmac_init, + .fini = gf119_disp_dmac_fini, + .bind = gf119_disp_dmac_bind, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h index fc84eb8b5c45..43ac05857853 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h @@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); extern const struct nv50_disp_dmac_func nv50_disp_core_func; extern const struct nv50_disp_dmac_func gf119_disp_dmac_func; +void gf119_disp_dmac_fini(struct nv50_disp_dmac *); int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); extern const struct nv50_disp_dmac_func gf119_disp_core_func; +void gf119_disp_core_fini(struct nv50_disp_dmac *); + +extern const struct nv50_disp_dmac_func gp104_disp_dmac_func; struct nv50_disp_dmac_oclass { int (*ctor)(const struct nv50_disp_dmac_func *, @@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass; extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass; extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass; + +extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass; + +extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass; +extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass; +extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index 5dd34382f55a..29e84b241cca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c @@ -76,12 +76,10 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, mask |= 0x0001 << or; mask |= 0x0100 << head; - list_for_each_entry(outp, &disp->base.outp, head) { if ((outp->info.hasht & 0xff) == type && (outp->info.hashm & mask) == mask) { - *data = nvbios_outp_match(bios, outp->info.hasht, - outp->info.hashm, + *data = nvbios_outp_match(bios, outp->info.hasht, mask, ver, hdr, cnt, len, info); if (!*data) return NULL; @@ -415,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work) nvkm_wr32(device, 
0x6101d0, 0x80000000); } -static void +void gf119_disp_intr_error(struct nv50_disp *disp, int chid) { struct nvkm_subdev *subdev = &disp->base.engine.subdev; @@ -463,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp) u32 stat = nvkm_rd32(device, 0x61009c); int chid = ffs(stat) - 1; if (chid >= 0) - gf119_disp_intr_error(disp, chid); + disp->func->intr_error(disp, chid); intr &= ~0x00000002; } @@ -507,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, static const struct nv50_disp_func gf119_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gf119_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c index a86384b8e388..37f145cf30d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gk104_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gk104_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c index 0d574c7e594a..e14ac946608c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gk110_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gk110_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index f4b9cf8574be..2f2437cc5891 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gm107_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gm107_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c index 67eec8620719..9f368d4ee61e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gm200_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gm200_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c new file mode 100644 index 000000000000..4f81bf31435e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Red Hat Inc. 
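The hunks above (gf119 through gm200) turn the display channel-error handler into a per-chipset intr_error hook, so GP104 can substitute a handler for its relocated error registers while every older chipset keeps gf119_disp_intr_error. Condensed from the gf119_disp_intr() change shown, the dispatch site now reads:

	u32 stat = nvkm_rd32(device, 0x61009c);	/* per-channel error status */
	int chid = ffs(stat) - 1;
	if (chid >= 0)
		disp->func->intr_error(disp, chid);	/* chipset-specific hook */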
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "rootnv50.h" + +static const struct nv50_disp_func +gp100_disp = { + .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, + .uevent = &gf119_disp_chan_uevent, + .super = gf119_disp_intr_supervisor, + .root = &gp100_disp_root_oclass, + .head.vblank_init = gf119_disp_vblank_init, + .head.vblank_fini = gf119_disp_vblank_fini, + .head.scanoutpos = gf119_disp_root_scanoutpos, + .outp.internal.crt = nv50_dac_output_new, + .outp.internal.tmds = nv50_sor_output_new, + .outp.internal.lvds = nv50_sor_output_new, + .outp.internal.dp = gm200_sor_dp_new, + .dac.nr = 3, + .dac.power = nv50_dac_power, + .dac.sense = nv50_dac_sense, + .sor.nr = 4, + .sor.power = nv50_sor_power, + .sor.hda_eld = gf119_hda_eld, + .sor.hdmi = gk104_hdmi_ctrl, + .sor.magic = gm200_sor_magic, +}; + +int +gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +{ + return gf119_disp_new_(&gp100_disp, device, index, pdisp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c new file mode 100644 index 000000000000..3bf3380336e4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c @@ -0,0 +1,81 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "rootnv50.h" + +static void +gp104_disp_intr_error(struct nv50_disp *disp, int chid) +{ + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12)); + u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12)); + u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12)); + + nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n", + chid, (mthd & 0x0000ffc), data, mthd, unkn); + + if (chid < ARRAY_SIZE(disp->chan)) { + switch (mthd & 0xffc) { + case 0x0080: + nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR); + break; + default: + break; + } + } + + nvkm_wr32(device, 0x61009c, (1 << chid)); + nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000); +} + +static const struct nv50_disp_func +gp104_disp = { + .intr = gf119_disp_intr, + .intr_error = gp104_disp_intr_error, + .uevent = &gf119_disp_chan_uevent, + .super = gf119_disp_intr_supervisor, + .root = &gp104_disp_root_oclass, + .head.vblank_init = gf119_disp_vblank_init, + .head.vblank_fini = gf119_disp_vblank_fini, + .head.scanoutpos = gf119_disp_root_scanoutpos, + .outp.internal.crt = nv50_dac_output_new, + .outp.internal.tmds = nv50_sor_output_new, + .outp.internal.lvds = nv50_sor_output_new, + .outp.internal.dp = gm200_sor_dp_new, + .dac.nr = 3, + .dac.power = nv50_dac_power, + .dac.sense = nv50_dac_sense, + .sor.nr = 4, + .sor.power = nv50_sor_power, + .sor.hda_eld = gf119_hda_eld, + .sor.hdmi = gk104_hdmi_ctrl, + .sor.magic = gm200_sor_magic, +}; + +int +gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +{ + return gf119_disp_new_(&gp104_disp, device, index, pdisp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index fcb1b0c46d64..fbb8c7dc18fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -32,6 +32,7 @@ #include <subdev/bios/init.h> #include <subdev/bios/pll.h> #include <subdev/devinit.h> +#include <subdev/timer.h> static const struct nvkm_disp_oclass * nv50_disp_root_(struct nvkm_disp *base) @@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, list_for_each_entry(outp, &disp->base.outp, head) { if ((outp->info.hasht & 0xff) == type && (outp->info.hashm & mask) == mask) { - *data = nvbios_outp_match(bios, outp->info.hasht, - outp->info.hashm, + *data = nvbios_outp_match(bios, outp->info.hasht, mask, ver, hdr, cnt, len, info); if (!*data) return NULL; @@ -426,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) return outp; } +static bool +nv50_disp_dptmds_war(struct nvkm_device *device) +{ + switch (device->chipset) { + case 0x94: + case 0x96: + case 0x98: + case 0xaa: + case 0xac: + return true; + default: + break; + } + return false; +} + +static bool +nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + const u32 soff = __ffs(outp->or) * 0x800; + if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) { + switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) { + case 0x00000000: + case 0x00030000: + return true; + default: + break; + } + } + return false; + +} + +static void +nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + 
const u32 soff = __ffs(outp->or) * 0x800; + + if (!nv50_disp_dptmds_war_needed(disp, outp)) + return; + + nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000); + nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000); + nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001); + + nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000); + nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000); + nvkm_usec(device, 400, NVKM_DELAY); + nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000); + nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000); + + if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) { + u32 seqctl = nvkm_rd32(device, 0x61c030 + soff); + u32 pu_pc = seqctl & 0x0000000f; + nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000); + } +} + +static void +nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + const u32 soff = __ffs(outp->or) * 0x800; + u32 sorpwr; + + if (!nv50_disp_dptmds_war_needed(disp, outp)) + return; + + sorpwr = nvkm_rd32(device, 0x61c004 + soff); + if (sorpwr & 0x00000001) { + u32 seqctl = nvkm_rd32(device, 0x61c030 + soff); + u32 pd_pc = (seqctl & 0x00000f00) >> 8; + u32 pu_pc = seqctl & 0x0000000f; + + nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000); + + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000)) + break; + ); + nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000)) + break; + ); + + nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000); + nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000); + } + + nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000); + nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000); + + if (sorpwr & 0x00000001) { + nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001); + } +} + +static void +nv50_disp_update_sppll1(struct nv50_disp *disp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + bool used = false; + int sor; + + if (!nv50_disp_dptmds_war(device)) + return; + + for (sor = 0; sor < disp->func->sor.nr; sor++) { + u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800)); + switch (clksor & 0x03000000) { + case 0x02000000: + case 0x03000000: + used = true; + break; + default: + break; + } + } + + if (used) + return; + + nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000); +} + static void nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head) { @@ -679,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head) nvkm_mask(device, hreg, 0x0000000f, hval); nvkm_mask(device, oreg, mask, oval); + + nv50_disp_dptmds_war_2(disp, &outp->info); } /* If programming a TMDS output on a SOR that can also be configured for @@ -720,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head) if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS) nv50_disp_intr_unk40_0_tmds(disp, &outp->info); + nv50_disp_dptmds_war_3(disp, &outp->info); } void @@ -767,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work) continue; nv50_disp_intr_unk40_0(disp, head); } + nv50_disp_update_sppll1(disp); } nvkm_wr32(device, 0x610030, 0x80000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index aecebd8717e5..1e1de6bfe85a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -68,6 +68,7 @@ struct 
nv50_disp_func_outp { struct nv50_disp_func { void (*intr)(struct nv50_disp *); + void (*intr_error)(struct nv50_disp *, int chid); const struct nvkm_event_func *uevent; void (*super)(struct work_struct *); @@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int); void gf119_disp_vblank_fini(struct nv50_disp *, int); void gf119_disp_intr(struct nv50_disp *); void gf119_disp_intr_supervisor(struct work_struct *); +void gf119_disp_intr_error(struct nv50_disp *, int); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c index 2e2dc0641ef2..2f0220b39f34 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c @@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = { } }; -static const struct nv50_disp_chan_mthd +const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd = { .name = "Overlay", .addr = 0x001000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c new file mode 100644 index 000000000000..97e2dd2d908e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c @@ -0,0 +1,38 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp104_disp_ovly_oclass = { + .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_ovly_new, + .func = &gp104_disp_dmac_func, + .mthd = &gk104_disp_ovly_chan_mthd, + .chid = 5, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c new file mode 100644 index 000000000000..ac8fdd728ec6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c @@ -0,0 +1,58 @@ +/* + * Copyright 2015 Red Hat Inc. 
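The overlay class above shows the mix-and-match nature of these descriptors: GP104 keeps GK104's method list (what clients may submit) but swaps in the new gp104_disp_dmac_func, since only the channel setup registers moved. What one descriptor bundles, field by field, with values taken from the hunk:

/* gp104_disp_ovly_oclass:
 *  .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA -- class userspace asks for
 *  .ctor        = nv50_disp_ovly_new             -- object constructor
 *  .func        = &gp104_disp_dmac_func          -- GP104 init/fini/bind
 *  .mthd        = &gk104_disp_ovly_chan_mthd     -- method list, unchanged since GK104
 *  .chid        = 5                              -- base channel index for overlays
 */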
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "rootnv50.h" +#include "dmacnv50.h" + +#include <nvif/class.h> + +static const struct nv50_disp_root_func +gp100_disp_root = { + .init = gf119_disp_root_init, + .fini = gf119_disp_root_fini, + .dmac = { + &gp100_disp_core_oclass, + &gk110_disp_base_oclass, + &gk104_disp_ovly_oclass, + }, + .pioc = { + &gk104_disp_oimm_oclass, + &gk104_disp_curs_oclass, + }, +}; + +static int +gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) +{ + return nv50_disp_root_new_(&gp100_disp_root, disp, oclass, + data, size, pobject); +} + +const struct nvkm_disp_oclass +gp100_disp_root_oclass = { + .base.oclass = GP100_DISP, + .base.minver = -1, + .base.maxver = -1, + .ctor = gp100_disp_root_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c new file mode 100644 index 000000000000..8443e04dc626 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
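gp100_disp_root above is where those per-chipset choices become visible to userspace: the root object's dmac[] and pioc[] tables enumerate exactly which channel classes may be created. GP100 needs only a new core class and reuses everything else, while GP104 (just below) swaps all three DMA classes for the gp104 variants. Summarising the GP100 table from the hunk:

/* Channel classes exposed by the GP100 root:
 *  dmac[0] = gp100_disp_core_oclass  -- new core class, chid 0
 *  dmac[1] = gk110_disp_base_oclass  -- base channels, reused
 *  dmac[2] = gk104_disp_ovly_oclass  -- overlays, reused
 *  pioc    = gk104 oimm + curs       -- PIO channels, reused
 */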
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "rootnv50.h" +#include "dmacnv50.h" + +#include <nvif/class.h> + +static const struct nv50_disp_root_func +gp104_disp_root = { + .init = gf119_disp_root_init, + .fini = gf119_disp_root_fini, + .dmac = { + &gp104_disp_core_oclass, + &gp104_disp_base_oclass, + &gp104_disp_ovly_oclass, + }, + .pioc = { + &gk104_disp_oimm_oclass, + &gk104_disp_curs_oclass, + }, +}; + +static int +gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) +{ + return nv50_disp_root_new_(&gp104_disp_root, disp, oclass, + data, size, pobject); +} + +const struct nvkm_disp_oclass +gp104_disp_root_oclass = { + .base.oclass = GP104_DISP, + .base.minver = -1, + .base.maxver = -1, + .ctor = gp104_disp_root_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h index cb449ed8d92c..ad00f1724b72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h @@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass; extern const struct nvkm_disp_oclass gk110_disp_root_oclass; extern const struct nvkm_disp_oclass gm107_disp_root_oclass; extern const struct nvkm_disp_oclass gm200_disp_root_oclass; +extern const struct nvkm_disp_oclass gp100_disp_root_oclass; +extern const struct nvkm_disp_oclass gp104_disp_root_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 65e5d291ecda..98651a43bc12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o nvkm-y += nvkm/engine/fifo/gm107.o nvkm-y += nvkm/engine/fifo/gm200.o nvkm-y += nvkm/engine/fifo/gm20b.o +nvkm-y += nvkm/engine/fifo/gp100.o nvkm-y += nvkm/engine/fifo/chan.o nvkm-y += nvkm/engine/fifo/channv50.o @@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o nvkm-y += nvkm/engine/fifo/gpfifogk104.o nvkm-y += nvkm/engine/fifo/gpfifogk110.o nvkm-y += nvkm/engine/fifo/gpfifogm200.o +nvkm-y += nvkm/engine/fifo/gpfifogp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h index e06f4d46f802..230f64e5f731 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h @@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *, extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass; extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass; extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass; +extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 743f3a189f28..103c0afaaa6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) } if (eu == NULL) { - enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit); + enum nvkm_devidx engidx = nvkm_top_fault(device, unit); if (engidx < NVKM_SUBDEV_NR) { const char *src = nvkm_subdev_name[engidx]; char *dst = en; @@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) struct gk104_fifo 
*fifo = gk104_fifo(base); struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_device *device = subdev->device; - struct nvkm_top *top = device->top; int engn, runl, pbid, ret, i, j; enum nvkm_devidx engidx; u32 *map; @@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) /* Determine runlist configuration from topology device info. */ i = 0; - while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) { + while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { /* Determine which PBDMA handles requests for this engine. */ for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { if (map[j] & (1 << runl)) { @@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) } } - nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n", - engn, runl, pbid); + nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", + engn, runl, pbid, nvkm_subdev_name[engidx]); fifo->engine[engn].engine = nvkm_device_engine(device, engidx); fifo->engine[engn].runl = runl; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c new file mode 100644 index 000000000000..eff83f7fb705 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c @@ -0,0 +1,67 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
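A recurring theme in these hunks (nvkm_mc_reset and nvkm_mc_intr_* earlier, nvkm_top_fault and nvkm_top_engine here, nvkm_mc_unk260 in the grctx hunks below) is moving the subdev helpers from taking the subdev pointer to taking the device, which also lets callers drop their guards, as the Tegra IRQ hunk showed. A before/after sketch of the calling convention; that the wrappers tolerate a missing subdev internally is an inference from the removed checks:

	/* before: caller dereferences and guards the subdev itself */
	struct nvkm_mc *mc = device->mc;
	if (likely(mc))
		nvkm_mc_reset(mc, subdev->index);

	/* after: pass the device; the helper is assumed to cope
	 * with a missing MC on its own */
	nvkm_mc_reset(device, subdev->index);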
+ * + * Authors: Ben Skeggs + */ +#include "gk104.h" +#include "changk104.h" + +static const struct nvkm_enum +gp100_fifo_fault_engine[] = { + { 0x01, "DISPLAY" }, + { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, + { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, + { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, + { 0x06, "HOST0" }, + { 0x07, "HOST1" }, + { 0x08, "HOST2" }, + { 0x09, "HOST3" }, + { 0x0a, "HOST4" }, + { 0x0b, "HOST5" }, + { 0x0c, "HOST6" }, + { 0x0d, "HOST7" }, + { 0x0e, "HOST8" }, + { 0x0f, "HOST9" }, + { 0x10, "HOST10" }, + { 0x13, "PERF" }, + { 0x17, "PMU" }, + { 0x18, "PTP" }, + { 0x1f, "PHYSICAL" }, + {} +}; + +static const struct gk104_fifo_func +gp100_fifo = { + .fault.engine = gp100_fifo_fault_engine, + .fault.reason = gk104_fifo_fault_reason, + .fault.hubclient = gk104_fifo_fault_hubclient, + .fault.gpcclient = gk104_fifo_fault_gpcclient, + .chan = { + &gp100_fifo_gpfifo_oclass, + NULL + }, +}; + +int +gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +{ + return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c new file mode 100644 index 000000000000..1530a9217aea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c @@ -0,0 +1,34 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
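gp100_fifo_fault_engine above is an nvkm_enum table: each entry pairs a fault unit id with a printable name and, optionally, the subdev or engine to blame. Decoding a fault interrupt then reduces to a table lookup. nvkm_enum_find() is nouveau's existing helper for this; the convenience wrapper around it below is hypothetical:

#include <core/enum.h>

/* Hypothetical helper: map a fault unit id reported by the MMU to a
 * printable name via the table above, with a safe fallback for ids
 * the table does not know. */
static const char *
gp100_fault_engine_name(u32 unit)
{
	const struct nvkm_enum *en =
		nvkm_enum_find(gp100_fifo_fault_engine, unit);

	return en ? en->name : "UNKNOWN";
}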
+ * + * Authors: Ben Skeggs + */ +#include "changk104.h" + +#include <nvif/class.h> + +const struct nvkm_fifo_chan_oclass +gp100_fifo_gpfifo_oclass = { + .base.oclass = PASCAL_CHANNEL_GPFIFO_A, + .base.minver = 0, + .base.maxver = 0, + .ctor = gk104_fifo_gpfifo_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 290ed0db8047..f1c494182248 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o nvkm-y += nvkm/engine/gr/gm107.o nvkm-y += nvkm/engine/gr/gm200.o nvkm-y += nvkm/engine/gr/gm20b.o +nvkm-y += nvkm/engine/gr/gp100.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o @@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o nvkm-y += nvkm/engine/gr/ctxgm107.o nvkm-y += nvkm/engine/gr/ctxgm200.o nvkm-y += nvkm/engine/gr/ctxgm20b.o +nvkm-y += nvkm/engine/gr/ctxgp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index b02d8f50ea6a..bc77eea351a5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) const struct gf100_grctx_func *grctx = gr->func->grctx; u32 idle_timeout; - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index ac895edce164..52048b5a5274 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *); extern const struct gf100_grctx_func gm20b_grctx; +extern const struct gf100_grctx_func gp100_grctx; + /* context init value lists */ extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index f521de11a299..c925ade5880e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c @@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); } const struct gf100_grctx_func diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 9ba337778ef5..c46b3fdf7203 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - nvkm_mc_unk260(device->mc, 
0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); nvkm_mask(device, 0x418800, 0x00200000, 0x00200000); nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c new file mode 100644 index 000000000000..3d1ae7ddf7dd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c @@ -0,0 +1,179 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
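All of the grctx hunks in this region make the same mechanical change: nvkm_mc_unk260() now takes the device instead of device->mc, so the subdev dereference, and any tolerance for a missing MC, sits behind the helper rather than in every caller. A sketch of that wrapper shape, with the func->unk260 hook assumed from the call sites; the real helper body may differ:

/* Sketch only: resolve device->mc inside the wrapper and quietly
 * no-op when the subdev or its unk260 hook is absent. */
void
example_mc_unk260(struct nvkm_device *device, u32 data)
{
	struct nvkm_mc *mc = device->mc;

	if (mc && mc->func->unk260)
		mc->func->unk260(mc, data);
}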
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "ctxgf100.h" + +#include <subdev/fb.h> + +/******************************************************************************* + * PGRAPH context implementation + ******************************************************************************/ + +static void +gp100_grctx_generate_pagepool(struct gf100_grctx *info) +{ + const struct gf100_grctx_func *grctx = info->gr->func->grctx; + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access); + mmio_refn(info, 0x40800c, 0x00000000, s, b); + mmio_wr32(info, 0x408010, 0x80000000); + mmio_refn(info, 0x419004, 0x00000000, s, b); + mmio_wr32(info, 0x419008, 0x00000000); +} + +static void +gp100_grctx_generate_attrib(struct gf100_grctx *info) +{ + struct gf100_gr *gr = info->gr; + const struct gf100_grctx_func *grctx = gr->func->grctx; + const u32 alpha = grctx->alpha_nr; + const u32 attrib = grctx->attrib_nr; + const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max); + const u32 size = roundup(gr->tpc_total * pertpc, 0x80); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size, (1 << s), access); + const int max_batches = 0xffff; + u32 ao = 0; + u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total; + int gpc, ppc, n = 0; + + mmio_refn(info, 0x418810, 0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_refn(info, 0x419c2c, 0x10000000, s, b); + mmio_refn(info, 0x419b00, 0x00000000, s, b); + mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7); + mmio_wr32(info, 0x405830, attrib); + mmio_wr32(info, 0x40585c, alpha); + mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { + const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; + const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; + const u32 u = 0x418ea0 + (n * 0x04); + const u32 o = PPC_UNIT(gpc, ppc, 0); + if (!(gr->ppc_mask[gpc] & (1 << ppc))) + continue; + mmio_wr32(info, o + 0xc0, bs); + mmio_wr32(info, o + 0xf4, bo); + mmio_wr32(info, o + 0xf0, bs); + bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, o + 0xe4, as); + mmio_wr32(info, o + 0xf8, ao); + ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, u, bs); + } + } + + mmio_wr32(info, 0x418eec, 0x00000000); + mmio_wr32(info, 0x41befc, 0x00000000); +} + +static void +gp100_grctx_generate_405b60(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4); + u32 dist[TPC_MAX / 4] = {}; + u32 gpcs[GPC_MAX * 2] = {}; + u8 tpcnr[GPC_MAX]; + int tpc, gpc, i; + + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); + + /* won't result in the same distribution as the binary driver where + * some of the gpcs have more tpcs than others, but this shall do + * for the moment. the code for earlier gpus has this issue too. 
+ */ + for (gpc = -1, i = 0; i < gr->tpc_total; i++) { + do { + gpc = (gpc + 1) % gr->gpc_nr; + } while(!tpcnr[gpc]); + tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; + + dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8); + gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8); + } + + for (i = 0; i < dist_nr; i++) + nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]); + for (i = 0; i < gr->gpc_nr * 2; i++) + nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]); +} + +static void +gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const struct gf100_grctx_func *grctx = gr->func->grctx; + u32 idle_timeout, tmp; + int i; + + gf100_gr_mmio(gr, gr->fuc_sw_ctx); + + idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000); + + grctx->pagepool(info); + grctx->bundle(info); + grctx->attrib(info); + grctx->unkn(gr); + + gm200_grctx_generate_tpcid(gr); + gf100_grctx_generate_r406028(gr); + gk104_grctx_generate_r418bb8(gr); + + for (i = 0; i < 8; i++) + nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000); + nvkm_wr32(device, 0x406500, 0x00000000); + + nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr); + + for (tmp = 0, i = 0; i < gr->gpc_nr; i++) + tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5); + nvkm_wr32(device, 0x4041c4, tmp); + + gp100_grctx_generate_405b60(gr); + + gf100_gr_icmd(gr, gr->fuc_bundle); + nvkm_wr32(device, 0x404154, idle_timeout); + gf100_gr_mthd(gr, gr->fuc_method); +} + +const struct gf100_grctx_func +gp100_grctx = { + .main = gp100_grctx_generate_main, + .unkn = gk104_grctx_generate_unkn, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x1080, + .pagepool = gp100_grctx_generate_pagepool, + .pagepool_size = 0x20000, + .attrib = gp100_grctx_generate_attrib, + .attrib_nr_max = 0x660, + .attrib_nr = 0x440, + .alpha_nr_max = 0xc00, + .alpha_nr = 0x800, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index ae9ab5b1ab97..157919c788e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1457,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) struct nvkm_device *device = subdev->device; struct nvkm_secboot *sb = device->secboot; int i; + int ret = 0; if (gr->firmware) { /* load fuc microcode */ - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); /* securely-managed falcons must be reset using secure boot */ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) - nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); + ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); else gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d); + if (ret) + return ret; + if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) - nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); + ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); else gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad); + if (ret) + return ret; - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); /* start both of them running */ nvkm_wr32(device, 0x409840, 0xffffffff); @@ -1576,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) } /* load HUB microcode */ - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); nvkm_wr32(device, 0x4091c0, 0x01000000); for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++) nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); @@ -1599,7 
+1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) nvkm_wr32(device, 0x41a188, i >> 6); nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); } - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); /* load register lists */ gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 2b98abdb9270..268b8d60ff73 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[]; extern const struct gf100_gr_init gm107_gr_init_wwdx_0[]; extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; void gm107_gr_init_bios(struct gf100_gr *); + +void gm200_gr_init_gpc_mmu(struct gf100_gr *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 4ca8ed15191c..de8b806b88fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) if (ret) return ret; - return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c index 4dfa4513bb6c..6435f1257572 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c @@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr) return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c); } -static void +void gm200_gr_init_gpc_mmu(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c new file mode 100644 index 000000000000..26ad79def0ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -0,0 +1,171 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
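Back in gp100_grctx_generate_405b60() above, TPCs are handed out strictly round-robin across GPCs and packed one (gpc, tpc) pair per byte into the 0x405b60 words, which, as its comment concedes, only approximates the proprietary driver's layout when GPCs carry unequal TPC counts. The userspace sketch below restates just the packing arithmetic with a small worked case; the counts are illustrative:

#include <stdio.h>

/* Worked case: 2 GPCs carrying 3 and 2 TPCs.  Assignment order is
 * (0,0) (1,0) (0,1) (1,1) (0,2), so the loop yields
 * dist[0] = 0x11011000 and dist[1] = 0x00000002. */
int main(void)
{
	const int gpc_nr = 2, tpc_nr[] = { 3, 2 }, tpc_total = 5;
	int tpcnr[2] = { 3, 2 };
	unsigned int dist[2] = { 0 };
	int gpc = -1, tpc, i;

	for (i = 0; i < tpc_total; i++) {
		/* advance round-robin, skipping exhausted GPCs */
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		/* tpc index = how many this GPC has already received */
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;

		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
	}

	for (i = 0; i < 2; i++)
		printf("dist[%d] = 0x%08x\n", i, dist[i]);
	return 0;
}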
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ctxgf100.h" + +#include <nvif/class.h> + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +gp100_gr_init_rop_active_fbps(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */ + const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f; + nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */ + nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */ +} + +static int +gp100_gr_init(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); + u32 data[TPC_MAX / 8] = {}; + u8 tpcnr[GPC_MAX]; + int gpc, tpc, rop; + int i; + + gr->func->init_gpc_mmu(gr); + + gf100_gr_mmio(gr, gr->fuc_sw_nonctx); + + nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); + + memset(data, 0x00, sizeof(data)); + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); + for (i = 0, gpc = -1; i < gr->tpc_total; i++) { + do { + gpc = (gpc + 1) % gr->gpc_nr; + } while (!tpcnr[gpc]); + tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; + + data[i / 8] |= tpc << ((i % 8) * 4); + } + + nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); + nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); + nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); + nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), + gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | + gr->tpc_total); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); + } + + nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); + nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); + nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); + + gr->func->init_rop_active_fbps(gr); + + nvkm_wr32(device, 0x400500, 0x00010001); + nvkm_wr32(device, 0x400100, 0xffffffff); + nvkm_wr32(device, 0x40013c, 0xffffffff); + nvkm_wr32(device, 0x400124, 0x00000002); + nvkm_wr32(device, 0x409c24, 0x000f0002); + nvkm_wr32(device, 0x405848, 0xc0000000); + nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001); + nvkm_wr32(device, 0x404000, 0xc0000000); + nvkm_wr32(device, 0x404600, 0xc0000000); + nvkm_wr32(device, 0x408030, 0xc0000000); + nvkm_wr32(device, 0x404490, 0xc0000000); + nvkm_wr32(device, 0x406018, 0xc0000000); + nvkm_wr32(device, 0x407020, 0x40000000); + nvkm_wr32(device, 0x405840, 0xc0000000); + nvkm_wr32(device, 0x405844, 0x00ffffff); + nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); + + nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000); + nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000); + + gr->func->init_ppc_exceptions(gr); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); + for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); + 
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105); + } + nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); + nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); + } + + for (rop = 0; rop < gr->rop_nr; rop++) { + nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); + nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); + nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); + nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); + } + + nvkm_wr32(device, 0x400108, 0xffffffff); + nvkm_wr32(device, 0x400138, 0xffffffff); + nvkm_wr32(device, 0x400118, 0xffffffff); + nvkm_wr32(device, 0x400130, 0xffffffff); + nvkm_wr32(device, 0x40011c, 0xffffffff); + nvkm_wr32(device, 0x400134, 0xffffffff); + + gf100_gr_zbc_init(gr); + + return gf100_gr_init_ctxctl(gr); +} + +static const struct gf100_gr_func +gp100_gr = { + .init = gp100_gr_init, + .init_gpc_mmu = gm200_gr_init_gpc_mmu, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .rops = gm200_gr_rops, + .ppc_nr = 2, + .grctx = &gp100_grctx, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, PASCAL_A, &gf100_fermi }, + { -1, -1, PASCAL_COMPUTE_A }, + {} + } +}; + +int +gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gm200_gr_new_(&gp100_gr, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c index 69de8c6259fe..f1e15a4d4f64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c @@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, nvkm_wo32(chan->inst, i, 0x00040004); for (i = 0x1f18; i <= 0x3088 ; i += 16) { nvkm_wo32(chan->inst, i + 0, 0x10700ff9); - nvkm_wo32(chan->inst, i + 1, 0x0436086c); - nvkm_wo32(chan->inst, i + 2, 0x000c001b); + nvkm_wo32(chan->inst, i + 4, 0x0436086c); + nvkm_wo32(chan->inst, i + 8, 0x000c001b); } for (i = 0x30b8; i < 0x30c8; i += 4) nvkm_wo32(chan->inst, i, 0x0000ffff); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c index 2207dac23981..300f5ed5de0b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c @@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, nvkm_wo32(chan->inst, i, 0x00040004); for (i = 0x15ac; i <= 0x271c ; i += 16) { nvkm_wo32(chan->inst, i + 0, 0x10700ff9); - nvkm_wo32(chan->inst, i + 1, 0x0436086c); - nvkm_wo32(chan->inst, i + 2, 0x000c001b); + nvkm_wo32(chan->inst, i + 4, 0x0436086c); + nvkm_wo32(chan->inst, i + 8, 0x000c001b); } for (i = 0x274c; i < 0x275c; i += 4) nvkm_wo32(chan->inst, i, 0x0000ffff); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index e15b9627b07e..f3c30b2a788e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -26,6 +26,49 @@ #include <subdev/bios.h> #include <subdev/bios/bmp.h> #include <subdev/bios/bit.h> +#include <subdev/bios/image.h> + +static bool +nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) +{ + u32 p = *addr; + + if (*addr > bios->image0_size && bios->imaged_addr) { + *addr -= 
bios->image0_size; + *addr += bios->imaged_addr; + } + + if (unlikely(*addr + size >= bios->size)) { + nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); + return false; + } + + return true; +} + +u8 +nvbios_rd08(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 1))) + return bios->data[addr]; + return 0x00; +} + +u16 +nvbios_rd16(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 2))) + return get_unaligned_le16(&bios->data[addr]); + return 0x0000; +} + +u32 +nvbios_rd32(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 4))) + return get_unaligned_le32(&bios->data[addr]); + return 0x00000000; +} u8 nvbios_checksum(const u8 *data, int size) @@ -100,8 +143,9 @@ int nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) { struct nvkm_bios *bios; + struct nvbios_image image; struct bit_entry bit_i; - int ret; + int ret, idx = 0; if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL))) return -ENOMEM; @@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) if (ret) return ret; + /* Some tables have weird pointers that need adjustment before + * they're dereferenced. I'm not entirely sure why... + */ + if (nvbios_image(bios, idx++, &image)) { + bios->image0_size = image.size; + while (nvbios_image(bios, idx++, &image)) { + if (image.type == 0xe0) { + bios->imaged_addr = image.base; + break; + } + } + } + /* detect type of vbios we're dealing with */ bios->bmp_offset = nvbios_findstr(bios->data, bios->size, "\xff\x7f""NV\0", 5); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c index 05332476354a..d89e78c4e689 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c @@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) case 0x30: case 0x40: case 0x41: + case 0x42: *hdr = nvbios_rd08(bios, data + 0x01); *len = nvbios_rd08(bios, data + 0x02); *cnt = nvbios_rd08(bios, data + 0x03); @@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx, break; case 0x40: case 0x41: + case 0x42: *hdr = nvbios_rd08(bios, data + 0x04); *cnt = 0; *len = 0; @@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx, break; case 0x40: case 0x41: + case 0x42: info->flags = nvbios_rd08(bios, data + 0x04); info->script[0] = nvbios_rd16(bios, data + 0x05); info->script[1] = nvbios_rd16(bios, data + 0x07); @@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, info->pe = nvbios_rd08(bios, data + 0x02); info->tx_pu = nvbios_rd08(bios, data + 0x03); break; + case 0x42: + info->dc = nvbios_rd08(bios, data + 0x00); + info->pe = nvbios_rd08(bios, data + 0x01); + info->tx_pu = nvbios_rd08(bios, data + 0x02); + break; default: data = 0x0000; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c index 74b14cf09308..1dbff7aeafec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c @@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image) bool nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image) { + u32 imaged_addr = bios->imaged_addr; memset(image, 0x00, sizeof(*image)); + bios->imaged_addr = 0; do { image->base += image->size; - if (image->last || !nvbios_imagen(bios, image)) + if (image->last || 
!nvbios_imagen(bios, image)) { + bios->imaged_addr = imaged_addr; return false; + } } while(idx--); + bios->imaged_addr = imaged_addr; return true; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c index 91a7dc56e406..2ca23a9157ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c @@ -77,15 +77,17 @@ g84_pll_mapping[] = { {} }; -static u16 +static u32 pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { struct bit_entry bit_C; - u16 data = 0x0000; + u32 data = 0x0000; if (!bit_entry(bios, 'C', &bit_C)) { if (bit_C.version == 1 && bit_C.length >= 10) data = nvbios_rd16(bios, bit_C.offset + 8); + if (bit_C.version == 2 && bit_C.length >= 4) + data = nvbios_rd32(bios, bit_C.offset + 0); if (data) { *ver = nvbios_rd08(bios, data + 0); *hdr = nvbios_rd08(bios, data + 1); @@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios) } } -static u16 +static u32 pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) { struct pll_mapping *map; u8 hdr, cnt; - u16 data; + u32 data; data = pll_limits_table(bios, ver, &hdr, &cnt, len); if (data && *ver >= 0x30) { @@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) map = pll_map(bios); while (map && map->reg) { if (map->reg == reg && *ver >= 0x20) { - u16 addr = (data += hdr); + u32 addr = (data += hdr); *type = map->type; while (cnt--) { if (nvbios_rd32(bios, data) == map->reg) @@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) return 0x0000; } -static u16 +static u32 pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) { struct pll_mapping *map; u8 hdr, cnt; - u16 data; + u32 data; data = pll_limits_table(bios, ver, &hdr, &cnt, len); if (data && *ver >= 0x30) { @@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) map = pll_map(bios); while (map && map->reg) { if (map->type == type && *ver >= 0x20) { - u16 addr = (data += hdr); + u32 addr = (data += hdr); *reg = map->reg; while (cnt--) { if (nvbios_rd32(bios, data) == map->reg) @@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info) struct nvkm_device *device = subdev->device; u8 ver, len; u32 reg = type; - u16 data; + u32 data; if (type > PLL_MAX) { reg = type; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c index c268e5afe852..b4a308f3cf7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c @@ -26,21 +26,6 @@ #include <subdev/bios/image.h> #include <subdev/bios/pmu.h> -static u32 -weirdo_pointer(struct nvkm_bios *bios, u32 data) -{ - struct nvbios_image image; - int idx = 0; - if (nvbios_image(bios, idx++, &image)) { - data -= image.size; - while (nvbios_image(bios, idx++, &image)) { - if (image.type == 0xe0) - return image.base + data; - } - } - return 0; -} - u32 nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { @@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) if (!bit_entry(bios, 'p', &bit_p)) { if (bit_p.version == 2 && bit_p.length >= 4) data = nvbios_rd32(bios, bit_p.offset + 0x00); - if ((data = weirdo_pointer(bios, data))) { + if (data) { *ver = nvbios_rd08(bios, data + 0x00); /* maybe? 
*/ *hdr = nvbios_rd08(bios, data + 0x01); *len = nvbios_rd08(bios, data + 0x02); @@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info) u32 data; memset(info, 0x00, sizeof(*info)); while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) { - if ( pmuE.type == type && - (data = weirdo_pointer(bios, pmuE.data))) { + if (pmuE.type == type && (data = pmuE.data)) { info->init_addr_pmu = nvbios_rd32(bios, data + 0x08); info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c); info->boot_addr = data + 0x30; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c index d0ae7454764e..b57c370c725d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c @@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz) { struct bit_entry bit_P; - u16 rammap = 0x0000; + u32 rammap = 0x0000; if (!bit_entry(bios, 'P', &bit_P)) { if (bit_P.version == 2) - rammap = nvbios_rd16(bios, bit_P.offset + 4); + rammap = nvbios_rd32(bios, bit_P.offset + 4); if (rammap) { *ver = nvbios_rd08(bios, rammap + 0); @@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { u8 snr, ssz; - u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); + u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); if (rammap && idx < *cnt) { rammap = rammap + *hdr + (idx * (*len + (snr * ssz))); *hdr = *len; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c index 78c449b417b7..89d5543118cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c @@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) { struct nvkm_device *device = clk->base.subdev.device; u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); - u32 sctl = nvkm_rd32(device, dctl + (doff * 4)); + u32 sclk, sctl, sdiv = 2; switch (ssrc & 0x00000003) { case 0: @@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) case 2: return 100000; case 3: - if (sctl & 0x80000000) { - u32 sclk = read_vco(clk, dsrc + (doff * 4)); - u32 sdiv = (sctl & 0x0000003f) + 2; - return (sclk * 2) / sdiv; + sclk = read_vco(clk, dsrc + (doff * 4)); + + /* Memclk has doff of 0 despite its alt. 
location */ + if (doff <= 2) { + sctl = nvkm_rd32(device, dctl + (doff * 4)); + + if (sctl & 0x80000000) { + if (ssrc & 0x100) + sctl >>= 8; + + sdiv = (sctl & 0x3f) + 2; + } } - return read_vco(clk, dsrc + (doff * 4)); + return (sclk * 2) / sdiv; default: return 0; } @@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx) if (info->coef) { nvkm_wr32(device, addr + 0x04, info->coef); nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); nvkm_msec(device, 2000, if (nvkm_rd32(device, addr + 0x00) & 0x00020000) break; ); - nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); } } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c index 975c401bccab..06bc0d2d6ae1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c @@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx) if (info->coef) { nvkm_wr32(device, addr + 0x04, info->coef); nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); nvkm_msec(device, 2000, if (nvkm_rd32(device, addr + 0x00) & 0x00020000) break; ); - nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index 5f0ee24e31b8..218893e3e5f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -28,69 +28,6 @@ #include <core/tegra.h> #include <subdev/timer.h> -#define KHZ (1000) -#define MHZ (KHZ * 1000) - -#define MASK(w) ((1 << w) - 1) - -#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) -#define GPCPLL_CFG_ENABLE BIT(0) -#define GPCPLL_CFG_IDDQ BIT(1) -#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) -#define GPCPLL_CFG_LOCK BIT(17) - -#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) -#define GPCPLL_COEFF_M_SHIFT 0 -#define GPCPLL_COEFF_M_WIDTH 8 -#define GPCPLL_COEFF_N_SHIFT 8 -#define GPCPLL_COEFF_N_WIDTH 8 -#define GPCPLL_COEFF_P_SHIFT 16 -#define GPCPLL_COEFF_P_WIDTH 6 - -#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) -#define GPCPLL_CFG2_SETUP2_SHIFT 16 -#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 - -#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18) -#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 - -#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800 -#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) -#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 -#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 -#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 -#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 -#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 - -#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) -#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 - -#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) -#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 -#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 -#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 -#define GPC2CLK_OUT_VCODIV_WIDTH 6 -#define GPC2CLK_OUT_VCODIV_SHIFT 8 -#define GPC2CLK_OUT_VCODIV1 0 -#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ - GPC2CLK_OUT_VCODIV_SHIFT) -#define 
GPC2CLK_OUT_BYPDIV_WIDTH 6 -#define GPC2CLK_OUT_BYPDIV_SHIFT 0 -#define GPC2CLK_OUT_BYPDIV31 0x3c -#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ - GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ - | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ - | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) -#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ - GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ - | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ - | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) - -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0) -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ - (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) - static const u8 _pl_to_div[] = { /* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, @@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = { .min_pl = 1, .max_pl = 32, }; -static void +void gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) { struct nvkm_device *device = clk->base.subdev.device; @@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); } -static u32 -gk20a_pllg_calc_rate(struct gk20a_clk *clk) +void +gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT; + val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT; + nvkm_wr32(device, GPCPLL_COEFF, val); +} + +u32 +gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll) { u32 rate; u32 divider; - rate = clk->parent_rate * clk->pll.n; - divider = clk->pll.m * clk->pl_to_div(clk->pll.pl); + rate = clk->parent_rate * pll->n; + divider = pll->m * clk->pl_to_div(pll->pl); return rate / divider / 2; } -static int -gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) +int +gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate, + struct gk20a_pll *pll) { struct nvkm_subdev *subdev = &clk->base.subdev; u32 target_clk_f, ref_clk_f, target_freq; @@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) target_clk_f = rate * 2 / KHZ; ref_clk_f = clk->parent_rate / KHZ; - max_vco_f = clk->params->max_vco; + target_vco_f = target_clk_f + target_clk_f / 50; + max_vco_f = max(clk->params->max_vco, target_vco_f); min_vco_f = clk->params->min_vco; best_m = clk->params->max_m; best_n = clk->params->min_n; best_pl = clk->params->min_pl; - target_vco_f = target_clk_f + target_clk_f / 50; - if (max_vco_f < target_vco_f) - max_vco_f = target_vco_f; - /* min_pl <= high_pl <= max_pl */ high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; high_pl = min(high_pl, clk->params->max_pl); @@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) target_vco_f = target_clk_f * clk->pl_to_div(pl); for (m = clk->params->min_m; m <= clk->params->max_m; m++) { - u32 u_f, vco_f; - - u_f = ref_clk_f / m; + u32 u_f = ref_clk_f / m; if (u_f < clk->params->min_u) break; @@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) break; for (; n <= n2; n++) { + u32 vco_f; + if (n < 
clk->params->min_n) continue; if (n > clk->params->max_n) @@ -247,16 +194,16 @@ found_match: "no best match for target @ %dMHz on gpc_pll", target_clk_f / KHZ); - clk->pll.m = best_m; - clk->pll.n = best_n; - clk->pll.pl = best_pl; + pll->m = best_m; + pll->n = best_n; + pll->pl = best_pl; - target_freq = gk20a_pllg_calc_rate(clk); + target_freq = gk20a_pllg_calc_rate(clk, pll); nvkm_debug(subdev, - "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", - target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl, - clk->pl_to_div(clk->pll.pl)); + "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n", + target_freq / KHZ, pll->m, pll->n, pll->pl, + clk->pl_to_div(pll->pl)); return 0; } @@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) { struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; - u32 val; - int ramp_timeout; + struct gk20a_pll pll; + int ret = 0; /* get old coefficients */ - val = nvkm_rd32(device, GPCPLL_COEFF); + gk20a_pllg_read_mnp(clk, &pll); /* do nothing if NDIV is the same */ - if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) + if (n == pll.n) return 0; - /* setup */ - nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, - 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT); - nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, - 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT); - /* pll slowdown mode */ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); /* new ndiv ready for ramp */ - val = nvkm_rd32(device, GPCPLL_COEFF); - val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT); - val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + pll.n = n; udelay(1); - nvkm_wr32(device, GPCPLL_COEFF, val); + gk20a_pllg_write_mnp(clk, &pll); /* dynamic ramp to new ndiv */ - val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); - val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT; udelay(1); - nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); - for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { - udelay(1); - val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); - if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) - break; - } + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; /* exit slowdown mode */ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, @@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); - if (ramp_timeout <= 0) { - nvkm_error(subdev, "gpcpll dynamic ramp timeout\n"); - return -ETIMEDOUT; - } - - return 0; + return ret; } -static void +static int gk20a_pllg_enable(struct gk20a_clk *clk) { struct nvkm_device *device = clk->base.subdev.device; + u32 val; nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); nvkm_rd32(device, GPCPLL_CFG); + + /* enable lock detection */ + val = nvkm_rd32(device, GPCPLL_CFG); + if (val & GPCPLL_CFG_LOCK_DET_OFF) { + val &= ~GPCPLL_CFG_LOCK_DET_OFF; + nvkm_wr32(device, GPCPLL_CFG, val); + } + + /* wait for lock */ + if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK, + 
GPCPLL_CFG_LOCK) < 0) + return -ETIMEDOUT; + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; } static void @@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk) { struct nvkm_device *device = clk->base.subdev.device; + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); nvkm_rd32(device, GPCPLL_CFG); } static int -_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide) +gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) { struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; - u32 val, cfg; - struct gk20a_pll old_pll; - u32 n_lo; - - /* get old coefficients */ - gk20a_pllg_read_mnp(clk, &old_pll); - - /* do NDIV slide if there is no change in M and PL */ - cfg = nvkm_rd32(device, GPCPLL_CFG); - if (allow_slide && clk->pll.m == old_pll.m && - clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) { - return gk20a_pllg_slide(clk, clk->pll.n); - } - - /* slide down to NDIV_LO */ - if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) { - int ret; - - n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); - ret = gk20a_pllg_slide(clk, n_lo); + struct gk20a_pll cur_pll; + int ret; - if (ret) - return ret; - } + gk20a_pllg_read_mnp(clk, &cur_pll); - /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ + /* split VCO-to-bypass jump in half by setting out divider 1:2 */ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, - 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); - - /* put PLL in bypass before programming it */ - val = nvkm_rd32(device, SEL_VCO); - val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); udelay(2); - nvkm_wr32(device, SEL_VCO, val); - - /* get out from IDDQ */ - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_IDDQ) { - val &= ~GPCPLL_CFG_IDDQ; - nvkm_wr32(device, GPCPLL_CFG, val); - nvkm_rd32(device, GPCPLL_CFG); - udelay(2); - } gk20a_pllg_disable(clk); - nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__, - clk->pll.m, clk->pll.n, clk->pll.pl); - - n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); - val = clk->pll.m << GPCPLL_COEFF_M_SHIFT; - val |= (allow_slide ? 
n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT; - val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT; - nvkm_wr32(device, GPCPLL_COEFF, val); + gk20a_pllg_write_mnp(clk, pll); - gk20a_pllg_enable(clk); - - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_LOCK_DET_OFF) { - val &= ~GPCPLL_CFG_LOCK_DET_OFF; - nvkm_wr32(device, GPCPLL_CFG, val); - } - - if (nvkm_usec(device, 300, - if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK) - break; - ) < 0) - return -ETIMEDOUT; - - /* switch to VCO mode */ - nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), - BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + ret = gk20a_pllg_enable(clk); + if (ret) + return ret; /* restore out divider 1:1 */ - val = nvkm_rd32(device, GPC2CLK_OUT); - if ((val & GPC2CLK_OUT_VCODIV_MASK) != - (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) { - val &= ~GPC2CLK_OUT_VCODIV_MASK; - val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT; - udelay(2); - nvkm_wr32(device, GPC2CLK_OUT, val); - /* Intentional 2nd write to assure linear divider operation */ - nvkm_wr32(device, GPC2CLK_OUT, val); - nvkm_rd32(device, GPC2CLK_OUT); - } + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); - /* slide up to new NDIV */ - return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0; + return 0; } static int -gk20a_pllg_program_mnp(struct gk20a_clk *clk) +gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll) { - int err; + struct gk20a_pll cur_pll; + int ret; - err = _gk20a_pllg_program_mnp(clk, true); - if (err) - err = _gk20a_pllg_program_mnp(clk, false); + if (gk20a_pllg_is_enabled(clk)) { + gk20a_pllg_read_mnp(clk, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gk20a_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; - return err; + /* slide up to new NDIV */ + return gk20a_pllg_slide(clk, pll->n); } static struct nvkm_pstate @@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src) struct gk20a_clk *clk = gk20a_clk(base); struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; + struct gk20a_pll pll; switch (src) { case nv_clk_src_crystal: return device->crystal; case nv_clk_src_gpc: - gk20a_pllg_read_mnp(clk, &clk->pll); - return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV; + gk20a_pllg_read_mnp(clk, &pll); + return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV; default: nvkm_error(subdev, "invalid clock source %d\n", src); return -EINVAL; @@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) struct gk20a_clk *clk = gk20a_clk(base); return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] * - GK20A_CLK_GPC_MDIV); + GK20A_CLK_GPC_MDIV, &clk->pll); } int gk20a_clk_prog(struct nvkm_clk *base) { struct gk20a_clk *clk = gk20a_clk(base); + int ret; + + ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll); + if (ret) + ret 
= gk20a_pllg_program_mnp(clk, &clk->pll); - return gk20a_pllg_program_mnp(clk); + return ret; } void @@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base) { } +int +gk20a_clk_setup_slide(struct gk20a_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 step_a, step_b; + + switch (clk->parent_rate) { + case 12000000: + case 12800000: + case 13000000: + step_a = 0x2b; + step_b = 0x0b; + break; + case 19200000: + step_a = 0x12; + step_b = 0x08; + break; + case 38400000: + step_a = 0x04; + step_b = 0x05; + break; + default: + nvkm_error(subdev, "invalid parent clock rate %u KHz", + clk->parent_rate / KHZ); + return -EINVAL; + } + + nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, + step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT); + nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, + step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT); + + return 0; +} + void gk20a_clk_fini(struct nvkm_clk *base) { struct nvkm_device *device = base->subdev.device; struct gk20a_clk *clk = gk20a_clk(base); - u32 val; /* slide to VCO min */ - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_ENABLE) { + if (gk20a_pllg_is_enabled(clk)) { struct gk20a_pll pll; u32 n_lo; gk20a_pllg_read_mnp(clk, &pll); - n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); + n_lo = gk20a_pllg_n_lo(clk, &pll); gk20a_pllg_slide(clk, n_lo); } - /* put PLL in bypass before disabling it */ - nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); - gk20a_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); } static int @@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base) struct nvkm_device *device = subdev->device; int ret; + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); ret = base->func->prog(&clk->base); @@ -646,7 +610,7 @@ gk20a_clk = { }; int -_gk20a_clk_ctor(struct nvkm_device *device, int index, +gk20a_clk_ctor(struct nvkm_device *device, int index, const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params, struct gk20a_clk *clk) @@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, + ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, clk); clk->pl_to_div = pl_to_div; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h index 13c46740197d..0d1450972162 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -24,9 +24,79 @@ #ifndef __NVKM_CLK_GK20A_H__ #define __NVKM_CLK_GK20A_H__ +#define KHZ (1000) +#define MHZ (KHZ * 1000) + +#define MASK(w) ((1 << (w)) - 1) + #define GK20A_CLK_GPC_MDIV 1000 #define SYS_GPCPLL_CFG_BASE 0x00137000 +#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) +#define GPCPLL_CFG_ENABLE BIT(0) +#define GPCPLL_CFG_IDDQ BIT(1) +#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) +#define GPCPLL_CFG_LOCK BIT(17) + +#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_CFG3 
(SYS_GPCPLL_CFG_BASE + 0x18) +#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0 +#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9 +#define GPCPLL_CFG3_VCO_CTRL_MASK \ + (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT) +#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 +#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8 + +#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) +#define GPCPLL_COEFF_M_SHIFT 0 +#define GPCPLL_COEFF_M_WIDTH 8 +#define GPCPLL_COEFF_N_SHIFT 8 +#define GPCPLL_COEFF_N_WIDTH 8 +#define GPCPLL_COEFF_N_MASK \ + (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT) +#define GPCPLL_COEFF_P_SHIFT 16 +#define GPCPLL_COEFF_P_WIDTH 6 + +#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) +#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 +#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 +#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 +#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 +#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 + +#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0) +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ + (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) + +#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) +#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 + +#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) +#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 +#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 +#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 +#define GPC2CLK_OUT_VCODIV_WIDTH 6 +#define GPC2CLK_OUT_VCODIV_SHIFT 8 +#define GPC2CLK_OUT_VCODIV1 0 +#define GPC2CLK_OUT_VCODIV2 2 +#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ + GPC2CLK_OUT_VCODIV_SHIFT) +#define GPC2CLK_OUT_BYPDIV_WIDTH 6 +#define GPC2CLK_OUT_BYPDIV_SHIFT 0 +#define GPC2CLK_OUT_BYPDIV31 0x3c +#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ + | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ + | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) +#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ + | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ + | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) /* All frequencies in Khz */ struct gk20a_clk_pllg_params { @@ -54,7 +124,29 @@ struct gk20a_clk { }; #define gk20a_clk(p) container_of((p), struct gk20a_clk, base) -int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, +u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *); +int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *); +void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *); +void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *); + +static inline bool +gk20a_pllg_is_enabled(struct gk20a_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = nvkm_rd32(device, GPCPLL_CFG); + return val & GPCPLL_CFG_ENABLE; +} + +static inline u32 +gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll) +{ + return DIV_ROUND_UP(pll->m * clk->params->min_vco, + clk->parent_rate / KHZ); +} + +int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, const struct gk20a_clk_pllg_params *, struct gk20a_clk *); void gk20a_clk_fini(struct nvkm_clk *); int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src); @@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct 
nvkm_cstate *); int gk20a_clk_prog(struct nvkm_clk *); void gk20a_clk_tidy(struct nvkm_clk *); +int gk20a_clk_setup_slide(struct gk20a_clk *); + #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c index 71b2bbb61973..b284e949f732 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -21,20 +21,123 @@ */ #include <subdev/clk.h> +#include <subdev/volt.h> +#include <subdev/timer.h> #include <core/device.h> +#include <core/tegra.h> #include "priv.h" #include "gk20a.h" -#define KHZ (1000) -#define MHZ (KHZ * 1000) - -#define MASK(w) ((1 << w) - 1) +#define GPCPLL_CFG_SYNC_MODE BIT(2) #define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340) #define BYPASSCTRL_SYS_GPCPLL_SHIFT 0 #define BYPASSCTRL_SYS_GPCPLL_WIDTH 1 +#define GPCPLL_CFG2_SDM_DIN_SHIFT 0 +#define GPCPLL_CFG2_SDM_DIN_WIDTH 8 +#define GPCPLL_CFG2_SDM_DIN_MASK \ + (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT) +#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8 +#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15 +#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \ + (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10) +#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0 +#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7 +#define GPCPLL_DVFS0_DFS_COEFF_MASK \ + (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT) +#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8 +#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7 +#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \ + (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT) + +#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14) +#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0 +#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7 +#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7 +#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1 +#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8 +#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7 +#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15 +#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1 +#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16 +#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12 +#define GPCPLL_DVFS1_EN_SDM_SHIFT 28 +#define GPCPLL_DVFS1_EN_SDM_WIDTH 1 +#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28) +#define GPCPLL_DVFS1_EN_DFS_SHIFT 29 +#define GPCPLL_DVFS1_EN_DFS_WIDTH 1 +#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29) +#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30 +#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1 +#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30) +#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31 +#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1 +#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31) + +#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20) +#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16) + +#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24 +#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7 + +#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ +#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */ + +struct gm20b_clk_dvfs_params { + s32 coeff_slope; + s32 coeff_offs; + u32 vco_ctrl; +}; + +static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = { + .coeff_slope = -165230, + .coeff_offs = 214007, + .vco_ctrl = 0x7 << 3, +}; + +/* + * base.n is now the *integer* part of the N factor. + * sdm_din contains n's decimal part. 
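+ * + * Illustrative example (derived from the n_eff equation documented at + * gm20b_dvfs_calc_ndiv() below): each sdm_din step is worth 1/32 of an + * NDIV, so base.n = 100 with sdm_din = 0x10 gives an effective + * NDIV of 100 + 1/2 + 16/32 = 101.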
+ */ +struct gm20b_pll { + struct gk20a_pll base; + u32 sdm_din; +}; + +struct gm20b_clk_dvfs { + u32 dfs_coeff; + s32 dfs_det_max; + s32 dfs_ext_cal; +}; + +struct gm20b_clk { + /* currently applied parameters */ + struct gk20a_clk base; + struct gm20b_clk_dvfs dvfs; + u32 uv; + + /* new parameters to apply */ + struct gk20a_pll new_pll; + struct gm20b_clk_dvfs new_dvfs; + u32 new_uv; + + const struct gm20b_clk_dvfs_params *dvfs_params; + + /* fused parameters */ + s32 uvdet_slope; + s32 uvdet_offs; + + /* safe frequency we can use at minimum voltage */ + u32 safe_fmax_vmin; +}; +#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base) + static u32 pl_to_div(u32 pl) { return pl; @@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = { .min_pl = 1, .max_pl = 31, }; +static void +gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + u32 val; + + gk20a_pllg_read_mnp(&clk->base, &pll->base); + val = nvkm_rd32(device, GPCPLL_CFG2); + pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) & + MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); +} + +static void +gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + gk20a_pllg_write_mnp(&clk->base, &pll->base); +} + +/* + * Determine DFS_COEFF for the requested voltage. Always select external + * calibration override equal to the voltage, and set maximum detection + * limit "0" (to make sure that PLL output remains under F/V curve when + * voltage increases). + */ +static void +gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gm20b_clk_dvfs_params *p = clk->dvfs_params; + u32 coeff; + /* Work with mv as uv would likely trigger an overflow */ + s32 mv = DIV_ROUND_CLOSEST(uv, 1000); + + /* coeff = slope * voltage + offset */ + coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs; + coeff = DIV_ROUND_CLOSEST(coeff, 1000); + dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH)); + + dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs, + clk->uvdet_slope); + /* should never happen */ + if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE)) + nvkm_error(subdev, "dfs_ext_cal overflow!\n"); + + dvfs->dfs_det_max = 0; + + nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n", + __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal, + dvfs->dfs_det_max); +} + +/* + * Solve equation for integer and fractional part of the effective NDIV: + * + * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) + + * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE + * + * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw. 
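+ * + * Sanity check on the arithmetic below: with DVFS_COEFF * DVFS_DET_DELTA = 0, + * an integer n_eff maps back to itself - rem = 0 yields + * SDM_DIN = -2^SDM_DIN_RANGE, whose -1/2 contribution cancels the fixed + * +1/2 term, so n_int = n_eff and sdm_din reads back as 0xf0.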
+ */ +static void +gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gk20a_clk_pllg_params *p = clk->base.params; + u32 n; + s32 det_delta; + u32 rem, rem_range; + + /* calculate current ext_cal and subtract previous one */ + det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs, + clk->uvdet_slope); + det_delta -= clk->dvfs.dfs_ext_cal; + det_delta = min(det_delta, clk->dvfs.dfs_det_max); + det_delta *= clk->dvfs.dfs_coeff; + + /* integer part of n */ + n = (n_eff << DFS_DET_RANGE) - det_delta; + /* should never happen! */ + if (n <= 0) { + nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n"); + n = 1 << DFS_DET_RANGE; + } + if (n >> DFS_DET_RANGE > p->max_n) { + nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n"); + n = p->max_n << DFS_DET_RANGE; + } + *n_int = n >> DFS_DET_RANGE; + + /* fractional part of n */ + rem = ((u32)n) & MASK(DFS_DET_RANGE); + rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE; + /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */ + rem = (rem << rem_range) - BIT(SDM_DIN_RANGE); + /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */ + *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); + + nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__, + n_eff, *n_int, *sdm_din); +} + +static int +gm20b_pllg_slide(struct gm20b_clk *clk, u32 n) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll pll; + u32 n_int, sdm_din; + int ret = 0; + + /* calculate the new n_int/sdm_din for this n/uv */ + gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din); + + /* get old coefficients */ + gm20b_pllg_read_mnp(clk, &pll); + /* do nothing if NDIV is the same */ + if (n_int == pll.base.n && sdm_din == pll.sdm_din) + return 0; + + /* pll slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); + + /* new ndiv ready for ramp */ + /* in DVFS mode SDM is updated via "new" field */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT); + pll.base.n = n_int; + udelay(1); + gk20a_pllg_write_mnp(&clk->base, &pll.base); + + /* dynamic ramp to new ndiv */ + udelay(1); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); + + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; + + /* in DVFS mode complete SDM update */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + + /* exit slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); + nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); + + return ret; +} + +static int +gm20b_pllg_enable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); + nvkm_rd32(device, GPCPLL_CFG); + + /* In DVFS mode lock cannot be used - so just delay */ + udelay(40); + + /* set SYNC_MODE for glitchless switch out of bypass */ + 
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, + GPCPLL_CFG_SYNC_MODE); + nvkm_rd32(device, GPCPLL_CFG); + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; +} + +static void +gm20b_pllg_disable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + + /* clear SYNC_MODE before disabling PLL */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0); + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); + nvkm_rd32(device, GPCPLL_CFG); +} + +static int +gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll cur_pll; + u32 n_int, sdm_din; + /* if we only change pdiv, we can do a glitchless transition */ + bool pdiv_only; + int ret; + + gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din); + gm20b_pllg_read_mnp(clk, &cur_pll); + pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din && + cur_pll.base.m == pll->m; + + /* need full sequence if clock not enabled yet */ + if (!gk20a_pllg_is_enabled(&clk->base)) + pdiv_only = false; + + /* split VCO-to-bypass jump in half by setting out divider 1:2 */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + udelay(2); + + if (pdiv_only) { + u32 old = cur_pll.base.pl; + u32 new = pll->pl; + + /* + * we can do a glitchless transition only if the old and new PL + * parameters share at least one bit set to 1. If this is not + * the case, calculate and program an interim PL that will allow + * us to respect that rule. 
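+ * + * For example, going from PL 2 (0b00010) to PL 5 (0b00101): old & new == 0, + * so the interim PL min(2 | BIT(ffs(5) - 1), 5 | BIT(ffs(2) - 1)) = + * min(3, 7) = 3 is programmed first; 3 shares bit 0 with 5 and bit 1 with 2.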
+ */ + if ((old & new) == 0) { + cur_pll.base.pl = min(old | BIT(ffs(new) - 1), + new | BIT(ffs(old) - 1)); + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } + + cur_pll.base.pl = new; + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } else { + /* disable before programming if more than pdiv changes */ + gm20b_pllg_disable(clk); + + cur_pll.base = *pll; + cur_pll.base.n = n_int; + cur_pll.sdm_din = sdm_din; + gm20b_pllg_write_mnp(clk, &cur_pll); + + ret = gm20b_pllg_enable(clk); + if (ret) + return ret; + } + + /* restore out divider 1:1 */ + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + + return 0; +} + +static int +gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct gk20a_pll cur_pll; + int ret; + + if (gk20a_pllg_is_enabled(&clk->base)) { + gk20a_pllg_read_mnp(&clk->base, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gm20b_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; + + /* slide up to new NDIV */ + return gm20b_pllg_slide(clk, pll->n); +} + +static int +gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gm20b_clk *clk = gm20b_clk(base); + struct nvkm_subdev *subdev = &base->subdev; + struct nvkm_volt *volt = base->subdev.device->volt; + int ret; + + ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] * + GK20A_CLK_GPC_MDIV, &clk->new_pll); + if (ret) + return ret; + + clk->new_uv = volt->vid[cstate->voltage].uv; + gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs); + + nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv); + + return 0; +} + +/* + * Compute PLL parameters that are always safe for the current voltage + */ +static void +gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll) +{ + u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ; + u32 parent_rate = clk->base.parent_rate / KHZ; + u32 nmin, nsafe; + + /* remove a safe margin of 10% */ + if (rate > clk->safe_fmax_vmin) + rate = rate * (100 - 10) / 100; + + /* gpc2clk */ + rate *= 2; + + nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate); + nsafe = pll->m * rate / (clk->base.parent_rate); + + if (nsafe < nmin) { + pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate); + nsafe = nmin; + } + + pll->n = nsafe; +} + +static void +gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK, + coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); +} + +static void 
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + u32 val; + + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1), + dfs_det_cal); + udelay(1); + + val = nvkm_rd32(device, GPCPLL_DVFS1); + if (!(val & BIT(25))) { + /* Use external value to overwrite calibration value */ + val |= BIT(25) | BIT(16); + nvkm_wr32(device, GPCPLL_DVFS1, val); + } +} + +static void +gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, + GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK, + dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT | + dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); + + gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal); +} + +static int +gm20b_clk_prog(struct nvkm_clk *base) +{ + struct gm20b_clk *clk = gm20b_clk(base); + u32 cur_freq; + int ret; + + /* No change in DVFS settings? */ + if (clk->uv == clk->new_uv) + goto prog; + + /* + * Interim step for changing DVFS detection settings: low enough + * frequency to be safe at DVFS coeff = 0. + * + * 1. If voltage is increasing: + * - safe frequency target matches the lowest - old - frequency + * - DVFS settings are still old + * - Voltage already increased to new level by volt, but maximum + * detection limit assures PLL output remains under F/V curve + * + * 2. If voltage is decreasing: + * - safe frequency target matches the lowest - new - frequency + * - DVFS settings are still old + * - Voltage is also old, it will be lowered by volt afterwards + * + * Interim step can be skipped if old frequency is below safe minimum, + * i.e., it is low enough to be safe at any voltage in operating range + * with zero DVFS coefficient. 
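+ * + * In both cases the interim parameters are computed by + * gm20b_dvfs_calc_safe_pll() and applied with + * gm20b_pllg_program_mnp_slide() before the DVFS coefficients are + * reprogrammed below.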
+ */ + cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc); + if (cur_freq > clk->safe_fmax_vmin) { + struct gk20a_pll pll_safe; + + if (clk->uv < clk->new_uv) + /* voltage will rise: safe frequency is current one */ + pll_safe = clk->base.pll; + else + /* voltage will drop: safe frequency is new one */ + pll_safe = clk->new_pll; + + gm20b_dvfs_calc_safe_pll(clk, &pll_safe); + ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe); + if (ret) + return ret; + } + + /* + * DVFS detection settings transition: + * - Set DVFS coefficient zero + * - Set calibration level to new voltage + * - Set DVFS coefficient to match new voltage + */ + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + +prog: + clk->uv = clk->new_uv; + clk->dvfs = clk->new_dvfs; + clk->base.pll = clk->new_pll; + + return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll); +} + static struct nvkm_pstate gm20b_pstates[] = { { @@ -133,9 +714,99 @@ gm20b_pstates[] = { .voltage = 12, }, }, - }; +static void +gm20b_clk_fini(struct nvkm_clk *base) +{ + struct nvkm_device *device = base->subdev.device; + struct gm20b_clk *clk = gm20b_clk(base); + + /* slide to VCO min */ + if (gk20a_pllg_is_enabled(&clk->base)) { + struct gk20a_pll pll; + u32 n_lo; + + gk20a_pllg_read_mnp(&clk->base, &pll); + n_lo = gk20a_pllg_n_lo(&clk->base, &pll); + gm20b_pllg_slide(clk, n_lo); + } + + gm20b_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); +} + +static int +gm20b_clk_init_dvfs(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + bool fused = clk->uvdet_offs && clk->uvdet_slope; + static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */ + u32 data; + int ret; + + /* Enable NA DVFS */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT, + GPCPLL_DVFS1_EN_DFS_BIT); + + /* Set VCO_CTRL */ + if (clk->dvfs_params->vco_ctrl) + nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK, + clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT); + + if (fused) { + /* Start internal calibration, but ignore results */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* got uvdet parameters from fuse, skip calibration */ + goto calibrated; + } + + /* + * If calibration parameters are not fused, start internal calibration, + * wait for completion, and use results along with default slope to + * calculate ADC offset during boot. + */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* Wait for internal calibration done (spec < 2us). 
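+ * The 10us timeout passed to nvkm_wait_usec() below leaves a 5x margin + * over that specified worst case.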
*/ + ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT); + if (ret < 0) { + nvkm_error(subdev, "GPCPLL calibration timeout\n"); + return -ETIMEDOUT; + } + + data = nvkm_rd32(device, GPCPLL_CFG3) >> + GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT; + data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH); + + clk->uvdet_slope = ADC_SLOPE_UV; + clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV; + + nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n", + clk->uvdet_offs, clk->uvdet_slope); + +calibrated: + /* Compute and apply initial DVFS parameters */ + gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs); + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + + return 0; +} + +/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */ +static const struct nvkm_clk_func gm20b_clk; + static int gm20b_clk_init(struct nvkm_clk *base) { @@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base) struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; int ret; + u32 data; + + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, + GPC2CLK_OUT_INIT_VAL); /* Set the global bypass control to VCO */ nvkm_mask(device, BYPASSCTRL_SYS, MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT, 0); + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + + /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */ + data = nvkm_rd32(device, 0x021944); + if (!(data & 0x3)) { + data |= 0x2; + nvkm_wr32(device, 0x021944, data); + + data = nvkm_rd32(device, 0x021948); + data |= 0x1; + nvkm_wr32(device, 0x021948, data); + } + + /* Disable idle slow down */ + nvkm_mask(device, 0x20160, 0x003f0000, 0x0); + + /* speedo >= 1? 
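+ * The &gm20b_clk function table is only selected in gm20b_clk_new() when + * the noise-aware PLL can be used (speedo >= 1 parts with a usable NAPLL + * m range), so matching on it here decides whether DVFS is initialized.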
*/ + if (clk->base.func == &gm20b_clk) { + struct gm20b_clk *_clk = gm20b_clk(base); + struct nvkm_volt *volt = device->volt; + + /* Get current voltage */ + _clk->uv = nvkm_volt_get(volt); + + /* Initialize DVFS */ + ret = gm20b_clk_init_dvfs(_clk); + if (ret) + return ret; + } + /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); - ret = base->func->prog(&clk->base); + ret = base->func->prog(base); if (ret) { nvkm_error(subdev, "cannot initialize clock\n"); return ret; @@ -169,6 +881,7 @@ gm20b_clk_speedo0 = { .prog = gk20a_clk_prog, .tidy = gk20a_clk_tidy, .pstates = gm20b_pstates, + /* Speedo 0 only supports 12 voltages */ .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1, .domains = { { nv_clk_src_crystal, 0xff }, @@ -177,8 +890,26 @@ gm20b_clk_speedo0 = { }, }; -int -gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +static const struct nvkm_clk_func +gm20b_clk = { + .init = gm20b_clk_init, + .fini = gm20b_clk_fini, + .read = gk20a_clk_read, + .calc = gm20b_clk_calc, + .prog = gm20b_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gm20b_pstates, + .nr_pstates = ARRAY_SIZE(gm20b_pstates), + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max }, + }, +}; + +static int +gm20b_clk_new_speedo0(struct nvkm_device *device, int index, + struct nvkm_clk **pclk) { struct gk20a_clk *clk; int ret; @@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, - &gm20b_pllg_params, clk); + ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, + &gm20b_pllg_params, clk); clk->pl_to_div = pl_to_div; clk->div_to_pl = div_to_pl; return ret; } + +/* FUSE register */ +#define FUSE_RESERVED_CALIB0 0x204 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6 +#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30 +#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2 + +static int +gm20b_clk_init_fused_params(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + u32 val = 0; + u32 rev = 0; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA) + tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val); + rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH); +#endif + + /* No fused parameters, we will calibrate later */ + if (rev == 0) + return -EINVAL; + + /* Integer part in mV + fractional part in uV */ + clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH)); + + /* Integer part in mV + fractional part in 100uV */ + clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100; + + nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n", + clk->uvdet_slope, clk->uvdet_offs); + return 0; +} + +static int 
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_volt *volt = subdev->device->volt; + struct nvkm_pstate *pstates = clk->base.base.func->pstates; + int nr_pstates = clk->base.base.func->nr_pstates; + int vmin, id = 0; + u32 fmax = 0; + int i; + + /* find lowest voltage we can use */ + vmin = volt->vid[0].uv; + for (i = 1; i < volt->vid_nr; i++) { + if (volt->vid[i].uv <= vmin) { + vmin = volt->vid[i].uv; + id = volt->vid[i].vid; + } + } + + /* find max frequency at this voltage */ + for (i = 0; i < nr_pstates; i++) + if (pstates[i].base.voltage == id) + fmax = max(fmax, + pstates[i].base.domain[nv_clk_src_gpc]); + + if (!fmax) { + nvkm_error(subdev, "failed to evaluate safe fmax\n"); + return -EINVAL; + } + + /* we are safe at 90% of the max frequency */ + clk->safe_fmax_vmin = fmax * (100 - 10) / 100; + nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin); + + return 0; +} + +int +gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct gm20b_clk *clk; + struct nvkm_subdev *subdev; + struct gk20a_clk_pllg_params *clk_params; + int ret; + + /* Speedo 0 GPUs cannot use noise-aware PLL */ + if (tdev->gpu_speedo_id == 0) + return gm20b_clk_new_speedo0(device, index, pclk); + + /* Speedo >= 1, use NAPLL */ + clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base.base; + subdev = &clk->base.base.subdev; + + /* duplicate the clock parameters since we will patch them below */ + clk_params = (void *) (clk + 1); + *clk_params = gm20b_pllg_params; + ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params, + &clk->base); + if (ret) + return ret; + + /* + * NAPLL can only work with max_u, clamp the m range so + * gk20a_pllg_calc_mnp always uses it + */ + clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u, + (clk->base.parent_rate / KHZ)); + if (clk_params->max_m == 0) { + nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n"); + kfree(clk); + return gm20b_clk_new_speedo0(device, index, pclk); + } + + clk->base.pl_to_div = pl_to_div; + clk->base.div_to_pl = div_to_pl; + + clk->dvfs_params = &gm20b_dvfs_params; + + ret = gm20b_clk_init_fused_params(clk); + /* + * we will calibrate during init - should never happen on + * prod parts + */ + if (ret) + nvkm_warn(subdev, "no fused calibration parameters\n"); + + ret = gm20b_clk_init_safe_fmax(clk); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index 842d5de96d73..edcc157e6ac8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o nvkm-y += nvkm/subdev/fb/gk20a.o nvkm-y += nvkm/subdev/fb/gm107.o nvkm-y += nvkm/subdev/fb/gm200.o +nvkm-y += nvkm/subdev/fb/gp100.o +nvkm-y += nvkm/subdev/fb/gp104.o nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o @@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o nvkm-y += nvkm/subdev/fb/ramgf100.o nvkm-y += nvkm/subdev/fb/ramgk104.o nvkm-y += nvkm/subdev/fb/ramgm107.o +nvkm-y += nvkm/subdev/fb/ramgp100.o nvkm-y += nvkm/subdev/fb/sddr2.o nvkm-y += nvkm/subdev/fb/sddr3.o nvkm-y += nvkm/subdev/fb/gddr3.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 
ce90242b8cce..a7049c041594 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -25,6 +25,7 @@ #include "ram.h" #include <core/memory.h> +#include <core/option.h> #include <subdev/bios.h> #include <subdev/bios/M0203.h> #include <engine/gr.h> @@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init) fb->func->init(fb); + if (fb->func->init_page) + fb->func->init_page(fb); + if (fb->func->init_unkn) + fb->func->init_unkn(fb); return 0; } @@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device, nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev); fb->func = func; fb->tile.regions = fb->func->tile.regions; + fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c index e649ead5ccfc..76433cc66fff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c @@ -72,6 +72,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb) } void +gf100_fb_init_page(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + switch (fb->page) { + case 16: + nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); + break; + case 17: + default: + nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); + fb->page = 17; + break; + } +} + +void gf100_fb_init(struct nvkm_fb *base) { struct gf100_fb *fb = gf100_fb(base); @@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base) if (fb->r100c10_page) nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); - - nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ } void * @@ -125,6 +139,7 @@ gf100_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gf100_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h index 2160e5a39c9a..449f431644b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h @@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, void *gf100_fb_dtor(struct nvkm_fb *); void gf100_fb_init(struct nvkm_fb *); void gf100_fb_intr(struct nvkm_fb *); + +void gp100_fb_init(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c index b41f0f70038c..4245e2e6e604 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c @@ -29,6 +29,7 @@ gk104_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gk104_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c index 7306f7dfc3b9..f815fe2bbf08 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c @@ -27,7 +27,6 @@ static void gk20a_fb_init(struct nvkm_fb *fb) { struct nvkm_device *device = fb->subdev.device; - nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8); nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8); } @@ -36,6 +35,7 @@ static const struct 
nvkm_fb_func gk20a_fb = { .oneinit = gf100_fb_oneinit, .init = gk20a_fb_init, + .init_page = gf100_fb_init_page, .memtype_valid = gf100_fb_memtype_valid, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c index 4869fdb753c9..db699025f546 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c @@ -29,6 +29,7 @@ gm107_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gm107_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c index 44f5716f64d8..62f653240be3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c @@ -26,6 +26,24 @@ #include <core/memory.h> +void +gm200_fb_init_page(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + switch (fb->page) { + case 16: + nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001); + break; + case 17: + nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000); + break; + default: + nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800); + fb->page = 0; + break; + } +} + static void gm200_fb_init(struct nvkm_fb *base) { @@ -48,6 +66,7 @@ gm200_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gm200_fb_init, + .init_page = gm200_fb_init_page, .intr = gf100_fb_intr, .ram_new = gm107_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c new file mode 100644 index 000000000000..98474aec1921 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c @@ -0,0 +1,69 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ram.h" + +#include <core/memory.h> + +static void +gp100_fb_init_unkn(struct nvkm_fb *base) +{ + struct nvkm_device *device = gf100_fb(base)->base.subdev.device; + nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80)); + nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4)); + nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8)); + nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc)); +} + +void +gp100_fb_init(struct nvkm_fb *base) +{ + struct gf100_fb *fb = gf100_fb(base); + struct nvkm_device *device = fb->base.subdev.device; + + if (fb->r100c10_page) + nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); + + nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8); + nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8); + nvkm_mask(device, 0x100cc4, 0x00060000, + max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17); +} + +static const struct nvkm_fb_func +gp100_fb = { + .dtor = gf100_fb_dtor, + .oneinit = gf100_fb_oneinit, + .init = gp100_fb_init, + .init_page = gm200_fb_init_page, + .init_unkn = gp100_fb_init_unkn, + .ram_new = gp100_ram_new, + .memtype_valid = gf100_fb_memtype_valid, +}; + +int +gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +{ + return gf100_fb_new_(&gp100_fb, device, index, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c new file mode 100644 index 000000000000..92cb71861bec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ram.h" + +#include <core/memory.h> + +static const struct nvkm_fb_func +gp104_fb = { + .dtor = gf100_fb_dtor, + .oneinit = gf100_fb_oneinit, + .init = gp100_fb_init, + .init_page = gm200_fb_init_page, + .ram_new = gp100_ram_new, + .memtype_valid = gf100_fb_memtype_valid, +}; + +int +gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +{ + return gf100_fb_new_(&gp104_fb, device, index, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index d97d640e60a0..e905d44fa1d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -8,6 +8,8 @@ struct nvkm_fb_func { void *(*dtor)(struct nvkm_fb *); int (*oneinit)(struct nvkm_fb *); void (*init)(struct nvkm_fb *); + void (*init_page)(struct nvkm_fb *); + void (*init_unkn)(struct nvkm_fb *); void (*intr)(struct nvkm_fb *); struct { @@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nvkm_fb_tile *); int gf100_fb_oneinit(struct nvkm_fb *); +void gf100_fb_init_page(struct nvkm_fb *); bool gf100_fb_memtype_valid(struct nvkm_fb *, u32); + +void gm200_fb_init_page(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h index f816cbf2ced3..b9ec0ae6723a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h @@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **); +int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c new file mode 100644 index 000000000000..f3be408b5e5e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -0,0 +1,146 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "ram.h" + +#include <subdev/bios.h> +#include <subdev/bios/init.h> +#include <subdev/bios/rammap.h> + +static int +gp100_ram_init(struct nvkm_ram *ram) +{ + struct nvkm_subdev *subdev = &ram->fb->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_bios *bios = device->bios; + u8 ver, hdr, cnt, len, snr, ssz; + u32 data; + int i; + + /* run a bunch of tables from rammap table. there's actually + * individual pointers for each rammap entry too, but, nvidia + * seem to just run the last two entries' scripts early on in + * their init, and never again.. we'll just run 'em all once + * for now. + * + * i strongly suspect that each script is for a separate mode + * (likely selected by 0x9a065c's lower bits?), and the + * binary driver skips the one that's already been setup by + * the init tables. + */ + data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz); + if (!data || hdr < 0x15) + return -EINVAL; + + cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */ + data = nvbios_rd32(bios, data + 0x10); /* guess u32... */ + if (cnt) { + u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0; + for (i = 0; i < cnt; i++, data += 4) { + if (i != save >> 4) { + nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4); + nvbios_exec(&(struct nvbios_init) { + .subdev = subdev, + .bios = bios, + .offset = nvbios_rd32(bios, data), + .execute = 1, + }); + } + } + nvkm_mask(device, 0x9a065c, 0x000000f0, save); + } + + nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000); + nvkm_wr32(device, 0x10ecc0, 0xffffffff); + nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010); + return 0; +} + +static const struct nvkm_ram_func +gp100_ram_func = { + .init = gp100_ram_init, + .get = gf100_ram_get, + .put = gf100_ram_put, +}; + +int +gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) +{ + struct nvkm_ram *ram; + struct nvkm_subdev *subdev = &fb->subdev; + struct nvkm_device *device = subdev->device; + enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios); + const u32 rsvd_head = ( 256 * 1024); /* vga memory */ + const u32 rsvd_tail = (1024 * 1024); /* vbios etc */ + u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa; + u32 fbio_opt = nvkm_rd32(device, 0x021c14); + u64 part, size = 0, comm = ~0ULL; + bool mixed = false; + int ret; + + nvkm_debug(subdev, "022438: %08x\n", fbpa_num); + nvkm_debug(subdev, "021c14: %08x\n", fbio_opt); + for (fbpa = 0; fbpa < fbpa_num; fbpa++) { + if (!(fbio_opt & (1 << fbpa))) { + part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000)); + nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part); + part = part << 20; + if (part != comm) { + if (comm != ~0ULL) + mixed = true; + comm = min(comm, part); + } + size = size + part; + } + } + + ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram); + *pram = ram; + if (ret) + return ret; + + nvkm_mm_fini(&ram->vram); + + if (mixed) { + ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT, + ((comm * fbpa_num) - rsvd_head) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + + ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >> + NVKM_RAM_MM_SHIFT, + (size - (comm * fbpa_num) - rsvd_tail) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + } else { + ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT, + (size - rsvd_head - rsvd_tail) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild index 
932b366598aa..12d6f4f102cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild @@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o nvkm-y += nvkm/subdev/ltc/gk104.o nvkm-y += nvkm/subdev/ltc/gm107.o nvkm-y += nvkm/subdev/ltc/gm200.o +nvkm-y += nvkm/subdev/ltc/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c index c9eb677967a8..4a0fa0a9b802 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c @@ -23,7 +23,6 @@ */ #include "priv.h" -#include <core/enum.h> #include <subdev/fb.h> #include <subdev/timer.h> @@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) nvkm_wr32(device, 0x17ea58, depth); } -static const struct nvkm_bitfield +const struct nvkm_bitfield gf100_ltc_lts_intr_name[] = { { 0x00000001, "IDLE_ERROR_IQ" }, { 0x00000002, "IDLE_ERROR_CBC" }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index 389fb13a1998..ec0a3844b2d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c @@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) nvkm_wr32(device, 0x17e34c, depth); } -static void +void gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) { struct nvkm_subdev *subdev = <c->subdev; struct nvkm_device *device = subdev->device; u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); - u32 stat = nvkm_rd32(device, base + 0x00c); + u32 intr = nvkm_rd32(device, base + 0x00c); + u16 stat = intr & 0x0000ffff; + char msg[128]; if (stat) { - nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat); - nvkm_wr32(device, base + 0x00c, stat); + nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat); + nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg); } + + nvkm_wr32(device, base + 0x00c, intr); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c new file mode 100644 index 000000000000..0bdfb2f40266 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c @@ -0,0 +1,75 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "priv.h" + +static void +gp100_ltc_intr(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + u32 mask; + + mask = nvkm_rd32(device, 0x0001c0); + while (mask) { + u32 s, c = __ffs(mask); + for (s = 0; s < ltc->lts_nr; s++) + gm107_ltc_intr_lts(ltc, c, s); + mask &= ~(1 << c); + } +} + +static int +gp100_ltc_oneinit(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + ltc->ltc_nr = nvkm_rd32(device, 0x12006c); + ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28; + /*XXX: tagram allocation - TBD */ + return nvkm_mm_init(<c->tags, 0, 0, 1); +} + +static void +gp100_ltc_init(struct nvkm_ltc *ltc) +{ + /*XXX: PMU LS call to setup tagram address */ +} + +static const struct nvkm_ltc_func +gp100_ltc = { + .oneinit = gp100_ltc_oneinit, + .init = gp100_ltc_init, + .intr = gp100_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, + .invalidate = gf100_ltc_invalidate, + .flush = gf100_ltc_flush, +}; + +int +gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +{ + return nvkm_ltc_new_(&gp100_ltc, device, index, pltc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index 6d81c695ed0d..8b95f96e3ffa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -2,6 +2,7 @@ #define __NVKM_LTC_PRIV_H__ #define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev) #include <subdev/ltc.h> +#include <core/enum.h> int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, int index, struct nvkm_ltc **); @@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); void gf100_ltc_invalidate(struct nvkm_ltc *); void gf100_ltc_flush(struct nvkm_ltc *); +extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[]; void gm107_ltc_intr(struct nvkm_ltc *); +void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts); void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32); void gm107_ltc_cbc_wait(struct nvkm_ltc *); void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild index 49695ac7be2e..12943f92c206 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild @@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o nvkm-y += nvkm/subdev/mc/gf100.o nvkm-y += nvkm/subdev/mc/gk104.o nvkm-y += nvkm/subdev/mc/gk20a.o +nvkm-y += nvkm/subdev/mc/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c index 350a8caa84c8..6b25e25f9eba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c @@ -27,43 +27,67 @@ #include <subdev/top.h> void -nvkm_mc_unk260(struct nvkm_mc *mc, u32 data) +nvkm_mc_unk260(struct nvkm_device *device, u32 data) { - if (mc->func->unk260) + struct nvkm_mc *mc = device->mc; + if (likely(mc) && mc->func->unk260) mc->func->unk260(mc, data); } void -nvkm_mc_intr_unarm(struct nvkm_mc *mc) +nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en) { - return mc->func->intr_unarm(mc); + struct nvkm_mc *mc = device->mc; + const struct nvkm_mc_map 
*map; + if (likely(mc) && mc->func->intr_mask) { + u32 mask = nvkm_top_intr_mask(device, devidx); + for (map = mc->func->intr; !mask && map->stat; map++) { + if (map->unit == devidx) + mask = map->stat; + } + mc->func->intr_mask(mc, mask, en ? mask : 0); + } +} + +void +nvkm_mc_intr_unarm(struct nvkm_device *device) +{ + struct nvkm_mc *mc = device->mc; + if (likely(mc)) + mc->func->intr_unarm(mc); } void -nvkm_mc_intr_rearm(struct nvkm_mc *mc) +nvkm_mc_intr_rearm(struct nvkm_device *device) { - return mc->func->intr_rearm(mc); + struct nvkm_mc *mc = device->mc; + if (likely(mc)) + mc->func->intr_rearm(mc); } static u32 -nvkm_mc_intr_mask(struct nvkm_mc *mc) +nvkm_mc_intr_stat(struct nvkm_mc *mc) { - u32 intr = mc->func->intr_mask(mc); + u32 intr = mc->func->intr_stat(mc); if (WARN_ON_ONCE(intr == 0xffffffff)) intr = 0; /* likely fallen off the bus */ return intr; } void -nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) +nvkm_mc_intr(struct nvkm_device *device, bool *handled) { - struct nvkm_device *device = mc->subdev.device; + struct nvkm_mc *mc = device->mc; struct nvkm_subdev *subdev; - const struct nvkm_mc_map *map = mc->func->intr; - u32 stat, intr = nvkm_mc_intr_mask(mc); + const struct nvkm_mc_map *map; + u32 stat, intr; u64 subdevs; - stat = nvkm_top_intr(device->top, intr, &subdevs); + if (unlikely(!mc)) + return; + + intr = nvkm_mc_intr_stat(mc); + stat = nvkm_top_intr(device, intr, &subdevs); while (subdevs) { enum nvkm_devidx subidx = __ffs64(subdevs); subdev = nvkm_device_subdev(device, subidx); @@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) subdevs &= ~BIT_ULL(subidx); } - while (map->stat) { + for (map = mc->func->intr; map->stat; map++) { if (intr & map->stat) { subdev = nvkm_device_subdev(device, map->unit); if (subdev) nvkm_subdev_intr(subdev); stat &= ~map->stat; } - map++; } if (stat) @@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) *handled = intr != 0; } -static void -nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx) +static u32 +nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, + enum nvkm_devidx devidx) { - struct nvkm_device *device = mc->subdev.device; + struct nvkm_mc *mc = device->mc; const struct nvkm_mc_map *map; - u64 pmc_enable; - - if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) { - for (map = mc->func->reset; map && map->stat; map++) { - if (map->unit == devidx) { - pmc_enable = map->stat; - break; + u64 pmc_enable = 0; + if (likely(mc)) { + if (!(pmc_enable = nvkm_top_reset(device, devidx))) { + for (map = mc->func->reset; map && map->stat; map++) { + if (!isauto || !map->noauto) { + if (map->unit == devidx) { + pmc_enable = map->stat; + break; + } + } } } } + return pmc_enable; +} +void +nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx) +{ + u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx); if (pmc_enable) { nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); @@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx) } void -nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx) +nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx) { - if (likely(mc)) - nvkm_mc_reset_(mc, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + if (pmc_enable) + nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); +} + +void +nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx) +{ + u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + if 
(pmc_enable) { + nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); + nvkm_rd32(device, 0x000200); + } } static int nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend) { - struct nvkm_mc *mc = nvkm_mc(subdev); - nvkm_mc_intr_unarm(mc); + nvkm_mc_intr_unarm(subdev->device); return 0; } @@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev) struct nvkm_mc *mc = nvkm_mc(subdev); if (mc->func->init) mc->func->init(mc); - nvkm_mc_intr_rearm(mc); + nvkm_mc_intr_rearm(subdev->device); return 0; } @@ -148,16 +191,21 @@ nvkm_mc = { .fini = nvkm_mc_fini, }; +void +nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device, + int index, struct nvkm_mc *mc) +{ + nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev); + mc->func = func; +} + int nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, int index, struct nvkm_mc **pmc) { struct nvkm_mc *mc; - if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - - nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev); - mc->func = func; + nvkm_mc_ctor(func, device, index, *pmc); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c index 5c85b47f071d..c3d66ef5dc12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c @@ -57,7 +57,7 @@ g84_mc = { .intr = g84_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = g84_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c index 0280b43cc10c..93ad4982ce5f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c @@ -57,7 +57,7 @@ g98_mc = { .intr = g98_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = g98_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c index 8397e223bd43..d2c4d6033abb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c @@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc) } u32 -gf100_mc_intr_mask(struct nvkm_mc *mc) +gf100_mc_intr_stat(struct nvkm_mc *mc) { struct nvkm_device *device = mc->subdev.device; u32 intr0 = nvkm_rd32(device, 0x000100); @@ -85,6 +85,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc) } void +gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat) +{ + struct nvkm_device *device = mc->subdev.device; + nvkm_mask(device, 0x000640, mask, stat); + nvkm_mask(device, 0x000644, mask, stat); +} + +void gf100_mc_unk260(struct nvkm_mc *mc, u32 data) { nvkm_wr32(mc->subdev.device, 0x000260, data); @@ -97,6 +105,7 @@ gf100_mc = { .intr_unarm = gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gf100_mc_reset, .unk260 = gf100_mc_unk260, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c index 317464212c7d..7b8c6ecad1a5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c @@ -26,6 +26,7 @@ const struct nvkm_mc_map gk104_mc_reset[] = { { 0x00000100, NVKM_ENGINE_FIFO }, + { 0x00002000, NVKM_SUBDEV_PMU, true }, {} }; @@ -53,6 +54,7 @@ gk104_mc = { .intr_unarm = 
gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gk104_mc_reset, .unk260 = gf100_mc_unk260, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c index 60b044f517ed..ca1bf3279dbe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c @@ -30,6 +30,7 @@ gk20a_mc = { .intr_unarm = gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gk104_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c new file mode 100644 index 000000000000..4d22f4abd6de --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -0,0 +1,103 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define gp100_mc(p) container_of((p), struct gp100_mc, base) +#include "priv.h" + +struct gp100_mc { + struct nvkm_mc base; + spinlock_t lock; + bool intr; + u32 mask; +}; + +static void +gp100_mc_intr_update(struct gp100_mc *mc) +{ + struct nvkm_device *device = mc->base.subdev.device; + u32 mask = mc->intr ? 
mc->mask : 0, i; + for (i = 0; i < 2; i++) { + nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask); + nvkm_wr32(device, 0x000160 + (i * 0x04), mask); + } +} + +static void +gp100_mc_intr_unarm(struct nvkm_mc *base) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->intr = false; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static void +gp100_mc_intr_rearm(struct nvkm_mc *base) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->intr = true; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static void +gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->mask = (mc->mask & ~mask) | intr; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static const struct nvkm_mc_func +gp100_mc = { + .init = nv50_mc_init, + .intr = gk104_mc_intr, + .intr_unarm = gp100_mc_intr_unarm, + .intr_rearm = gp100_mc_intr_rearm, + .intr_mask = gp100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, + .reset = gk104_mc_reset, +}; + +int +gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +{ + struct gp100_mc *mc; + + if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL))) + return -ENOMEM; + nvkm_mc_ctor(&gp100_mc, device, index, &mc->base); + *pmc = &mc->base; + + spin_lock_init(&mc->lock); + mc->intr = false; + mc->mask = 0x7fffffff; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c index aad0ba95bf18..99d50a3d956f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c @@ -53,13 +53,20 @@ gt215_mc_intr[] = { {}, }; +static void +gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat) +{ + nvkm_mask(mc->subdev.device, 0x000640, mask, stat); +} + static const struct nvkm_mc_func gt215_mc = { .init = nv50_mc_init, .intr = gt215_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_mask = gt215_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = gt215_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c index a062624e906b..6509defd1460 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c @@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc) } u32 -nv04_mc_intr_mask(struct nvkm_mc *mc) +nv04_mc_intr_stat(struct nvkm_mc *mc) { return nvkm_rd32(mc->subdev.device, 0x000100); } @@ -75,7 +75,7 @@ nv04_mc = { .intr = nv04_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv04_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c index 55f0b9166b52..9213107901e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c @@ -39,7 +39,7 @@ nv11_mc = { .intr = nv11_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv04_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c index 
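/*
 * The hunks around here split one overloaded hook in two: intr_stat()
 * (formerly intr_mask()) reads the pending-interrupt bits, while the new
 * intr_mask() enables or disables delivery for specific units.  Below is a
 * standalone sketch of the gp100 flavour above, which keeps a software
 * shadow of the mask (field and register stand-ins are invented; the real
 * code serialises these updates with a spinlock).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mc_shadow {
	int armed;	/* toggled by intr_rearm()/intr_unarm()           */
	uint32_t mask;	/* edited by intr_mask()                          */
	uint32_t en;	/* stand-in for the hardware enable register pair */
};

static void mc_update(struct mc_shadow *mc)
{
	/* only an armed controller exposes the mask to the hardware */
	mc->en = mc->armed ? mc->mask : 0;
}

static void mc_intr_mask(struct mc_shadow *mc, uint32_t mask, uint32_t intr)
{
	mc->mask = (mc->mask & ~mask) | intr;	/* clear, then set */
	mc_update(mc);
}

int main(void)
{
	struct mc_shadow mc = { .armed = 1, .mask = 0x7fffffff };

	mc_intr_mask(&mc, 0x2000, 0);		/* mask one unit's bit */
	printf("%08" PRIx32 "\n", mc.en);	/* 7fffdfff            */
	mc.armed = 0;
	mc_update(&mc);				/* unarmed: all quiet  */
	printf("%08" PRIx32 "\n", mc.en);	/* 00000000            */
	return 0;
}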
c40fa67f79a5..64bf5bbf8146 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c @@ -48,7 +48,7 @@ nv17_mc = { .intr = nv17_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c index cc56271db564..65fa44a64b98 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c @@ -43,7 +43,7 @@ nv44_mc = { .intr = nv17_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c index 343b6078580d..fe93b4fd7100 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c @@ -50,7 +50,7 @@ nv50_mc = { .intr = nv50_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h index a12038118512..4f0576a06d24 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h @@ -3,12 +3,15 @@ #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev) #include <subdev/mc.h> +void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, + int index, struct nvkm_mc *); int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int index, struct nvkm_mc **); struct nvkm_mc_map { u32 stat; u32 unit; + bool noauto; }; struct nvkm_mc_func { @@ -18,8 +21,10 @@ struct nvkm_mc_func { void (*intr_unarm)(struct nvkm_mc *); /* enable reporting of interrupts to host */ void (*intr_rearm)(struct nvkm_mc *); + /* (un)mask delivery of specific interrupts */ + void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat); /* retrieve pending interrupt mask (NV_PMC_INTR) */ - u32 (*intr_mask)(struct nvkm_mc *); + u32 (*intr_stat)(struct nvkm_mc *); const struct nvkm_mc_map *reset; void (*unk260)(struct nvkm_mc *, u32); }; @@ -27,7 +32,7 @@ struct nvkm_mc_func { void nv04_mc_init(struct nvkm_mc *); void nv04_mc_intr_unarm(struct nvkm_mc *); void nv04_mc_intr_rearm(struct nvkm_mc *); -u32 nv04_mc_intr_mask(struct nvkm_mc *); +u32 nv04_mc_intr_stat(struct nvkm_mc *); extern const struct nvkm_mc_map nv04_mc_reset[]; extern const struct nvkm_mc_map nv17_mc_intr[]; @@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *); void gf100_mc_intr_unarm(struct nvkm_mc *); void gf100_mc_intr_rearm(struct nvkm_mc *); -u32 gf100_mc_intr_mask(struct nvkm_mc *); +void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32); +u32 gf100_mc_intr_stat(struct nvkm_mc *); void gf100_mc_unk260(struct nvkm_mc *, u32); extern const struct nvkm_mc_map gk104_mc_intr[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild index 3c2519fdeb81..2a31b7d66a6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild @@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o nvkm-y += nvkm/subdev/pci/gf100.o nvkm-y += nvkm/subdev/pci/gf106.o nvkm-y += nvkm/subdev/pci/gk104.o +nvkm-y += 
nvkm/subdev/pci/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index 6b0328bd7eed..eb9b278198b2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -69,15 +69,13 @@ static irqreturn_t nvkm_pci_intr(int irq, void *arg) { struct nvkm_pci *pci = arg; - struct nvkm_mc *mc = pci->subdev.device->mc; + struct nvkm_device *device = pci->subdev.device; bool handled = false; - if (likely(mc)) { - nvkm_mc_intr_unarm(mc); - if (pci->msi) - pci->func->msi_rearm(pci); - nvkm_mc_intr(mc, &handled); - nvkm_mc_intr_rearm(mc); - } + nvkm_mc_intr_unarm(device); + if (pci->msi) + pci->func->msi_rearm(pci); + nvkm_mc_intr(device, &handled); + nvkm_mc_intr_rearm(device); return handled ? IRQ_HANDLED : IRQ_NONE; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c new file mode 100644 index 000000000000..82c5234a06ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c @@ -0,0 +1,44 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "priv.h" + +static void +gp100_pci_msi_rearm(struct nvkm_pci *pci) +{ + nvkm_pci_wr32(pci, 0x0704, 0x00000000); +} + +static const struct nvkm_pci_func +gp100_pci_func = { + .rd32 = nv40_pci_rd32, + .wr08 = nv40_pci_wr08, + .wr32 = nv40_pci_wr32, + .msi_rearm = gp100_pci_msi_rearm, +}; + +int +gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +{ + return nvkm_pci_new_(&gp100_pci_func, device, index, ppci); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c index 213fdba6cfa0..314be2192b7d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c @@ -19,8 +19,9 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" + +#include <subdev/mc.h> #include <subdev/timer.h> static const char * @@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb) int ret; /* enable engine */ - nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask); - nvkm_rd32(device, 0x200); + nvkm_mc_enable(device, sb->devidx); ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0); if (ret < 0) { - nvkm_mask(device, 0x200, sb->enable_mask, 0x0); nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n"); + nvkm_mc_disable(device, sb->devidx); return ret; } @@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb) /* enable IRQs */ nvkm_wr32(device, sb->base + 0x010, 0xff); - nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask); - nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask); + nvkm_mc_intr_mask(device, sb->devidx, true); return 0; } @@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb) struct nvkm_device *device = sb->subdev.device; /* disable IRQs and wait for any previous code to complete */ - nvkm_mask(device, 0x644, sb->irq_mask, 0x0); - nvkm_mask(device, 0x640, sb->irq_mask, 0x0); + nvkm_mc_intr_mask(device, sb->devidx, false); nvkm_wr32(device, sb->base + 0x014, 0xff); falcon_wait_idle(device, sb->base); /* disable engine */ - nvkm_mask(device, 0x200, sb->enable_mask, 0x0); + nvkm_mc_disable(device, sb->devidx); return 0; } @@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev) return ret; } - /* - * Build all blobs - the same blobs can be used to perform secure boot - * multiple times - */ - if (sb->func->prepare_blobs) - ret = sb->func->prepare_blobs(sb); - - return ret; + return 0; } static int @@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func, /* setup the performing falcon's base address and masks */ switch (func->boot_falcon) { case NVKM_SECBOOT_FALCON_PMU: + sb->devidx = NVKM_SUBDEV_PMU; sb->base = 0x10a000; - sb->irq_mask = 0x1000000; - sb->enable_mask = 0x2000; break; default: nvkm_error(&sb->subdev, "invalid secure boot falcon\n"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c index cc100dc940ea..f1e2dc914366 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c @@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb) /* Write LS blob */ ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob); + if (ret) + nvkm_gpuobj_del(&gsb->ls_blob); cleanup: ls_ucode_mgr_cleanup(&mgr); @@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb) int ret; /* Load and prepare the managed falcon's firmwares */ - ret = gm200_secboot_prepare_ls_blob(gsb); - if (ret) - return ret; + if (!gsb->ls_blob) { + ret = gm200_secboot_prepare_ls_blob(gsb); + if (ret) + return ret; + } /* Load the HS firmware that will load the LS firmwares */ - ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", - &gsb->acr_load_blob, - &gsb->acr_load_bl_desc, true); - if (ret) - return ret; + if (!gsb->acr_load_blob) { + ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", + &gsb->acr_load_blob, + &gsb->acr_load_bl_desc, true); + if (ret) + return ret; + } /* Load the HS firmware bootloader */ - ret = gm200_secboot_prepare_hsbl_blob(gsb); - if (ret) - return ret; + if (!gsb->hsbl_blob) { + ret = gm200_secboot_prepare_hsbl_blob(gsb); + if (ret) + return ret; + } return 0; } static int -gm200_secboot_prepare_blobs(struct nvkm_secboot *sb) 
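/*
 * A standalone sketch (names invented) of the deferred-firmware pattern
 * the secboot rework around here introduces: prepare_blobs() no longer
 * runs at oneinit time but on the first reset(), guarded by a firmware_ok
 * flag, so the expensive load happens once and a failure can be retried
 * on the next reset().
 */
#include <stdbool.h>
#include <stdio.h>

struct sb_model {
	bool firmware_ok;
	int (*prepare_blobs)(void);
};

static int blobs_ready(struct sb_model *sb)
{
	int ret;

	if (sb->firmware_ok)	/* blobs already built: nothing to do */
		return 0;

	ret = sb->prepare_blobs();
	if (ret)
		return ret;	/* flag stays false: retried on next reset */

	sb->firmware_ok = true;
	return 0;
}

static int fake_prepare(void)
{
	puts("preparing blobs");
	return 0;
}

int main(void)
{
	struct sb_model sb = { .prepare_blobs = fake_prepare };

	blobs_ready(&sb);	/* prepares once */
	blobs_ready(&sb);	/* no-op         */
	return 0;
}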
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb) { - struct gm200_secboot *gsb = gm200_secboot(sb); int ret; ret = gm20x_secboot_prepare_blobs(gsb); @@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb) return ret; /* dGPU only: load the HS firmware that unprotects the WPR region */ - ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", - &gsb->acr_unload_blob, - &gsb->acr_unload_bl_desc, false); - if (ret) - return ret; + if (!gsb->acr_unload_blob) { + ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", + &gsb->acr_unload_blob, + &gsb->acr_unload_bl_desc, false); + if (ret) + return ret; + } return 0; } +static int +gm200_secboot_blobs_ready(struct gm200_secboot *gsb) +{ + struct nvkm_subdev *subdev = &gsb->base.subdev; + int ret; + + /* firmware already loaded, nothing to do... */ + if (gsb->firmware_ok) + return 0; + + ret = gsb->func->prepare_blobs(gsb); + if (ret) { + nvkm_error(subdev, "failed to load secure firmware\n"); + return ret; + } + + gsb->firmware_ok = true; + + return 0; +} /* @@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) struct gm200_secboot *gsb = gm200_secboot(sb); int ret; + /* Make sure all blobs are ready */ + ret = gm200_secboot_blobs_ready(gsb); + if (ret) + return ret; + /* * Dummy GM200 implementation: perform secure boot each time we are * called on FECS. Since only FECS and GPCCS are managed and started @@ -1373,7 +1407,6 @@ gm200_secboot = { .dtor = gm200_secboot_dtor, .init = gm200_secboot_init, .fini = gm200_secboot_fini, - .prepare_blobs = gm200_secboot_prepare_blobs, .reset = gm200_secboot_reset, .start = gm200_secboot_start, .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) | @@ -1415,6 +1448,7 @@ gm200_secboot_func = { .bl_desc_size = sizeof(struct gm200_flcn_bl_desc), .fixup_bl_desc = gm200_secboot_fixup_bl_desc, .fixup_hs_desc = gm200_secboot_fixup_hs_desc, + .prepare_blobs = gm200_secboot_prepare_blobs, }; int @@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index 684320484b70..d5395ebfe8d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c @@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc { u32 data_size; }; +static int +gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb) +{ + struct nvkm_subdev *subdev = &gsb->base.subdev; + int acr_size; + int ret; + + ret = gm20x_secboot_prepare_blobs(gsb); + if (ret) + return ret; + + acr_size 
= gsb->acr_load_blob->size; + /* + * On Tegra the WPR region is set by the bootloader. It is illegal for + * the HS blob to be larger than this region. + */ + if (acr_size > gsb->wpr_size) { + nvkm_error(subdev, "WPR region too small for FW blob!\n"); + nvkm_error(subdev, "required: %dB\n", acr_size); + nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size); + return -ENOSPC; + } + + return 0; +} + /** * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW * @@ -88,6 +114,7 @@ gm20b_secboot_func = { .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc), .fixup_bl_desc = gm20b_secboot_fixup_bl_desc, .fixup_hs_desc = gm20b_secboot_fixup_hs_desc, + .prepare_blobs = gm20b_secboot_prepare_blobs, }; @@ -147,32 +174,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb) #endif static int -gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int acr_size; - int ret; - - ret = gm20x_secboot_prepare_blobs(gsb); - if (ret) - return ret; - - acr_size = gsb->acr_load_blob->size; - /* - * On Tegra the WPR region is set by the bootloader. It is illegal for - * the HS blob to be larger than this region. - */ - if (acr_size > gsb->wpr_size) { - nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n"); - nvkm_error(&sb->subdev, "required: %dB\n", acr_size); - nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size); - return -ENOSPC; - } - - return 0; -} - -static int gm20b_secboot_init(struct nvkm_secboot *sb) { struct gm200_secboot *gsb = gm200_secboot(sb); @@ -189,7 +190,6 @@ static const struct nvkm_secboot_func gm20b_secboot = { .dtor = gm200_secboot_dtor, .init = gm20b_secboot_init, - .prepare_blobs = gm20b_secboot_prepare_blobs, .reset = gm200_secboot_reset, .start = gm200_secboot_start, .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS), diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h index f2b09dee7c5d..a9a8a0e1017e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h @@ -30,7 +30,6 @@ struct nvkm_secboot_func { int (*init)(struct nvkm_secboot *); int (*fini)(struct nvkm_secboot *, bool suspend); void *(*dtor)(struct nvkm_secboot *); - int (*prepare_blobs)(struct nvkm_secboot *); int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon); int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon); @@ -147,10 +146,8 @@ struct hsflcn_acr_desc { * @inst: instance block for HS falcon * @pgd: page directory for the HS falcon * @vm: address space used by the HS falcon - * @bl_desc_size: size of the BL descriptor used by this chip. - * @fixup_bl_desc: hook that generates the proper BL descriptor format from - * the generic GM200 format into a data array of size - * bl_desc_size + * @falcon_state: current state of the managed falcons + * @firmware_ok: whether the firmware blobs have been created */ struct gm200_secboot { struct nvkm_secboot base; @@ -196,9 +193,19 @@ struct gm200_secboot { RUNNING, } falcon_state[NVKM_SECBOOT_FALCON_END]; + bool firmware_ok; }; #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) +/** + * Contains functions we wish to abstract between GM200-like implementations + * @bl_desc_size: size of the BL descriptor used by this chip. 
+ * @fixup_bl_desc: hook that generates the proper BL descriptor format from + * the generic GM200 format into a data array of size + * bl_desc_size + * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used + * @prepare_blobs: prepares the various blobs needed for secure booting + */ struct gm200_secboot_func { /* * Size of the bootloader descriptor for this chip. A block of this @@ -214,6 +221,7 @@ struct gm200_secboot_func { * we want the HS FW to set up. */ void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *); + int (*prepare_blobs)(struct gm200_secboot *); }; int gm200_secboot_init(struct nvkm_secboot *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c index a1b264664aad..fe063d5728e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c @@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top) } u32 -nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index) +nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; if (top) { @@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index) } u32 -nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs) +nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx) { + struct nvkm_top *top = device->top; + struct nvkm_top_device *info; + + if (top) { + list_for_each_entry(info, &top->device, head) { + if (info->index == devidx && info->intr >= 0) + return BIT(info->intr); + } + } + + return 0; +} + +u32 +nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs) +{ + struct nvkm_top *top = device->top; struct nvkm_top_device *info; u64 subdevs = 0; u32 handled = 0; @@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs) } enum nvkm_devidx -nvkm_top_fault(struct nvkm_top *top, int fault) +nvkm_top_fault(struct nvkm_device *device, int fault) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; list_for_each_entry(info, &top->device, head) { @@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault) } enum nvkm_devidx -nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn) +nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; int n = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index e06acc340e99..efac3402f9dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top) struct nvkm_subdev *subdev = &top->subdev; struct nvkm_device *device = subdev->device; struct nvkm_top_device *info = NULL; - u32 data, type; + u32 data, type, inst; int i; for (i = 0; i < 64; i++) { @@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top) if (!(info = nvkm_top_device_new(top))) return -ENOMEM; type = ~0; + inst = 0; } data = nvkm_rd32(device, 0x022700 + (i * 0x04)); @@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top) case 0x00000000: /* NOT_VALID */ continue; case 0x00000001: /* DATA */ + inst = (data & 0x3c000000) >> 26; info->addr = (data & 0x00fff000); info->fault = (data & 0x000000f8) >> 3; break; @@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top) continue; /* Translate engine type to NVKM engine identifier. 
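/*
 * A standalone sketch (identifiers invented) of what the A_()/B_() macros
 * just below do with the new `inst` field parsed from the TOP table:
 * single-instance engine types map to one fixed id, while instanced types
 * such as CE index a contiguous id range, with a bounds check.
 */
#include <stdio.h>

enum engine {
	ENGINE_GR,
	ENGINE_CE0, ENGINE_CE1, ENGINE_CE2,
	ENGINE_CE_LAST = ENGINE_CE2,
	ENGINE_NR
};

static int engine_for(unsigned type, unsigned inst)
{
	switch (type) {
	case 0x00:	/* A_(GR): only instance 0 is valid */
		return inst == 0 ? ENGINE_GR : -1;
	case 0x13:	/* B_(CE): per-instance identifiers */
		if (ENGINE_CE0 + inst <= ENGINE_CE_LAST)
			return ENGINE_CE0 + (int)inst;
		return -1;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d %d\n", engine_for(0x13, 0), engine_for(0x13, 2),
	       engine_for(0x13, 5));	/* 1 3 -1 */
	return 0;
}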
*/ +#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A +#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \ + info->index = NVKM_ENGINE_##A##0 + inst switch (type) { - case 0x00000000: info->index = NVKM_ENGINE_GR; break; - case 0x00000001: info->index = NVKM_ENGINE_CE0; break; - case 0x00000002: info->index = NVKM_ENGINE_CE1; break; - case 0x00000003: info->index = NVKM_ENGINE_CE2; break; - case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break; - case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break; - case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break; - case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break; - case 0x0000000c: info->index = NVKM_ENGINE_VIC; break; - case 0x0000000d: info->index = NVKM_ENGINE_SEC; break; - case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break; - case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break; - case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break; + case 0x00000000: A_(GR ); break; + case 0x00000001: A_(CE0 ); break; + case 0x00000002: A_(CE1 ); break; + case 0x00000003: A_(CE2 ); break; + case 0x00000008: A_(MSPDEC); break; + case 0x00000009: A_(MSPPP ); break; + case 0x0000000a: A_(MSVLD ); break; + case 0x0000000b: A_(MSENC ); break; + case 0x0000000c: A_(VIC ); break; + case 0x0000000d: A_(SEC ); break; + case 0x0000000e: B_(NVENC ); break; + case 0x0000000f: A_(NVENC1); break; + case 0x00000010: A_(NVDEC ); break; + case 0x00000013: B_(CE ); break; break; default: break; } - nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d " - "runlist %2d intr %2d reset %2d\n", type, + nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " + "engine %2d runlist %2d intr %2d " + "reset %2d\n", type, inst, info->index == NVKM_SUBDEV_NR ? NULL : nvkm_subdev_name[info->index], info->addr, info->fault, info->engine, info->runlist, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index 6b2d7531a7ff..1c3d23b0e84a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); if (data && info.vidmask && info.base && info.step) { + volt->min_uv = info.min; + volt->max_uv = info.max; for (i = 0; i < info.vidmask + 1; i++) { if (info.base >= info.min && info.base <= info.max) { @@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) } volt->vid_mask = info.vidmask; } else if (data && info.vidmask) { + volt->min_uv = 0xffffffff; + volt->max_uv = 0; for (i = 0; i < cnt; i++) { data = nvbios_volt_entry_parse(bios, i, &ver, &hdr, &ivid); @@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) volt->vid[volt->vid_nr].uv = ivid.voltage; volt->vid[volt->vid_nr].vid = ivid.vid; volt->vid_nr++; + volt->min_uv = min(volt->min_uv, ivid.voltage); + volt->max_uv = max(volt->max_uv, ivid.voltage); } } volt->vid_mask = info.vidmask; + } else if (data && info.type == NVBIOS_VOLT_PWM) { + volt->min_uv = info.base; + volt->max_uv = info.base + info.pwm_range; } } @@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device, volt->func = func; /* Assuming the non-bios device should build the voltage table later */ - if (bios) + if (bios) { nvkm_volt_parse_bios(bios, volt); + nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n", + volt->min_uv, volt->max_uv); + 
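/*
 * A standalone sketch (values invented) of the min/max scan that the
 * nvkm_volt_parse_bios() change above performs over entry-based voltage
 * tables; range-based and PWM tables take min_uv/max_uv straight from the
 * table header instead.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct volt_entry { uint32_t uv; };

int main(void)
{
	const struct volt_entry vid[] = { { 810000 }, { 950000 }, { 840000 } };
	uint32_t min_uv = UINT32_MAX, max_uv = 0;
	size_t i;

	for (i = 0; i < sizeof(vid) / sizeof(vid[0]); i++) {
		if (vid[i].uv < min_uv) min_uv = vid[i].uv;
		if (vid[i].uv > max_uv) max_uv = vid[i].uv;
	}
	printf("min %" PRIu32 " max %" PRIu32 "\n", min_uv, max_uv);
	return 0;	/* min 810000 max 950000 */
}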
} if (volt->vid_nr) { for (i = 0; i < volt->vid_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c index d554455326da..ce5d83cdc7cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c @@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale, return mv; } -int +static int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo) { + static const int v_scale = 1000; int mv; mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef); - mv = DIV_ROUND_UP(mv, 1000); + mv = DIV_ROUND_UP(mv, v_scale); return mv * 1000; } -int +static int gk20a_volt_vid_get(struct nvkm_volt *base) { struct gk20a_volt *volt = gk20a_volt(base); @@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base) return -EINVAL; } -int +static int gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid) { struct gk20a_volt *volt = gk20a_volt(base); @@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid) return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000); } -int +static int gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition) { struct gk20a_volt *volt = gk20a_volt(base); @@ -143,9 +144,9 @@ gk20a_volt = { }; int -_gk20a_volt_ctor(struct nvkm_device *device, int index, - const struct cvb_coef *coefs, int nb_coefs, - struct gk20a_volt *volt) +gk20a_volt_ctor(struct nvkm_device *device, int index, + const struct cvb_coef *coefs, int nb_coefs, + int vmin, struct gk20a_volt *volt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int i, uv; @@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index, volt->base.vid_nr = nb_coefs; for (i = 0; i < volt->base.vid_nr; i++) { volt->base.vid[i].vid = i; - volt->base.vid[i].uv = - gk20a_volt_calc_voltage(&coefs[i], - tdev->gpu_speedo); + volt->base.vid[i].uv = max( + gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo), + vmin); nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i, volt->base.vid[i].vid, volt->base.vid[i].uv); } @@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) return -ENOMEM; *pvolt = &volt->base; - return _gk20a_volt_ctor(device, index, gk20a_cvb_coef, - ARRAY_SIZE(gk20a_cvb_coef), volt); + return gk20a_volt_ctor(device, index, gk20a_cvb_coef, + ARRAY_SIZE(gk20a_cvb_coef), 0, volt); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h index 0fa3b502bcf8..6a6c97f9684e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h @@ -37,13 +37,8 @@ struct gk20a_volt { struct regulator *vdd; }; -int _gk20a_volt_ctor(struct nvkm_device *device, int index, - const struct cvb_coef *coefs, int nb_coefs, - struct gk20a_volt *volt); - -int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo); -int gk20a_volt_vid_get(struct nvkm_volt *volt); -int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid); -int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition); +int gk20a_volt_ctor(struct nvkm_device *device, int index, + const struct cvb_coef *coefs, int nb_coefs, + int vmin, struct gk20a_volt *volt); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c index 49b5ecb701e4..74db4d28930f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c @@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = { /* 921600 */ { 2647676, -106455, 1632 }, }; +static const struct cvb_coef gm20b_na_cvb_coef[] = { + /* KHz, c0, c1, c2, c3, c4, c5 */ + /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 }, + /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 }, + /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 }, + /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 }, + /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 }, + /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 }, + /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 }, + /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 }, + /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 }, + /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 }, + /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 }, + /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 }, + /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 }, +}; + +const u32 speedo_to_vmin[] = { + /* 0, 1, 2, 3, 4, */ + 950000, 840000, 818750, 840000, 810000, +}; + int gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) { + struct nvkm_device_tegra *tdev = device->func->tegra(device); struct gk20a_volt *volt; + u32 vmin; + + if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) { + nvdev_error(device, "unsupported speedo %d\n", + tdev->gpu_speedo_id); + return -EINVAL; + } volt = kzalloc(sizeof(*volt), GFP_KERNEL); if (!volt) return -ENOMEM; *pvolt = &volt->base; - return _gk20a_volt_ctor(device, index, gm20b_cvb_coef, - ARRAY_SIZE(gm20b_cvb_coef), volt); + vmin = speedo_to_vmin[tdev->gpu_speedo_id]; + + if (tdev->gpu_speedo_id >= 1) + return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef, + ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt); + else + return gk20a_volt_ctor(device, index, gm20b_cvb_coef, + ARRAY_SIZE(gm20b_cvb_coef), vmin, volt); } diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 336ad4de9981..556f81f6b2c7 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -4,11 +4,6 @@ config DRM_OMAP depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM select OMAP2_DSS select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_SYS_FOPS default n help DRM display driver for OMAP2/3/4 based boards. diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index 2a618afe0f53..c226da145fb3 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -1,80 +1,80 @@ menu "OMAPDRM External Display Device Drivers" -config DISPLAY_ENCODER_OPA362 +config DRM_OMAP_ENCODER_OPA362 tristate "OPA362 external analog amplifier" help Driver for OPA362 external analog TV amplifier controlled through a GPIO. -config DISPLAY_ENCODER_TFP410 +config DRM_OMAP_ENCODER_TFP410 tristate "TFP410 DPI to DVI Encoder" help Driver for TFP410 DPI to DVI encoder. -config DISPLAY_ENCODER_TPD12S015 +config DRM_OMAP_ENCODER_TPD12S015 tristate "TPD12S015 HDMI ESD protection and level shifter" help Driver for TPD12S015, which offers HDMI ESD protection and level shifting. -config DISPLAY_CONNECTOR_DVI +config DRM_OMAP_CONNECTOR_DVI tristate "DVI Connector" depends on I2C help Driver for a generic DVI connector. -config DISPLAY_CONNECTOR_HDMI +config DRM_OMAP_CONNECTOR_HDMI tristate "HDMI Connector" help Driver for a generic HDMI connector. 
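For the gm20b voltage change above, a minimal standalone sketch of the per-entry clamp now applied in gk20a_volt_ctor(): each CVB-computed voltage is raised to the speedo-derived floor. The speedo_to_vmin values are the patch's own; everything else here is invented for illustration.

#include <stdio.h>

static const unsigned speedo_to_vmin[] = {
	/*   0,      1,      2,      3,      4 */
	950000, 840000, 818750, 840000, 810000,
};

static unsigned vid_uv(unsigned calc_uv, unsigned speedo_id)
{
	unsigned vmin = speedo_to_vmin[speedo_id];
	return calc_uv > vmin ? calc_uv : vmin;	/* max(calc_uv, vmin) */
}

int main(void)
{
	printf("%u %u\n", vid_uv(810000, 1), vid_uv(900000, 1));
	return 0;	/* 840000 900000 */
}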
-config DISPLAY_CONNECTOR_ANALOG_TV +config DRM_OMAP_CONNECTOR_ANALOG_TV tristate "Analog TV Connector" help Driver for a generic analog TV connector. -config DISPLAY_PANEL_DPI +config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" help Driver for generic DPI panels. -config DISPLAY_PANEL_DSI_CM +config DRM_OMAP_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DSI command mode panels. -config DISPLAY_PANEL_SONY_ACX565AKM +config DRM_OMAP_PANEL_SONY_ACX565AKM tristate "ACX565AKM Panel" depends on SPI && BACKLIGHT_CLASS_DEVICE help This is the LCD panel used on Nokia N900 -config DISPLAY_PANEL_LGPHILIPS_LB035Q02 +config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02 tristate "LG.Philips LB035Q02 LCD Panel" depends on SPI help LCD Panel used on the Gumstix Overo Palo35 -config DISPLAY_PANEL_SHARP_LS037V7DW01 +config DRM_OMAP_PANEL_SHARP_LS037V7DW01 tristate "Sharp LS037V7DW01 LCD Panel" depends on BACKLIGHT_CLASS_DEVICE help LCD Panel used in TI's SDP3430 and EVM boards -config DISPLAY_PANEL_TPO_TD028TTEC1 +config DRM_OMAP_PANEL_TPO_TD028TTEC1 tristate "TPO TD028TTEC1 LCD Panel" depends on SPI help LCD panel used in Openmoko. -config DISPLAY_PANEL_TPO_TD043MTEA1 +config DRM_OMAP_PANEL_TPO_TD043MTEA1 tristate "TPO TD043MTEA1 LCD Panel" depends on SPI help LCD Panel used in OMAP3 Pandora -config DISPLAY_PANEL_NEC_NL8048HL11 +config DRM_OMAP_PANEL_NEC_NL8048HL11 tristate "NEC NL8048HL11 Panel" depends on SPI depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile index 9aa176bfbf2e..46baafb1a83e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -1,14 +1,14 @@ -obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o -obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o -obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o -obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o -obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o -obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o -obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o -obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o -obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o -obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o +obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o +obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o +obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o +obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o +obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o +obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += 
panel-nec-nl8048hl11.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 8511c648a15c..3485d1ecd655 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -14,9 +14,10 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -25,7 +26,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if (!ddata->dev->of_node) { - in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 747f26a55e43..684b7aeda411 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -15,10 +15,10 @@ #include <linux/slab.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings dvic_default_timings = { .x_res = 640, .y_res = 480, @@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { adapter = of_get_i2c_adapter_by_node(adapter_node); + of_node_put(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 667ca4a24ece..7bdf83af9797 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -17,10 +17,10 @@ #include <linux/of_gpio.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, .y_res = 480, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 9594ff7a2b0c..fe4e7ec3bab0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -18,9 +18,8 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c 
b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 671806ca7d6a..d768217cefe0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -15,8 +15,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 916a89978387..46855c8f5cbf 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 7c2331be8d15..7f16f985ab22 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -15,11 +15,13 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/regulator/consumer.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -32,6 +34,7 @@ struct panel_drv_data { int backlight_gpio; struct gpio_desc *enable_gpio; + struct regulator *vcc_supply; }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (r) return r; + r = regulator_enable(ddata->vcc_supply); + if (r) { + in->ops.dpi->disable(in); + return r; + } + gpiod_set_value_cansleep(ddata->enable_gpio, 1); if (gpio_is_valid(ddata->backlight_gpio)) @@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpio_set_value_cansleep(ddata->backlight_gpio, 0); gpiod_set_value_cansleep(ddata->enable_gpio, 0); + regulator_disable(ddata->vcc_supply); in->ops.dpi->disable(in); @@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev) ddata->enable_gpio = gpio; + /* + * Many different panels are supported by this driver and there are + * probably very different needs for their reset pins in regards to + * timing and order relative to the enable gpio. So for now it's just + * ensured that the reset line isn't active. + */ + gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc"); + if (IS_ERR(ddata->vcc_supply)) + return PTR_ERR(ddata->vcc_supply); + ddata->backlight_gpio = -ENOENT; r = of_get_display_timing(node, "panel-timing", &timing); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 2b118071b5a1..0eae8afaed90 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -25,10 +25,10 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> #include <video/mipi_display.h> +#include "../dss/omapdss.h" + /* DSI Virtual channel. Hardcoded for now. 
*/ #define TCH 0 @@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev) return 0; err_sysfs_create: - if (bldev != NULL) - backlight_device_unregister(bldev); + backlight_device_unregister(bldev); err_bl: destroy_workqueue(ddata->workqueue); err_reg: diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index ac680e1de603..6dfb96cea293 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -17,8 +17,7 @@ #include <linux/gpio.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -51,9 +50,6 @@ struct panel_drv_data { struct omap_video_timings videomode; - /* used for non-DT boot, to be removed */ - int backlight_gpio; - struct gpio_desc *enable_gpio; }; @@ -171,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 1); - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; return 0; @@ -190,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 0); - in->ops.dpi->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -256,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi) ddata->enable_gpio = gpio; - ddata->backlight_gpio = -ENOENT; - in = omapdss_of_find_source_for_first_ep(node); if (IS_ERR(in)) { dev_err(&spi->dev, "failed to find video source\n"); @@ -290,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) if (r) return r; - if (gpio_is_valid(ddata->backlight_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, - GPIOF_OUT_INIT_LOW, "panel backlight"); - if (r) - goto err_gpio; - } - ddata->videomode = lb035q02_timings; dssdev = &ddata->dssdev; @@ -316,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) return 0; err_reg: -err_gpio: omap_dss_put_device(ddata->in); return r; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 38d2920a95e6..fc4c238c9583 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -18,7 +18,7 @@ #include <linux/gpio/consumer.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 4363fffc87e3..3d3efc561ea9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -13,11 +13,11 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git 
a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index deb416736aad..157c512205d1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -33,9 +33,10 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + #define MIPID_CMD_READ_DISP_ID 0x04 #define MIPID_CMD_READ_RED 0x06 #define MIPID_CMD_READ_GREEN 0x07 diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index bd8d85041926..e859b3f893f7 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -28,7 +28,8 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index d93175b03a12..66c6bbe6472b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -19,7 +19,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 7e4e5bebabbe..6a3ebfcd7223 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -35,8 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index f83608b69e68..535240fba671 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -41,8 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" #include "dispc.h" @@ -113,9 +112,14 @@ struct dispc_features { * never both, we can just use this flag for now. 
*/ bool reverse_ilace_field_order:1; + + bool has_gamma_table:1; + + bool has_gamma_i734_bug:1; }; #define DISPC_MAX_NR_FIFOS 5 +#define DISPC_MAX_CHANNEL_GAMMA 4 static struct { struct platform_device *pdev; @@ -135,6 +139,8 @@ static struct { bool ctx_valid; u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; + u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA]; + const struct dispc_features *feat; bool is_enabled; @@ -178,11 +184,19 @@ struct dispc_reg_field { u8 low; }; +struct dispc_gamma_desc { + u32 len; + u32 bits; + u16 reg; + bool has_index; +}; + static const struct { const char *name; u32 vsync_irq; u32 framedone_irq; u32 sync_lost_irq; + struct dispc_gamma_desc gamma; struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; } mgr_desc[] = { [OMAP_DSS_CHANNEL_LCD] = { @@ -190,6 +204,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC, .framedone_irq = DISPC_IRQ_FRAMEDONE, .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE0, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, @@ -207,6 +227,12 @@ static const struct { .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, .framedone_irq = DISPC_IRQ_FRAMEDONETV, .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .gamma = { + .len = 1024, + .bits = 10, + .reg = DISPC_GAMMA_TABLE2, + .has_index = false, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, [DISPC_MGR_FLD_STNTFT] = { }, @@ -224,6 +250,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC2, .framedone_irq = DISPC_IRQ_FRAMEDONE2, .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE1, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, @@ -241,6 +273,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC3, .framedone_irq = DISPC_IRQ_FRAMEDONE3, .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE3, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, @@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane) return unit * 8; } -void dispc_enable_gamma_table(bool enable) -{ - /* - * This is partially implemented to support only disabling of - * the gamma table. 
- */ - if (enable) { - DSSWARN("Gamma table enabling for TV not yet supported"); - return; - } - - REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); -} - static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { if (channel == OMAP_DSS_CHANNEL_DIGIT) @@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div, static unsigned long dispc_fclk_rate(void) { - struct dss_pll *pll; - unsigned long r = 0; + unsigned long r; + enum dss_clk_source src; + + src = dss_get_dispc_clk_source(); - switch (dss_get_dispc_clk_source()) { - case OMAP_DSS_CLK_SRC_FCK: + if (src == DSS_CLK_SRC_FCK) { r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; + r = pll->cinfo.clkout[clkout_idx]; } return r; @@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void) static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) { - struct dss_pll *pll; int lcd; unsigned long r; - u32 l; - - if (dss_mgr_is_lcd(channel)) { - l = dispc_read_reg(DISPC_DIVISORo(channel)); + enum dss_clk_source src; - lcd = FLD_GET(l, 23, 16); + /* for TV, LCLK rate is the FCLK rate */ + if (!dss_mgr_is_lcd(channel)) + return dispc_fclk_rate(); - switch (dss_get_lcd_clk_source(channel)) { - case OMAP_DSS_CLK_SRC_FCK: - r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + src = dss_get_lcd_clk_source(channel); - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + if (src == DSS_CLK_SRC_FCK) { + r = dss_get_dispc_clk_rate(); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; - } + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - return r / lcd; - } else { - return dispc_fclk_rate(); + r = pll->cinfo.clkout[clkout_idx]; } + + lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16); + + return r / lcd; } static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) @@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane) static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; - enum omap_dss_clk_source lcd_clk_src; + enum dss_clk_source lcd_clk_src; seq_printf(s, "- %s -\n", mgr_desc[channel].name); lcd_clk_src = dss_get_lcd_clk_source(channel); - seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name, + dss_get_clk_source_name(lcd_clk_src)); dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); @@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s) { int lcd; u32 l; - enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); + enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); if (dispc_runtime_get()) return; seq_printf(s, "- 
DISPC -\n"); - seq_printf(s, "dispc fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dispc_clk_src), - dss_feat_get_clk_source_name(dispc_clk_src)); + seq_printf(s, "dispc fclk source = %s\n", + dss_get_clk_source_name(dispc_clk_src)); seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); @@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void) REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ } +u32 dispc_mgr_gamma_size(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + + if (!dispc.feat->has_gamma_table) + return 0; + + return gdesc->len; +} +EXPORT_SYMBOL(dispc_mgr_gamma_size); + +static void dispc_mgr_write_gamma_table(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + unsigned int i; + + DSSDBG("%s: channel %d\n", __func__, channel); + + for (i = 0; i < gdesc->len; ++i) { + u32 v = table[i]; + + if (gdesc->has_index) + v |= i << 24; + else if (i == 0) + v |= 1 << 31; + + dispc_write_reg(gdesc->reg, v); + } +} + +static void dispc_restore_gamma_tables(void) +{ + DSSDBG("%s()\n", __func__); + + if (!dispc.feat->has_gamma_table) + return; + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD); + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT); + + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2); + + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3); +} + +static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = { + { .red = 0, .green = 0, .blue = 0, }, + { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, }, +}; + +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + uint i; + + DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__, + channel, length, gdesc->len); + + if (!dispc.feat->has_gamma_table) + return; + + if (lut == NULL || length < 2) { + lut = dispc_mgr_gamma_default_lut; + length = ARRAY_SIZE(dispc_mgr_gamma_default_lut); + } + + for (i = 0; i < length - 1; ++i) { + uint first = i * (gdesc->len - 1) / (length - 1); + uint last = (i + 1) * (gdesc->len - 1) / (length - 1); + uint w = last - first; + u16 r, g, b; + uint j; + + if (w == 0) + continue; + + for (j = 0; j <= w; j++) { + r = (lut[i].red * (w - j) + lut[i+1].red * j) / w; + g = (lut[i].green * (w - j) + lut[i+1].green * j) / w; + b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w; + + r >>= 16 - gdesc->bits; + g >>= 16 - gdesc->bits; + b >>= 16 - gdesc->bits; + + table[first + j] = (r << (gdesc->bits * 2)) | + (g << gdesc->bits) | b; + } + } + + if (dispc.is_enabled) + dispc_mgr_write_gamma_table(channel); +} +EXPORT_SYMBOL(dispc_mgr_set_gamma); + +static int dispc_init_gamma_tables(void) +{ + int channel; + + if (!dispc.feat->has_gamma_table) + return 0; + + for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) { + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *gt; + + if (channel == OMAP_DSS_CHANNEL_LCD2 && + !dss_has_feature(FEAT_MGR_LCD2)) + continue; + + if (channel == OMAP_DSS_CHANNEL_LCD3 && + !dss_has_feature(FEAT_MGR_LCD3)) + continue; + + gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len, + sizeof(u32), GFP_KERNEL); + if (!gt) + return -ENOMEM; + + dispc.gamma_table[channel] = gt; + + dispc_mgr_set_gamma(channel, NULL, 0); + } + return 0; 
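/*
 * The loop in dispc_mgr_set_gamma() above linearly resamples an
 * arbitrary-length drm_color_lut onto the fixed-size hardware table.
 * Worked example with the two-entry default LUT (0 -> U16_MAX) and the
 * 256-entry 8-bit LCD table: first = 0, last = 255, w = 255, so entry
 * j becomes ((0 * (255 - j) + 65535 * j) / 255) >> 8 = (257 * j) >> 8
 * = j, i.e. an identity ramp, which is exactly what
 * dispc_init_gamma_tables() installs by passing a NULL lut.
 */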
+} + static void _omap_dispc_initial_config(void) { u32 l; @@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void) dispc.core_clk_rate = dispc_fclk_rate(); } - /* FUNCGATED */ - if (dss_has_feature(FEAT_FUNCGATED)) + /* Use gamma table mode, instead of palette mode */ + if (dispc.feat->has_gamma_table) + REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3); + + /* For older DSS versions (FEAT_FUNCGATED) this enables + * func-clock auto-gating. For newer versions + * (dispc.feat->has_gamma_table) this enables tv-out gamma tables. + */ + if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table) REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); dispc_setup_color_conv_coef(); @@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static const struct dispc_features omap54xx_dispc_feats = { @@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static int dispc_init_features(struct platform_device *pdev) @@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id) } EXPORT_SYMBOL(dispc_free_irq); +/* + * Workaround for errata i734 in DSS dispc + * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled + * + * For gamma tables to work on LCD1 the GFX plane has to be used at + * least once after DSS HW has come out of reset. The workaround + * sets up a minimal LCD setup with GFX plane and waits for one + * vertical sync irq before disabling the setup and continuing with + * the context restore. The physical outputs are gated during the + * operation. This workaround requires that gamma table's LOADMODE + * is set to 0x2 in DISPC_CONTROL1 register. + * + * For details see: + * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata + * Literature Number: SWPZ037E + * Or some other relevant errata document for the DSS IP version. 
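 * The i734 data below keeps the dummy setup deliberately tiny: an 8x1
 * frame with minimal blanking at a 16 MHz pixel clock, so producing
 * the single frame the workaround needs stalls the resume path for
 * only a few microseconds.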
+ */ + +static const struct dispc_errata_i734_data { + struct omap_video_timings timings; + struct omap_overlay_info ovli; + struct omap_overlay_manager_info mgri; + struct dss_lcd_mgr_config lcd_conf; +} i734 = { + .timings = { + .x_res = 8, .y_res = 1, + .pixelclock = 16000000, + .hsw = 8, .hfp = 4, .hbp = 4, + .vsw = 1, .vfp = 1, .vbp = 1, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .interlace = false, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .double_pixel = false, + }, + .ovli = { + .screen_width = 1, + .width = 1, .height = 1, + .color_mode = OMAP_DSS_COLOR_RGB24U, + .rotation = OMAP_DSS_ROT_0, + .rotation_type = OMAP_DSS_ROT_DMA, + .mirror = 0, + .pos_x = 0, .pos_y = 0, + .out_width = 0, .out_height = 0, + .global_alpha = 0xff, + .pre_mult_alpha = 0, + .zorder = 0, + }, + .mgri = { + .default_color = 0, + .trans_enabled = false, + .partial_alpha_enabled = false, + .cpr_enable = false, + }, + .lcd_conf = { + .io_pad_mode = DSS_IO_PAD_MODE_BYPASS, + .stallmode = false, + .fifohandcheck = false, + .clock_info = { + .lck_div = 1, + .pck_div = 2, + }, + .video_port_width = 24, + .lcden_sig_polarity = 0, + }, +}; + +static struct i734_buf { + size_t size; + dma_addr_t paddr; + void *vaddr; +} i734_buf; + +static int dispc_errata_i734_wa_init(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return 0; + + i734_buf.size = i734.ovli.width * i734.ovli.height * + color_mode_to_bpp(i734.ovli.color_mode) / 8; + + i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size, + &i734_buf.paddr, GFP_KERNEL); + if (!i734_buf.vaddr) { + dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed", + __func__); + return -ENOMEM; + } + + return 0; +} + +static void dispc_errata_i734_wa_fini(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return; + + dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr, + i734_buf.paddr); +} + +static void dispc_errata_i734_wa(void) +{ + u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD); + struct omap_overlay_info ovli; + struct dss_lcd_mgr_config lcd_conf; + u32 gatestate; + unsigned int count; + + if (!dispc.feat->has_gamma_i734_bug) + return; + + gatestate = REG_GET(DISPC_CONFIG, 8, 4); + + ovli = i734.ovli; + ovli.paddr = i734_buf.paddr; + lcd_conf = i734.lcd_conf; + + /* Gate all LCD1 outputs */ + REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4); + + /* Setup and enable GFX plane */ + dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD); + dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false); + dispc_ovl_enable(OMAP_DSS_GFX, true); + + /* Set up and enable display manager for LCD1 */ + dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri); + dispc_calc_clock_rates(dss_get_dispc_clk_rate(), + &lcd_conf.clock_info); + dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf); + dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings); + + dispc_clear_irqstatus(framedone_irq); + + /* Enable and shut the channel to produce just one frame */ + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true); + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false); + + /* Busy wait for framedone. We can't fiddle with irq handlers + * in PM resume. Typically the loop runs less than 5 times and + * waits less than a micro second. 
+ */ + count = 0; + while (!(dispc_read_irqstatus() & framedone_irq)) { + if (count++ > 10000) { + dev_err(&dispc.pdev->dev, "%s: framedone timeout\n", + __func__); + break; + } + } + dispc_ovl_enable(OMAP_DSS_GFX, false); + + /* Clear all irq bits before continuing */ + dispc_clear_irqstatus(0xffffffff); + + /* Restore the original state to LCD1 output gates */ + REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4); +} + /* DISPC HW IP initialisation */ static int dispc_bind(struct device *dev, struct device *master, void *data) { @@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (r) return r; + r = dispc_errata_i734_wa_init(); + if (r) + return r; + dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); if (!dispc_mem) { DSSERR("can't get IORESOURCE_MEM DISPC\n"); @@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) } } + r = dispc_init_gamma_tables(); + if (r) + return r; + pm_runtime_enable(&pdev->dev); r = dispc_runtime_get(); @@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data) { pm_runtime_disable(dev); + + dispc_errata_i734_wa_fini(); } static const struct component_ops dispc_component_ops = { @@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev) if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { _omap_dispc_initial_config(); + dispc_errata_i734_wa(); + dispc_restore_context(); + + dispc_restore_gamma_tables(); } dispc.is_enabled = true; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h index 483744223dd1..bc1d8126ee87 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.h +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -42,6 +42,11 @@ #define DISPC_MSTANDBY_CTRL 0x0858 #define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C +#define DISPC_GAMMA_TABLE0 0x0630 +#define DISPC_GAMMA_TABLE1 0x0634 +#define DISPC_GAMMA_TABLE2 0x0638 +#define DISPC_GAMMA_TABLE3 0x0850 + /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ DISPC_BA0_OFFSET(n)) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c index 038c15b04215..34fad2376f8d 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -18,8 +18,8 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dispc.h" static const struct dispc_coef coef3_M8[8] = { diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 9f3dd09b0a6c..8dcdd7cf9937 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 97ea60257884..b268295b76cf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -34,17 +34,15 @@ #include <linux/clk.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" -#define HSDIV_DISPC 0 - struct dpi_data { struct platform_device *pdev; struct regulator *vdds_dsi_reg; + enum dss_clk_source clk_src; struct dss_pll *pll; struct mutex lock; @@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev) return 
dev_get_drvdata(&pdev->dev); } -static struct dss_pll *dpi_get_pll(enum omap_channel channel) +static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel) { /* * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL @@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel) case OMAPDSS_VER_OMAP3630: case OMAPDSS_VER_AM35xx: case OMAPDSS_VER_AM43xx: - return NULL; + return DSS_CLK_SRC_FCK; case OMAPDSS_VER_OMAP4430_ES1: case OMAPDSS_VER_OMAP4430_ES2: case OMAPDSS_VER_OMAP4: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_OMAP5: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; + case OMAP_DSS_CHANNEL_LCD2: default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_DRA7xx: switch (channel) { case OMAP_DSS_CHANNEL_LCD: + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("video0"); + return DSS_CLK_SRC_PLL1_3; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("video1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } default: - return NULL; - } -} - -static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel) -{ - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD2: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD3: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - default: - /* this shouldn't happen */ - WARN_ON(1); - return OMAP_DSS_CLK_SRC_FCK; + return DSS_CLK_SRC_FCK; } } struct dpi_clk_calc_ctx { struct dss_pll *pll; + unsigned clkout_idx; /* inputs */ @@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx { /* outputs */ - struct dss_pll_clock_info dsi_cinfo; + struct dss_pll_clock_info pll_cinfo; unsigned long fck; struct dispc_clock_info dispc_cinfo; }; @@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc, if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) return false; - ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; - ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc; + ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc; return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, dpi_calc_dispc_cb, ctx); @@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, { struct dpi_clk_calc_ctx *ctx = data; - ctx->dsi_cinfo.n = n; - ctx->dsi_cinfo.m = m; - ctx->dsi_cinfo.fint = fint; - ctx->dsi_cinfo.clkdco = clkdco; + ctx->pll_cinfo.n = n; + ctx->pll_cinfo.m = m; + ctx->pll_cinfo.fint = fint; + ctx->pll_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dpi_calc_hsdiv_cb, ctx); } @@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data) dpi_calc_dispc_cb, ctx); } -static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, +static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck, struct dpi_clk_calc_ctx *ctx) { unsigned long clkin; - unsigned long pll_min, pll_max; memset(ctx, 0, sizeof(*ctx)); ctx->pll = dpi->pll; - ctx->pck_min = pck - 1000; - 
ctx->pck_max = pck + 1000; + ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src); - pll_min = 0; - pll_max = 0; + clkin = clk_get_rate(dpi->pll->clkin); - clkin = clk_get_rate(ctx->pll->clkin); + if (dpi->pll->hw->type == DSS_PLL_TYPE_A) { + unsigned long pll_min, pll_max; - return dss_pll_calc(ctx->pll, clkin, - pll_min, pll_max, - dpi_calc_pll_cb, ctx); + ctx->pck_min = pck - 1000; + ctx->pck_max = pck + 1000; + + pll_min = 0; + pll_max = 0; + + return dss_pll_calc_a(ctx->pll, clkin, + pll_min, pll_max, + dpi_calc_pll_cb, ctx); + } else { /* DSS_PLL_TYPE_B */ + dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo); + + ctx->dispc_cinfo.lck_div = 1; + ctx->dispc_cinfo.pck_div = 1; + ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0]; + ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck; + + return true; + } } static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) @@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) -static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, +static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, int r; bool ok; - ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); + ok = dpi_pll_clk_calc(dpi, pck_req, &ctx); if (!ok) return -EINVAL; - r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); + r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo); if (r) return r; - dss_select_lcd_clk_source(channel, - dpi_get_alt_clk_src(channel)); + dss_select_lcd_clk_source(channel, dpi->clk_src); dpi->mgr_config.clock_info = ctx.dispc_cinfo; - *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; *lck_div = ctx.dispc_cinfo.lck_div; *pck_div = ctx.dispc_cinfo.pck_div; @@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi) int r = 0; if (dpi->pll) - r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, + r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck, &lck_div, &pck_div); else r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, @@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) if (dpi->pll) { r = dss_pll_enable(dpi->pll); if (r) - goto err_dsi_pll_init; + goto err_pll_init; } r = dpi_set_mode(dpi); @@ -442,7 +440,7 @@ err_mgr_enable: err_set_mode: if (dpi->pll) dss_pll_disable(dpi->pll); -err_dsi_pll_init: +err_pll_init: err_src_sel: dispc_runtime_put(); err_get_dispc: @@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) dss_mgr_disable(channel); if (dpi->pll) { - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); dss_pll_disable(dpi->pll); } @@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, return -EINVAL; if (dpi->pll) { - ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); + ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx); if (!ok) return -EINVAL; - fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; } else { ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); if (!ok) @@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) mutex_unlock(&dpi->lock); } -static int dpi_verify_dsi_pll(struct dss_pll *pll) +static int dpi_verify_pll(struct dss_pll *pll) { int r; @@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi) if 
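/*
 * dpi_pll_clk_calc() above now branches on the PLL type: type-A PLLs
 * keep the iterative dss_pll_calc_a() search with a +/- 1 kHz window
 * around the requested pixel clock, while type-B PLLs (the HDMI PLL,
 * per the type-B descriptors later in this patch) are programmed
 * directly for the target rate via dss_pll_calc_b() with fixed
 * lck_div = pck_div = 1.
 */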
(dpi->pll) return; - pll = dpi_get_pll(dpi->output.dispc_channel); + dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel); + + pll = dss_pll_find_by_src(dpi->clk_src); if (!pll) return; - /* On DRA7 we need to set a mux to use the PLL */ - if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) - dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel); - - if (dpi_verify_dsi_pll(pll)) { - DSSWARN("DSI PLL not operational\n"); + if (dpi_verify_pll(pll)) { + DSSWARN("PLL not operational\n"); return; } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 56c43f355ce3..e1be5e795cd8 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -42,9 +42,9 @@ #include <linux/of_platform.h> #include <linux/component.h> -#include <video/omapdss.h> #include <video/mipi_display.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -1261,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev) unsigned long r; struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { + if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ r = clk_get_rate(dsi->dss_clk); } else { @@ -1474,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; - enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; + enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; struct dss_pll *pll = &dsi->pll; @@ -1494,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, cinfo->clkdco, cinfo->m); seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1), cinfo->clkout[HSDIV_DISPC], cinfo->mX[HSDIV_DISPC], - dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dispc_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2), cinfo->clkout[HSDIV_DSI], cinfo->mX[HSDIV_DSI], - dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dsi_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "- DSI%d -\n", dsi_module + 1); - seq_printf(s, "dsi fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dsi_clk_src), - dss_feat_get_clk_source_name(dsi_clk_src)); + seq_printf(s, "dsi fclk source = %s\n", + dss_get_clk_source_name(dsi_clk_src)); seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); @@ -4101,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev, int r; dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 
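/*
 * Clock-source rename map applied throughout this patch (old name on
 * the left, new generic name on the right):
 *   OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC  -> DSS_CLK_SRC_PLL1_1
 *   OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI    -> DSS_CLK_SRC_PLL1_2
 *   OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC -> DSS_CLK_SRC_PLL2_1
 *   OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI   -> DSS_CLK_SRC_PLL2_2
 *   OMAP_DSS_CLK_SRC_FCK                  -> DSS_CLK_SRC_FCK
 */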
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1); if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { r = dss_mgr_register_framedone_handler(channel, @@ -4149,7 +4148,7 @@ err1: dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); err: - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); return r; } @@ -4162,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev, dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); } static int dsi_configure_dsi_clocks(struct platform_device *dsidev) @@ -4196,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) goto err1; dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2); DSSDBG("PLL OK\n"); @@ -4229,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) err3: dsi_cio_uninit(dsidev); err2: - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); err1: dss_pll_disable(&dsi->pll); err0: @@ -4251,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev, dsi_vc_enable(dsidev, 2, 0); dsi_vc_enable(dsidev, 3, 0); - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsidev); dsi_pll_uninit(dsidev, disconnect_lanes); } @@ -4452,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_cm_calc_hsdiv_cb, ctx); } @@ -4491,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi, pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); pll_max = cfg->hs_clk_max * 4; - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_cm_calc_pll_cb, ctx); } @@ -4750,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_vm_calc_hsdiv_cb, ctx); } @@ -4792,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi, pll_max = byteclk_max * 4 * 4; } - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_vm_calc_pll_cb, ctx); } @@ -5138,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 7) - 1, .m_max = (1 << 11) - 1, .mX_max = (1 << 4) - 1, @@ -5163,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { }; static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -5188,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { }; static const struct dss_pll_hw 
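/*
 * Each DSI PLL descriptor gains .type = DSS_PLL_TYPE_A (omap3 and
 * omap4 above, omap5 next). Per the dss.h comment later in this patch,
 * type-A PLLs expose the hsdiv outputs m4..m7 through clkout[]/mX[],
 * while type-B PLLs expose only m2 through clkout[0].
 */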
dss_omap5_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index bf407b6ba15c..e256d879b25c 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -18,8 +18,7 @@ #include <linux/of.h> #include <linux/seq_file.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" struct device_node * @@ -126,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port) static struct device_node *omapdss_of_get_remote_port(const struct device_node *node) { - struct device_node *np; + struct device_node *np, *np_parent; np = of_parse_phandle(node, "remote-endpoint", 0); if (!np) return NULL; - np = of_get_next_parent(np); + np_parent = of_get_next_parent(np); + of_node_put(np); - return np; + return np_parent; } struct device_node * diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 3303cfad4838..14887d5b02e5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -42,8 +42,7 @@ #include <linux/suspend.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -76,6 +75,8 @@ struct dss_features { const enum omap_display_type *ports; int num_ports; int (*dpi_select_source)(int port, enum omap_channel channel); + int (*select_lcd_source)(enum omap_channel channel, + enum dss_clk_source clk_src); }; static struct { @@ -92,9 +93,9 @@ static struct { unsigned long cache_prate; struct dispc_clock_info cache_dispc_cinfo; - enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; - enum omap_dss_clk_source dispc_clk_source; - enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + enum dss_clk_source dsi_clk_source[MAX_NUM_DSI]; + enum dss_clk_source dispc_clk_source; + enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; bool ctx_valid; u32 ctx[DSS_SZ_REGS / sizeof(u32)]; @@ -106,11 +107,14 @@ static struct { } dss; static const char * const dss_generic_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", + [DSS_CLK_SRC_FCK] = "FCK", + [DSS_CLK_SRC_PLL1_1] = "PLL1:1", + [DSS_CLK_SRC_PLL1_2] = "PLL1:2", + [DSS_CLK_SRC_PLL1_3] = "PLL1:3", + [DSS_CLK_SRC_PLL2_1] = "PLL2:1", + [DSS_CLK_SRC_PLL2_2] = "PLL2:2", + [DSS_CLK_SRC_PLL2_3] = "PLL2:3", + [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL", }; static bool dss_initialized; @@ -203,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable) 1 << shift, val << shift); } -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, +static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src, enum omap_channel channel) { unsigned shift, val; if (!dss.syscon_pll_ctrl) - return; + return -EINVAL; switch (channel) { case OMAP_DSS_CHANNEL_LCD: shift = 3; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case DSS_CLK_SRC_PLL1_1: val = 0; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 1; break; default: DSSERR("error in PLL mux config for LCD\n"); - return; + return -EINVAL; } break; case OMAP_DSS_CHANNEL_LCD2: shift = 5; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case 
DSS_CLK_SRC_PLL1_3: val = 0; break; - case DSS_PLL_VIDEO2: + case DSS_CLK_SRC_PLL2_3: val = 1; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD2\n"); - return; + return -EINVAL; } break; case OMAP_DSS_CHANNEL_LCD3: shift = 7; - switch (pll_id) { - case DSS_PLL_VIDEO1: - val = 1; break; - case DSS_PLL_VIDEO2: + switch (clk_src) { + case DSS_CLK_SRC_PLL2_1: val = 0; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_PLL1_3: + val = 1; break; + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD3\n"); - return; + return -EINVAL; } break; default: DSSERR("error in PLL mux config\n"); - return; + return -EINVAL; } regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 0x3 << shift, val << shift); + + return 0; } void dss_sdi_init(int datapairs) @@ -354,14 +360,14 @@ void dss_sdi_disable(void) REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ } -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) +const char *dss_get_clk_source_name(enum dss_clk_source clk_src) { return dss_generic_clk_source_names[clk_src]; } void dss_dump_clocks(struct seq_file *s) { - const char *fclk_name, *fclk_real_name; + const char *fclk_name; unsigned long fclk_rate; if (dss_runtime_get()) @@ -369,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s) seq_printf(s, "- DSS -\n"); - fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); - fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK); fclk_rate = clk_get_rate(dss.dss_clk); - seq_printf(s, "%s (%s) = %lu\n", - fclk_name, fclk_real_name, + seq_printf(s, "%s = %lu\n", + fclk_name, fclk_rate); dss_runtime_put(); @@ -403,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s) #undef DUMPREG } -static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) +static int dss_get_channel_index(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 1; + case OMAP_DSS_CHANNEL_LCD3: + return 2; + default: + WARN_ON(1); + return 0; + } +} + +static void dss_select_dispc_clk_source(enum dss_clk_source clk_src) { int b; u8 start, end; + /* + * We always use PRCM clock as the DISPC func clock, except on DSS3, + * where we don't have separate DISPC and LCD clock sources. 
+ */ + if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) && + clk_src != DSS_CLK_SRC_FCK)) + return; + switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL1_1: b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL2_1: b = 2; break; default: @@ -431,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) } void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { int b, pos; switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL1_2: BUG_ON(dsi_module != 0); b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL2_2: BUG_ON(dsi_module != 1); b = 1; break; @@ -458,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module, dss.dsi_clk_source[dsi_module] = clk_src; } +static int dss_lcd_clk_mux_dra7(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + int r; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return -EINVAL; + } + + r = dss_ctrl_pll_set_control_mux(clk_src, channel); + if (r) + return r; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap5(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK, + [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return -EINVAL; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap4(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return 0; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { - int b, ix, pos; + int idx = dss_get_channel_index(channel); + int r; if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { dss_select_dispc_clk_source(clk_src); + dss.lcd_clk_source[idx] = clk_src; return; } - switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: - b = 0; - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); - b = 1; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && - channel != 
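/*
 * Summary of the per-SoC LCD clock muxes above (FCK always clears the
 * LCDx_CLK_SWITCH bit instead of selecting a PLL):
 *   DRA7:  LCD <- PLL1_1 or HDMI PLL; LCD2 <- PLL1_3, PLL2_3 or HDMI
 *          PLL; LCD3 <- PLL2_1, PLL1_3 or HDMI PLL (via the syscon
 *          mux in dss_ctrl_pll_set_control_mux())
 *   OMAP5: LCD <- PLL1_1; LCD3 <- PLL2_1; LCD2 has no PLL source
 *   OMAP4: LCD <- PLL1_1; LCD2 <- PLL2_1
 */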
OMAP_DSS_CHANNEL_LCD3); - b = 1; - break; - default: - BUG(); + r = dss.feat->select_lcd_source(channel, clk_src); + if (r) return; - } - - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); - REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - dss.lcd_clk_source[ix] = clk_src; + dss.lcd_clk_source[idx] = clk_src; } -enum omap_dss_clk_source dss_get_dispc_clk_source(void) +enum dss_clk_source dss_get_dispc_clk_source(void) { return dss.dispc_clk_source; } -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module) { return dss.dsi_clk_source[dsi_module]; } -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - return dss.lcd_clk_source[ix]; + int idx = dss_get_channel_index(channel); + return dss.lcd_clk_source[idx]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for * OMAP2 and OMAP3 */ @@ -859,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap4, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap4, }; static const struct dss_features omap54xx_dss_feats = { @@ -868,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap5, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap5, }; static const struct dss_features am43xx_dss_feats = { @@ -886,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_dra7xx, .ports = dra7xx_ports, .num_ports = ARRAY_SIZE(dra7xx_ports), + .select_lcd_source = &dss_lcd_clk_mux_dra7, }; static int dss_init_features(struct platform_device *pdev) @@ -1143,18 +1240,18 @@ static int dss_bind(struct device *dev) /* Select DPLL */ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); #ifdef CONFIG_OMAP2_DSS_VENC REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ #endif - dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dispc_clk_source = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; rev = dss_read_reg(DSS_REVISION); printk(KERN_INFO "OMAP DSS rev %d.%d\n", diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 38e6ab50142d..4fd06dc41cb3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -102,6 +102,20 @@ enum dss_writeback_channel { DSS_WB_LCD3_MGR = 7, }; +enum dss_clk_source { + DSS_CLK_SRC_FCK = 0, + + DSS_CLK_SRC_PLL1_1, + DSS_CLK_SRC_PLL1_2, + DSS_CLK_SRC_PLL1_3, + + DSS_CLK_SRC_PLL2_1, + DSS_CLK_SRC_PLL2_2, + 
DSS_CLK_SRC_PLL2_3, + + DSS_CLK_SRC_HDMI_PLL, +}; + enum dss_pll_id { DSS_PLL_DSI1, DSS_PLL_DSI2, @@ -114,6 +128,11 @@ struct dss_pll; #define DSS_PLL_MAX_HSDIVS 4 +enum dss_pll_type { + DSS_PLL_TYPE_A, + DSS_PLL_TYPE_B, +}; + /* * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. * Type-B PLLs: clkout[0] refers to m2. @@ -140,6 +159,8 @@ struct dss_pll_ops { }; struct dss_pll_hw { + enum dss_pll_type type; + unsigned n_max; unsigned m_min; unsigned m_max; @@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void); int dss_dpi_select_source(int port, enum omap_channel channel); void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); +const char *dss_get_clk_source_name(enum dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); /* DSS VIDEO PLL */ @@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s); #endif void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, - enum omap_channel channel); void dss_sdi_init(int datapairs); int dss_sdi_enable(void); void dss_sdi_disable(void); void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src); + enum dss_clk_source clk_src); void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src); -enum omap_dss_clk_source dss_get_dispc_clk_source(void); -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); + enum dss_clk_source clk_src); +enum dss_clk_source dss_get_dispc_clk_source(void); +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module); +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); void dss_set_venc_output(enum omap_dss_venc_type type); void dss_set_dac_pwrdn_bgz(bool enable); @@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc, int dss_pll_register(struct dss_pll *pll); void dss_pll_unregister(struct dss_pll *pll); struct dss_pll *dss_pll_find(const char *name); +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src); +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src); int dss_pll_enable(struct dss_pll *pll); void dss_pll_disable(struct dss_pll *pll); int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data); -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data); + +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo); + int dss_pll_write_config_type_a(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); int dss_pll_write_config_type_b(struct dss_pll *pll, diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c index c886a2927f73..ee5b93ce2763 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.c +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c @@ -23,8 +23,7 @@ #include <linux/err.h> 
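/*
 * dss.h above declares dss_pll_find_by_src() and
 * dss_pll_get_clkout_idx_for_src(), but their pll.c implementation is
 * not part of this hunk. A minimal sketch of the output-index mapping
 * implied by the enum layout (an assumption, for illustration only):
 */
static unsigned sketch_clkout_idx_for_src(enum dss_clk_source src)
{
	switch (src) {
	case DSS_CLK_SRC_PLL1_1:
	case DSS_CLK_SRC_PLL2_1:
		return 0;	/* first hsdiv output (m4) */
	case DSS_CLK_SRC_PLL1_2:
	case DSS_CLK_SRC_PLL2_2:
		return 1;	/* m5 */
	case DSS_CLK_SRC_PLL1_3:
	case DSS_CLK_SRC_PLL2_3:
		return 2;	/* m6 */
	default:
		return 0;	/* FCK / HDMI PLL: single output */
	}
}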
#include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -50,7 +49,6 @@ struct omap_dss_features { const enum omap_dss_output_id *supported_outputs; const enum omap_color_mode *supported_color_modes; const enum omap_overlay_caps *overlay_caps; - const char * const *clksrc_names; const struct dss_param_range *dss_params; const enum omap_dss_rotation_type supported_rotation_types; @@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = { OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, }; -static const char * const omap2_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1", -}; - -static const char * const omap3_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", - [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", -}; - -static const char * const omap4_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", -}; - -static const char * const omap5_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2", -}; - static const struct dss_param_range omap2_dss_param_range[] = { [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, [FEAT_PARAM_DSS_PCD] = { 2, 255 }, @@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = { .supported_outputs = omap2_dss_supported_outputs, .supported_color_modes = omap2_dss_supported_color_modes, .overlay_caps = omap2_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = omap2_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = { .supported_outputs = am43xx_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = am43xx_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA, .buffer_size_unit = 1, @@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = { .supported_outputs = 
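/*
 * The per-SoC clksrc_names tables removed above duplicated what the
 * new dss_clk_source enum now encodes directly: OMAP4's "PLL1_CLK1"
 * and OMAP5's "DPLL_DSI1_A_CLK1", for instance, both collapse to the
 * generic "PLL1:1" from dss_generic_clk_source_names in dss.c.
 */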
omap3630_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3630_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = { .supported_outputs = omap5_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap5_dss_clk_source_names, .dss_params = omap5_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, color_mode; } -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) -{ - return omap_current_dss_features->clksrc_names[id]; -} - u32 dss_feat_get_buffer_size_unit(void) { return omap_current_dss_features->buffer_size_unit; diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h index 3d67d39f192f..bb4b7f0e642b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.h +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h @@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param); enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); bool dss_feat_color_mode_supported(enum omap_plane plane, enum omap_color_mode color_mode); -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ u32 dss_feat_get_burst_size_unit(void); /* in bytes */ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 53616b02b613..63e711545865 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -23,8 +23,9 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hdmi.h> -#include <video/omapdss.h> +#include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "dss.h" /* HDMI Wrapper */ @@ -240,6 +241,7 @@ struct hdmi_pll_data { void __iomem *base; + struct platform_device *pdev; struct hdmi_wp_data *wp; }; @@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct 
hdmi_wp_data *wp); /* HDMI PLL funcs */ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi); int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, struct hdmi_wp_data *wp); void hdmi_pll_uninit(struct hdmi_pll_data *hpll); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 4d46cdf7a037..cbd28dfdb86a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -34,9 +34,9 @@ #include <linux/regulator/consumer.h> #include <linux/component.h> #include <linux/of.h> -#include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi4_core.h" #include "dss.h" #include "dss_features.h" @@ -177,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); r = dss_pll_enable(&hdmi.pll.pll); if (r) { @@ -204,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 9255c0e1e4a7..0c0a5139a301 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -39,9 +39,9 @@ #include <linux/regulator/consumer.h> #include <linux/component.h> #include <linux/of.h> -#include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi5_core.h" #include "dss.h" #include "dss_features.h" @@ -189,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); /* disable and clear irqs */ hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); @@ -221,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c index 1b8fcc6c4ba1..4dfb67fe5f6d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c @@ -4,8 +4,8 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "hdmi.h" int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index f98b750fc499..3ead47cccac5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -14,8 +14,8 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index f1015e8b8267..b8bf6a9e5557 100644 --- 
a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -17,9 +17,9 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/seq_file.h> +#include <linux/pm_runtime.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "hdmi.h" @@ -39,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) DUMPPLL(PLLCTRL_CFG4); } -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi) -{ - unsigned long fint, clkdco, clkout; - unsigned long target_bitclk, target_clkdco; - unsigned long min_dco; - unsigned n, m, mf, m2, sd; - unsigned long clkin; - const struct dss_pll_hw *hw = pll->pll.hw; - - clkin = clk_get_rate(pll->pll.clkin); - - DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds); - - target_bitclk = target_tmds * 10; - - /* Fint */ - n = DIV_ROUND_UP(clkin, hw->fint_max); - fint = clkin / n; - - /* adjust m2 so that the clkdco will be high enough */ - min_dco = roundup(hw->clkdco_min, fint); - m2 = DIV_ROUND_UP(min_dco, target_bitclk); - if (m2 == 0) - m2 = 1; - - target_clkdco = target_bitclk * m2; - m = target_clkdco / fint; - - clkdco = fint * m; - - /* adjust clkdco with fractional mf */ - if (WARN_ON(target_clkdco - clkdco > fint)) - mf = 0; - else - mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); - - if (mf > 0) - clkdco += (u32)div_u64((u64)mf * fint, 262144); - - clkout = clkdco / m2; - - /* sigma-delta */ - sd = DIV_ROUND_UP(fint * m, 250000000); - - DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", - n, m, mf, m2, sd); - DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); - - pi->n = n; - pi->m = m; - pi->mf = mf; - pi->mX[0] = m2; - pi->sd = sd; - - pi->fint = fint; - pi->clkdco = clkdco; - pi->clkout[0] = clkout; -} - static int hdmi_pll_enable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; - u16 r = 0; + int r; + + r = pm_runtime_get_sync(&pll->pdev->dev); + WARN_ON(r < 0); dss_ctrl_pll_enable(DSS_PLL_HDMI, true); @@ -118,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; + int r; hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); dss_ctrl_pll_enable(DSS_PLL_HDMI, false); + + r = pm_runtime_put_sync(&pll->pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); } static const struct dss_pll_ops dsi_pll_ops = { @@ -131,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 4095, @@ -154,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { }; static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 2045, @@ -225,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, int r; struct resource *res; + pll->pdev = pdev; pll->wp = wp; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 055f62fca5dc..203694a52d18 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -15,8 +15,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include 
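/*
 * With hdmi_pll_compute() removed above, hdmi4/hdmi5 call the generic
 * dss_pll_calc_b() with target = pixel clock * 10, since the PLL
 * output is the TMDS bit clock and DSS_HDMI_TCLK is bitclk / 10.
 * Worked example: 1080p60 at a 148.5 MHz pixel clock asks the PLL for
 * 1.485 GHz. The removed code documents the type-B parameters --
 * fint = clkin / n, an 18-bit fractional multiplier mf (in units of
 * fint / 262144), output divider m2 and sigma-delta setting sd -- and
 * dss_pll_calc_b() is presumably expected to carry the same
 * arithmetic.
 */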
"omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index d7e7c909bbc2..6eaf1adbd606 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -18,7 +18,872 @@ #ifndef __OMAP_DRM_DSS_H #define __OMAP_DRM_DSS_H -#include <video/omapdss.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <video/videomode.h> +#include <linux/platform_data/omapdss.h> +#include <uapi/drm/drm_mode.h> + +#define DISPC_IRQ_FRAMEDONE (1 << 0) +#define DISPC_IRQ_VSYNC (1 << 1) +#define DISPC_IRQ_EVSYNC_EVEN (1 << 2) +#define DISPC_IRQ_EVSYNC_ODD (1 << 3) +#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4) +#define DISPC_IRQ_PROG_LINE_NUM (1 << 5) +#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6) +#define DISPC_IRQ_GFX_END_WIN (1 << 7) +#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8) +#define DISPC_IRQ_OCP_ERR (1 << 9) +#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10) +#define DISPC_IRQ_VID1_END_WIN (1 << 11) +#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12) +#define DISPC_IRQ_VID2_END_WIN (1 << 13) +#define DISPC_IRQ_SYNC_LOST (1 << 14) +#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) +#define DISPC_IRQ_WAKEUP (1 << 16) +#define DISPC_IRQ_SYNC_LOST2 (1 << 17) +#define DISPC_IRQ_VSYNC2 (1 << 18) +#define DISPC_IRQ_VID3_END_WIN (1 << 19) +#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20) +#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21) +#define DISPC_IRQ_FRAMEDONE2 (1 << 22) +#define DISPC_IRQ_FRAMEDONEWB (1 << 23) +#define DISPC_IRQ_FRAMEDONETV (1 << 24) +#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) +#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26) +#define DISPC_IRQ_SYNC_LOST3 (1 << 27) +#define DISPC_IRQ_VSYNC3 (1 << 28) +#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29) +#define DISPC_IRQ_FRAMEDONE3 (1 << 30) + +struct omap_dss_device; +struct omap_overlay_manager; +struct dss_lcd_mgr_config; +struct snd_aes_iec958; +struct snd_cea_861_aud_if; +struct hdmi_avi_infoframe; + +enum omap_display_type { + OMAP_DISPLAY_TYPE_NONE = 0, + OMAP_DISPLAY_TYPE_DPI = 1 << 0, + OMAP_DISPLAY_TYPE_DBI = 1 << 1, + OMAP_DISPLAY_TYPE_SDI = 1 << 2, + OMAP_DISPLAY_TYPE_DSI = 1 << 3, + OMAP_DISPLAY_TYPE_VENC = 1 << 4, + OMAP_DISPLAY_TYPE_HDMI = 1 << 5, + OMAP_DISPLAY_TYPE_DVI = 1 << 6, +}; + +enum omap_plane { + OMAP_DSS_GFX = 0, + OMAP_DSS_VIDEO1 = 1, + OMAP_DSS_VIDEO2 = 2, + OMAP_DSS_VIDEO3 = 3, + OMAP_DSS_WB = 4, +}; + +enum omap_channel { + OMAP_DSS_CHANNEL_LCD = 0, + OMAP_DSS_CHANNEL_DIGIT = 1, + OMAP_DSS_CHANNEL_LCD2 = 2, + OMAP_DSS_CHANNEL_LCD3 = 3, + OMAP_DSS_CHANNEL_WB = 4, +}; + +enum omap_color_mode { + OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */ + OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */ + OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */ + OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */ + OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */ + OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */ + OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */ + OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */ + OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */ + OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */ + OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */ + OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ + OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ + OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ + OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */ + OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */ + OMAP_DSS_COLOR_RGBX16 
= 1 << 16, /* RGBx16 - 4444 */ + OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */ + OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ +}; + +enum omap_dss_load_mode { + OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, + OMAP_DSS_LOAD_CLUT_ONLY = 1, + OMAP_DSS_LOAD_FRAME_ONLY = 2, + OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3, +}; + +enum omap_dss_trans_key_type { + OMAP_DSS_COLOR_KEY_GFX_DST = 0, + OMAP_DSS_COLOR_KEY_VID_SRC = 1, +}; + +enum omap_rfbi_te_mode { + OMAP_DSS_RFBI_TE_MODE_1 = 1, + OMAP_DSS_RFBI_TE_MODE_2 = 2, +}; + +enum omap_dss_signal_level { + OMAPDSS_SIG_ACTIVE_LOW, + OMAPDSS_SIG_ACTIVE_HIGH, +}; + +enum omap_dss_signal_edge { + OMAPDSS_DRIVE_SIG_FALLING_EDGE, + OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +enum omap_dss_venc_type { + OMAP_DSS_VENC_TYPE_COMPOSITE, + OMAP_DSS_VENC_TYPE_SVIDEO, +}; + +enum omap_dss_dsi_pixel_format { + OMAP_DSS_DSI_FMT_RGB888, + OMAP_DSS_DSI_FMT_RGB666, + OMAP_DSS_DSI_FMT_RGB666_PACKED, + OMAP_DSS_DSI_FMT_RGB565, +}; + +enum omap_dss_dsi_mode { + OMAP_DSS_DSI_CMD_MODE = 0, + OMAP_DSS_DSI_VIDEO_MODE, +}; + +enum omap_display_caps { + OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, + OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1, +}; + +enum omap_dss_display_state { + OMAP_DSS_DISPLAY_DISABLED = 0, + OMAP_DSS_DISPLAY_ACTIVE, +}; + +enum omap_dss_rotation_type { + OMAP_DSS_ROT_DMA = 1 << 0, + OMAP_DSS_ROT_VRFB = 1 << 1, + OMAP_DSS_ROT_TILER = 1 << 2, +}; + +/* clockwise rotation angle */ +enum omap_dss_rotation_angle { + OMAP_DSS_ROT_0 = 0, + OMAP_DSS_ROT_90 = 1, + OMAP_DSS_ROT_180 = 2, + OMAP_DSS_ROT_270 = 3, +}; + +enum omap_overlay_caps { + OMAP_DSS_OVL_CAP_SCALE = 1 << 0, + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1, + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2, + OMAP_DSS_OVL_CAP_ZORDER = 1 << 3, + OMAP_DSS_OVL_CAP_POS = 1 << 4, + OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5, +}; + +enum omap_overlay_manager_caps { + OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */ +}; + +enum omap_dss_clk_source { + OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK + * OMAP4: DSS_FCLK */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK + * OMAP4: PLL1_CLK1 */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK + * OMAP4: PLL1_CLK2 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ +}; + +enum omap_hdmi_flags { + OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0, +}; + +enum omap_dss_output_id { + OMAP_DSS_OUTPUT_DPI = 1 << 0, + OMAP_DSS_OUTPUT_DBI = 1 << 1, + OMAP_DSS_OUTPUT_SDI = 1 << 2, + OMAP_DSS_OUTPUT_DSI1 = 1 << 3, + OMAP_DSS_OUTPUT_DSI2 = 1 << 4, + OMAP_DSS_OUTPUT_VENC = 1 << 5, + OMAP_DSS_OUTPUT_HDMI = 1 << 6, +}; + +/* RFBI */ + +struct rfbi_timings { + int cs_on_time; + int cs_off_time; + int we_on_time; + int we_off_time; + int re_on_time; + int re_off_time; + int we_cycle_time; + int re_cycle_time; + int cs_pulse_width; + int access_time; + + int clk_div; + + u32 tim[5]; /* set by rfbi_convert_timings() */ + + int converted; +}; + +/* DSI */ + +enum omap_dss_dsi_trans_mode { + /* Sync Pulses: both sync start and end packets sent */ + OMAP_DSS_DSI_PULSE_MODE, + /* Sync Events: only sync start packets sent */ + OMAP_DSS_DSI_EVENT_MODE, + /* Burst: only sync start packets sent, pixels are time compressed */ + OMAP_DSS_DSI_BURST_MODE, +}; + +struct omap_dss_dsi_videomode_timings { + unsigned long hsclk; + + unsigned ndl; + unsigned bitspp; + + /* pixels */ + u16 hact; + /* lines */ + u16 vact; + + /* DSI video mode blanking data */ + /* Unit: byte clock cycles */ 
+ u16 hss; + u16 hsa; + u16 hse; + u16 hfp; + u16 hbp; + /* Unit: line clocks */ + u16 vsa; + u16 vfp; + u16 vbp; + + /* DSI blanking modes */ + int blanking_mode; + int hsa_blanking_mode; + int hbp_blanking_mode; + int hfp_blanking_mode; + + enum omap_dss_dsi_trans_mode trans_mode; + + bool ddr_clk_always_on; + int window_sync; +}; + +struct omap_dss_dsi_config { + enum omap_dss_dsi_mode mode; + enum omap_dss_dsi_pixel_format pixel_format; + const struct omap_video_timings *timings; + + unsigned long hs_clk_min, hs_clk_max; + unsigned long lp_clk_min, lp_clk_max; + + bool ddr_clk_always_on; + enum omap_dss_dsi_trans_mode trans_mode; +}; + +struct omap_video_timings { + /* Unit: pixels */ + u16 x_res; + /* Unit: pixels */ + u16 y_res; + /* Unit: Hz */ + u32 pixelclock; + /* Unit: pixel clocks */ + u16 hsw; /* Horizontal synchronization pulse width */ + /* Unit: pixel clocks */ + u16 hfp; /* Horizontal front porch */ + /* Unit: pixel clocks */ + u16 hbp; /* Horizontal back porch */ + /* Unit: line clocks */ + u16 vsw; /* Vertical synchronization pulse width */ + /* Unit: line clocks */ + u16 vfp; /* Vertical front porch */ + /* Unit: line clocks */ + u16 vbp; /* Vertical back porch */ + + /* Vsync logic level */ + enum omap_dss_signal_level vsync_level; + /* Hsync logic level */ + enum omap_dss_signal_level hsync_level; + /* Interlaced or Progressive timings */ + bool interlace; + /* Pixel clock edge to drive LCD data */ + enum omap_dss_signal_edge data_pclk_edge; + /* Data enable logic level */ + enum omap_dss_signal_level de_level; + /* Pixel clock edges to drive HSYNC and VSYNC signals */ + enum omap_dss_signal_edge sync_pclk_edge; + + bool double_pixel; +}; + +/* Hardcoded timings for tv modes. Venc only uses these to + * identify the mode, and does not actually use the configs + * itself. 
However, the configs should be something that + * a normal monitor can also show */ +extern const struct omap_video_timings omap_dss_pal_timings; +extern const struct omap_video_timings omap_dss_ntsc_timings; + +struct omap_dss_cpr_coefs { + s16 rr, rg, rb; + s16 gr, gg, gb; + s16 br, bg, bb; +}; + +struct omap_overlay_info { + dma_addr_t paddr; + dma_addr_t p_uv_addr; /* for NV12 format */ + u16 screen_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + + u16 pos_x; + u16 pos_y; + u16 out_width; /* if 0, out_width == width */ + u16 out_height; /* if 0, out_height == height */ + u8 global_alpha; + u8 pre_mult_alpha; + u8 zorder; +}; + +struct omap_overlay { + struct kobject kobj; + struct list_head list; + + /* static fields */ + const char *name; + enum omap_plane id; + enum omap_color_mode supported_modes; + enum omap_overlay_caps caps; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + /* + * The following functions do not block: + * + * is_enabled + * set_overlay_info + * get_overlay_info + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*enable)(struct omap_overlay *ovl); + int (*disable)(struct omap_overlay *ovl); + bool (*is_enabled)(struct omap_overlay *ovl); + + int (*set_manager)(struct omap_overlay *ovl, + struct omap_overlay_manager *mgr); + int (*unset_manager)(struct omap_overlay *ovl); + + int (*set_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + void (*get_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + + int (*wait_for_go)(struct omap_overlay *ovl); + + struct omap_dss_device *(*get_device)(struct omap_overlay *ovl); +}; + +struct omap_overlay_manager_info { + u32 default_color; + + enum omap_dss_trans_key_type trans_key_type; + u32 trans_key; + bool trans_enabled; + + bool partial_alpha_enabled; + + bool cpr_enable; + struct omap_dss_cpr_coefs cpr_coefs; +}; + +struct omap_overlay_manager { + struct kobject kobj; + + /* static fields */ + const char *name; + enum omap_channel id; + enum omap_overlay_manager_caps caps; + struct list_head overlays; + enum omap_display_type supported_displays; + enum omap_dss_output_id supported_outputs; + + /* dynamic fields */ + struct omap_dss_device *output; + + /* + * The following functions do not block: + * + * set_manager_info + * get_manager_info + * apply + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*set_output)(struct omap_overlay_manager *mgr, + struct omap_dss_device *output); + int (*unset_output)(struct omap_overlay_manager *mgr); + + int (*set_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + void (*get_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + + int (*apply)(struct omap_overlay_manager *mgr); + int (*wait_for_go)(struct omap_overlay_manager *mgr); + int (*wait_for_vsync)(struct omap_overlay_manager *mgr); + + struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr); +}; + +/* 22 pins means 1 clk lane and 10 data lanes */ +#define OMAP_DSS_MAX_DSI_PINS 22 + +struct omap_dsi_pin_config { + int num_pins; + /* + * pin numbers in the following order: + * clk+, clk- + * data1+, data1- + * data2+, data2- + * ... 
+ */ + int pins[OMAP_DSS_MAX_DSI_PINS]; +}; + +struct omap_dss_writeback_info { + u32 paddr; + u32 p_uv_addr; + u16 buf_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + u8 pre_mult_alpha; +}; + +struct omapdss_dpi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines); +}; + +struct omapdss_sdi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs); +}; + +struct omapdss_dvi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); +}; + +struct omapdss_atv_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_type)(struct omap_dss_device *dssdev, + enum omap_dss_venc_type type); + void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev, + bool invert_polarity); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); +}; + +struct omapdss_hdmi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void 
(*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +struct omapdss_dsi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, + bool enter_ulps); + + /* bus configuration */ + int (*set_config)(struct omap_dss_device *dssdev, + const struct omap_dss_dsi_config *cfg); + int (*configure_pins)(struct omap_dss_device *dssdev, + const struct omap_dsi_pin_config *pin_cfg); + + void (*enable_hs)(struct omap_dss_device *dssdev, int channel, + bool enable); + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + + int (*update)(struct omap_dss_device *dssdev, int channel, + void (*callback)(int, void *), void *data); + + void (*bus_lock)(struct omap_dss_device *dssdev); + void (*bus_unlock)(struct omap_dss_device *dssdev); + + int (*enable_video_output)(struct omap_dss_device *dssdev, int channel); + void (*disable_video_output)(struct omap_dss_device *dssdev, + int channel); + + int (*request_vc)(struct omap_dss_device *dssdev, int *channel); + int (*set_vc_id)(struct omap_dss_device *dssdev, int channel, + int vc_id); + void (*release_vc)(struct omap_dss_device *dssdev, int channel); + + /* data transfer */ + int (*dcs_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *data, int len); + + int (*gen_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_read)(struct omap_dss_device *dssdev, int channel, + u8 *reqdata, int reqlen, + u8 *data, int len); + + int (*bta_sync)(struct omap_dss_device *dssdev, int channel); + + int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev, + int channel, u16 plen); +}; + +struct omap_dss_device { + struct kobject kobj; + struct device *dev; + + struct module *owner; + + struct list_head panel_list; + + /* alias in the form of "display%d" */ + char alias[16]; + + enum omap_display_type type; + enum omap_display_type output_type; + + union { + struct { + u8 data_lines; + } dpi; + + struct { + u8 channel; + u8 data_lines; + } rfbi; + + struct { + u8 datapairs; + } sdi; + + struct { + int module; + } dsi; + + struct { + enum omap_dss_venc_type type; + bool invert_polarity; + } venc; + } phy; + + struct { + struct omap_video_timings timings; + + enum omap_dss_dsi_pixel_format dsi_pix_fmt; + enum omap_dss_dsi_mode dsi_mode; + } panel; + + struct { + u8 pixel_size; + struct rfbi_timings rfbi_timings; + } ctrl; + + const char *name; + + /* used to match device to driver */ + const char *driver_name; + + void *data; + + struct omap_dss_driver *driver; + + union { + const struct omapdss_dpi_ops *dpi; + const struct omapdss_sdi_ops *sdi; + const struct omapdss_dvi_ops *dvi; + const struct omapdss_hdmi_ops *hdmi; + const struct omapdss_atv_ops *atv; + const struct omapdss_dsi_ops 
*dsi; + } ops; + + /* helper variable for driver suspend/resume */ + bool activate_after_resume; + + enum omap_display_caps caps; + + struct omap_dss_device *src; + + enum omap_dss_display_state state; + + /* OMAP DSS output specific fields */ + + struct list_head list; + + /* DISPC channel for this output */ + enum omap_channel dispc_channel; + bool dispc_channel_connected; + + /* output instance */ + enum omap_dss_output_id id; + + /* the port number in the DT node */ + int port_num; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + struct omap_dss_device *dst; +}; + +struct omap_dss_driver { + int (*probe)(struct omap_dss_device *); + void (*remove)(struct omap_dss_device *); + + int (*connect)(struct omap_dss_device *dssdev); + void (*disconnect)(struct omap_dss_device *dssdev); + + int (*enable)(struct omap_dss_device *display); + void (*disable)(struct omap_dss_device *display); + int (*run_test)(struct omap_dss_device *display, int test); + + int (*update)(struct omap_dss_device *dssdev, + u16 x, u16 y, u16 w, u16 h); + int (*sync)(struct omap_dss_device *dssdev); + + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + int (*get_te)(struct omap_dss_device *dssdev); + + u8 (*get_rotate)(struct omap_dss_device *dssdev); + int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); + + bool (*get_mirror)(struct omap_dss_device *dssdev); + int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); + + int (*memory_read)(struct omap_dss_device *dssdev, + void *buf, size_t size, + u16 x, u16 y, u16 w, u16 h); + + void (*get_resolution)(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); + void (*get_dimensions)(struct omap_dss_device *dssdev, + u32 *width, u32 *height); + int (*get_recommended_bpp)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +enum omapdss_version omapdss_get_version(void); +bool omapdss_is_initialized(void); + +int omap_dss_register_driver(struct omap_dss_driver *); +void omap_dss_unregister_driver(struct omap_dss_driver *); + +int omapdss_register_display(struct omap_dss_device *dssdev); +void omapdss_unregister_display(struct omap_dss_device *dssdev); + +struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); +void omap_dss_put_device(struct omap_dss_device *dssdev); +#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) +struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); +struct omap_dss_device *omap_dss_find_device(void *data, + int (*match)(struct omap_dss_device *dssdev, void *data)); +const char *omapdss_get_default_display_name(void); + +void videomode_to_omap_video_timings(const struct videomode *vm, + struct omap_video_timings *ovt); +void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, + struct videomode *vm); + +int dss_feat_get_num_mgrs(void); +int 
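Among the declarations moved into this private header is the display iterator; assuming omap_dss_get_next_device() keeps its old behaviour (returning NULL at the end of the list), the usual consumer pattern is a sketch like:

	struct omap_dss_device *dssdev = NULL;

	for_each_dss_dev(dssdev) {
		/* visits every registered display in turn */
		if (!dssdev->driver)
			continue;
		/* ... connect or probe the panel ... */
	}
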
dss_feat_get_num_ovls(void); +enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); + + + +int omap_dss_get_num_overlay_managers(void); +struct omap_overlay_manager *omap_dss_get_overlay_manager(int num); + +int omap_dss_get_num_overlays(void); +struct omap_overlay *omap_dss_get_overlay(int num); + +int omapdss_register_output(struct omap_dss_device *output); +void omapdss_unregister_output(struct omap_dss_device *output); +struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); +struct omap_dss_device *omap_dss_find_output(const char *name); +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); +int omapdss_output_set_device(struct omap_dss_device *out, + struct omap_dss_device *dssdev); +int omapdss_output_unset_device(struct omap_dss_device *out); + +struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); +struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev); + +void omapdss_default_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); +int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + +typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); +int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); +int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); + +int omapdss_compat_init(void); +void omapdss_compat_uninit(void); + +static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) +{ + return dssdev->src; +} + +static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) +{ + return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; +} + +struct device_node * +omapdss_of_get_next_port(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_next_endpoint(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_first_endpoint(const struct device_node *parent); + +struct omap_dss_device * +omapdss_of_find_source_for_first_ep(struct device_node *node); u32 dispc_read_irqstatus(void); void dispc_clear_irqstatus(u32 mask); @@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, const struct omap_video_timings *timings); void dispc_mgr_setup(enum omap_channel channel, const struct omap_overlay_manager_info *info); +u32 dispc_mgr_gamma_size(enum omap_channel channel); +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length); int dispc_ovl_enable(enum omap_plane plane, bool enable); bool dispc_ovl_enabled(enum omap_plane plane); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 829232ad8c81..24f859488201 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -21,8 +21,7 @@ #include <linux/slab.h> #include <linux/of.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" static LIST_HEAD(output_list); diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index f974ddcd3b6e..0a76c89cdc2e 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -22,8 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #define PLL_CONTROL 0x0000 @@ -76,6 
+75,59 @@ struct dss_pll *dss_pll_find(const char *name) return NULL; } +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src) +{ + struct dss_pll *pll; + + switch (src) { + default: + case DSS_CLK_SRC_FCK: + return NULL; + + case DSS_CLK_SRC_HDMI_PLL: + return dss_pll_find("hdmi"); + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL1_3: + pll = dss_pll_find("dsi0"); + if (!pll) + pll = dss_pll_find("video0"); + return pll; + + case DSS_CLK_SRC_PLL2_1: + case DSS_CLK_SRC_PLL2_2: + case DSS_CLK_SRC_PLL2_3: + pll = dss_pll_find("dsi1"); + if (!pll) + pll = dss_pll_find("video1"); + return pll; + } +} + +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src) +{ + switch (src) { + case DSS_CLK_SRC_HDMI_PLL: + return 0; + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL2_1: + return 0; + + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL2_2: + return 1; + + case DSS_CLK_SRC_PLL1_3: + case DSS_CLK_SRC_PLL2_3: + return 2; + + default: + return 0; + } +} + int dss_pll_enable(struct dss_pll *pll) { int r; @@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin return 0; } -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data) { @@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, return false; } -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +/* + * clkdco = clkin / n * m * 2 + * clkoutX = clkdco / mX + */ +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data) { @@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, return false; } +/* + * This calculates a PLL config that will provide the target_clkout rate + * for clkout. Additionally clkdco rate will be the same as clkout rate + * when clkout rate is >= min_clkdco. 
+ * + * clkdco = clkin / n * m + clkin / n * mf / 262144 + * clkout = clkdco / m2 + */ +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo) +{ + unsigned long fint, clkdco, clkout; + unsigned long target_clkdco; + unsigned long min_dco; + unsigned n, m, mf, m2, sd; + const struct dss_pll_hw *hw = pll->hw; + + DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout); + + /* Fint */ + n = DIV_ROUND_UP(clkin, hw->fint_max); + fint = clkin / n; + + /* adjust m2 so that the clkdco will be high enough */ + min_dco = roundup(hw->clkdco_min, fint); + m2 = DIV_ROUND_UP(min_dco, target_clkout); + if (m2 == 0) + m2 = 1; + + target_clkdco = target_clkout * m2; + m = target_clkdco / fint; + + clkdco = fint * m; + + /* adjust clkdco with fractional mf */ + if (WARN_ON(target_clkdco - clkdco > fint)) + mf = 0; + else + mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); + + if (mf > 0) + clkdco += (u32)div_u64((u64)mf * fint, 262144); + + clkout = clkdco / m2; + + /* sigma-delta */ + sd = DIV_ROUND_UP(fint * m, 250000000); + + DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", + n, m, mf, m2, sd); + DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); + + cinfo->n = n; + cinfo->m = m; + cinfo->mf = mf; + cinfo->mX[0] = m2; + cinfo->sd = sd; + + cinfo->fint = fint; + cinfo->clkdco = clkdco; + cinfo->clkout[0] = clkout; + + return true; +} + static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) { unsigned long timeout; diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c index 3796576dfadf..cd53566d75eb 100644 --- a/drivers/gpu/drm/omapdrm/dss/rfbi.c +++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c @@ -38,7 +38,7 @@ #include <linux/pm_runtime.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" struct rfbi_reg { u16 idx; }; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index cd6d3bfb041d..0a96c321ce62 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -29,7 +29,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" static struct { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 08a2cc778ba9..6eedf2118708 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -37,8 +37,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index b1ec59e42940..7429de928d4e 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = { }; static const struct dss_pll_hw dss_dra7_video_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = { .mX_lsb[0] = 21, .mX_msb[1] = 30, .mX_lsb[1] = 26, + .mX_msb[2] = 4, + .mX_lsb[2] = 0, + .mX_msb[3] = 9, + .mX_lsb[3] = 5, .has_refsel = true, }; diff --git 
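The type B helper above carries over the math that used to live in hdmi_pll_compute(): n is chosen so fint = clkin / n stays below fint_max, m2 lifts the DCO above clkdco_min, m is the integer multiplier and mf the 1/262144 fractional part. A standalone sketch of the same arithmetic, with assumed hardware limits (fint_max = 2.5 MHz, clkdco_min = 500 MHz) and a 19.2 MHz clkin targeting a 1.485 GHz HDMI bit clock:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long clkin = 19200000ULL, target = 1485000000ULL;
		unsigned long long fint_max = 2500000ULL, clkdco_min = 500000000ULL;

		unsigned long long n = (clkin + fint_max - 1) / fint_max;	/* 8 */
		unsigned long long fint = clkin / n;				/* 2.4 MHz */
		unsigned long long min_dco = ((clkdco_min + fint - 1) / fint) * fint;
		unsigned long long m2 = (min_dco + target - 1) / target;
		unsigned long long target_dco, m, mf, clkdco;

		if (m2 == 0)
			m2 = 1;
		target_dco = target * m2;
		m = target_dco / fint;					/* 618 */
		mf = 262144ULL * (target_dco - fint * m) / fint;	/* 196608 */
		clkdco = fint * m + mf * fint / 262144ULL;

		/* prints: n=8 m=618 mf=196608 m2=1 clkout=1485000000 */
		printf("n=%llu m=%llu mf=%llu m2=%llu clkout=%llu\n",
		       n, m, mf, m2, clkdco / m2);
		return 0;
	}
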
a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index ce2d67b6a8c7..137fe690a0da 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -32,7 +32,6 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *dssdev; - struct drm_encoder *encoder; bool hdmi_mode; }; @@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector, return ret; } -struct drm_encoder *omap_connector_attached_encoder( - struct drm_connector *connector) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - return omap_connector->encoder; -} - static const struct drm_connector_funcs omap_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, @@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = { static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .get_modes = omap_connector_get_modes, .mode_valid = omap_connector_mode_valid, - .best_encoder = omap_connector_attached_encoder, }; /* initialize connector */ @@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, goto fail; omap_connector->dssdev = dssdev; - omap_connector->encoder = encoder; connector = &omap_connector->base; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 075f2bb44867..180f644e861e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) copy_timings_drm_to_omap(&omap_crtc->timings, mode); } +static int omap_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + if (state->color_mgmt_changed && state->gamma_lut) { + uint length = state->gamma_lut->length / + sizeof(struct drm_color_lut); + + if (length < 2) + return -EINVAL; + } + + return 0; +} + static void omap_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON(omap_crtc->vblank_irq.registered); + if (crtc->state->color_mgmt_changed) { + struct drm_color_lut *lut = NULL; + uint length = 0; + + if (crtc->state->gamma_lut) { + lut = (struct drm_color_lut *) + crtc->state->gamma_lut->data; + length = crtc->state->gamma_lut->length / + sizeof(*lut); + } + dispc_mgr_set_gamma(omap_crtc->channel, lut, length); + } + if (dispc_mgr_is_enabled(omap_crtc->channel)) { DBG("%s: GO", omap_crtc->name); @@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = omap_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, + .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_property = drm_atomic_helper_crtc_set_property, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_nofb = omap_crtc_mode_set_nofb, .disable = omap_crtc_disable, .enable = omap_crtc_enable, + 
.atomic_check = omap_crtc_atomic_check, .atomic_begin = omap_crtc_atomic_begin, .atomic_flush = omap_crtc_atomic_flush, }; @@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); + /* The dispc API adapts to whatever size, but the HW supports + * 256 element gamma table for LCDs and 1024 element table for + * OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma + * tables so let's use that. Size of HW gamma table can be + * extracted with dispc_mgr_gamma_size(). If it returns 0 + * gamma table is not supported. + */ + if (dispc_mgr_gamma_size(channel)) { + uint gamma_lut_size = 256; + + drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size); + drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size); + } + omap_plane_install_properties(crtc->primary, &crtc->base); omap_crtcs[channel] = omap_crtc; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d86f5479345b..26c6134eb744 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev, { struct omap_drm_private *priv = dev->dev_private; struct omap_atomic_state_commit *commit; - unsigned int i; - int ret; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. */ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); @@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev, spin_unlock(&priv->commit.lock); /* Swap the state, this is the point of no return. 
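For the gamma path added in omap_crtc.c above, the table handed to dispc_mgr_set_gamma() is the standard DRM color LUT; a sketch of a hypothetical identity ramp (not from the patch) that satisfies the length >= 2 check in omap_crtc_atomic_check():

	#include <uapi/drm/drm_mode.h>	/* struct drm_color_lut */

	/* Fill a LUT such as userspace would attach via the GAMMA_LUT
	 * property; channels are 16-bit, so scale the index to 0..0xffff. */
	static void fill_identity_lut(struct drm_color_lut *lut,
				      unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++) {	/* len >= 2 */
			__u16 v = (i * 0xffffU) / (len - 1);

			lut[i].red = v;
			lut[i].green = v;
			lut[i].blue = v;
		}
	}
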
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); @@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev) return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: return DRM_MODE_CONNECTOR_DVID; + case OMAP_DISPLAY_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; default: return DRM_MODE_CONNECTOR_Unknown; } @@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = { .unload = dev_unload, .open = dev_open, .lastclose = dev_lastclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = omap_irq_enable_vblank, .disable_vblank = omap_irq_disable_vblank, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 3f823c368912..dcc30a98b9d4 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -24,7 +24,6 @@ #include <linux/platform_data/omap_drm.h> #include <linux/types.h> #include <linux/wait.h> -#include <video/omapdss.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, @@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, int x, int y, dma_addr_t *paddr); uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); size_t omap_gem_mmap_size(struct drm_gem_object *obj); -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h); int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); struct dma_buf *omap_gem_prime_export(struct drm_device *dev, @@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev, struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, struct dma_buf *buffer); -static inline int align_pitch(int pitch, int width, int bpp) -{ - int bytespp = (bpp + 7) / 8; - /* in case someone tries to feed us a completely bogus stride: */ - pitch = max(pitch, width * bytespp); - /* PVR needs alignment to 8 pixels.. right now that is the most - * restrictive stride requirement.. 
- */ - return roundup(pitch, 8 * bytespp); -} - /* map crtc to vblank mask */ uint32_t pipe2vbl(struct drm_crtc *crtc); struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index f84570d1636c..31f5178c22c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -115,24 +115,16 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb) for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; - if (plane->bo) - drm_gem_object_unreference_unlocked(plane->bo); + + drm_gem_object_unreference_unlocked(plane->bo); } kfree(omap_fb); } -static int omap_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { .create_handle = omap_framebuffer_create_handle, .destroy = omap_framebuffer_destroy, - .dirty = omap_framebuffer_dirty, }; static uint32_t get_linear_addr(struct plane *plane, @@ -320,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb) mutex_unlock(&omap_fb->lock); } -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) -{ - struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - if (p >= drm_format_num_planes(fb->pixel_format)) - return NULL; - return omap_fb->planes[p].bo; -} - /* iterate thru all the connectors, returning ones that are attached * to the same fb.. */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 89da41ac64d2..adb10fbe918d 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = align_pitch( - mode_cmd.width * ((sizes->surface_bpp + 7) / 8), - mode_cmd.width, sizes->surface_bpp); + mode_cmd.pitches[0] = + DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8); fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; if (fbdev->ywrap_enabled) { @@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) if (ret) goto fini; - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, 32); if (ret) goto fini; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 03698b6c806c..505dee0db973 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -383,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) return size; } -/* get tiled size, returns -EINVAL if not tiled buffer */ -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (omap_obj->flags & OMAP_BO_TILED) { - *w = omap_obj->width; - *h = omap_obj->height; - return 0; - } - return -EINVAL; -} - /* ----------------------------------------------------------------------------- * Fault Handling */ @@ -661,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, { union omap_gem_size gsize; - args->pitch = align_pitch(0, args->width, args->bpp); + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + args->size = PAGE_ALIGN(args->pitch * args->height); 
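	/* Worked example of the two added lines above (illustrative numbers,
	 * not from the patch): a 1920x1080, 32 bpp dumb buffer gives
	 *
	 *	pitch = DIV_ROUND_UP(1920 * 32, 8)  = 7680 bytes per row
	 *	size  = PAGE_ALIGN(7680 * 1080)     = 8294400 bytes
	 *
	 * so rows are now byte-packed and only the total size is padded to a
	 * page, where the removed align_pitch() also padded each row to
	 * 8 pixels. */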
gsize = (union omap_gem_size){ @@ -1407,7 +1396,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, if (ret) goto err_free; - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 3a7bdf1c842b..85143d1b9b31 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel) if (p->backlight) { p->backlight->props.power = FB_BLANK_POWERDOWN; + p->backlight->props.state |= BL_CORE_FBBLANK; backlight_update_status(p->backlight); } @@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel) msleep(p->desc->delay.enable); if (p->backlight) { + p->backlight->props.state &= ~BL_CORE_FBBLANK; p->backlight->props.power = FB_BLANK_UNBLANK; backlight_update_status(p->backlight); } @@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = { .num_modes = 1, .bpc = 6, .size = { - .width = 1024, - .height = 600, + .width = 154, + .height = 90, }, }; @@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, }; +static const struct drm_display_mode lg_lp079qx1_sp0v_mode = { + .clock = 200000, + .hdisplay = 1536, + .hsync_start = 1536 + 12, + .hsync_end = 1536 + 12 + 16, + .htotal = 1536 + 12 + 16 + 48, + .vdisplay = 2048, + .vsync_start = 2048 + 8, + .vsync_end = 2048 + 8 + 4, + .vtotal = 2048 + 8 + 4 + 8, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc lg_lp079qx1_sp0v = { + .modes = &lg_lp079qx1_sp0v_mode, + .num_modes = 1, + .size = { + .width = 129, + .height = 171, + }, +}; + +static const struct drm_display_mode lg_lp097qx1_spa1_mode = { + .clock = 205210, + .hdisplay = 2048, + .hsync_start = 2048 + 150, + .hsync_end = 2048 + 150 + 5, + .htotal = 2048 + 150 + 5 + 5, + .vdisplay = 1536, + .vsync_start = 1536 + 3, + .vsync_end = 1536 + 3 + 1, + .vtotal = 1536 + 3 + 1 + 9, + .vrefresh = 60, +}; + +static const struct panel_desc lg_lp097qx1_spa1 = { + .modes = &lg_lp097qx1_spa1_mode, + .num_modes = 1, + .size = { + .width = 208, + .height = 147, + }, +}; + static const struct drm_display_mode lg_lp120up1_mode = { .clock = 162300, .hdisplay = 1920, @@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode samsung_lsn122dl01_c01_mode = { + .clock = 271560, + .hdisplay = 2560, + .hsync_start = 2560 + 48, + .hsync_end = 2560 + 48 + 32, + .htotal = 2560 + 48 + 32 + 80, + .vdisplay = 1600, + .vsync_start = 1600 + 2, + .vsync_end = 1600 + 2 + 5, + .vtotal = 1600 + 2 + 5 + 57, + .vrefresh = 60, +}; + +static const struct panel_desc samsung_lsn122dl01_c01 = { + .modes = &samsung_lsn122dl01_c01_mode, + .num_modes = 1, + .size = { + .width = 263, + .height = 164, + }, +}; + static const struct drm_display_mode samsung_ltn101nt05_mode = { .clock = 54030, .hdisplay = 1024, @@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = { .num_modes = 1, .bpc = 6, .size = { - .width = 1024, - .height = 600, + .width = 223, + .height = 125, }, }; @@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = { }, }; +static const struct display_timing sharp_lq101k1ly04_timing = { + .pixelclock = { 60000000, 65000000, 80000000 }, + .hactive = { 1280, 1280, 1280 }, 
+ .hfront_porch = { 20, 20, 20 }, + .hback_porch = { 20, 20, 20 }, + .hsync_len = { 10, 10, 10 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 4, 4, 4 }, + .vback_porch = { 4, 4, 4 }, + .vsync_len = { 4, 4, 4 }, + .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE, +}; + +static const struct panel_desc sharp_lq101k1ly04 = { + .timings = &sharp_lq101k1ly04_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 217, + .height = 136, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, +}; + +static const struct drm_display_mode sharp_lq123p1jx31_mode = { + .clock = 252750, + .hdisplay = 2400, + .hsync_start = 2400 + 48, + .hsync_end = 2400 + 48 + 32, + .htotal = 2400 + 48 + 32 + 80, + .vdisplay = 1600, + .vsync_start = 1600 + 3, + .vsync_end = 1600 + 3 + 10, + .vtotal = 1600 + 3 + 10 + 33, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc sharp_lq123p1jx31 = { + .modes = &sharp_lq123p1jx31_mode, + .num_modes = 1, + .size = { + .width = 259, + .height = 173, + }, +}; + static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = { .clock = 33300, .hdisplay = 800, @@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode starry_kr122ea0sra_mode = { + .clock = 147000, + .hdisplay = 1920, + .hsync_start = 1920 + 16, + .hsync_end = 1920 + 16 + 16, + .htotal = 1920 + 16 + 16 + 32, + .vdisplay = 1200, + .vsync_start = 1200 + 15, + .vsync_end = 1200 + 15 + 2, + .vtotal = 1200 + 15 + 2 + 18, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc starry_kr122ea0sra = { + .modes = &starry_kr122ea0sra_mode, + .num_modes = 1, + .size = { + .width = 263, + .height = 164, + }, +}; + static const struct drm_display_mode tpk_f07a_0102_mode = { .clock = 33260, .hdisplay = 800, @@ -1457,6 +1596,12 @@ static const struct of_device_id platform_of_match[] = { .compatible = "lg,lb070wv8", .data = &lg_lb070wv8, }, { + .compatible = "lg,lp079qx1-sp0v", + .data = &lg_lp079qx1_sp0v, + }, { + .compatible = "lg,lp097qx1-spa1", + .data = &lg_lp097qx1_spa1, + }, { .compatible = "lg,lp120up1", .data = &lg_lp120up1, }, { @@ -1481,15 +1626,27 @@ static const struct of_device_id platform_of_match[] = { .compatible = "qiaodian,qd43003c0-40", .data = &qd43003c0_40, }, { + .compatible = "samsung,lsn122dl01-c01", + .data = &samsung_lsn122dl01_c01, + }, { .compatible = "samsung,ltn101nt05", .data = &samsung_ltn101nt05, }, { .compatible = "samsung,ltn140at29-301", .data = &samsung_ltn140at29_301, }, { + .compatible = "sharp,lq101k1ly04", + .data = &sharp_lq101k1ly04, + }, { + .compatible = "sharp,lq123p1jx31", + .data = &sharp_lq123p1jx31, + }, { .compatible = "shelly,sca07010-bfn-lnn", .data = &shelly_sca07010_bfn_lnn, }, { + .compatible = "starry,kr122ea0sra", + .data = &starry_kr122ea0sra, + }, { .compatible = "tpk,f07a-0102", .data = &tpk_f07a_0102, }, { @@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = { .lanes = 4, }; - static const struct of_device_id dsi_of_match[] = { { .compatible = "auo,b080uan01", diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 38c2bb72e456..da45b11b66b8 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -1,12 +1,7 @@ config DRM_QXL tristate "QXL virtual GPU" depends on DRM && PCI - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_DEFERRED_IO 
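For the panel entries added above, the .vrefresh field can be cross-checked against the raw timings, since refresh = clock / (htotal * vtotal); the .size fixes above likewise switch those fields to physical millimetres. A sketch using the starry,kr122ea0sra numbers (DIV_ROUND_CLOSEST as in <linux/kernel.h>):

	static unsigned int starry_kr122ea0sra_vrefresh(void)
	{
		unsigned long clock = 147000UL * 1000;		/* .clock is in kHz */
		unsigned long htotal = 1920 + 16 + 16 + 32;	/* 1984 */
		unsigned long vtotal = 1200 + 15 + 2 + 18;	/* 1235 */

		return DIV_ROUND_CLOSEST(clock, htotal * vtotal); /* 60 */
	}
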
select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM select CRC32 help diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index b5d4b41361bd..04270f5d110c 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) { if (!qxl_check_idle(qdev->release_ring)) { - queue_work(qdev->gc_queue, &qdev->gc_work); + schedule_work(&qdev->gc_work); if (flush) flush_work(&qdev->gc_work); return true; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 8b5d54385892..3aef12742a53 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); @@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); - drm_vblank_get(dev, qcrtc->index); + drm_crtc_vblank_get(crtc); if (event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, qcrtc->index, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); } - drm_vblank_put(dev, qcrtc->index); + drm_crtc_vblank_put(crtc); ret = qxl_bo_reserve(bo, false); if (!ret) { @@ -469,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); - if (qxl_fb->obj) - drm_gem_object_unreference_unlocked(qxl_fb->obj); + drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); } @@ -730,7 +728,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id) drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); qxl_crtc->index = crtc_id; - drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 56e1d633875e..ffe885395145 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev, * the qxl_clip_rects. This is *not* the same as the memory allocated * on the device, it is offset to qxl_clip_rects.chunk.data */ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, - struct qxl_drawable *drawable, unsigned num_clips, struct qxl_bo *clips_bo) { @@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, * correctly globally, since that would require * tracking all of our palettes. 
*/ ret = qxl_bo_kmap(palette_bo, (void **)&pal); + if (ret) + return ret; pal->num_ents = 2; pal->unique = unique++; if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { @@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, if (ret) goto out_release_backoff; - rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); + rects = drawable_set_clipping(qdev, num_clips, clips_bo); if (!rects) goto out_release_backoff; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index dc9df5fe50ba..460bbceae297 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = { .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, - .gem_free_object = qxl_gem_object_free, + .gem_free_object_unlocked = qxl_gem_object_free, .gem_open_object = qxl_gem_object_open, .gem_close_object = qxl_gem_object_close, .fops = &qxl_fops, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3ad6604b34ce..8e633caa4078 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -321,7 +321,6 @@ struct qxl_device { struct qxl_bo *current_release_bo[3]; int current_release_bo_offset[3]; - struct workqueue_struct *gc_queue; struct work_struct gc_work; struct drm_property *hotplug_mode_update_property; diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 5ea57f6320b8..df2657051afd 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, int ret; int aligned_size, size; int height = mode_cmd->height; - int bpp; - int depth; - - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 2319800b7add..e642242728c0 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev, (unsigned long)qdev->surfaceram_size); - qdev->gc_queue = create_singlethread_workqueue("qxl_gc"); INIT_WORK(&qdev->gc_work, qxl_gc_work); return 0; @@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev) qxl_bo_unref(&qdev->current_release_bo[0]); if (qdev->current_release_bo[1]) qxl_bo_unref(&qdev->current_release_bo[1]); - flush_workqueue(qdev->gc_queue); - destroy_workqueue(qdev->gc_queue); - qdev->gc_queue = NULL; - + flush_work(&qdev->gc_work); qxl_ring_free(qdev->command_ring); qxl_ring_free(qdev->cursor_ring); qxl_ring_free(qdev->release_ring); @@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags) struct qxl_device *qdev; int r; - /* require kms */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); if (qdev == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 4efa8e261baf..f599cd073b72 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -96,7 +96,7 @@ retry: return 0; if (have_drawable_releases && sc > 300) { - FENCE_WARN(fence, "failed to wait on release %d " + FENCE_WARN(fence, "failed to wait on release %llu " "after spincount %d\n", fence->context & ~0xf0000000, sc); goto signaled; diff --git 
a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 0738d74c8d04..d50c9679e631 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; + int ret; + + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { qxl_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + return ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 259cd6e6d71c..a97abc8af657 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->enabled) atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 587cae4e73c9..56bb758f4e33 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); else { diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 35e0fc3ae8a7..7ba450832e6b 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, if (i >= sclk_table->count) { pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; } else { - /* XXX check display min clock requirements */ + /* XXX The previous code always reprogrammed the sclk levels, + * but we don't currently handle display sclk requirements, + * so just skip it.
+ */ if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ba192a35c607..0c1b9ff433af 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); MODULE_FIRMWARE("radeon/HAWAII_me.bin"); @@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); MODULE_FIRMWARE("radeon/KAVERI_me.bin"); @@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev) int new_fw = 0; int err; int num_fw; + bool new_smc = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_BONAIRE: chip_name = "BONAIRE"; + if ((rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x665f)) + new_smc = true; new_chip_name = "bonaire"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; @@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev) break; case CHIP_HAWAII: chip_name = "HAWAII"; + if (rdev->pdev->revision == 0x80) + new_smc = true; new_chip_name = "hawaii"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; @@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); + if (new_smc) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); @@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev) } } rdev->rlc.cs_data = ci_cs_data; - rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + rdev->rlc.cp_table_size += 64 * 1024; /* GDS */ r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 0d3f744de35a..d960d3915408 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } break; } + case PACKET3_PFP_SYNC_ME: + if (pkt->count) { + DRM_ERROR("bad PFP_SYNC_ME\n"); + return -EINVAL; + } + break; case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { DRM_ERROR("bad SURFACE_SYNC\n"); @@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, case PACKET3_MPEG_INDEX: case PACKET3_WAIT_REG_MEM: case PACKET3_MEM_WRITE: + case PACKET3_PFP_SYNC_ME: case PACKET3_SURFACE_SYNC: case PACKET3_EVENT_WRITE: case PACKET3_EVENT_WRITE_EOP: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 0b174e14e9a6..c8e3d394cde7 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1624,6 +1624,7 @@ */ # define 
PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) +#define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 80b24a495d6c..5633ee3eb46e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2386,7 +2386,7 @@ struct radeon_device { struct radeon_mman mman; struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; wait_queue_head_t fence_queue; - unsigned fence_context; + u64 fence_context; struct mutex ring_lock; struct radeon_ring ring[RADEON_NUM_RINGS]; bool ib_pool_ready; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 59acd0e5c2c6..31c9a92d6a1b 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev) } atif->encoder_for_bl = target; - if (!target) { - /* Brightness change notification is enabled, but we - * didn't find a backlight controller, this should - * never happen. - */ - DRM_ERROR("Cannot find a backlight controller\n"); - } } if (atif->functions.sbios_requests && !atif->functions.system_params) { diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f8097a0e7a79..5df3ec73021b 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; - if (crev < 2) + if ((frev < 2) && (crev < 2)) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else @@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 95f4fea89302..6de342861202 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "radeon_acpi.h" @@ -27,6 +28,7 @@ struct radeon_atpx_functions { struct radeon_atpx { acpi_handle handle; struct radeon_atpx_functions functions; + bool is_hybrid; }; static struct radeon_atpx_priv { @@ -62,6 +64,14 @@ bool radeon_has_atpx(void) { return radeon_atpx_priv.atpx_detected; } +bool radeon_has_atpx_dgpu_power_cntl(void) { + return radeon_atpx_priv.atpx.functions.power_cntl; +} + +bool radeon_is_atpx_hybrid(void) { + return radeon_atpx_priv.atpx.is_hybrid; +} + /** * radeon_atpx_call - call an ATPX method * @@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas */ static int radeon_atpx_validate(struct radeon_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - if (atpx->functions.power_cntl == false) { - printk("ATPX dGPU power cntl not present, forcing\n"); - atpx->functions.power_cntl = true; - } + u32 valid_bits = 0; if (atpx->functions.px_params) { union 
acpi_object *info; struct atpx_px_params output; size_t size; - u32 valid_bits; info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) @@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; kfree(info); } + + /* if separate mux flag is set, mux controls are required */ + if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { + atpx->functions.i2c_mux_cntl = true; + atpx->functions.disp_mux_cntl = true; + } + /* if any outputs are muxed, mux controls are required */ + if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | + ATPX_TV_SIGNAL_MUXED | + ATPX_DFP_SIGNAL_MUXED)) + atpx->functions.disp_mux_cntl = true; + + /* some BIOSes set these bits rather than flagging power_cntl as supported */ + if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | + ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) + atpx->functions.power_cntl = true; + + atpx->is_hybrid = false; + if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { + printk("ATPX Hybrid Graphics\n"); +#if 1 + /* This is a temporary hack until the D3 cold support + * makes it upstream. The ATPX power_control method seems + * to still work even if the system should be using + * the new standardized hybrid D3 cold ACPI interface. + */ + atpx->functions.power_cntl = true; +#else + atpx->functions.power_cntl = false; +#endif + atpx->is_hybrid = true; + } + return 0; } @@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } @@ -505,7 +536,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler radeon_atpx_handler = { .switchto = radeon_atpx_switchto, .power_state = radeon_atpx_power_state, - .init = radeon_atpx_init, .get_client_id = radeon_atpx_get_client_id, }; @@ -541,6 +571,7 @@ static bool radeon_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; + radeon_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 81a63d7f5cd9..b79f3b002471 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev, RADEON_OUTPUT_CSC_BYPASS); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -2391,7 +2392,6 @@
radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 21c44b2293bc..a00dd2f74527 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> +#include <linux/pm_runtime.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <linux/efi.h> @@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev, return 0; failed: + /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ + if (radeon_is_px(ddev)) + pm_runtime_put_noidle(ddev->dev); if (runtime) vga_switcheroo_fini_domain_pm_ops(rdev->dev); return r; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a41b4982647..c3206fb8f4cf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = radeon_crtc->lut_b[regno] << 6; } -static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { radeon_crtc->lut_r[i] = red[i] >> 6; radeon_crtc->lut_g[i] = green[i] >> 6; radeon_crtc->lut_b[i] = blue[i] >> 6; } radeon_crtc_load_lut(crtc); + + return 0; } static void radeon_crtc_destroy(struct drm_crtc *crtc) @@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); - drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(&radeon_crtc->base); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); queue_work(radeon_crtc->flip_queue, &work->unpin_work); } @@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, } work->base = base; - r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { @@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set) pm_runtime_put_autosuspend(dev->dev); return ret; } + static const struct drm_crtc_funcs radeon_crtc_funcs = { .cursor_set2 = radeon_crtc_cursor_set2, .cursor_move = radeon_crtc_cursor_move, @@ -711,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; - radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc"); + radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); rdev->mode_info.crtcs[index] = radeon_crtc; if (rdev->family >= CHIP_BONAIRE) { @@ -1321,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - if (radeon_fb->obj) { - drm_gem_object_unreference_unlocked(radeon_fb->obj); - } + drm_gem_object_unreference_unlocked(radeon_fb->obj); drm_framebuffer_cleanup(fb); kfree(radeon_fb); } @@ -1708,6 +1709,7 @@ void radeon_modeset_fini(struct radeon_device *rdev) radeon_afmt_fini(rdev); drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); + drm_crtc_force_disable_all(rdev->ddev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b55aa740171f..c01a7c6abb49 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -34,11 +34,9 @@ #include "radeon_drv.h" #include <drm/drm_pciids.h> -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_gem.h> @@ -95,9 +93,10 @@ * 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER * 2.44.0 - SET_APPEND_CNT packet3 support * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI + * 2.46.0 - Add PFP_SYNC_ME support on evergreen */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 45 +#define KMS_DRIVER_MINOR 46 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -164,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor); 
#if defined(CONFIG_VGA_SWITCHEROO) void radeon_register_atpx_handler(void); void radeon_unregister_atpx_handler(void); +bool radeon_has_atpx_dgpu_power_cntl(void); +bool radeon_is_atpx_hybrid(void); #else static inline void radeon_register_atpx_handler(void) {} static inline void radeon_unregister_atpx_handler(void) {} +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool radeon_is_atpx_hybrid(void) { return false; } #endif int radeon_no_wb; @@ -340,13 +343,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret == -EPROBE_DEFER) return ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* Get rid of things like offb */ @@ -412,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); - pci_set_power_state(pdev, PCI_D3cold); + if (radeon_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!radeon_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return 0; @@ -429,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(pdev, PCI_D0); + if (radeon_is_atpx_hybrid() || + !radeon_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 414953c46a38..835563c1f0ed 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev) if (rdev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (radeon_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } radeon_kfd_device_fini(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 478d4099b0d0..d0de4022fff9 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); } if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); else { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 38226d925a5b..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) static void radeon_pm_set_clocks(struct radeon_device *rdev) { + struct drm_crtc *crtc; int i, r; /* no need to take locks, etc. 
if nothing's going to change */ @@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) radeon_unmap_vram_bos(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.active_crtcs & (1 << i)) { /* This can fail if a modeset is in progress */ - if (drm_vblank_get(rdev->ddev, i) == 0) + if (drm_crtc_vblank_get(crtc) == 0) rdev->pm.req_vblank |= (1 << i); else DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", i); } + i++; } } radeon_set_power_state(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.req_vblank & (1 << i)) { rdev->pm.req_vblank &= ~(1 << i); - drm_vblank_put(rdev->ddev, i); + drm_crtc_vblank_put(crtc); } + i++; } } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 590b0377fbe2..ffdad81ef964 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (IS_ERR(fence)) return PTR_ERR(fence); - r = ttm_bo_move_accel_cleanup(bo, &fence->base, - evict, no_wait_gpu, new_mem); + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem); radeon_fence_unref(&fence); return r; } @@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem = &bo->mem; int r; + r = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (r) + return r; + /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); if (WARN_ON_ONCE(rbo->pin_count > 0)) @@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index b30e719dd56d..2523ca96c6c7 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin"); MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); MODULE_FIRMWARE("radeon/tahiti_smc.bin"); +MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); @@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin"); MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); MODULE_FIRMWARE("radeon/pitcairn_rlc.bin"); MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); @@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin"); MODULE_FIRMWARE("radeon/verde_mc.bin"); MODULE_FIRMWARE("radeon/verde_rlc.bin"); MODULE_FIRMWARE("radeon/verde_smc.bin"); +MODULE_FIRMWARE("radeon/verde_k_smc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); @@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin"); MODULE_FIRMWARE("radeon/oland_mc.bin"); MODULE_FIRMWARE("radeon/oland_rlc.bin"); MODULE_FIRMWARE("radeon/oland_smc.bin"); +MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); @@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin"); MODULE_FIRMWARE("radeon/hainan_mc.bin"); MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); 
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); @@ -1653,12 +1658,16 @@ static int si_init_microcode(struct radeon_device *rdev) char fw_name[30]; int err; int new_fw = 0; + bool new_smc = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; + /* XXX: figure out which Tahitis need the new ucode */ + if (0) + new_smc = true; new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811) || + (rdev->pdev->device == 0x6816) || + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + new_smc = true; new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_VERDE: chip_name = "VERDE"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6820) || + (rdev->pdev->device == 0x6821) || + (rdev->pdev->device == 0x6822) || + (rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682A) || + (rdev->pdev->device == 0x682B)) + new_smc = true; new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_OLAND: chip_name = "OLAND"; + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) + new_smc = true; new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_HAINAN: chip_name = "HAINAN"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6664) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)) + new_smc = true; new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); + if (new_smc) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 7fc3ca5ce6c7..4c2fd056dd6d 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -6,7 +6,6 @@ config DRM_RCAR_DU select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_FB_HELPER select VIDEOMODE_HELPERS help Choose this option if you have an R-Car chipset. 
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 827711e28226..d3b44651061a 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -7,8 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_plane.o \ rcar_du_vgacon.o -rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \ - rcar_du_hdmienc.o +rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o + rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 0d8bdda736f9..e39fcef2e033 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); if (status & DSSR_FRM) { - drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); + drm_crtc_handle_vblank(&rcrtc->crtc); rcar_du_crtc_finish_page_flip(rcrtc); ret = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index fb9242d27883..899ef7a2a7b4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev) struct rcar_du_device *rcdu = platform_get_drvdata(pdev); struct drm_device *ddev = rcdu->ddev; - drm_connector_unregister_all(ddev); drm_dev_unregister(ddev); if (rcdu->fbdev) @@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev) if (!ddev) return -ENOMEM; - drm_dev_set_unique(ddev, dev_name(&pdev->dev)); - rcdu->ddev = ddev; ddev->dev_private = rcdu; @@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev) * disabled for all CRTCs. 
*/ ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize vblank\n"); + if (ret < 0) goto error; - } /* DRM/KMS objects */ ret = rcar_du_modeset_init(rcdu); if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to initialize DRM/KMS (%d)\n", ret); goto error; } @@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev) if (ret) goto error; - ret = drm_connector_register_all(ddev); - if (ret < 0) - goto error; - DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 4e939e41f030..ab8645c57e2d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -19,7 +19,6 @@ #include "rcar_du_drv.h" #include "rcar_du_encoder.h" -#include "rcar_du_hdmicon.h" #include "rcar_du_hdmienc.h" #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" @@ -27,18 +26,6 @@ #include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- - * Common connector functions - */ - -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector) -{ - struct rcar_du_connector *rcon = to_rcar_connector(connector); - - return rcar_encoder_to_drm_encoder(rcon->encoder); -} - -/* ----------------------------------------------------------------------------- * Encoder */ @@ -186,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, break; case DRM_MODE_ENCODER_TMDS: - ret = rcar_du_hdmi_connector_init(rcdu, renc); + /* connector managed by the bridge driver */ break; default: diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 719b6f2a031c..7fc10a9c34c3 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -15,7 +15,6 @@ #define __RCAR_DU_ENCODER_H__ #include <drm/drm_crtc.h> -#include <drm/drm_encoder_slave.h> struct rcar_du_device; struct rcar_du_hdmienc; @@ -30,16 +29,16 @@ enum rcar_du_encoder_type { }; struct rcar_du_encoder { - struct drm_encoder_slave slave; + struct drm_encoder base; enum rcar_du_output output; struct rcar_du_hdmienc *hdmi; struct rcar_du_lvdsenc *lvds; }; #define to_rcar_encoder(e) \ - container_of(e, struct rcar_du_encoder, slave.base) + container_of(e, struct rcar_du_encoder, base) -#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base) +#define rcar_encoder_to_drm_encoder(e) (&(e)->base) struct rcar_du_connector { struct drm_connector connector; @@ -49,9 +48,6 @@ struct rcar_du_connector { #define to_rcar_connector(c) \ container_of(c, struct rcar_du_connector, connector) -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector); - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_encoder_type type, enum rcar_du_output output, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c deleted file mode 100644 index 6c927144b5c9..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * R-Car Display Unit HDMI Connector - * - * Copyright (C) 2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General 
Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_encoder_slave.h> - -#include "rcar_du_drv.h" -#include "rcar_du_encoder.h" -#include "rcar_du_hdmicon.h" -#include "rcar_du_kms.h" - -#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs) - -static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector) -{ - struct rcar_du_connector *con = to_rcar_connector(connector); - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - - if (sfuncs->get_modes == NULL) - return 0; - - return sfuncs->get_modes(encoder, connector); -} - -static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct rcar_du_connector *con = to_rcar_connector(connector); - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - - if (sfuncs->mode_valid == NULL) - return MODE_OK; - - return sfuncs->mode_valid(encoder, mode); -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = rcar_du_hdmi_connector_get_modes, - .mode_valid = rcar_du_hdmi_connector_mode_valid, - .best_encoder = rcar_du_connector_best_encoder, -}; - -static enum drm_connector_status -rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) -{ - struct rcar_du_connector *con = to_rcar_connector(connector); - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - - if (sfuncs->detect == NULL) - return connector_status_unknown; - - return sfuncs->detect(encoder, connector); -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, - .reset = drm_atomic_helper_connector_reset, - .detect = rcar_du_hdmi_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc) -{ - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); - struct rcar_du_connector *rcon; - struct drm_connector *connector; - int ret; - - rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL); - if (rcon == NULL) - return -ENOMEM; - - connector = &rcon->connector; - connector->display_info.width_mm = 0; - connector->display_info.height_mm = 0; - connector->interlace_allowed = true; - connector->polled = DRM_CONNECTOR_POLL_HPD; - - ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - if (ret < 0) - return ret; - - drm_connector_helper_add(connector, &connector_helper_funcs); - - connector->dpms = DRM_MODE_DPMS_OFF; - drm_object_property_set_value(&connector->base, - rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); - - ret = drm_mode_connector_attach_encoder(connector, encoder); - if (ret < 0) - return ret; - - rcon->encoder = renc; - - return 0; -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h 
b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h deleted file mode 100644 index 87daa949227f..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * R-Car Display Unit HDMI Connector - * - * Copyright (C) 2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __RCAR_DU_HDMICON_H__ -#define __RCAR_DU_HDMICON_H__ - -struct rcar_du_device; -struct rcar_du_encoder; - -#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI) -int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc); -#else -static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc) -{ - return -ENOSYS; -} -#endif - -#endif /* __RCAR_DU_HDMICON_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 461662d231e2..4de3ff0dbebd 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -16,7 +16,6 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_encoder_slave.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" @@ -25,20 +24,14 @@ struct rcar_du_hdmienc { struct rcar_du_encoder *renc; - struct device *dev; bool enabled; }; #define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi) -#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs) static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - - if (sfuncs->dpms) - sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF); if (hdmienc->renc->lvds) rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, @@ -50,15 +43,11 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (hdmienc->renc->lvds) rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, true); - if (sfuncs->dpms) - sfuncs->dpms(encoder, DRM_MODE_DPMS_ON); - hdmienc->enabled = true; } @@ -67,29 +56,21 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; - const struct drm_display_mode *mode = &crtc_state->mode; if (hdmienc->renc->lvds) rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds, adjusted_mode); - if (sfuncs->mode_fixup == NULL) - return 0; - - return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 
0 : -EINVAL; + return 0; } + static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - - if (sfuncs->mode_set) - sfuncs->mode_set(encoder, mode, adjusted_mode); rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output); } @@ -109,7 +90,6 @@ static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder) rcar_du_hdmienc_disable(encoder); drm_encoder_cleanup(encoder); - put_device(hdmienc->dev); } static const struct drm_encoder_funcs encoder_funcs = { @@ -120,8 +100,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, struct rcar_du_encoder *renc, struct device_node *np) { struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); - struct drm_i2c_encoder_driver *driver; - struct i2c_client *i2c_slave; + struct drm_bridge *bridge; struct rcar_du_hdmienc *hdmienc; int ret; @@ -129,44 +108,29 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, if (hdmienc == NULL) return -ENOMEM; - /* Locate the slave I2C device and driver. */ - i2c_slave = of_find_i2c_device_by_node(np); - if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { - dev_dbg(rcdu->dev, - "can't get I2C slave for %s, deferring probe\n", - of_node_full_name(np)); + /* Locate drm bridge from the hdmi encoder DT node */ + bridge = of_drm_find_bridge(np); + if (!bridge) return -EPROBE_DEFER; - } - - hdmienc->dev = &i2c_slave->dev; - - if (hdmienc->dev->driver == NULL) { - dev_dbg(rcdu->dev, - "I2C slave %s not probed yet, deferring probe\n", - dev_name(hdmienc->dev)); - ret = -EPROBE_DEFER; - goto error; - } - - /* Initialize the slave encoder. */ - driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver)); - ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave); - if (ret < 0) - goto error; ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret < 0) - goto error; + return ret; drm_encoder_helper_add(encoder, &encoder_helper_funcs); renc->hdmi = hdmienc; hdmienc->renc = renc; - return 0; + /* Link drm_bridge to encoder */ + bridge->encoder = encoder; + + ret = drm_bridge_attach(rcdu->ddev, bridge); + if (ret) { + drm_encoder_cleanup(encoder); + return ret; + } -error: - put_device(hdmienc->dev); - return ret; + return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index e70a4f33d970..6bb032d8ac6b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, { struct rcar_du_device *rcdu = dev->dev_private; struct rcar_du_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; unsigned int i; int ret; @@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. */ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); spin_lock(&rcdu->commit.wait.lock); ret = wait_event_interruptible_locked(rcdu->commit.wait, @@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. 
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index e905f5da7aaa..6afd0af312ba 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_lvds_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index d445e67f78e1..bfe31ca870cc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, bool needs_realloc = false; unsigned int groups = 0; unsigned int i; + struct drm_plane *drm_plane; + struct drm_plane_state *drm_plane_state; /* Check if hardware planes need to be reallocated. */ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int index; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); @@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, } /* Reallocate hardware planes for each plane that needs it. 
*/ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int crtc_planes; unsigned int free; int idx; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index d2f66068e52c..fedb0161e234 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -195,9 +195,10 @@ #define DEFR6_ODPM12_DISP (2 << 8) #define DEFR6_ODPM12_CDE (3 << 8) #define DEFR6_ODPM12_MASK (3 << 8) -#define DEFR6_TCNE2 (1 << 6) +#define DEFR6_TCNE1 (1 << 6) +#define DEFR6_TCNE0 (1 << 4) #define DEFR6_MLOS1 (1 << 2) -#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) +#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1) /* ----------------------------------------------------------------------------- * R8A7790-only Control Registers diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 9d7e5c99caf6..8d6125c1c0f9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_vga_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status @@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - rcon->encoder = renc; - return 0; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index d30bdc38a760..3c58669a06ce 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -2,12 +2,9 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU depends on RESET_CONTROLLER + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_PANEL - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select VIDEOMODE_HELPERS help Choose this option if you have a Rockchip SoC.
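The rcar_du_plane.c hunks above show the conversion this series applies across several drivers: instead of indexing every slot up to num_total_plane and skipping NULL entries, the code walks only the planes actually tracked by the atomic state. A rough sketch of the iteration shape under the DRM atomic API of this kernel generation; the function name is ours and driver-specific casts are omitted:

#include <drm/drm_atomic.h>

/* Sketch: for_each_plane_in_state() only yields planes that have a
 * state attached to this commit, so the old NULL check on
 * state->planes[i] is no longer needed.
 */
static void example_walk_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	unsigned int i;

	for_each_plane_in_state(state, plane, plane_state, i) {
		/* inspect or adjust plane_state here */
	}
}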
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 7f6a55cae27a..89aadbf465f8 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -14,6 +14,7 @@ #include <linux/component.h> #include <linux/mfd/syscon.h> +#include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -33,13 +34,28 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" +#define RK3288_GRF_SOC_CON6 0x25c +#define RK3288_EDP_LCDC_SEL BIT(5) +#define RK3399_GRF_SOC_CON20 0x6250 +#define RK3399_EDP_LCDC_SEL BIT(5) + +#define HIWORD_UPDATE(val, mask) (val | (mask) << 16) + #define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm) -/* dp grf register offset */ -#define GRF_SOC_CON6 0x025c -#define GRF_EDP_LCD_SEL_MASK BIT(5) -#define GRF_EDP_SEL_VOP_LIT BIT(5) -#define GRF_EDP_SEL_VOP_BIG 0 +/** + * struct rockchip_dp_chip_data - chip-specific GRF settings + * @lcdsel_grf_reg: grf register offset of lcdc select + * @lcdsel_big: reg value of selecting vop big for eDP + * @lcdsel_lit: reg value of selecting vop little for eDP + * @chip_type: specific chip type + */ +struct rockchip_dp_chip_data { + u32 lcdsel_grf_reg; + u32 lcdsel_big; + u32 lcdsel_lit; + u32 chip_type; +}; struct rockchip_dp_device { struct drm_device *drm_dev; @@ -48,9 +64,12 @@ struct rockchip_dp_device { struct drm_display_mode mode; struct clk *pclk; + struct clk *grfclk; struct regmap *grf; struct reset_control *rst; + const struct rockchip_dp_chip_data *data; + struct analogix_dp_plat_data plat_data; }; @@ -77,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data) ret = rockchip_dp_pre_init(dp); if (ret < 0) { dev_err(dp->dev, "failed to dp pre init %d\n", ret); + clk_disable_unprepare(dp->pclk); return ret; } @@ -92,6 +112,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data) return 0; } +static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data, + struct drm_connector *connector) +{ + struct drm_display_info *di = &connector->display_info; + /* the VOP cannot correctly output YUV video formats over eDP */ + u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422; + + if ((di->color_formats & mask)) { + DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n"); + di->color_formats &= ~mask; + di->color_formats |= DRM_COLOR_FORMAT_RGB444; + di->bpc = 8; + } + + return 0; +} + static bool rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, @@ -119,17 +156,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder) return; if (ret) - val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16); + val = dp->data->lcdsel_lit; else - val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16); + val = dp->data->lcdsel_big; dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ?
"LIT" : "BIG"); - ret = regmap_write(dp->grf, GRF_SOC_CON6, val); - if (ret != 0) { - dev_err(dp->dev, "Could not write to GRF: %d\n", ret); + ret = clk_prepare_enable(dp->grfclk); + if (ret < 0) { + dev_err(dp->dev, "failed to enable grfclk %d\n", ret); return; } + + ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val); + if (ret != 0) + dev_err(dp->dev, "Could not write to GRF: %d\n", ret); + + clk_disable_unprepare(dp->grfclk); } static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder) @@ -143,22 +186,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + struct rockchip_dp_device *dp = to_dp(encoder); + int ret; /* - * FIXME(Yakir): driver should configure the CRTC output video - * mode with the display information which indicated the monitor - * support colorimetry. - * - * But don't know why the CRTC driver seems could only output the - * RGBaaa rightly. For example, if connect the "innolux,n116bge" - * eDP screen, EDID would indicated that screen only accepted the - * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP - * screen would show a blue picture (RGB888 show a green picture). - * But if I configure CTRC to RGBaaa, and eDP driver still keep - * RGB666 input video mode, then screen would works prefect. + * The hardware IC designed that VOP must output the RGB10 video + * format to eDP controller, and if eDP panel only support RGB8, + * then eDP controller should cut down the video data, not via VOP + * controller, that's why we need to hardcode the VOP output mode + * to RGA10 here. */ + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; s->output_type = DRM_MODE_CONNECTOR_eDP; + if (dp->data->chip_type == RK3399_EDP) { + /* + * For RK3399, VOP Lit must code the out mode to RGB888, + * VOP Big must code the out mode to RGB10. 
+ */ + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, + encoder); + if (ret > 0) + s->output_mode = ROCKCHIP_OUT_MODE_P888; + } return 0; } @@ -192,6 +242,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp) return PTR_ERR(dp->grf); } + dp->grfclk = devm_clk_get(dev, "grf"); + if (PTR_ERR(dp->grfclk) == -ENOENT) { + dp->grfclk = NULL; + } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (IS_ERR(dp->grfclk)) { + dev_err(dev, "failed to get grf clock\n"); + return PTR_ERR(dp->grfclk); + } + dp->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(dp->pclk)) { dev_err(dev, "failed to get pclk property\n"); @@ -213,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp) ret = rockchip_dp_pre_init(dp); if (ret < 0) { dev_err(dp->dev, "failed to pre init %d\n", ret); + clk_disable_unprepare(dp->pclk); return ret; } @@ -246,6 +307,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, void *data) { struct rockchip_dp_device *dp = dev_get_drvdata(dev); + const struct rockchip_dp_chip_data *dp_data; struct drm_device *drm_dev = data; int ret; @@ -256,10 +318,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, */ dev_set_drvdata(dev, NULL); + dp_data = of_device_get_match_data(dev); + if (!dp_data) + return -ENODEV; + ret = rockchip_dp_init(dp); if (ret < 0) return ret; + dp->data = dp_data; dp->drm_dev = drm_dev; ret = rockchip_dp_drm_create_encoder(dp); @@ -270,9 +337,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.encoder = &dp->encoder; - dp->plat_data.dev_type = RK3288_DP; + dp->plat_data.dev_type = dp->data->chip_type; dp->plat_data.power_on = rockchip_dp_poweron; dp->plat_data.power_off = rockchip_dp_powerdown; + dp->plat_data.get_modes = rockchip_dp_get_modes; return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); } @@ -292,38 +360,33 @@ static int rockchip_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *panel_node, *port, *endpoint; + struct drm_panel *panel = NULL; struct rockchip_dp_device *dp; - struct drm_panel *panel; port = of_graph_get_port_by_id(dev->of_node, 1); - if (!port) { - dev_err(dev, "can't find output port\n"); - return -EINVAL; - } - - endpoint = of_get_child_by_name(port, "endpoint"); - of_node_put(port); - if (!endpoint) { - dev_err(dev, "no output endpoint found\n"); - return -EINVAL; - } - - panel_node = of_graph_get_remote_port_parent(endpoint); - of_node_put(endpoint); - if (!panel_node) { - dev_err(dev, "no output node found\n"); - return -EINVAL; - } - - panel = of_drm_find_panel(panel_node); - if (!panel) { - DRM_ERROR("failed to find panel\n"); + if (port) { + endpoint = of_get_child_by_name(port, "endpoint"); + of_node_put(port); + if (!endpoint) { + dev_err(dev, "no output endpoint found\n"); + return -EINVAL; + } + + panel_node = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); + if (!panel_node) { + dev_err(dev, "no output node found\n"); + return -EINVAL; + } + + panel = of_drm_find_panel(panel_node); of_node_put(panel_node); - return -EPROBE_DEFER; + if (!panel) { + DRM_ERROR("failed to find panel\n"); + return -EPROBE_DEFER; + } } - of_node_put(panel_node); - dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); if (!dp) return -ENOMEM; @@ -349,24 +412,30 @@ static int rockchip_dp_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP -static int 
rockchip_dp_suspend(struct device *dev) -{ - return analogix_dp_suspend(dev); -} - -static int rockchip_dp_resume(struct device *dev) -{ - return analogix_dp_resume(dev); -} + .suspend = analogix_dp_suspend, + .resume_early = analogix_dp_resume, #endif +}; -static const struct dev_pm_ops rockchip_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume) +static const struct rockchip_dp_chip_data rk3399_edp = { + .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, + .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL), + .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL), + .chip_type = RK3399_EDP, +}; + +static const struct rockchip_dp_chip_data rk3288_dp = { + .lcdsel_grf_reg = RK3288_GRF_SOC_CON6, + .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL), + .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL), + .chip_type = RK3288_DP, }; static const struct of_device_id rockchip_dp_dt_ids[] = { - {.compatible = "rockchip,rk3288-dp",}, + {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp }, + {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp }, {} }; MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids); diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index dedc65b40f36..ca22e5ee89ca 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid( return mode_status; } -static struct drm_encoder *dw_mipi_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct dw_mipi_dsi *dsi = con_to_dsi(connector); - - return &dsi->encoder; -} - static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = { .get_modes = dw_mipi_dsi_connector_get_modes, .mode_valid = dw_mipi_dsi_mode_valid, - .best_encoder = dw_mipi_dsi_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 801110f65a63..0665fb915579 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -15,7 +15,6 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_encoder_slave.h> #include <drm/bridge/dw_hdmi.h> #include "rockchip_drm_drv.h" diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f8b4feb60b25..006260de9dbd 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -inno_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct inno_hdmi *hdmi = to_inno_hdmi(connector); - - return &hdmi->encoder; -} - static int inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY) @@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = { static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = { .get_modes = inno_hdmi_connector_get_modes, .mode_valid = inno_hdmi_connector_mode_valid, - .best_encoder = inno_hdmi_connector_best_encoder, }; static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a409d1f703cb..a822d49a255a 100644 --- 
a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -19,11 +19,13 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/component.h> +#include <linux/console.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" @@ -37,6 +39,7 @@ #define DRIVER_MINOR 0 static bool is_support_iommu = true; +static struct drm_driver rockchip_drm_driver; /* * Attach a (component) device to the shared drm dma mapping from master drm @@ -76,7 +79,7 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc, int pipe = drm_crtc_index(crtc); struct rockchip_drm_private *priv = crtc->dev->dev_private; - if (pipe > ROCKCHIP_MAX_CRTC) + if (pipe >= ROCKCHIP_MAX_CRTC) return -EINVAL; priv->crtc_funcs[pipe] = crtc_funcs; @@ -89,7 +92,7 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc) int pipe = drm_crtc_index(crtc); struct rockchip_drm_private *priv = crtc->dev->dev_private; - if (pipe > ROCKCHIP_MAX_CRTC) + if (pipe >= ROCKCHIP_MAX_CRTC) return; priv->crtc_funcs[pipe] = NULL; @@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, priv->crtc_funcs[pipe]->disable_vblank(crtc); } -static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) +static int rockchip_drm_bind(struct device *dev) { + struct drm_device *drm_dev; struct rockchip_drm_private *private; struct dma_iommu_mapping *mapping = NULL; - struct device *dev = drm_dev->dev; - struct drm_connector *connector; int ret; - private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); - if (!private) + drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); + if (!drm_dev) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, rockchip_drm_atomic_work); + dev_set_drvdata(dev, drm_dev); + + private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); + if (!private) { + ret = -ENOMEM; + goto err_free; + } drm_dev->dev_private = private; @@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_detach_device; - /* - * All components are now added, we can publish the connector sysfs - * entries to userspace. This will generate hotplug events and so - * userspace will expect to be able to access DRM at this point. 
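The connector-registration loop deleted here is made redundant by the new bind flow: drm_dev_register() is now called at the end of rockchip_drm_bind(), and the DRM core registers every connector itself at that point. Compressed to its skeleton, the resulting ordering looks roughly like this (example_bind is illustrative; error unwinding is omitted, see the real function for the full goto chain):

static int example_bind(struct device *dev)
{
        struct drm_device *drm_dev;

        drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
        if (!drm_dev)
                return -ENOMEM;

        /* mode config init, iommu mapping, component_bind_all(), fbdev ... */

        /*
         * Registering last publishes the device and all of its
         * connectors to userspace in a single step.
         */
        return drm_dev_register(drm_dev, 0);
}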
- */ - list_for_each_entry(connector, &drm_dev->mode_config.connector_list, - head) { - ret = drm_connector_register(connector); - if (ret) { - dev_err(drm_dev->dev, - "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", - connector->base.id, - connector->name, ret); - goto err_unbind; - } - } - /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_vblank_cleanup; + ret = drm_dev_register(drm_dev, 0); + if (ret) + goto err_fbdev_fini; + if (is_support_iommu) arm_iommu_release_mapping(mapping); return 0; +err_fbdev_fini: + rockchip_drm_fbdev_fini(drm_dev); err_vblank_cleanup: drm_vblank_cleanup(drm_dev); err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); -err_unbind: component_unbind_all(dev, drm_dev); err_detach_device: if (is_support_iommu) @@ -240,12 +235,14 @@ err_release_mapping: err_config_cleanup: drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; +err_free: + drm_dev_unref(drm_dev); return ret; } -static int rockchip_drm_unload(struct drm_device *drm_dev) +static void rockchip_drm_unbind(struct device *dev) { - struct device *dev = drm_dev->dev; + struct drm_device *drm_dev = dev_get_drvdata(dev); rockchip_drm_fbdev_fini(drm_dev); drm_vblank_cleanup(drm_dev); @@ -255,32 +252,12 @@ static int rockchip_drm_unload(struct drm_device *drm_dev) arm_iommu_detach_device(dev); drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; - - return 0; -} - -static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct rockchip_drm_private *priv = crtc->dev->dev_private; - int pipe = drm_crtc_index(crtc); - - if (pipe < ROCKCHIP_MAX_CRTC && - priv->crtc_funcs[pipe] && - priv->crtc_funcs[pipe]->cancel_pending_vblank) - priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); -} - -static void rockchip_drm_preclose(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); + drm_dev_unregister(drm_dev); + drm_dev_unref(drm_dev); + dev_set_drvdata(dev, NULL); } -void rockchip_drm_lastclose(struct drm_device *dev) +static void rockchip_drm_lastclose(struct drm_device *dev) { struct rockchip_drm_private *priv = dev->dev_private; @@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = { .release = drm_release, }; -const struct vm_operations_struct rockchip_drm_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = rockchip_drm_load, - .unload = rockchip_drm_unload, - .preclose = rockchip_drm_preclose, .lastclose = rockchip_drm_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rockchip_drm_crtc_enable_vblank, .disable_vblank = rockchip_drm_crtc_disable_vblank, - .gem_vm_ops = &rockchip_drm_vm_ops, - .gem_free_object = rockchip_gem_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .gem_free_object_unlocked = rockchip_gem_free_object, .dumb_create = rockchip_gem_dumb_create, .dumb_map_offset = rockchip_gem_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, @@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = { }; #ifdef CONFIG_PM_SLEEP -static int rockchip_drm_sys_suspend(struct device *dev) +void 
rockchip_drm_fb_suspend(struct drm_device *drm) { - struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector *connector; + struct rockchip_drm_private *priv = drm->dev_private; - if (!drm) - return 0; + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 1); + console_unlock(); +} - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - int old_dpms = connector->dpms; +void rockchip_drm_fb_resume(struct drm_device *drm) +{ + struct rockchip_drm_private *priv = drm->dev_private; - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 0); + console_unlock(); +} - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; +static int rockchip_drm_sys_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct rockchip_drm_private *priv = drm->dev_private; + + drm_kms_helper_poll_disable(drm); + rockchip_drm_fb_suspend(drm); + + priv->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(priv->state)) { + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); + return PTR_ERR(priv->state); } - drm_modeset_unlock_all(drm); return 0; } @@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev) static int rockchip_drm_sys_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector *connector; - enum drm_connector_status status; - bool changed = false; - - if (!drm) - return 0; + struct rockchip_drm_private *priv = drm->dev_private; - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - int desired_mode = connector->dpms; - - /* - * at suspend time, we save dpms to connector->dpms, - * restore the old_dpms, and at current time, the connector - * dpms status must be DRM_MODE_DPMS_OFF. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - - /* - * If the connector has been disconnected during suspend, - * disconnect it from the encoder and leave it off. We'll notify - * userspace at the end. 
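Everything this hunk deletes (the hand-rolled DPMS save/restore walk over the connector list) is subsumed by the atomic helpers the patch switches to: drm_atomic_helper_suspend() disables all outputs and returns the saved atomic state, and drm_atomic_helper_resume() replays it. A minimal sketch of the pattern; the example_* names are placeholders, and the real code stores the state pointer in rockchip_drm_private:

struct example_priv {
        struct drm_atomic_state *state; /* saved by suspend, consumed by resume */
};

static int example_suspend(struct drm_device *drm, struct example_priv *priv)
{
        drm_kms_helper_poll_disable(drm);

        priv->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(priv->state)) {
                drm_kms_helper_poll_enable(drm);  /* roll back on failure */
                return PTR_ERR(priv->state);
        }

        return 0;
}

static int example_resume(struct drm_device *drm, struct example_priv *priv)
{
        int ret = drm_atomic_helper_resume(drm, priv->state);

        drm_kms_helper_poll_enable(drm);
        return ret;
}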
- */ - if (desired_mode == DRM_MODE_DPMS_ON) { - status = connector->funcs->detect(connector, true); - if (status == connector_status_disconnected) { - connector->encoder = NULL; - connector->status = status; - changed = true; - continue; - } - } - if (connector->funcs->dpms) - connector->funcs->dpms(connector, desired_mode); - } - drm_modeset_unlock_all(drm); - - drm_helper_resume_force_mode(drm); - - if (changed) - drm_kms_helper_hotplug_event(drm); + drm_atomic_helper_resume(drm, priv->state); + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); return 0; } @@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev, } } -static int rockchip_drm_bind(struct device *dev) -{ - struct drm_device *drm; - int ret; - - drm = drm_dev_alloc(&rockchip_drm_driver, dev); - if (!drm) - return -ENOMEM; - - ret = drm_dev_register(drm, 0); - if (ret) - goto err_free; - - dev_set_drvdata(dev, drm); - - return 0; - -err_free: - drm_dev_unref(drm); - return ret; -} - -static void rockchip_drm_unbind(struct device *dev) -{ - struct drm_device *drm = dev_get_drvdata(dev); - - drm_dev_unregister(drm); - drm_dev_unref(drm); - dev_set_drvdata(dev, NULL); -} - static const struct component_master_ops rockchip_drm_ops = { .bind = rockchip_drm_bind, .unbind = rockchip_drm_unbind, @@ -518,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) is_support_iommu = false; } + of_node_put(iommu); component_match_add(dev, &match, compare_of, port->parent); of_node_put(port); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 56f43a364c7f..ea3932940061 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -40,14 +40,6 @@ struct rockchip_crtc_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); void (*wait_for_update)(struct drm_crtc *crtc); - void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv); -}; - -struct rockchip_atomic_commit { - struct work_struct work; - struct drm_atomic_state *state; - struct drm_device *dev; - struct mutex lock; }; struct rockchip_crtc_state { @@ -68,11 +60,9 @@ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; - - struct rockchip_atomic_commit commit; + struct drm_atomic_state *state; }; -void rockchip_drm_atomic_work(struct work_struct *work); int rockchip_register_crtc_funcs(struct drm_crtc *crtc, const struct rockchip_crtc_funcs *crtc_funcs); void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 755cfdba61cd..55c52734c52d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc_helper.h> #include "rockchip_drm_drv.h" +#include "rockchip_drm_fb.h" #include "rockchip_drm_gem.h" #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) @@ -43,14 +44,10 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) { struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); - struct drm_gem_object *obj; int i; - for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) { - obj = rockchip_fb->obj[i]; - if (obj) - drm_gem_object_unreference_unlocked(obj); - } + for (i 
= 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) + drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(rockchip_fb); @@ -228,87 +225,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat } static void -rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) +rockchip_atomic_commit_tail(struct drm_atomic_state *state) { - struct drm_atomic_state *state = commit->state; - struct drm_device *dev = commit->dev; + struct drm_device *dev = state->dev; - /* - * TODO: do fence wait here. - */ - - /* - * Rockchip crtc support runtime PM, can't update display planes - * when crtc is disabled. - * - * drm_atomic_helper_commit comments detail that: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * - * drm_atomic_helper_commit_modeset_enables(dev, state); - * - * drm_atomic_helper_commit_planes(dev, state, true); - * - * See the kerneldoc entries for these three functions for more details. - */ drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_hw_done(state); + rockchip_atomic_wait_for_complete(dev, state); drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); -} - -void rockchip_drm_atomic_work(struct work_struct *work) -{ - struct rockchip_atomic_commit *commit = container_of(work, - struct rockchip_atomic_commit, work); - - rockchip_atomic_commit_complete(commit); } -int rockchip_drm_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct rockchip_atomic_commit *commit = &private->commit; - int ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) - return ret; - - /* serialize outstanding nonblocking commits */ - mutex_lock(&commit->lock); - flush_work(&commit->work); - - drm_atomic_helper_swap_state(dev, state); - - commit->dev = dev; - commit->state = state; - - if (nonblock) - schedule_work(&commit->work); - else - rockchip_atomic_commit_complete(commit); - - mutex_unlock(&commit->lock); - - return 0; -} +static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = { + .atomic_commit_tail = rockchip_atomic_commit_tail, +}; static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { .fb_create = rockchip_user_fb_create, .output_poll_changed = rockchip_drm_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = rockchip_drm_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; struct drm_framebuffer * @@ -339,4 +281,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev) dev->mode_config.max_height = 4096; dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; + dev->mode_config.helper_private = &rockchip_mode_config_helpers; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index f261512bb4a0..207e01de6e32 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fbi->screen_size = rk_obj->base.size; fbi->fix.smem_len = rk_obj->base.size; - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", fb->width, fb->height, 
fb->depth, rk_obj->kvaddr, offset, size); @@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev) goto err_drm_fb_helper_fini; } - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); if (ret < 0) { dev_err(dev->dev, "Failed to set initial hw config - %d.\n", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 9c2d8a894093..059e902f872d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, &rk_obj->dma_addr, GFP_KERNEL, &rk_obj->dma_attrs); if (!rk_obj->kvaddr) { - DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size); + DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); return -ENOMEM; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 1c4d5b5a70a2..91305eb7d312 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -98,7 +98,9 @@ struct vop_win { const struct vop_win_data *data; struct vop *vop; - struct vop_plane_state state; + /* protected by dev->event_lock */ + bool enable; + dma_addr_t yrgb_mst; }; struct vop { @@ -112,6 +114,8 @@ struct vop { bool vsync_work_pending; struct completion dsp_hold_completion; struct completion wait_update_complete; + + /* protected by dev->event_lock */ struct drm_pending_vblank_event *event; const struct vop_data *data; @@ -324,9 +328,9 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, scl_cal_scale2(src_h, dst_h)); if (is_yuv) { VOP_SCL_SET(vop, win, scale_cbcr_x, - scl_cal_scale2(src_w, dst_w)); + scl_cal_scale2(cbcr_src_w, dst_w)); VOP_SCL_SET(vop, win, scale_cbcr_y, - scl_cal_scale2(src_h, dst_h)); + scl_cal_scale2(cbcr_src_h, dst_h)); } return; } @@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int ret; - if (vop->is_enabled) - return; - ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); @@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int i; - if (!vop->is_enabled) - return; + WARN_ON(vop->event); /* * We need to make sure that all windows are disabled before we @@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc) clk_disable(vop->aclk); clk_disable(vop->hclk); pm_runtime_put(vop->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } } static void vop_plane_destroy(struct drm_plane *plane) @@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, ret = drm_plane_helper_check_update(plane, crtc, state->fb, src, dest, &clip, + state->rotation, min_scale, max_scale, true, true, &visible); @@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, if (!old_state->crtc) return; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = false; + vop_win->yrgb_mst = 0; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, enable, 0); @@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane 
*plane, /* * can't update plane when vop is disabled. */ - if (!crtc) + if (WARN_ON(!crtc)) return; if (WARN_ON(!vop->is_enabled)) @@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane, offset += (src->y1 >> 16) * fb->pitches[0]; vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = true; + vop_win->yrgb_mst = vop_plane_state->yrgb_mst; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, vop_plane_state->format); @@ -779,7 +798,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = { .atomic_disable = vop_plane_atomic_disable, }; -void vop_atomic_plane_reset(struct drm_plane *plane) +static void vop_atomic_plane_reset(struct drm_plane *plane) { struct vop_plane_state *vop_plane_state = to_vop_plane_state(plane->state); @@ -796,7 +815,7 @@ void vop_atomic_plane_reset(struct drm_plane *plane) plane->state->plane = plane; } -struct drm_plane_state * +static struct drm_plane_state * vop_atomic_plane_duplicate_state(struct drm_plane *plane) { struct vop_plane_state *old_vop_plane_state; @@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc) WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); } -static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct drm_device *drm = crtc->dev; - struct vop *vop = to_vop(crtc); - struct drm_pending_vblank_event *e; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - e = vop->event; - if (e && e->base.file_priv == file_priv) { - vop->event = NULL; - - e->base.destroy(&e->base); - file_priv->event_space += sizeof(e->event); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static const struct rockchip_crtc_funcs private_crtc_funcs = { .enable_vblank = vop_crtc_enable_vblank, .disable_vblank = vop_crtc_disable_vblank, .wait_for_update = vop_crtc_wait_for_update, - .cancel_pending_vblank = vop_crtc_cancel_pending_vblank, }; static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, @@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc) u16 vact_end = vact_st + vdisplay; uint32_t val; + WARN_ON(vop->event); + vop_enable(crtc); /* * If dclk rate is zero, mean that scanout is stop, @@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, { struct vop *vop = to_vop(crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); + WARN_ON(vop->event); vop->event = crtc->state->event; crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { @@ -1048,6 +1052,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); } +static void vop_crtc_reset(struct drm_crtc *crtc) +{ + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc->state); + kfree(crtc->state); + + crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL); + if (crtc->state) + crtc->state->crtc = crtc; +} + static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) { struct rockchip_crtc_state *rockchip_state; @@ -1073,23 +1088,21 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .destroy = vop_crtc_destroy, - .reset = drm_atomic_helper_crtc_reset, + .reset = vop_crtc_reset, 
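The switch from drm_atomic_helper_crtc_reset to the driver-local vop_crtc_reset just above is forced by the subclassed state: the generic helper allocates only sizeof(struct drm_crtc_state), which under-allocates once the driver wraps the base state in struct rockchip_crtc_state. Annotated, the required shape is (a sketch mirroring vop_crtc_reset(), with the reasoning spelled out):

static void example_crtc_reset(struct drm_crtc *crtc)
{
        /* Free whatever state is installed, helper-allocated or not. */
        if (crtc->state)
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
        kfree(crtc->state);

        /* Allocate the full subclass, not just the base struct. */
        crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
        if (crtc->state)
                crtc->state->crtc = crtc;
}

The matching duplicate_state callback must allocate the same subclass, which vop_crtc_duplicate_state() in this patch already does.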
.atomic_duplicate_state = vop_crtc_duplicate_state, .atomic_destroy_state = vop_crtc_destroy_state, }; static bool vop_win_pending_is_complete(struct vop_win *vop_win) { - struct drm_plane *plane = &vop_win->base; - struct vop_plane_state *state = to_vop_plane_state(plane->state); dma_addr_t yrgb_mst; - if (!state->enable) + if (!vop_win->enable) return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0; yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); - return yrgb_mst == state->yrgb_mst; + return yrgb_mst == vop_win->yrgb_mst; } static void vop_handle_vblank(struct vop *vop) @@ -1104,15 +1117,16 @@ static void vop_handle_vblank(struct vop *vop) return; } + spin_lock_irqsave(&drm->event_lock, flags); if (vop->event) { - spin_lock_irqsave(&drm->event_lock, flags); drm_crtc_send_vblank_event(crtc, vop->event); drm_crtc_vblank_put(crtc); vop->event = NULL; - spin_unlock_irqrestore(&drm->event_lock, flags); } + spin_unlock_irqrestore(&drm->event_lock, flags); + if (!completion_done(&vop->wait_update_complete)) complete(&vop->wait_update_complete); } diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 3166b46a5893..919992cdc97e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -190,7 +190,7 @@ static const struct vop_data rk3288_vop = { .win_size = ARRAY_SIZE(rk3288_vop_win_data), }; -static const struct vop_scl_regs rk3066_win_scl = { +static const struct vop_scl_regs rk3036_win_scl = { .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), @@ -198,7 +198,7 @@ static const struct vop_scl_regs rk3066_win_scl = { }; static const struct vop_win_phy rk3036_win0_data = { - .scl = &rk3066_win_scl, + .scl = &rk3036_win_scl, .data_formats = formats_win_full, .nformats = ARRAY_SIZE(formats_win_full), .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), @@ -210,6 +210,7 @@ static const struct vop_win_phy rk3036_win0_data = { .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), + .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16), }; static const struct vop_win_phy rk3036_win1_data = { @@ -299,7 +300,7 @@ static int vop_remove(struct platform_device *pdev) return 0; } -struct platform_driver vop_platform_driver = { +static struct platform_driver vop_platform_driver = { .probe = vop_probe, .remove = vop_remove, .driver = { diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index 8d17d00ddb4b..c987c826daa3 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -6,7 +6,6 @@ config DRM_SHMOBILE select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_LCD_SUPPORT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER help diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 1e154fc779d5..6547b1db460a 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) scrtc->event = NULL; if (event) { drm_crtc_send_vblank_event(&scrtc->crtc, event); - drm_vblank_put(dev, 0); + drm_crtc_vblank_put(&scrtc->crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -467,7 
+467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, if (event) { event->pipe = 0; - drm_vblank_get(dev, 0); + drm_crtc_vblank_get(&scrtc->crtc); spin_lock_irqsave(&dev->event_lock, flags); scrtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 7700ff172079..f0492603ea88 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = { | DRIVER_PRIME, .load = shmob_drm_load, .unload = shmob_drm_unload, - .set_busid = drm_platform_set_busid, .irq_handler = shmob_drm_irq, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = shmob_drm_enable_vblank, .disable_vblank = shmob_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 93ad8a5704d1..03defda77766 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, struct sis_file_private *file_priv = file->driver_priv; struct sis_memblock *entry, *next; - if (!(file->minor->master && file->master->lock.hw_lock)) + if (!(dev->master && file->master->lock.hw_lock)) return; drm_legacy_idlelock_take(&file->master->lock); diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 5ad43a1bb260..494ab257f77c 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -7,5 +7,6 @@ config DRM_STI select DRM_KMS_CMA_HELPER select DRM_PANEL select FW_LOADER + select SND_SOC_HDMI_CODEC if SND_SOC help Choose this option to enable DRM on STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c index a516eb869f6f..2da7d6866d5d 100644 --- a/drivers/gpu/drm/sti/sti_awg_utils.c +++ b/drivers/gpu/drm/sti/sti_awg_utils.c @@ -6,6 +6,8 @@ #include "sti_awg_utils.h" +#define AWG_DELAY (-5) + #define AWG_OPCODE_OFFSET 10 #define AWG_MAX_ARG 0x3ff @@ -125,7 +127,7 @@ static int awg_generate_line_signal( val = timing->blanking_level; ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams); - val = timing->trailing_pixels - 1; + val = timing->trailing_pixels - 1 + AWG_DELAY; ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams); } diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 3d2fa3ab33df..134201ecc6fd 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = { }, }; +int sti_compositor_debufs_init(struct sti_compositor *compo, + struct drm_minor *minor) +{ + int ret = 0, i; + + for (i = 0; compo->vid[i]; i++) { + ret = vid_debugfs_init(compo->vid[i], minor); + if (ret) + return ret; + } + + for (i = 0; compo->mixer[i]; i++) { + ret = sti_mixer_debugfs_init(compo->mixer[i], minor); + if (ret) + return ret; + } + + return 0; +} + static int sti_compositor_bind(struct device *dev, struct device *master, void *data) @@ -234,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev) } /* Get reset resources */ - compo->rst_main = devm_reset_control_get(dev, "compo-main"); + compo->rst_main = 
devm_reset_control_get_shared(dev, "compo-main"); /* Take compo main out of reset */ if (!IS_ERR(compo->rst_main)) reset_control_deassert(compo->rst_main); - compo->rst_aux = devm_reset_control_get(dev, "compo-aux"); + compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux"); /* Take compo aux out of reset */ if (!IS_ERR(compo->rst_aux)) reset_control_deassert(compo->rst_aux); @@ -247,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev) vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); if (vtg_np) compo->vtg_main = of_vtg_find(vtg_np); + of_node_put(vtg_np); vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1); if (vtg_np) compo->vtg_aux = of_vtg_find(vtg_np); + of_node_put(vtg_np); platform_set_drvdata(pdev, compo); diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h index 1a4a73dab11e..24444ef42a98 100644 --- a/drivers/gpu/drm/sti/sti_compositor.h +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -81,4 +81,7 @@ struct sti_compositor { struct notifier_block vtg_vblank_nb; }; +int sti_compositor_debufs_init(struct sti_compositor *compo, + struct drm_minor *minor); + #endif diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index e04deedabd4a..c7d734dc3cf4 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -23,22 +23,11 @@ static void sti_crtc_enable(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); - struct device *dev = mixer->dev; - struct sti_compositor *compo = dev_get_drvdata(dev); DRM_DEBUG_DRIVER("\n"); mixer->status = STI_MIXER_READY; - /* Prepare and enable the compo IP clock */ - if (mixer->id == STI_MIXER_MAIN) { - if (clk_prepare_enable(compo->clk_compo_main)) - DRM_INFO("Failed to prepare/enable compo_main clk\n"); - } else { - if (clk_prepare_enable(compo->clk_compo_aux)) - DRM_INFO("Failed to prepare/enable compo_aux clk\n"); - } - drm_crtc_vblank_on(crtc); } @@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - struct clk *clk; + struct clk *compo_clk, *pix_clk; int rate = mode->clock * 1000; - int res; DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer), @@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - /* Set rate and prepare/enable pixel clock */ - if (mixer->id == STI_MIXER_MAIN) - clk = compo->clk_pix_main; - else - clk = compo->clk_pix_aux; + if (mixer->id == STI_MIXER_MAIN) { + compo_clk = compo->clk_compo_main; + pix_clk = compo->clk_pix_main; + } else { + compo_clk = compo->clk_compo_aux; + pix_clk = compo->clk_pix_aux; + } + + /* Prepare and enable the compo IP clock */ + if (clk_prepare_enable(compo_clk)) { + DRM_INFO("Failed to prepare/enable compositor clk\n"); + goto compo_error; + } - res = clk_set_rate(clk, rate); - if (res < 0) { + /* Set rate and prepare/enable pixel clock */ + if (clk_set_rate(pix_clk, rate) < 0) { DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate); - return -EINVAL; + goto pix_error; } - if (clk_prepare_enable(clk)) { + if (clk_prepare_enable(pix_clk)) { DRM_ERROR("Failed to prepare/enable pix clk\n"); - return -EINVAL; + goto pix_error; } sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ? 
compo->vtg_main : compo->vtg_aux, &crtc->mode); - res = sti_mixer_active_video_area(mixer, &crtc->mode); - if (res) { + if (sti_mixer_active_video_area(mixer, &crtc->mode)) { DRM_ERROR("Can't set active video area\n"); - return -EINVAL; + goto mixer_error; } - return res; + return 0; + +mixer_error: + clk_disable_unprepare(pix_clk); +pix_error: + clk_disable_unprepare(compo_clk); +compo_error: + return -EINVAL; } static void sti_crtc_disable(struct drm_crtc *crtc) @@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc) static void sti_crtc_mode_set_nofb(struct drm_crtc *crtc) { - sti_crtc_enable(crtc); sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); } @@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, - .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = sti_crtc_mode_set_nofb, - .mode_set_base = drm_helper_crtc_mode_set_base, .atomic_begin = sti_crtc_atomic_begin, .atomic_flush = sti_crtc_atomic_flush, }; @@ -331,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe) } } +static int sti_crtc_late_register(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct sti_compositor *compo = dev_get_drvdata(mixer->dev); + + if (drm_crtc_index(crtc) == 0) + return sti_compositor_debufs_init(compo, crtc->dev->primary); + + return 0; +} + static const struct drm_crtc_funcs sti_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -339,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .late_register = sti_crtc_late_register, }; bool sti_crtc_is_main(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 4e990299735c..a263bbba4119 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&cursor->plane), cursor->regs); @@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(CUR_AWE); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { .atomic_disable = sti_cursor_atomic_disable, }; +static void sti_cursor_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_cursor_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_cursor *cursor = to_sti_cursor(plane); + + return cursor_debugfs_init(cursor, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_cursor_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_cursor_destroy, + .set_property = sti_plane_set_property, + .reset = 
drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = sti_cursor_late_register, +}; + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, struct device *dev, int desc, void __iomem *baseaddr, @@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane, possible_crtcs, - &sti_plane_helpers_funcs, + &sti_cursor_plane_helpers_funcs, cursor_supported_formats, ARRAY_SIZE(cursor_supported_formats), DRM_PLANE_TYPE_CURSOR, NULL); @@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR); - if (cursor_debugfs_init(cursor, drm_dev->primary)) - DRM_ERROR("CURSOR debugfs setup failed\n"); - return &cursor->plane.drm_plane; err_plane: diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 872495e72294..96bd3d08b2d4 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data) struct drm_info_node *node = s->private; struct drm_device *dev = node->minor->dev; struct drm_plane *p; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; list_for_each_entry(p, &dev->mode_config.plane_list, head) { struct sti_plane *plane = to_sti_plane(p); @@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data) plane->fps_info.fips_str); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm, * the software side now. */ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) sti_atomic_schedule(private, state); @@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm, return 0; } +static void sti_output_poll_changed(struct drm_device *ddev) +{ + struct sti_private *private = ddev->dev_private; + + if (!ddev->mode_config.num_connector) + return; + + if (private->fbdev) { + drm_fbdev_cma_hotplug_event(private->fbdev); + return; + } + + private->fbdev = drm_fbdev_cma_init(ddev, 32, + ddev->mode_config.num_crtc, + ddev->mode_config.num_connector); + if (IS_ERR(private->fbdev)) + private->fbdev = NULL; +} + static const struct drm_mode_config_funcs sti_mode_config_funcs = { .fb_create = drm_fb_cma_create, + .output_poll_changed = sti_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = sti_atomic_commit, }; @@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev) dev->mode_config.funcs = &sti_mode_config_funcs; } -static int sti_load(struct drm_device *dev, unsigned long flags) -{ - struct sti_private *private; - int ret; - - private = kzalloc(sizeof(*private), GFP_KERNEL); - if (!private) { - DRM_ERROR("Failed to allocate private\n"); - return -ENOMEM; - } - dev->dev_private = (void *)private; - private->drm_dev = dev; - - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, sti_atomic_work); - - drm_mode_config_init(dev); - drm_kms_helper_poll_init(dev); - - sti_mode_config_init(dev); - - ret = component_bind_all(dev->dev, dev); - if (ret) { - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - kfree(private); - return ret; - } - - drm_mode_config_reset(dev); - - drm_helper_disable_unused_functions(dev); - 
drm_fbdev_cma_init(dev, 32, - dev->mode_config.num_crtc, - dev->mode_config.num_connector); - - return 0; -} - static const struct file_operations sti_driver_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = { static struct drm_driver sti_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = sti_load, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, @@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } +static int sti_init(struct drm_device *ddev) +{ + struct sti_private *private; + + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + ddev->dev_private = (void *)private; + dev_set_drvdata(ddev->dev, ddev); + private->drm_dev = ddev; + + mutex_init(&private->commit.lock); + INIT_WORK(&private->commit.work, sti_atomic_work); + + drm_mode_config_init(ddev); + + sti_mode_config_init(ddev); + + drm_kms_helper_poll_init(ddev); + + return 0; +} + +static void sti_cleanup(struct drm_device *ddev) +{ + struct sti_private *private = ddev->dev_private; + + if (private->fbdev) { + drm_fbdev_cma_fini(private->fbdev); + private->fbdev = NULL; + } + + drm_kms_helper_poll_fini(ddev); + drm_vblank_cleanup(ddev); + kfree(private); + ddev->dev_private = NULL; +} + static int sti_bind(struct device *dev) { - return drm_platform_init(&sti_driver, to_platform_device(dev)); + struct drm_device *ddev; + int ret; + + ddev = drm_dev_alloc(&sti_driver, dev); + if (!ddev) + return -ENOMEM; + + ddev->platformdev = to_platform_device(dev); + + ret = sti_init(ddev); + if (ret) + goto err_drm_dev_unref; + + ret = component_bind_all(ddev->dev, ddev); + if (ret) + goto err_cleanup; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_register; + + drm_mode_config_reset(ddev); + + return 0; + +err_register: + drm_mode_config_cleanup(ddev); +err_cleanup: + sti_cleanup(ddev); +err_drm_dev_unref: + drm_dev_unref(ddev); + return ret; } static void sti_unbind(struct device *dev) { - drm_put_dev(dev_get_drvdata(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); + + drm_dev_unregister(ddev); + sti_cleanup(ddev); + drm_dev_unref(ddev); } static const struct component_master_ops sti_ops = { diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h index 30ddc20841c3..78ebe5e30f53 100644 --- a/drivers/gpu/drm/sti/sti_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h @@ -24,6 +24,7 @@ struct sti_private { struct sti_compositor *compo; struct drm_property *plane_zorder_property; struct drm_device *drm_dev; + struct drm_fbdev_cma *fbdev; struct { struct drm_atomic_state *state; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 25f76632002c..00881eb4536e 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs); DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL); @@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void 
*data) dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector) -{ - struct sti_dvo_connector *dvo_connector - = to_sti_dvo_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return dvo_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = { .get_modes = sti_dvo_connector_get_modes, .mode_valid = sti_dvo_connector_mode_valid, - .best_encoder = sti_dvo_best_encoder, }; static enum drm_connector_status @@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } -static void sti_dvo_connector_destroy(struct drm_connector *connector) +static int sti_dvo_late_register(struct drm_connector *connector) { struct sti_dvo_connector *dvo_connector = to_sti_dvo_connector(connector); + struct sti_dvo *dvo = dvo_connector->dvo; - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(dvo_connector); + if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) { + DRM_ERROR("DVO debugfs setup failed\n"); + return -EINVAL; + } + + return 0; } static const struct drm_connector_funcs sti_dvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_dvo_connector_detect, - .destroy = sti_dvo_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = sti_dvo_late_register, }; static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev) @@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_dvo_connector_helper_funcs); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; - err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; } - if (dvo_debugfs_init(dvo, drm_dev->primary)) - DRM_ERROR("DVO debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: drm_bridge_remove(bridge); - drm_connector_cleanup(drm_connector); return -EINVAL; } @@ -602,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev) dvo->panel_node = of_parse_phandle(np, "sti,panel", 0); if (!dvo->panel_node) DRM_ERROR("No panel associated to the dvo output\n"); + of_node_put(dvo->panel_node); platform_set_drvdata(pdev, dvo); diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index ff33c38da197..bf63086a3dc8 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_plane *drm_plane = &gdp->plane.drm_plane; struct drm_crtc *crtc = drm_plane->crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", 
sti_plane_to_str(&gdp->plane), gdp->regs); @@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data) seq_printf(s, " Connected to DRM CRTC #%d (%s)\n", crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc))); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; unsigned int b; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; for (b = 0; b < GDP_NODE_NB_BANK; b++) { seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b); @@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) gdp_node_dump_node(s, gdp->node_list[b].btm_field); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = { .atomic_disable = sti_gdp_atomic_disable, }; +static void sti_gdp_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_gdp_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_gdp *gdp = to_sti_gdp(plane); + + return gdp_debugfs_init(gdp, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_gdp_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_gdp_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = sti_gdp_late_register, +}; + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, struct device *dev, int desc, void __iomem *baseaddr, @@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane, possible_crtcs, - &sti_plane_helpers_funcs, + &sti_gdp_plane_helpers_funcs, gdp_supported_formats, ARRAY_SIZE(gdp_supported_formats), type, NULL); @@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, sti_plane_init_property(&gdp->plane, type); - if (gdp_debugfs_init(gdp, drm_dev->primary)) - DRM_ERROR("GDP debugfs setup failed\n"); - return &gdp->plane.drm_plane; err: diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index f7d3464cdf09..8505569f75de 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hda *hda = (struct sti_hda *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs); DBGFS_DUMP(HDA_ANA_CFG); @@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector) -{ - struct sti_hda_connector *hda_connector - = 
to_sti_hda_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hda_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = { .get_modes = sti_hda_connector_get_modes, .mode_valid = sti_hda_connector_mode_valid, - .best_encoder = sti_hda_best_encoder, }; static enum drm_connector_status @@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static void sti_hda_connector_destroy(struct drm_connector *connector) +static int sti_hda_late_register(struct drm_connector *connector) { struct sti_hda_connector *hda_connector = to_sti_hda_connector(connector); + struct sti_hda *hda = hda_connector->hda; + + if (hda_debugfs_init(hda, hda->drm_dev->primary)) { + DRM_ERROR("HDA debugfs setup failed\n"); + return -EINVAL; + } - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(hda_connector); + return 0; } static const struct drm_connector_funcs sti_hda_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hda_connector_detect, - .destroy = sti_hda_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = sti_hda_late_register, }; static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev) @@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_hda_connector_helper_funcs); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; - err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); @@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) /* force to disable hd dacs at startup */ hda_enable_hd_dacs(hda, false); - if (hda_debugfs_init(hda, drm_dev->primary)) - DRM_ERROR("HDA debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: - drm_connector_cleanup(drm_connector); + drm_bridge_remove(bridge); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 6ef0715bd5b9..fedc17f98d9b 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -18,6 +18,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <sound/hdmi-codec.h> + #include "sti_hdmi.h" #include "sti_hdmi_tx3g4c28phy.h" #include "sti_hdmi_tx3g0c55phy.h" @@ -35,6 +37,8 @@ #define HDMI_DFLT_CHL0_DAT 0x0110 #define HDMI_DFLT_CHL1_DAT 0x0114 #define HDMI_DFLT_CHL2_DAT 0x0118 +#define HDMI_AUDIO_CFG 0x0200 +#define HDMI_SPDIF_FIFO_STATUS 0x0204 #define HDMI_SW_DI_1_HEAD_WORD 0x0210 #define HDMI_SW_DI_1_PKT_WORD0 0x0214 #define HDMI_SW_DI_1_PKT_WORD1 0x0218 @@ -44,6 +48,9 @@ #define HDMI_SW_DI_1_PKT_WORD5 0x0228 #define HDMI_SW_DI_1_PKT_WORD6 0x022C #define HDMI_SW_DI_CFG 0x0230 +#define HDMI_SAMPLE_FLAT_MASK 0x0244 +#define HDMI_AUDN 0x0400 +#define HDMI_AUD_CTS 0x0404 #define HDMI_SW_DI_2_HEAD_WORD 0x0600 #define HDMI_SW_DI_2_PKT_WORD0 0x0604 #define HDMI_SW_DI_2_PKT_WORD1 0x0608 @@ -103,6 +110,7 @@ #define HDMI_INT_DLL_LCK BIT(5) #define HDMI_INT_NEW_FRAME BIT(6) #define HDMI_INT_GENCTRL_PKT 
BIT(7) +#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8) #define HDMI_INT_SINK_TERM_PRESENT BIT(11) #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \ @@ -111,6 +119,7 @@ | HDMI_INT_GLOBAL) #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \ + | HDMI_INT_AUDIO_FIFO_XRUN \ | HDMI_INT_GENCTRL_PKT \ | HDMI_INT_NEW_FRAME \ | HDMI_INT_DLL_LCK \ @@ -121,6 +130,27 @@ #define HDMI_STA_SW_RST BIT(1) +#define HDMI_AUD_CFG_8CH BIT(0) +#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1) +#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2) +#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2)) +#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12) +#define HDMI_AUD_CFG_DTS_INVALID BIT(16) +#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21)) +#define HDMI_AUD_CFG_CH12_VALID BIT(28) +#define HDMI_AUD_CFG_CH34_VALID BIT(29) +#define HDMI_AUD_CFG_CH56_VALID BIT(30) +#define HDMI_AUD_CFG_CH78_VALID BIT(31) + +/* sample flat mask */ +#define HDMI_SAMPLE_FLAT_NO 0 +#define HDMI_SAMPLE_FLAT_SP0 BIT(0) +#define HDMI_SAMPLE_FLAT_SP1 BIT(1) +#define HDMI_SAMPLE_FLAT_SP2 BIT(2) +#define HDMI_SAMPLE_FLAT_SP3 BIT(3) +#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\ + HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3) + #define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) #define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) @@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg) wake_up_interruptible(&hdmi->wait_event); } + /* Audio FIFO underrun IRQ */ + if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN) + DRM_INFO("Warning: audio FIFO underrun occurs!"); + return IRQ_HANDLED; } @@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) */ static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi) { - struct hdmi_audio_infoframe infofame; + struct hdmi_audio_params *audio = &hdmi->audio; u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)]; - int ret; - - ret = hdmi_audio_infoframe_init(&infofame); - if (ret < 0) { - DRM_ERROR("failed to setup audio infoframe: %d\n", ret); - return ret; - } - - infofame.channels = 2; - - ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer)); - if (ret < 0) { - DRM_ERROR("failed to pack audio infoframe: %d\n", ret); - return ret; + int ret, val; + + DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__, + audio->enabled ? 
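/*
 * The rework of hdmi_audio_infoframe_config() just below covers both
 * directions: with audio enabled it packs the stored CEA audio infoframe
 * and writes it into the audio data-island slot; with audio disabled it
 * clears that slot's bits in HDMI_SW_DI_CFG, stopping transmission outright
 * rather than sending a zeroed frame.  For reference, building such a frame
 * with the generic <linux/hdmi.h> helpers looks like this (self-contained
 * sketch; callers size the buffer with the core HDMI_INFOFRAME_SIZE(AUDIO)
 * macro):
 */
#include <linux/hdmi.h>

static ssize_t my_pack_audio_infoframe(u8 *buf, size_t len)
{
	struct hdmi_audio_infoframe frame;
	int err;

	err = hdmi_audio_infoframe_init(&frame);  /* type/version/length */
	if (err < 0)
		return err;

	frame.channels = 2;

	/* returns the number of bytes packed (header + payload) or < 0 */
	return hdmi_audio_infoframe_pack(&frame, buf, len);
}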
"enable" : "disable"); + if (audio->enabled) { + /* set audio parameters stored*/ + ret = hdmi_audio_infoframe_pack(&audio->cea, buffer, + sizeof(buffer)); + if (ret < 0) { + DRM_ERROR("failed to pack audio infoframe: %d\n", ret); + return ret; + } + hdmi_infoframe_write_infopack(hdmi, buffer, ret); + } else { + /*disable audio info frame transmission */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, + HDMI_IFRAME_SLOT_AUDIO); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); } - hdmi_infoframe_write_infopack(hdmi, buffer, ret); - return 0; } @@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs); DBGFS_DUMP("\n", HDMI_CFG); @@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP("", HDMI_SW_DI_CFG); hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG)); + DBGFS_DUMP("\n", HDMI_AUDIO_CFG); + DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS); + DBGFS_DUMP("\n", HDMI_AUDN); + seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):", HDMI_IFRAME_SLOT_AVI); DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI); @@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) count = drm_add_edid_modes(connector, edid); drm_mode_connector_update_edid_property(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); return count; @@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector) -{ - struct sti_hdmi_connector *hdmi_connector - = to_sti_hdmi_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hdmi_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = { .get_modes = sti_hdmi_connector_get_modes, .mode_valid = sti_hdmi_connector_mode_valid, - .best_encoder = sti_hdmi_best_encoder, }; /* get detection status of display device */ @@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } -static void sti_hdmi_connector_destroy(struct drm_connector *connector) -{ - struct sti_hdmi_connector *hdmi_connector - = to_sti_hdmi_connector(connector); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(hdmi_connector); -} - static void sti_hdmi_connector_init_property(struct drm_device *drm_dev, struct drm_connector *connector) { @@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector, return -EINVAL; } +static int sti_hdmi_late_register(struct drm_connector *connector) +{ + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + struct sti_hdmi *hdmi = hdmi_connector->hdmi; + + if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) { + DRM_ERROR("HDMI debugfs setup failed\n"); + return -EINVAL; + } + + return 0; +} + static const struct drm_connector_funcs sti_hdmi_connector_funcs = { - .dpms = 
drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hdmi_connector_detect, - .destroy = sti_hdmi_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .set_property = drm_atomic_helper_connector_set_property, .atomic_set_property = sti_hdmi_connector_set_property, .atomic_get_property = sti_hdmi_connector_get_property, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = sti_hdmi_late_register, }; static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) @@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) return NULL; } +/** + * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent + * clocks. None-coherent clocks means that audio and TMDS clocks have not the + * same source (drifts between clocks). In this case assumption is that CTS is + * automatically calculated by hardware. + * + * @audio_fs: audio frame clock frequency in Hz + * + * Values computed are based on table described in HDMI specification 1.4b + * + * Returns n value. + */ +static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs) +{ + unsigned int n; + + switch (audio_fs) { + case 32000: + n = 4096; + break; + case 44100: + n = 6272; + break; + case 48000: + n = 6144; + break; + case 88200: + n = 6272 * 2; + break; + case 96000: + n = 6144 * 2; + break; + case 176400: + n = 6272 * 4; + break; + case 192000: + n = 6144 * 4; + break; + default: + /* Not pre-defined, recommended value: 128 * fs / 1000 */ + n = (audio_fs * 128) / 1000; + } + + return n; +} + +static int hdmi_audio_configure(struct sti_hdmi *hdmi, + struct hdmi_audio_params *params) +{ + int audio_cfg, n; + struct hdmi_audio_infoframe *info = ¶ms->cea; + + DRM_DEBUG_DRIVER("\n"); + + if (!hdmi->enabled) + return 0; + + /* update N parameter */ + n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate); + + DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n", + params->sample_rate, hdmi->mode.clock * 1000, n); + hdmi_write(hdmi, n, HDMI_AUDN); + + /* update HDMI registers according to configuration */ + audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | + HDMI_AUD_CFG_ONE_BIT_INVALID; + + switch (info->channels) { + case 8: + audio_cfg |= HDMI_AUD_CFG_CH78_VALID; + case 6: + audio_cfg |= HDMI_AUD_CFG_CH56_VALID; + case 4: + audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; + case 2: + audio_cfg |= HDMI_AUD_CFG_CH12_VALID; + break; + default: + DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n", + info->channels); + return -EINVAL; + } + + hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); + + hdmi->audio = *params; + + return hdmi_audio_infoframe_config(hdmi); +} + +static void hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + int audio_cfg; + + DRM_DEBUG_DRIVER("\n"); + + /* disable audio */ + audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | + HDMI_AUD_CFG_ONE_BIT_INVALID; + hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); + + hdmi->audio.enabled = 0; + hdmi_audio_infoframe_config(hdmi); +} + +static int hdmi_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + int ret; + struct hdmi_audio_params audio = { + .sample_width = params->sample_width, + 
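/*
 * Why sti_hdmi_audio_get_non_coherent_n() above needs a lookup table at
 * all: the HDMI 1.4b recommended default N = 128 * fs / 1000 is exact for
 * the 32/48 kHz families (128 * 32000 / 1000 = 4096, 128 * 48000 / 1000 =
 * 6144, and their doublings), but not for the 44.1 kHz family, where the
 * specification tabulates N = 6272 while the formula would yield 5644.8.
 * Hence the explicit table entries, with the formula kept only as the
 * fallback for non-standard rates.  Note also that the switch in
 * hdmi_audio_configure() falls through deliberately, so that e.g. the
 * 8-channel case accumulates the CH78, CH56, CH34 and CH12 valid bits of
 * every lower case as well.
 */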
.sample_rate = params->sample_rate, + .cea = params->cea, + }; + + DRM_DEBUG_DRIVER("\n"); + + if (!hdmi->enabled) + return 0; + + if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv || + daifmt->frame_clk_inv || daifmt->bit_clk_master || + daifmt->frame_clk_master) { + dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__, + daifmt->bit_clk_inv, daifmt->frame_clk_inv, + daifmt->bit_clk_master, + daifmt->frame_clk_master); + return -EINVAL; + } + + audio.enabled = 1; + + ret = hdmi_audio_configure(hdmi, &audio); + if (ret < 0) + return ret; + + return 0; +} + +static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + + DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable"); + + if (enable) + hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK); + else + hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK); + + return 0; +} + +static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + struct drm_connector *connector = hdmi->drm_connector; + + DRM_DEBUG_DRIVER("\n"); + memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops audio_codec_ops = { + .hw_params = hdmi_audio_hw_params, + .audio_shutdown = hdmi_audio_shutdown, + .digital_mute = hdmi_audio_digital_mute, + .get_eld = hdmi_audio_get_eld, +}; + +static int sti_hdmi_register_audio_driver(struct device *dev, + struct sti_hdmi *hdmi) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, + }; + + DRM_DEBUG_DRIVER("\n"); + + hdmi->audio.enabled = 0; + + hdmi->audio_pdev = platform_device_register_data( + dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev)); + + return 0; +} + static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) { struct sti_hdmi *hdmi = dev_get_drvdata(dev); @@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) /* initialise property */ sti_hdmi_connector_init_property(drm_dev, drm_connector); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; + hdmi->drm_connector = drm_connector; err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { @@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) goto err_sysfs; } + err = sti_hdmi_register_audio_driver(dev, hdmi); + if (err) { + DRM_ERROR("Failed to attach an audio codec\n"); + goto err_sysfs; + } + + /* Initialize audio infoframe */ + err = hdmi_audio_infoframe_init(&hdmi->audio.cea); + if (err) { + DRM_ERROR("Failed to init audio infoframe\n"); + goto err_sysfs; + } + /* Enable default interrupts */ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN); - if (hdmi_debugfs_init(hdmi, drm_dev->primary)) - DRM_ERROR("HDMI debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: - drm_connector_cleanup(drm_connector); - + drm_bridge_remove(bridge); + hdmi->drm_connector = NULL; return -EINVAL; } @@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev) struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev); i2c_put_adapter(hdmi->ddc_adapt); + if (hdmi->audio_pdev) + 
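/*
 * sti_hdmi_register_audio_driver() below is the standard way a DRM HDMI
 * driver exposes its audio path to ALSA: it spawns an ASoC hdmi-codec
 * platform device whose hdmi_codec_ops table (hw_params, audio_shutdown,
 * digital_mute, get_eld) points back into the video driver; get_eld simply
 * copies connector->eld, which drm_edid_to_eld() filled in during
 * get_modes().  Minimal shape of the registration, with a hypothetical
 * my_codec_ops; the returned pdev must be kept so remove() can unregister
 * it, as the sti_hdmi_remove() hunk further down does:
 */
#include <sound/hdmi-codec.h>

static struct platform_device *my_register_audio(struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.ops = &my_codec_ops,
		.i2s = 1,
		.max_i2s_channels = 8,
	};

	/* returns the codec platform device, or ERR_PTR() on failure */
	return platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
					     PLATFORM_DEVID_AUTO,
					     &codec_data, sizeof(codec_data));
}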
platform_device_unregister(hdmi->audio_pdev); component_del(&pdev->dev, &sti_hdmi_ops); return 0; diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index ef3a94583bbd..119bc3582ac7 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -23,6 +23,13 @@ struct hdmi_phy_ops { void (*stop)(struct sti_hdmi *hdmi); }; +struct hdmi_audio_params { + bool enabled; + unsigned int sample_width; + unsigned int sample_rate; + struct hdmi_audio_infoframe cea; +}; + /* values for the framing mode property */ enum sti_hdmi_modes { HDMI_MODE_HDMI, @@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = { * @ddc_adapt: i2c ddc adapter * @colorspace: current colorspace selected * @hdmi_mode: select framing for HDMI or DVI + * @audio_pdev: ASoC hdmi-codec platform device + * @audio: hdmi audio parameters. + * @drm_connector: hdmi connector */ struct sti_hdmi { struct device dev; @@ -89,6 +99,9 @@ struct sti_hdmi { struct i2c_adapter *ddc_adapt; enum hdmi_colorspace colorspace; enum sti_hdmi_modes hdmi_mode; + struct platform_device *audio_pdev; + struct hdmi_audio_params audio; + struct drm_connector *drm_connector; }; u32 hdmi_read(struct sti_hdmi *hdmi, int offset); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 1edec29b9e45..b03232247966 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; int cmd, cmd_offset, infoxp70; void *virt; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&hqvdp->plane), hqvdp->regs); @@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { .atomic_disable = sti_hqvdp_atomic_disable, }; +static void sti_hqvdp_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_hqvdp_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); + + return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_hqvdp_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = sti_hqvdp_late_register, +}; + static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, struct device *dev, int desc) { @@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, sti_hqvdp_init(hqvdp); res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, - &sti_plane_helpers_funcs, + &sti_hqvdp_plane_helpers_funcs, hqvdp_supported_formats, ARRAY_SIZE(hqvdp_supported_formats), DRM_PLANE_TYPE_OVERLAY, NULL); @@ -1266,9 +1286,6 @@ static struct drm_plane 
*sti_hqvdp_create(struct drm_device *drm_dev, sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); - if (hqvdp_debugfs_init(hqvdp, drm_dev->primary)) - DRM_ERROR("HQVDP debugfs setup failed\n"); - return &hqvdp->plane.drm_plane; } @@ -1346,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev) vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); if (vtg_np) hqvdp->vtg = of_vtg_find(vtg_np); + of_node_put(vtg_np); platform_set_drvdata(pdev, hqvdp); diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index aed7801b51f7..1885c7ab5a8b 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_mixer_to_str(mixer), mixer->regs); @@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = { { "mixer_aux", mixer_dbg_show, 0, NULL }, }; -static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) +int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) { unsigned int i; struct drm_info_list *mixer_debugfs_files; @@ -400,8 +393,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev, DRM_DEBUG_DRIVER("%s created. Regs=%p\n", sti_mixer_to_str(mixer), mixer->regs); - if (mixer_debugfs_init(mixer, drm_dev->primary)) - DRM_ERROR("MIXER debugfs setup failed\n"); - return mixer; } diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index 6f35fc086873..830a3c42d886 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer, void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); +int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); + /* depth in Cross-bar control = z order */ #define GAM_MIXER_NB_DEPTH_LEVEL 6 diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c index f10c98d3f012..0cf3335ef37c 100644 --- a/drivers/gpu/drm/sti/sti_plane.c +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane) #define STI_FPS_INTERVAL_MS 3000 -static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs) -{ - struct timespec tmp_ts = timespec_sub(lhs, rhs); - u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts); - - do_div(tmp_ns, NSEC_PER_MSEC); - - return (u32)tmp_ns; -} - void sti_plane_update_fps(struct sti_plane *plane, bool new_frame, bool new_field) { - struct timespec now; + ktime_t now; struct sti_fps_info *fps; int fpks, fipks, ms_since_last, num_frames, num_fields; - getrawmonotonic(&now); + now = ktime_get(); /* Compute number of frame updates */ fps = &plane->fps_info; @@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane, return; fps->curr_frame_counter++; - ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp); + ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp)); num_frames = fps->curr_frame_counter - fps->last_frame_counter; 
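/*
 * The sti_plane hunk in progress here swaps the open-coded timespec delta
 * (getrawmonotonic() + timespec_sub() + do_div()) for ktime_t: ktime_get()
 * returns a monotonic scalar, so a millisecond delta becomes one
 * subtraction plus ktime_to_ms(), with no 64-bit division helper needed.
 * The new arithmetic, as a self-contained helper:
 */
#include <linux/ktime.h>

static inline s64 my_ms_since(ktime_t last)
{
	/* CLOCK_MONOTONIC based: unaffected by settimeofday() jumps */
	return ktime_to_ms(ktime_sub(ktime_get(), last));
}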
if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS) @@ -106,17 +96,9 @@ void sti_plane_update_fps(struct sti_plane *plane, plane->fps_info.fips_str); } -static void sti_plane_destroy(struct drm_plane *drm_plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_helper_disable(drm_plane); - drm_plane_cleanup(drm_plane); -} - -static int sti_plane_set_property(struct drm_plane *drm_plane, - struct drm_property *property, - uint64_t val) +int sti_plane_set_property(struct drm_plane *drm_plane, + struct drm_property *property, + uint64_t val) { struct drm_device *dev = drm_plane->dev; struct sti_private *private = dev->dev_private; @@ -170,13 +152,3 @@ void sti_plane_init_property(struct sti_plane *plane, plane->drm_plane.base.id, sti_plane_to_str(plane), plane->zorder); } - -struct drm_plane_funcs sti_plane_helpers_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_plane_destroy, - .set_property = sti_plane_set_property, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -}; diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h index c50a3b9f5d37..e0ea1dd3bb88 100644 --- a/drivers/gpu/drm/sti/sti_plane.h +++ b/drivers/gpu/drm/sti/sti_plane.h @@ -11,8 +11,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> -extern struct drm_plane_funcs sti_plane_helpers_funcs; - #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) #define STI_PLANE_TYPE_SHIFT 8 @@ -57,7 +55,7 @@ struct sti_fps_info { unsigned int last_frame_counter; unsigned int curr_field_counter; unsigned int last_field_counter; - struct timespec last_timestamp; + ktime_t last_timestamp; char fps_str[FPS_LENGTH]; char fips_str[FPS_LENGTH]; }; @@ -83,6 +81,11 @@ const char *sti_plane_to_str(struct sti_plane *plane); void sti_plane_update_fps(struct sti_plane *plane, bool new_frame, bool new_field); + +int sti_plane_set_property(struct drm_plane *drm_plane, + struct drm_property *property, + uint64_t val); + void sti_plane_init_property(struct sti_plane *plane, enum drm_plane_type type); #endif diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index f983db5a59da..e25995b35715 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -112,6 +112,7 @@ struct sti_tvout { struct drm_encoder *hdmi; struct drm_encoder *hda; struct drm_encoder *dvo; + bool debugfs_registered; }; struct sti_tvout_encoder { @@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_crtc *crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs); @@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder) kfree(sti_encoder); } +static int sti_tvout_late_register(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + int ret; + + if (tvout->debugfs_registered) + return 0; + + ret = tvout_debugfs_init(tvout, encoder->dev->primary); + if (ret) + 
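/*
 * The debugfs_registered guard being added here exists because tvout owns
 * three encoders (HDMI, HDA and DVO, see the struct sti_tvout fields above)
 * that share this funcs table and therefore this hook: the first encoder to
 * be registered creates the shared debugfs entries, the others return
 * early, and early_unregister below tears them down exactly once.
 */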
return ret; + + tvout->debugfs_registered = true; + return 0; +} + +static void sti_tvout_early_unregister(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + if (!tvout->debugfs_registered) + return; + + tvout_debugfs_exit(tvout, encoder->dev->primary); + tvout->debugfs_registered = false; +} + static const struct drm_encoder_funcs sti_tvout_encoder_funcs = { .destroy = sti_tvout_encoder_destroy, + .late_register = sti_tvout_late_register, + .early_unregister = sti_tvout_early_unregister, }; static void sti_dvo_encoder_enable(struct drm_encoder *encoder) @@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data) sti_tvout_create_encoders(drm_dev, tvout); - if (tvout_debugfs_init(tvout, drm_dev->primary)) - DRM_ERROR("TVOUT debugfs setup failed\n"); - return 0; } @@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master, void *data) { struct sti_tvout *tvout = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; sti_tvout_destroy_encoders(tvout); - - tvout_debugfs_exit(tvout, drm_dev->primary); } static const struct component_ops sti_tvout_ops = { diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 523ed19f5ac6..47634a0251fc 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_vid *vid = (struct sti_vid *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs); @@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg) DBGFS_DUMP(VID_CSAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = { { "vid", vid_dbg_show, 0, NULL }, }; -static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) +int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) { unsigned int i; @@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, sti_vid_init(vid); - if (vid_debugfs_init(vid, drm_dev->primary)) - DRM_ERROR("VID debugfs setup failed\n"); - return vid; } diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h index 6c842344f3d8..fdc90f922a05 100644 --- a/drivers/gpu/drm/sti/sti_vid.h +++ b/drivers/gpu/drm/sti/sti_vid.h @@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid); struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, int id, void __iomem *baseaddr); +int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); + #endif diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 6bf4ce466d20..0bdc385eec17 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -65,7 +65,7 @@ #define HDMI_DELAY (5) /* Delay introduced by the DVO in nb of pixel */ -#define DVO_DELAY (2) +#define DVO_DELAY (7) /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ #define AWG_DELAY_HD (-9) @@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev) np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); if (np) { vtg->slave = of_vtg_find(np); + of_node_put(np); if (!vtg->slave) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c 
b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 41cacecbea9a..4a192210574f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct sun4i_drv *drv = scrtc->drv; + struct drm_pending_vblank_event *event = crtc->state->event; DRM_DEBUG_DRIVER("Committing plane changes\n"); sun4i_backend_commit(drv->backend); + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void sun4i_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 937394cbc241..7092daaf6c43 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -75,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = { .dumb_create = drm_gem_cma_dumb_create, .dumb_destroy = drm_gem_dumb_destroy, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, /* PRIME Operations */ @@ -123,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev) if (!drm) return -ENOMEM; - ret = drm_dev_set_unique(drm, dev_name(drm->dev)); - if (ret) - goto free_drm; - drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); if (!drv) { ret = -ENOMEM; @@ -178,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev) if (ret) goto free_drm; - ret = drm_connector_register_all(drm); - if (ret) - goto unregister_drm; - return 0; -unregister_drm: - drm_dev_unregister(drm); free_drm: drm_dev_unref(drm); return ret; @@ -195,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - drm_connector_unregister_all(drm); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); sun4i_framebuffer_free(drm); diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c index a0b30c216a5b..70688febd7ac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c +++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c @@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm) { struct sun4i_drv *drv = drm->dev_private; - if (drv->fbdev) - drm_fbdev_cma_hotplug_event(drv->fbdev); + drm_fbdev_cma_hotplug_event(drv->fbdev); } static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = { diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index aaffe9e64ffb..f5bbac6efb4c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -104,19 +104,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_rgb_best_encoder(struct drm_connector *connector) -{ - struct sun4i_rgb *rgb = - drm_connector_to_sun4i_rgb(connector); - - return &rgb->encoder; -} - static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { .get_modes = sun4i_rgb_get_modes, .mode_valid = sun4i_rgb_mode_valid, - .best_encoder = sun4i_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index bc047f923508..b84147896294 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ 
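/*
 * The sun4i_crtc_atomic_flush() hunk above is the canonical atomic pattern
 * for completing page-flip events: detach the event from the CRTC state,
 * then, under dev->event_lock, either arm it on the next vblank when a
 * vblank reference can be taken, or send it immediately so userspace is
 * never left waiting while vblanks are off.  Generic shape:
 */
static void my_crtc_atomic_flush(struct drm_crtc *crtc,
				 struct drm_crtc_state *old_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	/* ... commit the hardware state ... */

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (drm_crtc_vblank_get(crtc) == 0)
			/* the reference is consumed when the event fires */
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}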
b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_tv_comp_best_encoder(struct drm_connector *connector) -{ - struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector); - - return &tv->encoder; -} - static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { .get_modes = sun4i_tv_comp_get_modes, .mode_valid = sun4i_tv_comp_mode_valid, - .best_encoder = sun4i_tv_comp_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 39940f5b7c91..8495bd01b544 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/iommu.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> #include <soc/tegra/pmc.h> @@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) tegra_dc_stats_reset(&dc->stats); drm_crtc_vblank_off(crtc); + + pm_runtime_put_sync(dc->dev); } static void tegra_crtc_enable(struct drm_crtc *crtc) @@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc) struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; + pm_runtime_get_sync(dc->dev); + + /* initialize display controller */ + if (dc->syncpt) { + u32 syncpt = host1x_syncpt_id(dc->syncpt); + + value = SYNCPT_CNTRL_NO_STALL; + tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); + + value = SYNCPT_VSYNC_ENABLE | syncpt; + tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); + } + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); + + /* initialize timer */ + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | + WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); + + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | + WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); + + value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_MASK); + + if (dc->soc->supports_border_color) + tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + + /* apply PLL and pixel clock changes */ tegra_dc_commit_state(dc, state); /* program display mode */ @@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client) struct tegra_drm *tegra = drm->dev_private; struct drm_plane *primary = NULL; struct drm_plane *cursor = NULL; - u32 value; int err; dc->syncpt = host1x_syncpt_request(dc->dev, flags); @@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client) goto cleanup; } - /* initialize display controller */ - if (dc->syncpt) { - u32 syncpt = host1x_syncpt_id(dc->syncpt); - - value = SYNCPT_CNTRL_NO_STALL; - tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - - value = SYNCPT_VSYNC_ENABLE | syncpt; - tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); - } - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | 
WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); - - /* initialize timer */ - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | - WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); - - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | - WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); - - if (dc->soc->supports_border_color) - tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); - - tegra_dc_stats_reset(&dc->stats); - return 0; cleanup: @@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev) return PTR_ERR(dc->rst); } + reset_control_assert(dc->rst); + if (dc->soc->has_powergate) { if (dc->pipe == 0) dc->powergate = TEGRA_POWERGATE_DIS; else dc->powergate = TEGRA_POWERGATE_DISB; - err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, - dc->rst); - if (err < 0) { - dev_err(&pdev->dev, "failed to power partition: %d\n", - err); - return err; - } - } else { - err = clk_prepare_enable(dc->clk); - if (err < 0) { - dev_err(&pdev->dev, "failed to enable clock: %d\n", - err); - return err; - } - - err = reset_control_deassert(dc->rst); - if (err < 0) { - dev_err(&pdev->dev, "failed to deassert reset: %d\n", - err); - return err; - } + tegra_powergate_power_off(dc->powergate); } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev) return -ENXIO; } - INIT_LIST_HEAD(&dc->client.list); - dc->client.ops = &dc_client_ops; - dc->client.dev = &pdev->dev; - err = tegra_dc_rgb_probe(dc); if (err < 0 && err != -ENODEV) { dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err); return err; } + platform_set_drvdata(pdev, dc); + pm_runtime_enable(&pdev->dev); + + INIT_LIST_HEAD(&dc->client.list); + dc->client.ops = &dc_client_ops; + dc->client.dev = &pdev->dev; + err = host1x_client_register(&dc->client); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", @@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; } - platform_set_drvdata(pdev, dc); - return 0; } @@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev) return err; } - reset_control_assert(dc->rst); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM +static int tegra_dc_suspend(struct device *dev) +{ + struct tegra_dc *dc = dev_get_drvdata(dev); + int err; + + err = reset_control_assert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } if (dc->soc->has_powergate) tegra_powergate_power_off(dc->powergate); @@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev) return 0; } +static int tegra_dc_resume(struct device *dev) +{ + struct tegra_dc *dc = dev_get_drvdata(dev); + int err; + + if (dc->soc->has_powergate) { + err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, + dc->rst); + if (err < 0) { + dev_err(dev, "failed to power partition: %d\n", 
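/*
 * Taken together, the tegra-dc hunks around here convert the driver to
 * runtime PM: probe now leaves the controller reset and power-gated,
 * tegra_crtc_enable() takes a pm_runtime_get_sync() reference (which lands
 * in tegra_dc_resume() to ungate the partition, enable the clock and
 * release reset), and tegra_crtc_disable() drops it again.  The register
 * initialization moved from tegra_dc_init() into the enable path because
 * the registers lose their contents whenever the partition is gated.
 * Skeleton of the pairing, with a hypothetical my_dc wrapper:
 */
static void my_crtc_enable(struct drm_crtc *crtc)
{
	struct my_dc *dc = to_my_dc(crtc);

	pm_runtime_get_sync(dc->dev);	/* ends up in the resume callback */
	/* (re)program every register: state is lost while power-gated */
}

static void my_crtc_disable(struct drm_crtc *crtc)
{
	struct my_dc *dc = to_my_dc(crtc);

	/* ... stop scanout ... */
	pm_runtime_put_sync(dc->dev);	/* suspend callback once idle */
}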
err); + return err; + } + } else { + err = clk_prepare_enable(dc->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + return err; + } + + err = reset_control_deassert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + return err; + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops tegra_dc_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL) +}; + struct platform_driver tegra_dc_driver = { .driver = { .name = "tegra-dc", .of_match_table = tegra_dc_of_match, + .pm = &tegra_dc_pm_ops, }, .probe = tegra_dc_probe, .remove = tegra_dc_remove, diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index b24a0f14821a..059f409556d5 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -12,6 +12,9 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of_gpio.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/regulator/consumer.h> @@ -44,6 +47,11 @@ struct tegra_dpaux { struct completion complete; struct work_struct work; struct list_head list; + +#ifdef CONFIG_GENERIC_PINCONF + struct pinctrl_dev *pinctrl; + struct pinctrl_desc desc; +#endif }; static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux) @@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data) return ret; } +enum tegra_dpaux_functions { + DPAUX_PADCTL_FUNC_AUX, + DPAUX_PADCTL_FUNC_I2C, + DPAUX_PADCTL_FUNC_OFF, +}; + +static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux) +{ + u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + + value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); +} + +static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux) +{ + u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); + + value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; + + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); +} + +static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) +{ + u32 value; + + switch (function) { + case DPAUX_PADCTL_FUNC_AUX: + value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | + DPAUX_HYBRID_PADCTL_MODE_AUX; + break; + + case DPAUX_PADCTL_FUNC_I2C: + value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | + DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | + DPAUX_HYBRID_PADCTL_MODE_I2C; + break; + + case DPAUX_PADCTL_FUNC_OFF: + tegra_dpaux_pad_power_down(dpaux); + return 0; + + default: + return -ENOTSUPP; + } + + tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL); + tegra_dpaux_pad_power_up(dpaux); + + return 0; +} + +#ifdef CONFIG_GENERIC_PINCONF +static const struct pinctrl_pin_desc tegra_dpaux_pins[] = { + PINCTRL_PIN(0, "DP_AUX_CHx_P"), + PINCTRL_PIN(1, "DP_AUX_CHx_N"), +}; + +static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 }; + +static const char * const tegra_dpaux_groups[] = { + "dpaux-io", +}; + +static const char * const tegra_dpaux_functions[] = { + "aux", + "i2c", + "off", +}; + +static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl) +{ + return ARRAY_SIZE(tegra_dpaux_groups); +} + +static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl, + unsigned int group) +{ + return tegra_dpaux_groups[group]; +} + +static int tegra_dpaux_get_group_pins(struct pinctrl_dev 
*pinctrl, + unsigned group, const unsigned **pins, + unsigned *num_pins) +{ + *pins = tegra_dpaux_pin_numbers; + *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers); + + return 0; +} + +static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = { + .get_groups_count = tegra_dpaux_get_groups_count, + .get_group_name = tegra_dpaux_get_group_name, + .get_group_pins = tegra_dpaux_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_group, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl) +{ + return ARRAY_SIZE(tegra_dpaux_functions); +} + +static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl, + unsigned int function) +{ + return tegra_dpaux_functions[function]; +} + +static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl, + unsigned int function, + const char * const **groups, + unsigned * const num_groups) +{ + *num_groups = ARRAY_SIZE(tegra_dpaux_groups); + *groups = tegra_dpaux_groups; + + return 0; +} + +static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl, + unsigned int function, unsigned int group) +{ + struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl); + + return tegra_dpaux_pad_config(dpaux, function); +} + +static const struct pinmux_ops tegra_dpaux_pinmux_ops = { + .get_functions_count = tegra_dpaux_get_functions_count, + .get_function_name = tegra_dpaux_get_function_name, + .get_function_groups = tegra_dpaux_get_function_groups, + .set_mux = tegra_dpaux_set_mux, +}; +#endif + static int tegra_dpaux_probe(struct platform_device *pdev) { struct tegra_dpaux *dpaux; @@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return -ENXIO; } - dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); - if (IS_ERR(dpaux->rst)) { - dev_err(&pdev->dev, "failed to get reset control: %ld\n", - PTR_ERR(dpaux->rst)); - return PTR_ERR(dpaux->rst); + if (!pdev->dev.pm_domain) { + dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); + if (IS_ERR(dpaux->rst)) { + dev_err(&pdev->dev, + "failed to get reset control: %ld\n", + PTR_ERR(dpaux->rst)); + return PTR_ERR(dpaux->rst); + } } dpaux->clk = devm_clk_get(&pdev->dev, NULL); @@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return err; } - reset_control_deassert(dpaux->rst); + if (dpaux->rst) + reset_control_deassert(dpaux->rst); dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent"); if (IS_ERR(dpaux->clk_parent)) { dev_err(&pdev->dev, "failed to get parent clock: %ld\n", PTR_ERR(dpaux->clk_parent)); - return PTR_ERR(dpaux->clk_parent); + err = PTR_ERR(dpaux->clk_parent); + goto assert_reset; } err = clk_prepare_enable(dpaux->clk_parent); if (err < 0) { dev_err(&pdev->dev, "failed to enable parent clock: %d\n", err); - return err; + goto assert_reset; } err = clk_set_rate(dpaux->clk_parent, 270000000); if (err < 0) { dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n", err); - return err; + goto disable_parent_clk; } dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(dpaux->vdd)) { dev_err(&pdev->dev, "failed to get VDD supply: %ld\n", PTR_ERR(dpaux->vdd)); - return PTR_ERR(dpaux->vdd); + err = PTR_ERR(dpaux->vdd); + goto disable_parent_clk; } err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0, @@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (err < 0) { dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n", dpaux->irq, err); - return err; + goto disable_parent_clk; } 
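/*
 * The probe() rework in this dpaux hunk replaces bare "return err"
 * statements with a goto-based unwind chain (see the disable_parent_clk and
 * assert_reset labels added at the end of the function), the kernel idiom
 * for releasing earlier acquisitions in reverse order on failure.  The
 * shape, with hypothetical clocks clk_a and clk_b:
 */
static int my_probe(struct platform_device *pdev)
{
	int err;

	err = clk_prepare_enable(clk_a);
	if (err < 0)
		return err;			/* nothing to undo yet */

	err = clk_prepare_enable(clk_b);
	if (err < 0)
		goto disable_a;

	err = my_setup_hw(pdev);		/* hypothetical final step */
	if (err < 0)
		goto disable_b;

	return 0;

disable_b:					/* unwind in reverse order */
	clk_disable_unprepare(clk_b);
disable_a:
	clk_disable_unprepare(clk_a);
	return err;
}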
disable_irq(dpaux->irq); @@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) err = drm_dp_aux_register(&dpaux->aux); if (err < 0) - return err; + goto disable_parent_clk; /* * Assume that by default the DPAUX/I2C pads will be used for HDMI, @@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev) * is no possibility to perform the I2C mode configuration in the * HDMI path. */ - value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); - value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); - - value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL); - value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | - DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | - DPAUX_HYBRID_PADCTL_MODE_I2C; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL); + err = tegra_dpaux_pad_config(dpaux, DPAUX_HYBRID_PADCTL_MODE_I2C); + if (err < 0) + return err; +#ifdef CONFIG_GENERIC_PINCONF + dpaux->desc.name = dev_name(&pdev->dev); + dpaux->desc.pins = tegra_dpaux_pins; + dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins); + dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops; + dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops; + dpaux->desc.owner = THIS_MODULE; + + dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux); + if (!dpaux->pinctrl) { + dev_err(&pdev->dev, "failed to register pincontrol\n"); + return -ENODEV; + } +#endif /* enable and clear all interrupts */ value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT | DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT; @@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dpaux); return 0; + +disable_parent_clk: + clk_disable_unprepare(dpaux->clk_parent); +assert_reset: + if (dpaux->rst) + reset_control_assert(dpaux->rst); + + clk_disable_unprepare(dpaux->clk); + + return err; } static int tegra_dpaux_remove(struct platform_device *pdev) { struct tegra_dpaux *dpaux = platform_get_drvdata(pdev); - u32 value; /* make sure pads are powered down when not in use */ - value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); - value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); + tegra_dpaux_pad_power_down(dpaux); drm_dp_aux_unregister(&dpaux->aux); @@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev) cancel_work_sync(&dpaux->work); clk_disable_unprepare(dpaux->clk_parent); - reset_control_assert(dpaux->rst); + + if (dpaux->rst) + reset_control_assert(dpaux->rst); + clk_disable_unprepare(dpaux->clk); return 0; @@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux) int drm_dp_aux_enable(struct drm_dp_aux *aux) { struct tegra_dpaux *dpaux = to_dpaux(aux); - u32 value; - - value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | - DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | - DPAUX_HYBRID_PADCTL_MODE_AUX; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL); - - value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); - value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); - return 0; + return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX); } int drm_dp_aux_disable(struct drm_dp_aux *aux) { struct tegra_dpaux *dpaux = to_dpaux(aux); - u32 value; - value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); - value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; - tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); + 
tegra_dpaux_pad_power_down(dpaux); return 0; } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index b59c3bf0df44..755264d9db22 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra, */ drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_planes(drm, state, false); drm_atomic_helper_commit_modeset_enables(drm, state); + drm_atomic_helper_commit_planes(drm, state, true); drm_atomic_helper_wait_for_vblanks(drm, state); @@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm, * the software side now. */ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) tegra_atomic_schedule(tegra, state); diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index f52d6cb24ff5..0ddcce1b420d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output); void tegra_output_exit(struct tegra_output *output); int tegra_output_connector_get_modes(struct drm_connector *connector); -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector); enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force); void tegra_output_connector_destroy(struct drm_connector *connector); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index d1239ebc190f..3d228ad90e0f 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -13,6 +13,7 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/regulator/consumer.h> @@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi) tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL); } +static int tegra_dsi_pad_enable(struct tegra_dsi *dsi) +{ + u32 value; + + value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0); + + return 0; +} + +static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi) +{ + u32 value; + + /* + * XXX Is this still needed? The module reset is deasserted right + * before this function is called. 
+ */ + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3); + tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4); + + /* start calibration */ + tegra_dsi_pad_enable(dsi); + + value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) | + DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) | + DSI_PAD_OUT_CLK(0x0); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2); + + value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) | + DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3); + tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3); + + return tegra_mipi_calibrate(dsi->mipi); +} + static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, unsigned int vrefresh) { @@ -794,7 +834,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_dsi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { @@ -837,7 +876,7 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) tegra_dsi_disable(dsi); - return; + pm_runtime_put(dsi->dev); } static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) @@ -848,6 +887,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) struct tegra_dsi *dsi = to_dsi(output); struct tegra_dsi_state *state; u32 value; + int err; + + pm_runtime_get_sync(dsi->dev); + + err = tegra_dsi_pad_calibrate(dsi); + if (err < 0) + dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); state = tegra_dsi_get_state(dsi); @@ -876,8 +922,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) if (output->panel) drm_panel_enable(output->panel); - - return; } static int @@ -967,55 +1011,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { .atomic_check = tegra_dsi_encoder_atomic_check, }; -static int tegra_dsi_pad_enable(struct tegra_dsi *dsi) -{ - u32 value; - - value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0); - tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0); - - return 0; -} - -static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi) -{ - u32 value; - - tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0); - tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1); - tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2); - tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3); - tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4); - - /* start calibration */ - tegra_dsi_pad_enable(dsi); - - value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) | - DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) | - DSI_PAD_OUT_CLK(0x0); - tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2); - - value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) | - DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3); - tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3); - - return tegra_mipi_calibrate(dsi->mipi); -} - static int tegra_dsi_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_dsi *dsi = host1x_client_to_dsi(client); int err; - reset_control_deassert(dsi->rst); - - err = tegra_dsi_pad_calibrate(dsi); - if (err < 0) { - dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); - goto reset; - } - /* Gangsters must not register their own outputs. 
*/ if (!dsi->master) { dsi->output.dev = client->dev; @@ -1038,12 +1039,9 @@ static int tegra_dsi_init(struct host1x_client *client) drm_connector_register(&dsi->output.connector); err = tegra_output_init(drm, &dsi->output); - if (err < 0) { - dev_err(client->dev, - "failed to initialize output: %d\n", + if (err < 0) + dev_err(dsi->dev, "failed to initialize output: %d\n", err); - goto reset; - } dsi->output.encoder.possible_crtcs = 0x3; } @@ -1055,10 +1053,6 @@ static int tegra_dsi_init(struct host1x_client *client) } return 0; - -reset: - reset_control_assert(dsi->rst); - return err; } static int tegra_dsi_exit(struct host1x_client *client) @@ -1070,7 +1064,7 @@ static int tegra_dsi_exit(struct host1x_client *client) if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_dsi_debugfs_exit(dsi); - reset_control_assert(dsi->rst); + regulator_disable(dsi->vdd); return 0; } @@ -1494,74 +1488,50 @@ static int tegra_dsi_probe(struct platform_device *pdev) dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; - dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); - if (IS_ERR(dsi->rst)) - return PTR_ERR(dsi->rst); + if (!pdev->dev.pm_domain) { + dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); + if (IS_ERR(dsi->rst)) + return PTR_ERR(dsi->rst); + } dsi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dsi->clk)) { dev_err(&pdev->dev, "cannot get DSI clock\n"); - err = PTR_ERR(dsi->clk); - goto reset; - } - - err = clk_prepare_enable(dsi->clk); - if (err < 0) { - dev_err(&pdev->dev, "cannot enable DSI clock\n"); - goto reset; + return PTR_ERR(dsi->clk); } dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); if (IS_ERR(dsi->clk_lp)) { dev_err(&pdev->dev, "cannot get low-power clock\n"); - err = PTR_ERR(dsi->clk_lp); - goto disable_clk; - } - - err = clk_prepare_enable(dsi->clk_lp); - if (err < 0) { - dev_err(&pdev->dev, "cannot enable low-power clock\n"); - goto disable_clk; + return PTR_ERR(dsi->clk_lp); } dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); if (IS_ERR(dsi->clk_parent)) { dev_err(&pdev->dev, "cannot get parent clock\n"); - err = PTR_ERR(dsi->clk_parent); - goto disable_clk_lp; + return PTR_ERR(dsi->clk_parent); } dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi"); if (IS_ERR(dsi->vdd)) { dev_err(&pdev->dev, "cannot get VDD supply\n"); - err = PTR_ERR(dsi->vdd); - goto disable_clk_lp; - } - - err = regulator_enable(dsi->vdd); - if (err < 0) { - dev_err(&pdev->dev, "cannot enable VDD supply\n"); - goto disable_clk_lp; + return PTR_ERR(dsi->vdd); } err = tegra_dsi_setup_clocks(dsi); if (err < 0) { dev_err(&pdev->dev, "cannot setup clocks\n"); - goto disable_vdd; + return err; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsi->regs = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(dsi->regs)) { - err = PTR_ERR(dsi->regs); - goto disable_vdd; - } + if (IS_ERR(dsi->regs)) + return PTR_ERR(dsi->regs); dsi->mipi = tegra_mipi_request(&pdev->dev); - if (IS_ERR(dsi->mipi)) { - err = PTR_ERR(dsi->mipi); - goto disable_vdd; - } + if (IS_ERR(dsi->mipi)) + return PTR_ERR(dsi->mipi); dsi->host.ops = &tegra_dsi_host_ops; dsi->host.dev = &pdev->dev; @@ -1572,6 +1542,9 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto mipi_free; } + platform_set_drvdata(pdev, dsi); + pm_runtime_enable(&pdev->dev); + INIT_LIST_HEAD(&dsi->client.list); dsi->client.ops = &dsi_client_ops; dsi->client.dev = &pdev->dev; @@ -1583,22 +1556,12 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto unregister; } - platform_set_drvdata(pdev, dsi); - return 0; unregister: 
mipi_dsi_host_unregister(&dsi->host); mipi_free: tegra_mipi_free(dsi->mipi); -disable_vdd: - regulator_disable(dsi->vdd); -disable_clk_lp: - clk_disable_unprepare(dsi->clk_lp); -disable_clk: - clk_disable_unprepare(dsi->clk); -reset: - reset_control_assert(dsi->rst); return err; } @@ -1607,6 +1570,8 @@ static int tegra_dsi_remove(struct platform_device *pdev) struct tegra_dsi *dsi = platform_get_drvdata(pdev); int err; + pm_runtime_disable(&pdev->dev); + err = host1x_client_unregister(&dsi->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", @@ -1619,14 +1584,82 @@ static int tegra_dsi_remove(struct platform_device *pdev) mipi_dsi_host_unregister(&dsi->host); tegra_mipi_free(dsi->mipi); - regulator_disable(dsi->vdd); + return 0; +} + +#ifdef CONFIG_PM +static int tegra_dsi_suspend(struct device *dev) +{ + struct tegra_dsi *dsi = dev_get_drvdata(dev); + int err; + + if (dsi->rst) { + err = reset_control_assert(dsi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + } + + usleep_range(1000, 2000); + clk_disable_unprepare(dsi->clk_lp); clk_disable_unprepare(dsi->clk); - reset_control_assert(dsi->rst); + + regulator_disable(dsi->vdd); return 0; } +static int tegra_dsi_resume(struct device *dev) +{ + struct tegra_dsi *dsi = dev_get_drvdata(dev); + int err; + + err = regulator_enable(dsi->vdd); + if (err < 0) { + dev_err(dev, "failed to enable VDD supply: %d\n", err); + return err; + } + + err = clk_prepare_enable(dsi->clk); + if (err < 0) { + dev_err(dev, "cannot enable DSI clock: %d\n", err); + goto disable_vdd; + } + + err = clk_prepare_enable(dsi->clk_lp); + if (err < 0) { + dev_err(dev, "cannot enable low-power clock: %d\n", err); + goto disable_clk; + } + + usleep_range(1000, 2000); + + if (dsi->rst) { + err = reset_control_deassert(dsi->rst); + if (err < 0) { + dev_err(dev, "cannot deassert reset: %d\n", err); + goto disable_clk_lp; + } + } + + return 0; + +disable_clk_lp: + clk_disable_unprepare(dsi->clk_lp); disable_clk: + clk_disable_unprepare(dsi->clk); +disable_vdd: + regulator_disable(dsi->vdd); + return err; +} +#endif + +static const struct dev_pm_ops tegra_dsi_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL) +}; + static const struct of_device_id tegra_dsi_of_match[] = { { .compatible = "nvidia,tegra210-dsi", }, { .compatible = "nvidia,tegra132-dsi", }, @@ -1640,6 +1673,7 @@ struct platform_driver tegra_dsi_driver = { .driver = { .name = "tegra-dsi", .of_match_table = tegra_dsi_of_match, + .pm = &tegra_dsi_pm_ops, }, .probe = tegra_dsi_probe, .remove = tegra_dsi_remove, diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 1b12aa7a715e..e6d71fa4028e 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) struct tegra_bo *bo = fb->planes[i]; if (bo) { - if (bo->pages && bo->vaddr) + if (bo->pages) vunmap(bo->vaddr); drm_gem_object_unreference_unlocked(&bo->gem); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index b7ef4929e347..cda0491ed6bf 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -11,6 +11,7 @@ #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/hdmi.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -18,10 +19,14 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <sound/hda_verbs.h> + #include "hdmi.h" 
#include "drm.h" #include "dc.h" +#define HDMI_ELD_BUFFER_SIZE 96 + struct tmds_config { unsigned int pclk; u32 pll0; @@ -39,6 +44,8 @@ struct tegra_hdmi_config { u32 fuse_override_value; bool has_sor_io_peak_current; + bool has_hda; + bool has_hbr; }; struct tegra_hdmi { @@ -60,7 +67,10 @@ struct tegra_hdmi { const struct tegra_hdmi_config *config; unsigned int audio_source; - unsigned int audio_freq; + unsigned int audio_sample_rate; + unsigned int audio_channels; + + unsigned int pixel_clock; bool stereo; bool dvi; @@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = { }; static const struct tegra_hdmi_audio_config * -tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) +tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk) { const struct tegra_hdmi_audio_config *table; - switch (audio_freq) { + switch (sample_rate) { case 32000: table = tegra_hdmi_audio_32k; break; @@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) } } -static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) +static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value) +{ + static const struct { + unsigned int sample_rate; + unsigned int offset; + } regs[] = { + { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 }, + { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 }, + { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 }, + { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 }, + { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 }, + { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 }, + { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + if (regs[i].sample_rate == hdmi->audio_sample_rate) { + tegra_hdmi_writel(hdmi, value, regs[i].offset); + break; + } + } +} + +static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi) { - struct device_node *node = hdmi->dev->of_node; const struct tegra_hdmi_audio_config *config; - unsigned int offset = 0; - u32 value; + u32 source, value; switch (hdmi->audio_source) { case HDA: - value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; + if (hdmi->config->has_hda) + source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL; + else + return -EINVAL; + break; case SPDIF: - value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; + if (hdmi->config->has_hda) + source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; + else + source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; break; default: - value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; + if (hdmi->config->has_hda) + source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO; + else + source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; break; } - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); - } else { - value |= AUDIO_CNTRL0_INJECT_NULLSMPL; + /* + * Tegra30 and later use a slightly modified version of the register + * layout to accommodate changes related to supporting HDA as the + * audio input source for HDMI. The source select field has moved to + * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames + * per block fields remain in the AUDIO_CNTRL0 register. + */ + if (hdmi->config->has_hda) { + /* + * Inject null samples into the audio FIFO for every frame in + * which the codec did not receive any samples. This applies + * to stereo LPCM only. 
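+ *
+ * As a hedged illustration of the resulting register value: for a
+ * stereo stream sourced from the HDA codec, the write below works
+ * out to
+ *
+ *   SOR_AUDIO_CNTRL0_INJECT_NULLSMPL | SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL
+ *     = (1 << 29) | (2 << 20) = 0x20200000
+ *
+ * using the bit definitions this patch adds to hdmi.h.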
+ * + * XXX: This seems to be a remnant of MCP days when this was + * used to work around issues with monitors not being able to + * play back system startup sounds early. It is possibly not + * needed on Linux at all. + */ + if (hdmi->audio_channels == 2) + value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL; + else + value = 0; + + value |= source; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); + } - value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); + /* + * On Tegra20, HDA is not a supported audio source and the source + * select field is part of the AUDIO_CNTRL0 register. + */ + value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) | + AUDIO_CNTRL0_ERROR_TOLERANCE(6); + + if (!hdmi->config->has_hda) + value |= source; + + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); + + /* + * Advertise support for High Bit-Rate on Tegra114 and later. + */ + if (hdmi->config->has_hbr) { + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0); + value |= SOR_AUDIO_SPARE0_HBR_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0); } - config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); + config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate, + hdmi->pixel_clock); if (!config) { - dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", - hdmi->audio_freq, pclk); + dev_err(hdmi->dev, + "cannot set audio to %u Hz at %u Hz pixel clock\n", + hdmi->audio_sample_rate, hdmi->pixel_clock); return -EINVAL; } @@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); - value = ACR_SUBPACK_CTS(config->cts); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); + tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts), + HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); @@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) value &= ~AUDIO_N_RESETF; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - switch (hdmi->audio_freq) { - case 32000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320; - break; + if (hdmi->config->has_hda) + tegra_hdmi_write_aval(hdmi, config->aval); - case 44100: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441; - break; + tegra_hdmi_setup_audio_fs_tables(hdmi); - case 48000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; - break; + return 0; +} - case 88200: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; - break; +static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi) +{ + u32 value; - case 96000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; - break; + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + value &= ~GENERIC_CTRL_AUDIO; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); +} - case 176400: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; - break; +static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi) +{ + u32 value; - case 192000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; - break; - } + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + value |= GENERIC_CTRL_AUDIO; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); +} - tegra_hdmi_writel(hdmi, config->aval, offset); - } +static 
void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi) +{ + size_t length = drm_eld_size(hdmi->output.connector.eld), i; + u32 value; - for (i = 0; i < length; i++) + tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i], + HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); - tegra_hdmi_setup_audio_fs_tables(hdmi); + /* + * The HDA codec will always report an ELD buffer size of 96 bytes and + * the HDA codec driver will check that each byte read from the buffer + * is valid. Therefore every byte must be written, even if fewer than + * 96 bytes were parsed from the EDID. + */ + for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++) + tegra_hdmi_writel(hdmi, i << 8 | 0, + HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); + + value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); } static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size) @@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, u8 buffer[17]; ssize_t err; - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); - return; - } - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); @@ -663,9 +747,24 @@ } tegra_hdmi_write_infopack(hdmi, buffer, err); +} + +static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); + value &= ~INFOFRAME_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); +} + +static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); + value |= INFOFRAME_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); } static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) @@ -674,12 +773,6 @@ u8 buffer[14]; ssize_t err; - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); - return; - } - err = hdmi_audio_infoframe_init(&frame); if (err < 0) { dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n", @@ -687,7 +780,7 @@ return; } - frame.channels = 2; + frame.channels = hdmi->audio_channels; err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { @@ -703,9 +796,24 @@ * bytes can be programmed. 
*/ tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err)); +} - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); +static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); + value &= ~INFOFRAME_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); +} + +static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); + value |= INFOFRAME_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); } static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) @@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) struct hdmi_vendor_infoframe frame; u8 buffer[10]; ssize_t err; - u32 value; - - if (!hdmi->stereo) { - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - value &= ~GENERIC_CTRL_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - return; - } hdmi_vendor_infoframe_init(&frame); frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING; @@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) } tegra_hdmi_write_infopack(hdmi, buffer, err); +} + +static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + value &= ~GENERIC_CTRL_ENABLE; + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); +} + +static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi) +{ + u32 value; value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); value |= GENERIC_CTRL_ENABLE; @@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output) return drm_detect_hdmi_monitor(edid); } +static enum drm_connector_status +tegra_hdmi_connector_detect(struct drm_connector *connector, bool force) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_hdmi *hdmi = to_hdmi(output); + enum drm_connector_status status; + + status = tegra_output_connector_detect(connector, force); + if (status == connector_status_connected) + return status; + + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); + return status; +} + static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, - .detect = tegra_output_connector_detect, + .detect = tegra_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = tegra_output_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -806,7 +935,6 @@ static const struct drm_connector_helper_funcs tegra_hdmi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_hdmi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { @@ -815,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) { + struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_hdmi *hdmi = to_hdmi(output); u32 value; /* @@ -829,6 +959,20 @@ static void 
tegra_hdmi_encoder_disable(struct drm_encoder *encoder) tegra_dc_commit(dc); } + + if (!hdmi->dvi) { + if (hdmi->stereo) + tegra_hdmi_disable_stereo_infoframe(hdmi); + + tegra_hdmi_disable_audio_infoframe(hdmi); + tegra_hdmi_disable_avi_infoframe(hdmi); + tegra_hdmi_disable_audio(hdmi); + } + + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE); + tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK); + + pm_runtime_put(hdmi->dev); } static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) @@ -837,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct device_node *node = output->dev->of_node; struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned int pulse_start, div82, pclk; + unsigned int pulse_start, div82; int retries = 1000; u32 value; int err; - hdmi->dvi = !tegra_output_is_hdmi(output); + pm_runtime_get_sync(hdmi->dev); - pclk = mode->clock * 1000; + /* + * Enable and unmask the HDA codec SCRATCH0 register interrupt. This + * is used for interoperability between the HDA codec driver and the + * HDMI driver. + */ + tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE); + tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK); + + hdmi->pixel_clock = mode->clock * 1000; h_sync_width = mode->hsync_end - mode->hsync_start; h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; - err = clk_set_rate(hdmi->clk, pclk); + err = clk_set_rate(hdmi->clk, hdmi->pixel_clock); if (err < 0) { dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", err); @@ -910,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); + hdmi->dvi = !tegra_output_is_hdmi(output); if (!hdmi->dvi) { - err = tegra_hdmi_setup_audio(hdmi, pclk); + err = tegra_hdmi_setup_audio(hdmi); if (err < 0) hdmi->dvi = true; } - if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { - /* - * TODO: add ELD support - */ - } + if (hdmi->config->has_hda) + tegra_hdmi_write_eld(hdmi); rekey = HDMI_REKEY_DEFAULT; value = HDMI_CTRL_REKEY(rekey); @@ -932,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); - if (hdmi->dvi) - tegra_hdmi_writel(hdmi, 0x0, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - else - tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); + if (!hdmi->dvi) { + tegra_hdmi_setup_avi_infoframe(hdmi, mode); + tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_setup_avi_infoframe(hdmi, mode); - tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_setup_stereo_infoframe(hdmi); + if (hdmi->stereo) + tegra_hdmi_setup_stereo_infoframe(hdmi); + } /* TMDS CONFIG */ for (i = 0; i < hdmi->config->num_tmds; i++) { - if (pclk <= hdmi->config->tmds[i].pclk) { + if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) { tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]); break; } @@ -1032,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) tegra_dc_commit(dc); + if (!hdmi->dvi) { + tegra_hdmi_enable_avi_infoframe(hdmi); + tegra_hdmi_enable_audio_infoframe(hdmi); + tegra_hdmi_enable_audio(hdmi); + + if (hdmi->stereo) + tegra_hdmi_enable_stereo_infoframe(hdmi); + } 
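The SCRATCH0 interrupt unmasked in the enable path above carries a standard HDA stream format word programmed by the HDA codec driver; the hda_format_parse() helper added further down decodes it. A minimal sketch of the rate decode, assuming only the AC_FMT_* masks from <sound/hda_verbs.h> (the example value is illustrative):

	#include <sound/hda_verbs.h>

	/* Decode the sample rate from an HDA stream format word, mirroring
	 * the hda_format_parse() helper introduced later in this patch. */
	static unsigned int hda_format_rate(unsigned int format)
	{
		unsigned int rate = (format & AC_FMT_BASE_44K) ? 44100 : 48000;
		unsigned int mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
		unsigned int div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;

		/* e.g. format == 0x4000: 44.1 kHz base with MULT = DIV = 0,
		 * so the result is 44100 * (0 + 1) / (0 + 1) = 44100 Hz */
		return rate * (mul + 1) / (div + 1);
	}

The helper reads the channel count from the low AC_FMT_CHAN bits of the same word in the same fashion.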
+ /* TODO: add HDCP support */ } @@ -1236,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0); + DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); + DUMP_REG(HDMI_NV_PDISP_INT_STATUS); + DUMP_REG(HDMI_NV_PDISP_INT_MASK); + DUMP_REG(HDMI_NV_PDISP_INT_ENABLE); DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); #undef DUMP_REG @@ -1361,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client) return err; } - err = clk_prepare_enable(hdmi->clk); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable clock: %d\n", err); - return err; - } - - reset_control_deassert(hdmi->rst); - return 0; } @@ -1378,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client) tegra_output_exit(&hdmi->output); - reset_control_assert(hdmi->rst); - clk_disable_unprepare(hdmi->clk); - regulator_disable(hdmi->vdd); regulator_disable(hdmi->pll); regulator_disable(hdmi->hdmi); @@ -1402,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = { .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, .fuse_override_value = 1 << 31, .has_sor_io_peak_current = false, + .has_hda = false, + .has_hbr = false, }; static const struct tegra_hdmi_config tegra30_hdmi_config = { @@ -1410,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = { .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, .fuse_override_value = 1 << 31, .has_sor_io_peak_current = false, + .has_hda = true, + .has_hbr = false, }; static const struct tegra_hdmi_config tegra114_hdmi_config = { @@ -1418,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = { .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, .fuse_override_value = 1 << 31, .has_sor_io_peak_current = true, + .has_hda = true, + .has_hbr = true, }; static const struct tegra_hdmi_config tegra124_hdmi_config = { @@ -1426,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = { .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, .fuse_override_value = 1 << 31, .has_sor_io_peak_current = true, + .has_hda = true, + .has_hbr = true, }; static const struct of_device_id tegra_hdmi_of_match[] = { @@ -1437,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = { }; MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match); +static void hda_format_parse(unsigned int format, unsigned int *rate, + unsigned int *channels) +{ + unsigned int mul, div; + + if (format & AC_FMT_BASE_44K) + *rate = 44100; + else + *rate = 48000; + + mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT; + div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT; + + *rate = *rate * (mul + 1) / (div + 1); + + *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT; +} + +static irqreturn_t tegra_hdmi_irq(int irq, void *data) +{ + struct tegra_hdmi *hdmi = data; + u32 value; + int err; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS); + tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS); + + if (value & INT_CODEC_SCRATCH0) { + unsigned int format; + u32 value; + + value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0); + + if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) { + unsigned int sample_rate, channels; + + format = value & 
SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK; + + hda_format_parse(format, &sample_rate, &channels); + + hdmi->audio_sample_rate = sample_rate; + hdmi->audio_channels = channels; + + err = tegra_hdmi_setup_audio(hdmi); + if (err < 0) { + tegra_hdmi_disable_audio_infoframe(hdmi); + tegra_hdmi_disable_audio(hdmi); + } else { + tegra_hdmi_setup_audio_infoframe(hdmi); + tegra_hdmi_enable_audio_infoframe(hdmi); + tegra_hdmi_enable_audio(hdmi); + } + } else { + tegra_hdmi_disable_audio_infoframe(hdmi); + tegra_hdmi_disable_audio(hdmi); + } + } + + return IRQ_HANDLED; +} + static int tegra_hdmi_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -1454,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev) hdmi->config = match->data; hdmi->dev = &pdev->dev; + hdmi->audio_source = AUTO; - hdmi->audio_freq = 44100; + hdmi->audio_sample_rate = 48000; + hdmi->audio_channels = 2; hdmi->stereo = false; hdmi->dvi = false; @@ -1516,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev) hdmi->irq = err; + err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0, + dev_name(hdmi->dev), hdmi); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", + hdmi->irq, err); + return err; + } + + platform_set_drvdata(pdev, hdmi); + pm_runtime_enable(&pdev->dev); + INIT_LIST_HEAD(&hdmi->client.list); hdmi->client.ops = &hdmi_client_ops; hdmi->client.dev = &pdev->dev; @@ -1527,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev) return err; } - platform_set_drvdata(pdev, hdmi); - return 0; } @@ -1537,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev) struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); int err; + pm_runtime_disable(&pdev->dev); + err = host1x_client_unregister(&hdmi->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", @@ -1546,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev) tegra_output_remove(&hdmi->output); - clk_disable_unprepare(hdmi->clk_parent); + return 0; +} + +#ifdef CONFIG_PM +static int tegra_hdmi_suspend(struct device *dev) +{ + struct tegra_hdmi *hdmi = dev_get_drvdata(dev); + int err; + + err = reset_control_assert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + usleep_range(1000, 2000); + clk_disable_unprepare(hdmi->clk); return 0; } +static int tegra_hdmi_resume(struct device *dev) +{ + struct tegra_hdmi *hdmi = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + return err; + } + + usleep_range(1000, 2000); + + err = reset_control_deassert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + clk_disable_unprepare(hdmi->clk); + return err; + } + + return 0; +} +#endif + +static const struct dev_pm_ops tegra_hdmi_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL) +}; + struct platform_driver tegra_hdmi_driver = { .driver = { .name = "tegra-hdmi", - .owner = THIS_MODULE, .of_match_table = tegra_hdmi_of_match, + .pm = &tegra_hdmi_pm_ops, }, .probe = tegra_hdmi_probe, .remove = tegra_hdmi_remove, diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h index a882514389cd..2339f134a09a 100644 --- a/drivers/gpu/drm/tegra/hdmi.h +++ b/drivers/gpu/drm/tegra/hdmi.h @@ -468,9 +468,20 @@ #define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 #define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac -#define 
AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) +#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20) +#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20) +#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20) +#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) +#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae +#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27) +#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba +#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30) +#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff +#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb #define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc #define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd +#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1) +#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0) #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0 @@ -481,6 +492,14 @@ #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 +#define HDMI_NV_PDISP_INT_STATUS 0xcc +#define INT_SCRATCH (1 << 3) +#define INT_CP_REQUEST (1 << 2) +#define INT_CODEC_SCRATCH1 (1 << 1) +#define INT_CODEC_SCRATCH0 (1 << 0) +#define HDMI_NV_PDISP_INT_MASK 0xcd +#define HDMI_NV_PDISP_INT_ENABLE 0xce + #define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1 #define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0) #define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8) diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 46664b622270..595d1ec3e02e 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -36,20 +36,13 @@ int tegra_output_connector_get_modes(struct drm_connector *connector) if (edid) { err = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); } return err; } -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - - return &output->encoder; -} - enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index e246334e0252..a131b44e2d6f 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_rgb_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 757c6e8603af..74d0540b8d4c 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -7,11 +7,13 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/io.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -149,6 +151,8 @@ struct tegra_sor_soc { const struct tegra_sor_hdmi_settings *settings; unsigned int num_settings; + + const u8 *xbar_cfg; }; struct tegra_sor; @@ -169,7 +173,9 @@ struct tegra_sor { struct reset_control *rst; struct clk *clk_parent; + struct clk *clk_brick; struct clk *clk_safe; + struct clk *clk_src; struct clk *clk_dp; struct clk *clk; @@ -190,6 +196,18 @@ struct tegra_sor { struct regulator 
*hdmi_supply; }; +struct tegra_sor_state { + struct drm_connector_state base; + + unsigned int bpc; +}; + +static inline struct tegra_sor_state * +to_sor_state(struct drm_connector_state *state) +{ + return container_of(state, struct tegra_sor_state, base); +} + struct tegra_sor_config { u32 bits_per_pixel; @@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value, writel(value, sor->regs + (offset << 2)); } +static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent) +{ + int err; + + clk_disable_unprepare(sor->clk); + + err = clk_set_parent(sor->clk, parent); + if (err < 0) + return err; + + err = clk_prepare_enable(sor->clk); + if (err < 0) + return err; + + return 0; +} + +struct tegra_clk_sor_brick { + struct clk_hw hw; + struct tegra_sor *sor; +}; + +static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw) +{ + return container_of(hw, struct tegra_clk_sor_brick, hw); +} + +static const char * const tegra_clk_sor_brick_parents[] = { + "pll_d2_out0", "pll_dp" +}; + +static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index) +{ + struct tegra_clk_sor_brick *brick = to_brick(hw); + struct tegra_sor *sor = brick->sor; + u32 value; + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + + switch (index) { + case 0: + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK; + break; + + case 1: + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; + break; + } + + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + return 0; +} + +static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw) +{ + struct tegra_clk_sor_brick *brick = to_brick(hw); + struct tegra_sor *sor = brick->sor; + u8 parent = U8_MAX; + u32 value; + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + + switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) { + case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK: + case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK: + parent = 0; + break; + + case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK: + case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK: + parent = 1; + break; + } + + return parent; +} + +static const struct clk_ops tegra_clk_sor_brick_ops = { + .set_parent = tegra_clk_sor_brick_set_parent, + .get_parent = tegra_clk_sor_brick_get_parent, +}; + +static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor, + const char *name) +{ + struct tegra_clk_sor_brick *brick; + struct clk_init_data init; + struct clk *clk; + + brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL); + if (!brick) + return ERR_PTR(-ENOMEM); + + brick->sor = sor; + + init.name = name; + init.flags = 0; + init.parent_names = tegra_clk_sor_brick_parents; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents); + init.ops = &tegra_clk_sor_brick_ops; + + brick->hw.init = &init; + + clk = devm_clk_register(sor->dev, &brick->hw); + if (IS_ERR(clk)) + kfree(brick); + + return clk; +} + static int tegra_sor_dp_train_fast(struct tegra_sor *sor, struct drm_dp_link *link) { @@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor, return false; } -static int tegra_sor_calc_config(struct tegra_sor *sor, - const struct drm_display_mode *mode, - struct tegra_sor_config *config, - struct drm_dp_link *link) +static int tegra_sor_compute_config(struct tegra_sor *sor, + const struct drm_display_mode *mode, + struct tegra_sor_config *config, + struct drm_dp_link *link) { const u64 f = 100000, link_rate = link->rate * 1000; const u64 pclk = mode->clock * 1000; @@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct 
tegra_sor *sor, return 0; } +static void tegra_sor_apply_config(struct tegra_sor *sor, + const struct tegra_sor_config *config) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; + value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size); + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + value = tegra_sor_readl(sor, SOR_DP_CONFIG0); + value &= ~SOR_DP_CONFIG_WATERMARK_MASK; + value |= SOR_DP_CONFIG_WATERMARK(config->watermark); + + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK; + value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count); + + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK; + value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac); + + if (config->active_polarity) + value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; + else + value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; + + value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; + value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; + tegra_sor_writel(sor, value, SOR_DP_CONFIG0); + + value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); + value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; + value |= config->hblank_symbols & 0xffff; + tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS); + + value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS); + value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK; + value |= config->vblank_symbols & 0xffff; + tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); +} + +static void tegra_sor_mode_set(struct tegra_sor *sor, + const struct drm_display_mode *mode, + struct tegra_sor_state *state) +{ + struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc); + unsigned int vbe, vse, hbe, hse, vbs, hbs; + u32 value; + + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK; + value &= ~SOR_STATE_ASY_CRC_MODE_MASK; + value &= ~SOR_STATE_ASY_OWNER_MASK; + + value |= SOR_STATE_ASY_CRC_MODE_COMPLETE | + SOR_STATE_ASY_OWNER(dc->pipe + 1); + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value &= ~SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value &= ~SOR_STATE_ASY_VSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL; + + switch (state->bpc) { + case 16: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444; + break; + + case 12: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444; + break; + + case 10: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444; + break; + + case 8: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; + break; + + case 6: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444; + break; + + default: + value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; + break; + } + + tegra_sor_writel(sor, value, SOR_STATE1); + + /* + * TODO: The video timing programming below doesn't seem to match the + * register definitions. 
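+ *
+ * As a hedged worked example of the formulas in the comments below,
+ * take a 1920x1080@60 CEA mode (hsync_start/end = 2008/2052,
+ * htotal = 2200, vsync_start/end = 1084/1089, vtotal = 1125):
+ *
+ *   vse = 1089 - 1084 - 1 = 4      hse = 2052 - 2008 - 1 = 43
+ *   vbe = 4 + (1125 - 1089) = 40   hbe = 43 + (2200 - 2052) = 191
+ *   vbs = 40 + 1080 = 1120         hbs = 191 + 1920 = 2111
+ *
+ * Each pair is then packed as ((v & 0x7fff) << 16) | (h & 0x7fff)
+ * into the corresponding SOR_HEAD_STATE register.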
+ */ + + value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe)); + + /* sync end = sync width - 1 */ + vse = mode->vsync_end - mode->vsync_start - 1; + hse = mode->hsync_end - mode->hsync_start - 1; + + value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe)); + + /* blank end = sync end + back porch */ + vbe = vse + (mode->vtotal - mode->vsync_end); + hbe = hse + (mode->htotal - mode->hsync_end); + + value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe)); + + /* blank start = blank end + active */ + vbs = vbe + mode->vdisplay; + hbs = hbe + mode->hdisplay; + + value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); + tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe)); + + /* XXX interlacing support */ + tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe)); +} + static int tegra_sor_detach(struct tegra_sor *sor) { unsigned long value, timeout; @@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor) if ((value & SOR_PWR_TRIGGER) != 0) return -ETIMEDOUT; - err = clk_set_parent(sor->clk, sor->clk_safe); + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); if (err < 0) dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); @@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor) sor->debugfs = NULL; } +static void tegra_sor_connector_reset(struct drm_connector *connector) +{ + struct tegra_sor_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + + if (connector->state) { + __drm_atomic_helper_connector_destroy_state(connector->state); + kfree(connector->state); + } + + __drm_atomic_helper_connector_reset(connector, &state->base); +} + static enum drm_connector_status tegra_sor_connector_detect(struct drm_connector *connector, bool force) { @@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force) return tegra_output_connector_detect(connector, force); } +static struct drm_connector_state * +tegra_sor_connector_duplicate_state(struct drm_connector *connector) +{ + struct tegra_sor_state *state = to_sor_state(connector->state); + struct tegra_sor_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_connector_duplicate_state(connector, ©->base); + + return ©->base; +} + static const struct drm_connector_funcs tegra_sor_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, - .reset = drm_atomic_helper_connector_reset, + .reset = tegra_sor_connector_reset, .detect = tegra_sor_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = tegra_output_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_duplicate_state = tegra_sor_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -1081,13 +1372,16 @@ static enum drm_mode_status tegra_sor_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + /* HDMI 2.0 modes are not yet supported */ + if (mode->clock > 340000) + return MODE_NOCLOCK; + return MODE_OK; } static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { .get_modes = tegra_sor_connector_get_modes, .mode_valid = tegra_sor_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static 
const struct drm_encoder_funcs tegra_sor_encoder_funcs = { @@ -1141,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder) if (output->panel) drm_panel_unprepare(output->panel); - reset_control_assert(sor->rst); - clk_disable_unprepare(sor->clk); + pm_runtime_put(sor->dev); } #if 0 @@ -1192,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct tegra_output *output = encoder_to_output(encoder); struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - unsigned int vbe, vse, hbe, hse, vbs, hbs, i; struct tegra_sor *sor = to_sor(output); struct tegra_sor_config config; + struct tegra_sor_state *state; struct drm_dp_link link; u8 rate, lanes; + unsigned int i; int err = 0; u32 value; - err = clk_prepare_enable(sor->clk); - if (err < 0) - dev_err(sor->dev, "failed to enable clock: %d\n", err); + state = to_sor_state(output->connector.state); - reset_control_deassert(sor->rst); + pm_runtime_get_sync(sor->dev); if (output->panel) drm_panel_prepare(output->panel); @@ -1219,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) return; } - err = clk_set_parent(sor->clk, sor->clk_safe); + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); if (err < 0) dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); memset(&config, 0, sizeof(config)); - config.bits_per_pixel = output->connector.display_info.bpc * 3; + config.bits_per_pixel = state->bpc * 3; - err = tegra_sor_calc_config(sor, mode, &config, &link); + err = tegra_sor_compute_config(sor, mode, &config, &link); if (err < 0) - dev_err(sor->dev, "failed to compute link configuration: %d\n", - err); + dev_err(sor->dev, "failed to compute configuration: %d\n", err); value = tegra_sor_readl(sor, SOR_CLK_CNTRL); value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; @@ -1326,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) value &= ~SOR_PLL2_PORT_POWERDOWN; tegra_sor_writel(sor, value, SOR_PLL2); - /* switch to DP clock */ - err = clk_set_parent(sor->clk, sor->clk_dp); + /* XXX not in TRM */ + for (value = 0, i = 0; i < 5; i++) + value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) | + SOR_XBAR_CTRL_LINK1_XSEL(i, i); + + tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + + /* switch to DP parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_dp); if (err < 0) - dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); + dev_err(sor->dev, "failed to set parent clock: %d\n", err); /* power DP lanes */ value = tegra_sor_readl(sor, SOR_DP_PADCTL0); @@ -1375,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - /* set linkctl */ + tegra_sor_apply_config(sor, &config); + + /* enable link */ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); value |= SOR_DP_LINKCTL_ENABLE; - - value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK; - value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size); - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); @@ -1394,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, value, SOR_DP_TPG); - value = tegra_sor_readl(sor, SOR_DP_CONFIG0); - value &= ~SOR_DP_CONFIG_WATERMARK_MASK; - value |= SOR_DP_CONFIG_WATERMARK(config.watermark); - - value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK; - value 
|= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count); - - value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK; - value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac); - - if (config.active_polarity) - value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; - else - value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; - - value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE; - value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; - tegra_sor_writel(sor, value, SOR_DP_CONFIG0); - - value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS); - value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK; - value |= config.hblank_symbols & 0xffff; - tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS); - - value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS); - value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK; - value |= config.vblank_symbols & 0xffff; - tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS); - /* enable pad calibration logic */ value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value |= SOR_DP_PADCTL_PAD_CAL_PD; @@ -1478,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder) if (err < 0) dev_err(sor->dev, "failed to power up SOR: %d\n", err); - /* - * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete - * raster, associate with display controller) - */ - value = SOR_STATE_ASY_PROTOCOL_DP_A | - SOR_STATE_ASY_CRC_MODE_COMPLETE | - SOR_STATE_ASY_OWNER(dc->pipe + 1); - - if (mode->flags & DRM_MODE_FLAG_PHSYNC) - value &= ~SOR_STATE_ASY_HSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_PVSYNC) - value &= ~SOR_STATE_ASY_VSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL; - - switch (config.bits_per_pixel) { - case 24: - value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; - break; - - case 18: - value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444; - break; - - default: - BUG(); - break; - } - - tegra_sor_writel(sor, value, SOR_STATE1); - - /* - * TODO: The video timing programming below doesn't seem to match the - * register definitions. 
- */ - - value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe)); - - vse = mode->vsync_end - mode->vsync_start - 1; - hse = mode->hsync_end - mode->hsync_start - 1; - - value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe)); - - vbe = vse + (mode->vsync_start - mode->vdisplay); - hbe = hse + (mode->hsync_start - mode->hdisplay); - - value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe)); - - vbs = vbe + mode->vdisplay; - hbs = hbe + mode->hdisplay; - - value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe)); - - tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe)); - /* CSTM (LVDS, link A/B, upper) */ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | SOR_CSTM_UPPER; tegra_sor_writel(sor, value, SOR_CSTM); + /* use DP-A protocol */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value |= SOR_STATE_ASY_PROTOCOL_DP_A; + tegra_sor_writel(sor, value, SOR_STATE1); + + tegra_sor_mode_set(sor, mode, state); + /* PWM setup */ err = tegra_sor_setup_pwm(sor, 250); if (err < 0) @@ -1578,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct tegra_output *output = encoder_to_output(encoder); + struct tegra_sor_state *state = to_sor_state(conn_state); struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_sor *sor = to_sor(output); + struct drm_display_info *info; int err; + info = &output->connector.display_info; + err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, pclk, 0); if (err < 0) { @@ -1590,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, return err; } + switch (info->bpc) { + case 8: + case 6: + state->bpc = info->bpc; + break; + + default: + DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc); + state->bpc = 8; + break; + } + return 0; } @@ -1752,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) if (err < 0) dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err); - reset_control_assert(sor->rst); - usleep_range(1000, 2000); - clk_disable_unprepare(sor->clk); + pm_runtime_put(sor->dev); } static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) @@ -1762,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) struct tegra_output *output = encoder_to_output(encoder); unsigned int h_ref_to_sync = 1, pulse_start, max_ac; struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - unsigned int vbe, vse, hbe, hse, vbs, hbs, div; struct tegra_sor_hdmi_settings *settings; struct tegra_sor *sor = to_sor(output); + struct tegra_sor_state *state; struct drm_display_mode *mode; - struct drm_display_info *info; + unsigned int div, i; u32 value; int err; + state = to_sor_state(output->connector.state); mode = &encoder->crtc->state->adjusted_mode; - info = &output->connector.display_info; - err = clk_prepare_enable(sor->clk); - if (err < 0) - dev_err(sor->dev, "failed to enable clock: %d\n", err); + pm_runtime_get_sync(sor->dev); - usleep_range(1000, 2000); - - reset_control_deassert(sor->rst); - - err = clk_set_parent(sor->clk, sor->clk_safe); + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); if (err < 0) dev_err(sor->dev, "failed to set safe parent clock: 
%d\n", err); @@ -1877,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div); tegra_sor_writel(sor, value, SOR_REFCLK); - /* XXX don't hardcode */ - value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) | - SOR_XBAR_CTRL_LINK1_XSEL(3, 3) | - SOR_XBAR_CTRL_LINK1_XSEL(2, 2) | - SOR_XBAR_CTRL_LINK1_XSEL(1, 1) | - SOR_XBAR_CTRL_LINK1_XSEL(0, 0) | - SOR_XBAR_CTRL_LINK0_XSEL(4, 4) | - SOR_XBAR_CTRL_LINK0_XSEL(3, 3) | - SOR_XBAR_CTRL_LINK0_XSEL(2, 0) | - SOR_XBAR_CTRL_LINK0_XSEL(1, 1) | - SOR_XBAR_CTRL_LINK0_XSEL(0, 2); - tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + /* XXX not in TRM */ + for (value = 0, i = 0; i < 5; i++) + value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) | + SOR_XBAR_CTRL_LINK1_XSEL(i, i); tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - err = clk_set_parent(sor->clk, sor->clk_parent); + /* switch to parent clock */ + err = clk_set_parent(sor->clk_src, sor->clk_parent); + if (err < 0) + dev_err(sor->dev, "failed to set source clock: %d\n", err); + + err = tegra_sor_set_parent_clock(sor, sor->clk_src); if (err < 0) dev_err(sor->dev, "failed to set parent clock: %d\n", err); @@ -2002,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) value &= ~DITHER_CONTROL_MASK; value &= ~BASE_COLOR_SIZE_MASK; - switch (info->bpc) { + switch (state->bpc) { case 6: value |= BASE_COLOR_SIZE_666; break; @@ -2012,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) break; default: - WARN(1, "%u bits-per-color not supported\n", info->bpc); + WARN(1, "%u bits-per-color not supported\n", state->bpc); + value |= BASE_COLOR_SIZE_888; break; } @@ -2022,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) if (err < 0) dev_err(sor->dev, "failed to power up SOR: %d\n", err); - /* configure mode */ - value = tegra_sor_readl(sor, SOR_STATE1); - value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK; - value &= ~SOR_STATE_ASY_CRC_MODE_MASK; - value &= ~SOR_STATE_ASY_OWNER_MASK; - - value |= SOR_STATE_ASY_CRC_MODE_COMPLETE | - SOR_STATE_ASY_OWNER(dc->pipe + 1); - - if (mode->flags & DRM_MODE_FLAG_PHSYNC) - value &= ~SOR_STATE_ASY_HSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_PVSYNC) - value &= ~SOR_STATE_ASY_VSYNCPOL; - - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL; - - switch (info->bpc) { - case 8: - value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; - break; - - case 6: - value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444; - break; - - default: - BUG(); - break; - } - - tegra_sor_writel(sor, value, SOR_STATE1); - + /* configure dynamic range of output */ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK; value &= ~SOR_HEAD_STATE_DYNRANGE_MASK; tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); + /* configure colorspace */ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); value &= ~SOR_HEAD_STATE_COLORSPACE_MASK; value |= SOR_HEAD_STATE_COLORSPACE_RGB; tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); - /* - * TODO: The video timing programming below doesn't seem to match the - * register definitions. 
- */ - - value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe)); - - /* sync end = sync width - 1 */ - vse = mode->vsync_end - mode->vsync_start - 1; - hse = mode->hsync_end - mode->hsync_start - 1; - - value = ((vse & 0x7fff) << 16) | (hse & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe)); - - /* blank end = sync end + back porch */ - vbe = vse + (mode->vtotal - mode->vsync_end); - hbe = hse + (mode->htotal - mode->hsync_end); - - value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe)); - - /* blank start = blank end + active */ - vbs = vbe + mode->vdisplay; - hbs = hbe + mode->hdisplay; - - value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff); - tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe)); - - tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe)); + tegra_sor_mode_set(sor, mode, state); tegra_sor_update(sor); @@ -2196,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client) * XXX: Remove this reset once proper hand-over from firmware to * kernel is possible. */ - err = reset_control_assert(sor->rst); - if (err < 0) { - dev_err(sor->dev, "failed to assert SOR reset: %d\n", err); - return err; + if (sor->rst) { + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to assert SOR reset: %d\n", + err); + return err; + } } err = clk_prepare_enable(sor->clk); @@ -2210,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client) usleep_range(1000, 3000); - err = reset_control_deassert(sor->rst); - if (err < 0) { - dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); - return err; + if (sor->rst) { + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to deassert SOR reset: %d\n", + err); + return err; + } } err = clk_prepare_enable(sor->clk_safe); @@ -2324,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = { .remove = tegra_sor_hdmi_remove, }; +static const u8 tegra124_sor_xbar_cfg[5] = { + 0, 1, 2, 3, 4 +}; + static const struct tegra_sor_soc tegra124_sor = { .supports_edp = true, .supports_lvds = true, .supports_hdmi = false, .supports_dp = false, + .xbar_cfg = tegra124_sor_xbar_cfg, }; static const struct tegra_sor_soc tegra210_sor = { @@ -2336,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = { .supports_lvds = false, .supports_hdmi = false, .supports_dp = false, + .xbar_cfg = tegra124_sor_xbar_cfg, +}; + +static const u8 tegra210_sor_xbar_cfg[5] = { + 2, 1, 0, 3, 4 }; static const struct tegra_sor_soc tegra210_sor1 = { @@ -2346,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = { .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), .settings = tegra210_sor_hdmi_defaults, + + .xbar_cfg = tegra210_sor_xbar_cfg, }; static const struct of_device_id tegra_sor_of_match[] = { @@ -2435,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } - sor->rst = devm_reset_control_get(&pdev->dev, "sor"); - if (IS_ERR(sor->rst)) { - err = PTR_ERR(sor->rst); - dev_err(&pdev->dev, "failed to get reset control: %d\n", err); - goto remove; + if (!pdev->dev.pm_domain) { + sor->rst = devm_reset_control_get(&pdev->dev, "sor"); + if (IS_ERR(sor->rst)) { + err = PTR_ERR(sor->rst); + dev_err(&pdev->dev, "failed to get reset control: %d\n", + err); + goto remove; + } } sor->clk = devm_clk_get(&pdev->dev, NULL); @@ -2449,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; 
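As in the DSI and HDMI hunks above, the probe path that follows sets the driver data before enabling runtime PM, since the suspend/resume callbacks look it up via dev_get_drvdata(); the brick-clock registration is additionally bracketed by pm_runtime_get_sync()/pm_runtime_put(), presumably because registering the clock invokes its .get_parent() op, which reads SOR_CLK_CNTRL. A minimal sketch of that ordering, with illustrative foo_* names rather than the driver's own:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <linux/slab.h>

	struct foo { struct device *dev; };

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return -ENOMEM;

		foo->dev = &pdev->dev;

		/* drvdata must be valid before the first PM callback can run */
		platform_set_drvdata(pdev, foo);
		pm_runtime_enable(&pdev->dev);

		/* resume the hardware around any register access in probe */
		pm_runtime_get_sync(&pdev->dev);
		/* ... read or write registers, e.g. to register clocks ... */
		pm_runtime_put(&pdev->dev);

		return 0;
	}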
} + if (sor->soc->supports_hdmi || sor->soc->supports_dp) { + sor->clk_src = devm_clk_get(&pdev->dev, "source"); + if (IS_ERR(sor->clk_src)) { + err = PTR_ERR(sor->clk_src); + dev_err(sor->dev, "failed to get source clock: %d\n", + err); + goto remove; + } + } + sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); if (IS_ERR(sor->clk_parent)) { err = PTR_ERR(sor->clk_parent); @@ -2470,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } + platform_set_drvdata(pdev, sor); + pm_runtime_enable(&pdev->dev); + + pm_runtime_get_sync(&pdev->dev); + sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick"); + pm_runtime_put(&pdev->dev); + + if (IS_ERR(sor->clk_brick)) { + err = PTR_ERR(sor->clk_brick); + dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err); + goto remove; + } + INIT_LIST_HEAD(&sor->client.list); sor->client.ops = &sor_client_ops; sor->client.dev = &pdev->dev; @@ -2481,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } - platform_set_drvdata(pdev, sor); - return 0; remove: @@ -2498,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev) struct tegra_sor *sor = platform_get_drvdata(pdev); int err; + pm_runtime_disable(&pdev->dev); + err = host1x_client_unregister(&sor->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", @@ -2516,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int tegra_sor_suspend(struct device *dev) +{ + struct tegra_sor *sor = dev_get_drvdata(dev); + int err; + + if (sor->rst) { + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(sor->clk); + + return 0; +} + +static int tegra_sor_resume(struct device *dev) +{ + struct tegra_sor *sor = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + return err; + } + + usleep_range(1000, 2000); + + if (sor->rst) { + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + clk_disable_unprepare(sor->clk); + return err; + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops tegra_sor_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL) +}; + struct platform_driver tegra_sor_driver = { .driver = { .name = "tegra-sor", .of_match_table = tegra_sor_of_match, + .pm = &tegra_sor_pm_ops, }, .probe = tegra_sor_probe, .remove = tegra_sor_remove, diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h index 2d31d027e3f6..865c73b48968 100644 --- a/drivers/gpu/drm/tegra/sor.h +++ b/drivers/gpu/drm/tegra/sor.h @@ -27,6 +27,9 @@ #define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17) #define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17) #define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17) +#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17) +#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17) +#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17) #define SOR_STATE_ASY_VSYNCPOL (1 << 13) #define SOR_STATE_ASY_HSYNCPOL (1 << 12) #define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8) diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index f60a1ec84fa4..28fed7e206d0 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -2,7 +2,6 @@ config DRM_TILCDC tristate "DRM 
Support for TI LCDC Display Controller" depends on DRM && OF && ARM select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 79027b1c64d3..107c8bd04f6d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); - drm_handle_vblank(dev, 0); + drm_crtc_handle_vblank(crtc); if (!skip_event) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 709bc903524d..d27809372d54 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = { .load = tilcdc_load, .unload = tilcdc_unload, .lastclose = tilcdc_lastclose, - .set_busid = drm_platform_set_busid, .irq_handler = tilcdc_irq, .irq_preinstall = tilcdc_irq_preinstall, .irq_postinstall = tilcdc_irq_postinstall, @@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = tilcdc_enable_vblank, .disable_vblank = tilcdc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a71cf98c655f..4054d804fe06 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref) BUG_ON(bo->mem.mm_node != NULL); BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); - - if (bo->ttm) - ttm_tt_destroy(bo->ttm); + ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); + fence_put(bo->moving); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); @@ -360,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { @@ -396,8 +396,7 @@ moved: out_err: new_man = &bdev->man[bo->mem.mem_type]; - if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { - ttm_tt_unbind(bo->ttm); + if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -418,11 +417,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, NULL); - if (bo->ttm) { - ttm_tt_unbind(bo->ttm); - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } + ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; ttm_bo_mem_put(bo, &bo->mem); ww_mutex_unlock (&bo->resv->lock); @@ -688,15 +684,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); - - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) { - pr_err("Failed to expire sync object before buffer eviction\n"); - } - goto out; - } - lockdep_assert_held(&bo->resv->lock.base); evict_mem = bo->mem; @@ -720,7 +707,7 @@ static int 
ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait_gpu); - if (ret) { + if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_bo_mem_put(bo, &evict_mem); @@ -800,6 +787,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) EXPORT_SYMBOL(ttm_bo_mem_put); /** + * Add the last move fence to the BO and reserve a new shared slot. + */ +static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, + struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct fence *fence; + int ret; + + spin_lock(&man->move_lock); + fence = fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + reservation_object_add_shared_fence(bo->resv, fence); + + ret = reservation_object_reserve_shared(bo->resv); + if (unlikely(ret)) + return ret; + + fence_put(bo->moving); + bo->moving = fence; + } + + return 0; +} + +/** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ @@ -825,10 +840,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, if (unlikely(ret != 0)) return ret; } while (1); - if (mem->mm_node == NULL) - return -ENOMEM; mem->mem_type = mem_type; - return 0; + return ttm_bo_add_move_fence(bo, man, mem); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -898,6 +911,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool has_erestartsys = false; int i, ret; + ret = reservation_object_reserve_shared(bo->resv); + if (unlikely(ret)) + return ret; + mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; @@ -931,9 +948,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret)) return ret; - - if (mem->mm_node) + + if (mem->mm_node) { + ret = ttm_bo_add_move_fence(bo, man, mem); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + return ret; + } break; + } } if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { @@ -1000,20 +1023,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, lockdep_assert_held(&bo->resv->lock.base); - /* - * Don't wait for the BO on initial allocation. This is important when - * the BO has an imported reservation object. - */ - if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) { - /* - * FIXME: It's possible to pipeline buffer moves. - * Have the driver move function wait for idle when necessary, - * instead of doing it here. 
- */ - ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); - if (ret) - return ret; - } mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; @@ -1166,7 +1175,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; - bo->priv_flags = 0; + bo->moving = NULL; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->persistent_swap_storage = persistent_swap_storage; bo->acc_size = acc_size; @@ -1278,6 +1287,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; + struct fence *fence; int ret; /* @@ -1298,6 +1308,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); } spin_unlock(&glob->lru_lock); + + spin_lock(&man->move_lock); + fence = fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + ret = fence_wait(fence, false); + fence_put(fence); + if (ret) { + if (allow_errors) { + return ret; + } else { + pr_err("Cleanup eviction failed\n"); + } + } + } + return 0; } @@ -1317,6 +1344,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } + fence_put(man->move); man->use_type = false; man->has_type = false; @@ -1362,6 +1390,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, man->io_reserve_fastpath = true; man->use_io_reserve_lru = false; mutex_init(&man->io_reserve_mutex); + spin_lock_init(&man->move_lock); INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); @@ -1380,6 +1409,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, man->size = p_size; INIT_LIST_HEAD(&man->lru); + man->move = NULL; return 0; } @@ -1573,47 +1603,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual); int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait) { - struct reservation_object_list *fobj; - struct reservation_object *resv; - struct fence *excl; - long timeout = 15 * HZ; - int i; - - resv = bo->resv; - fobj = reservation_object_get_list(resv); - excl = reservation_object_get_excl(resv); - if (excl) { - if (!fence_is_signaled(excl)) { - if (no_wait) - return -EBUSY; - - timeout = fence_wait_timeout(excl, - interruptible, timeout); - } - } - - for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) { - struct fence *fence; - fence = rcu_dereference_protected(fobj->shared[i], - reservation_object_held(resv)); - - if (!fence_is_signaled(fence)) { - if (no_wait) - return -EBUSY; - - timeout = fence_wait_timeout(fence, - interruptible, timeout); - } - } + long timeout = no_wait ? 0 : 15 * HZ; + timeout = reservation_object_wait_timeout_rcu(bo->resv, true, + interruptible, timeout); if (timeout < 0) return timeout; if (timeout == 0) return -EBUSY; - reservation_object_add_excl_fence(resv, NULL); - clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); + reservation_object_add_excl_fence(bo->resv, NULL); return 0; } EXPORT_SYMBOL(ttm_bo_wait); @@ -1683,14 +1683,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) ttm_bo_list_ref_sub(bo, put_count, true); /** - * Wait for GPU, then move to system cached. 
+ * Move to system cached */ - ret = ttm_bo_wait(bo, false, false); - - if (unlikely(ret != 0)) - goto out; - if ((bo->mem.placement & swap_placement) != swap_placement) { struct ttm_mem_reg evict_mem; @@ -1705,6 +1700,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) goto out; } + /** + * Make sure BO is idle. + */ + + ret = ttm_bo_wait(bo, false, false); + if (unlikely(ret != 0)) + goto out; + ttm_bo_unmap_virtual(bo); /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d9831559706e..2df602a35f92 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -321,7 +321,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, bool interruptible, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -337,6 +338,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, unsigned long add = 0; int dir; + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); if (ret) return ret; @@ -401,8 +406,7 @@ out2: *old_mem = *new_mem; new_mem->mm_node = NULL; - if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { - ttm_tt_unbind(ttm); + if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(ttm); bo->ttm = NULL; } @@ -462,6 +466,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); + fbo->moving = NULL; drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); @@ -634,7 +639,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct fence *fence, bool evict, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -649,9 +653,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && - (bo->ttm != NULL)) { - ttm_tt_unbind(bo->ttm); + if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -665,7 +667,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * operation has completed. */ - set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); + fence_put(bo->moving); + bo->moving = fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -694,3 +697,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, return 0; } EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); + +int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, + struct fence *fence, bool evict, + struct ttm_mem_reg *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_reg *old_mem = &bo->mem; + + struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; + struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; + + int ret; + + reservation_object_add_excl_fence(bo->resv, fence); + + if (!evict) { + struct ttm_buffer_object *ghost_obj; + + /** + * This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. 
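+		 *
+		 * The move fence is also installed as the ghost object's
+		 * exclusive fence below, so the ghost's delayed destruction
+		 * cannot run before the copy has completed.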
+		 */
+
+		fence_put(bo->moving);
+		bo->moving = fence_get(fence);
+
+		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+		if (ret)
+			return ret;
+
+		reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+		/**
+		 * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+		 * bo to be unbound and destroyed.
+		 */
+
+		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+			ghost_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+
+		ttm_bo_unreserve(ghost_obj);
+		ttm_bo_unref(&ghost_obj);
+
+	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+		/**
+		 * BO doesn't have a TTM we need to bind/unbind. Just remember
+		 * this eviction and free up the allocation.
+		 */
+
+		spin_lock(&from->move_lock);
+		if (!from->move || fence_is_later(fence, from->move)) {
+			fence_put(from->move);
+			from->move = fence_get(fence);
+		}
+		spin_unlock(&from->move_lock);
+
+		ttm_bo_free_old_node(bo);
+
+		fence_put(bo->moving);
+		bo->moving = fence_get(fence);
+
+	} else {
+		/**
+		 * Last resort, wait for the move to be completed.
+		 *
+		 * Should never happen in practice.
+		 */
+
+		ret = ttm_bo_wait(bo, false, false);
+		if (ret)
+			return ret;
+
+		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
+		}
+		ttm_bo_free_old_node(bo);
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3216878bced3..a6ed9d5e5167 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 
-	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+	if (likely(!bo->moving))
 		goto out_unlock;
 
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	ret = ttm_bo_wait(bo, false, true);
-	if (likely(ret == 0))
-		goto out_unlock;
+	if (fence_is_signaled(bo->moving))
+		goto out_clear;
 
 	/*
 	 * If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) ttm_bo_wait(bo, true, false);
+		(void) fence_wait(bo->moving, true);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = ttm_bo_wait(bo, true, false);
-	if (unlikely(ret != 0))
+	ret = fence_wait(bo->moving, true);
+	if (unlikely(ret != 0)) {
+		ret = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; + goto out_unlock; + } + +out_clear: + fence_put(bo->moving); + bo->moving = NULL; out_unlock: return ret; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 077ae9b2865d..bc5aa573f466 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -166,12 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); void ttm_tt_destroy(struct ttm_tt *ttm) { - if (unlikely(ttm == NULL)) + if (ttm == NULL) return; - if (ttm->state == tt_bound) { - ttm_tt_unbind(ttm); - } + ttm_tt_unbind(ttm); if (ttm->state == tt_unbound) ttm_tt_unpopulate(ttm); @@ -298,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL); - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = shmem_read_mapping_page(swap_space, i); @@ -347,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) } else swap_storage = persistent_swap_storage; - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = ttm->pages[i]; diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index 613ab0622d6e..1616ec4f4d84 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -4,12 +4,7 @@ config DRM_UDL depends on USB_SUPPORT depends on USB_ARCH_HAS_HCD select USB - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - select FB_DEFERRED_IO select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER help This is a KMS driver for the USB displaylink video adapters. Say M/Y to add support for these devices via drm/kms interfaces. diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index c20408940cd0..17d34e0edbdd 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface) struct drm_device *dev = usb_get_intfdata(interface); drm_kms_helper_poll_disable(dev); - drm_connector_unregister_all(dev); udl_fbdev_unplug(dev); udl_drop_usb(dev); drm_unplug_dev(dev); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index b87afee44995..f92ea9579674 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); if (event) - drm_send_vblank_event(dev, 0, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); crtc->primary->fb = fb; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index e5a9d3aaf45f..59adcf8532dd 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev) /* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it. - * - * Note that this is called with the struct_mutex held. 
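+ *
+ * This now runs without struct_mutex (the driver switched to
+ * gem_free_object_unlocked); the BO cache takes its own lock internally.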
 */
 void vc4_free_object(struct drm_gem_object *gem_bo)
 {
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0f18b76c7906..8fc2b731b59a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,12 +46,17 @@ struct vc4_crtc {
 	const struct vc4_crtc_data *data;
 	void __iomem *regs;
 
+	/* Timestamp at start of vblank irq - unaffected by lock delays. */
+	ktime_t t_vblank;
+
 	/* Which HVS channel we're using for our CRTC. */
 	int channel;
 
 	u8 lut_r[256];
 	u8 lut_g[256];
 	u8 lut_b[256];
+	/* Size in pixels of the COB memory allocated to this CRTC. */
+	u32 cob_size;
 
 	struct drm_pending_vblank_event *event;
 };
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
 }
 #endif
 
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+			    unsigned int flags, int *vpos, int *hpos,
+			    ktime_t *stime, ktime_t *etime,
+			    const struct drm_display_mode *mode)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+	u32 val;
+	int fifo_lines;
+	int vblank_lines;
+	int ret = 0;
+
+	/*
+	 * XXX Doesn't work well in interlaced mode yet, partially due
+	 * to problems in vc4 kms or drm core interlaced mode handling,
+	 * so disable for now in interlaced mode.
+	 */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return ret;
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	/*
+	 * Read vertical scanline which is currently composed for our
+	 * pixelvalve by the HVS, and also the scaler status.
+	 */
+	val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Vertical position of hvs composed scanline. */
+	*vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+	/* No hpos info available. */
+	if (hpos)
+		*hpos = 0;
+
+	/* This is the offset we need for translating hvs -> pv scanout pos. */
+	fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+	if (fifo_lines > 0)
+		ret |= DRM_SCANOUTPOS_VALID;
+
+	/* HVS more than fifo_lines into frame for compositing? */
+	if (*vpos > fifo_lines) {
+		/*
+		 * We are in active scanout and can get some meaningful results
+		 * from HVS. The actual PV scanout cannot trail behind more
+		 * than fifo_lines as that is the fifo's capacity. Assume that
+		 * in active scanout the HVS and PV work in lockstep wrt. HVS
+		 * refilling the fifo and PV consuming from the fifo, i.e.
+		 * whenever the PV consumes and frees up a scanline in the
+		 * fifo, the HVS will immediately refill it, therefore
+		 * incrementing vpos. Therefore we choose HVS read position -
+		 * fifo size in scanlines as an estimate of the real scanout
+		 * position of the PV.
+		 */
+		*vpos -= fifo_lines + 1;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			*vpos /= 2;
+
+		ret |= DRM_SCANOUTPOS_ACCURATE;
+		return ret;
+	}
+
+	/*
+	 * Less: This happens when we are in vblank and the HVS, after getting
+	 * the VSTART restart signal from the PV, just started refilling its
+	 * fifo with new lines from the top-most lines of the new framebuffers.
+	 * The PV does not scan out in vblank, so does not remove lines from
+	 * the fifo, so the fifo will be full quickly and the HVS has to pause.
+	 * We can't get meaningful readings wrt. scanline position of the PV
+	 * and need to make things up in an approximate but consistent way.
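+	 *
+	 * The approach below: when called from the vblank irq, report a
+	 * vpos of -vblank_lines together with the irq entry timestamp;
+	 * otherwise report vpos = 0 so callers use the query time as-is.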
+	 */
+	ret |= DRM_SCANOUTPOS_IN_VBLANK;
+	vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+	if (flags & DRM_CALLED_FROM_VBLIRQ) {
+		/*
+		 * Assume the irq handler got called close to the first
+		 * line of vblank, so the PV has about a full vblank's
+		 * worth of scanlines to go, and as a base timestamp use
+		 * the one taken at entry into the vblank irq handler, so
+		 * it is not affected by random delays due to lock
+		 * contention on event_lock or vblank_time lock in
+		 * the core.
+		 */
+		*vpos = -vblank_lines;
+
+		if (stime)
+			*stime = vc4_crtc->t_vblank;
+		if (etime)
+			*etime = vc4_crtc->t_vblank;
+
+		/*
+		 * If the HVS fifo is not yet full then we know for certain
+		 * we are at the very beginning of vblank, as the hvs just
+		 * started refilling, and the stime and etime timestamps
+		 * truly correspond to start of vblank.
+		 */
+		if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+			ret |= DRM_SCANOUTPOS_ACCURATE;
+	} else {
+		/*
+		 * No clue where we are inside vblank. Return a vpos of zero,
+		 * which will cause calling code to just return the etime
+		 * timestamp uncorrected. At least this is no worse than the
+		 * standard fallback.
+		 */
+		*vpos = 0;
+	}
+
+	return ret;
+}
+
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+				  int *max_error, struct timeval *vblank_time,
+				  unsigned flags)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+	struct drm_crtc *crtc = &vc4_crtc->base;
+	struct drm_crtc_state *state = crtc->state;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+						     vblank_time, flags,
+						     &state->adjusted_mode);
+}
+
 static void vc4_crtc_destroy(struct drm_crtc *crtc)
 {
 	drm_crtc_cleanup(crtc);
@@ -175,20 +318,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
 		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
 }
 
-static void
+static int
 vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		   uint32_t start, uint32_t size)
+		   uint32_t size)
 {
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	u32 i;
 
-	for (i = start; i < start + size; i++) {
+	for (i = 0; i < size; i++) {
 		vc4_crtc->lut_r[i] = r[i] >> 8;
 		vc4_crtc->lut_g[i] = g[i] >> 8;
 		vc4_crtc->lut_b[i] = b[i] >> 8;
 	}
 
 	vc4_crtc_lut_load(crtc);
+
+	return 0;
 }
 
 static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +540,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane;
 	unsigned long flags;
+	const struct drm_plane_state *plane_state;
 	u32 dlist_count = 0;
 	int ret;
 
@@ -404,18 +550,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	if (hweight32(state->connector_mask) > 1)
 		return -EINVAL;
 
-	drm_atomic_crtc_state_for_each_plane(plane, state) {
-		struct drm_plane_state *plane_state =
-			state->state->plane_states[drm_plane_index(plane)];
-
-		/* plane might not have changed, in which case take
-		 * current state:
-		 */
-		if (!plane_state)
-			plane_state = plane->state;
-
+	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
 		dlist_count += vc4_plane_dlist_size(plane_state);
-	}
 
 	dlist_count++; /* Account for SCALER_CTL0_END.
*/ @@ -526,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) irqreturn_t ret = IRQ_NONE; if (stat & PV_INT_VFP_START) { + vc4_crtc->t_vblank = ktime_get(); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); drm_crtc_handle_vblank(&vc4_crtc->base); vc4_crtc_handle_page_flip(vc4_crtc); @@ -730,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm, } } +static void +vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc) +{ + struct drm_device *drm = vc4_crtc->base.dev; + struct vc4_dev *vc4 = to_vc4_dev(drm); + u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel)); + /* Top/base are supposed to be 4-pixel aligned, but the + * Raspberry Pi firmware fills the low bits (which are + * presumably ignored). + */ + u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; + u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; + + vc4_crtc->cob_size = top - base + 4; +} + static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -806,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) crtc->cursor = cursor_plane; } + vc4_crtc_get_cob_allocation(vc4_crtc); + CRTC_WRITE(PV_INTEN, 0); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); ret = devm_request_irq(dev, platform_get_irq(pdev, 0), diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 9817dbfa4ac3..275fedbdbd9e 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -vc4_dpi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_dpi_connector *dpi_connector = - to_vc4_dpi_connector(connector); - return dpi_connector->encoder; -} - static const struct drm_connector_funcs vc4_dpi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_dpi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { .get_modes = vc4_dpi_connector_get_modes, - .best_encoder = vc4_dpi_connector_best_encoder, }; static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, @@ -236,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, { struct drm_connector *connector = NULL; struct vc4_dpi_connector *dpi_connector; - int ret = 0; dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector), GFP_KERNEL); - if (!dpi_connector) { - ret = -ENOMEM; - goto fail; - } + if (!dpi_connector) + return ERR_PTR(-ENOMEM); + connector = &dpi_connector->base; dpi_connector->encoder = dpi->encoder; @@ -260,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, drm_mode_connector_attach_encoder(connector, dpi->encoder); return connector; - - fail: - if (connector) - vc4_dpi_connector_destroy(connector); - - return ERR_PTR(ret); } static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = { diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 250ed7e3754c..8b42d31a7f0e 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include "drm_fb_cma_helper.h" #include "uapi/drm/vc4_drm.h" @@ -43,12 +44,54 @@ 
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index) return map; } +static int vc4_get_param_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_vc4_get_param *args = data; + int ret; + + if (args->pad != 0) + return -EINVAL; + + switch (args->param) { + case DRM_VC4_PARAM_V3D_IDENT0: + ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); + if (ret) + return ret; + args->value = V3D_READ(V3D_IDENT0); + pm_runtime_put(&vc4->v3d->pdev->dev); + break; + case DRM_VC4_PARAM_V3D_IDENT1: + ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); + if (ret) + return ret; + args->value = V3D_READ(V3D_IDENT1); + pm_runtime_put(&vc4->v3d->pdev->dev); + break; + case DRM_VC4_PARAM_V3D_IDENT2: + ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); + if (ret) + return ret; + args->value = V3D_READ(V3D_IDENT2); + pm_runtime_put(&vc4->v3d->pdev->dev); + break; + case DRM_VC4_PARAM_SUPPORTS_BRANCHES: + args->value = true; + break; + default: + DRM_DEBUG("Unknown parameter %d\n", args->param); + return -EINVAL; + } + + return 0; +} + static void vc4_lastclose(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (vc4->fbdev) - drm_fbdev_cma_restore_mode(vc4->fbdev); + drm_fbdev_cma_restore_mode(vc4->fbdev); } static const struct file_operations vc4_drm_fops = { @@ -74,6 +117,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver vc4_drm_driver = { @@ -92,6 +136,8 @@ static struct drm_driver vc4_drm_driver = { .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, .get_vblank_counter = drm_vblank_no_hw_counter, + .get_scanout_position = vc4_crtc_get_scanoutpos, + .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp, #if defined(CONFIG_DEBUG_FS) .debugfs_init = vc4_debugfs_init, @@ -99,7 +145,7 @@ static struct drm_driver vc4_drm_driver = { #endif .gem_create_object = vc4_create_object, - .gem_free_object = vc4_free_object, + .gem_free_object_unlocked = vc4_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -176,7 +222,6 @@ static int vc4_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm; - struct drm_connector *connector; struct vc4_dev *vc4; int ret = 0; @@ -196,8 +241,6 @@ static int vc4_drm_bind(struct device *dev) vc4_bo_cache_init(drm); drm_mode_config_init(drm); - if (ret) - goto unref; vc4_gem_init(drm); @@ -211,27 +254,14 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto unbind_all; - /* Connector registration has to occur after DRM device - * registration, because it creates sysfs entries based on the - * DRM device. 
- */ - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_connector_register(connector); - if (ret) - goto unregister; - } - vc4_kms_load(drm); return 0; -unregister: - drm_dev_unregister(drm); unbind_all: component_unbind_all(dev, drm); gem_destroy: vc4_gem_destroy(drm); -unref: drm_dev_unref(drm); vc4_bo_cache_destroy(drm); return ret; @@ -259,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = { static struct platform_driver *const component_drivers[] = { &vc4_hdmi_driver, &vc4_dpi_driver, - &vc4_crtc_driver, &vc4_hvs_driver, + &vc4_crtc_driver, &vc4_v3d_driver, }; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 37cac59401d7..489e3de0c050 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -355,6 +355,9 @@ struct vc4_validated_shader_info { uint32_t uniforms_src_size; uint32_t num_texture_samples; struct vc4_texture_sample_info *texture_samples; + + uint32_t num_uniform_addr_offsets; + uint32_t *uniform_addr_offsets; }; /** @@ -415,6 +418,13 @@ extern struct platform_driver vc4_crtc_driver; int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id); void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id); int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); +int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, + unsigned int flags, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); +int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id, + int *max_error, struct timeval *vblank_time, + unsigned flags); /* vc4_debugfs.c */ int vc4_debugfs_init(struct drm_minor *minor); @@ -469,7 +479,7 @@ int vc4_kms_load(struct drm_device *dev); struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type); u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); -u32 vc4_plane_dlist_size(struct drm_plane_state *state); +u32 vc4_plane_dlist_size(const struct drm_plane_state *state); void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 46899d6de675..6155e8aca1c6 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) { unsigned int i; - mutex_lock(&dev->struct_mutex); for (i = 0; i < state->user_state.bo_count; i++) - drm_gem_object_unreference(state->bo[i]); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(state->bo[i]); kfree(state); } @@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned i; - /* Need the struct lock for drm_gem_object_unreference(). 
*/ - mutex_lock(&dev->struct_mutex); if (exec->bo) { for (i = 0; i < exec->bo_count; i++) - drm_gem_object_unreference(&exec->bo[i]->base); + drm_gem_object_unreference_unlocked(&exec->bo[i]->base); kfree(exec->bo); } @@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); - drm_gem_object_unreference(&bo->base.base); + drm_gem_object_unreference_unlocked(&bo->base.base); } - mutex_unlock(&dev->struct_mutex); mutex_lock(&vc4->power_lock); if (--vc4->power_refcount == 0) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fd2644d231ff..4452f3631cac 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder * -vc4_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_hdmi_connector *hdmi_connector = - to_vc4_hdmi_connector(connector); - return hdmi_connector->encoder; -} - static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_hdmi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { .get_modes = vc4_hdmi_connector_get_modes, - .best_encoder = vc4_hdmi_connector_best_encoder, }; static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, @@ -465,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(hdmi->hd_regs)) return PTR_ERR(hdmi->hd_regs); - ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); - if (!ddc_node) { - DRM_ERROR("Failed to find ddc node in device tree\n"); - return -ENODEV; - } - hdmi->pixel_clock = devm_clk_get(dev, "pixel"); if (IS_ERR(hdmi->pixel_clock)) { DRM_ERROR("Failed to get pixel clock\n"); @@ -482,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(hdmi->hsm_clock); } + ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); + if (!ddc_node) { + DRM_ERROR("Failed to find ddc node in device tree\n"); + return -ENODEV; + } + hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); if (!hdmi->ddc) { DRM_DEBUG("Failed to get ddc i2c adapter by node\n"); return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 861a623bc185..4ac894d993cd 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (vc4->fbdev) - drm_fbdev_cma_hotplug_event(vc4->fbdev); + drm_fbdev_cma_hotplug_event(vc4->fbdev); } struct vc4_commit { @@ -111,6 +110,8 @@ static int vc4_atomic_commit(struct drm_device *dev, int i; uint64_t wait_seqno = 0; struct vc4_commit *c; + struct drm_plane *plane; + struct drm_plane_state *new_state; c = commit_init(state); if (!c) @@ -138,13 +139,7 @@ static int vc4_atomic_commit(struct drm_device *dev, return ret; } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - + for_each_plane_in_state(state, plane, new_state, i) { if ((plane->state->fb != 
new_state->fb) && new_state->fb) { struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(new_state->fb, 0); @@ -160,7 +155,7 @@ static int vc4_atomic_commit(struct drm_device *dev, * the software side now. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 4037b52fde31..29e4b400e25e 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -94,6 +94,14 @@ static const struct hvs_format { .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true, }, { + .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true, + }, + { + .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false, + }, + { .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565, .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false, }, @@ -690,9 +698,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist) return vc4_state->dlist_count; } -u32 vc4_plane_dlist_size(struct drm_plane_state *state) +u32 vc4_plane_dlist_size(const struct drm_plane_state *state) { - struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + const struct vc4_plane_state *vc4_state = + container_of(state, typeof(*vc4_state), base); return vc4_state->dlist_count; } diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h index d5c2f3c85ebb..f4e795a0d3f6 100644 --- a/drivers/gpu/drm/vc4/vc4_qpu_defines.h +++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h @@ -70,7 +70,7 @@ enum qpu_raddr { QPU_R_ELEM_QPU = 38, QPU_R_NOP, QPU_R_XY_PIXEL_COORD = 41, - QPU_R_MS_REV_FLAGS = 41, + QPU_R_MS_REV_FLAGS = 42, QPU_R_VPM = 48, QPU_R_VPM_LD_BUSY, QPU_R_VPM_LD_WAIT, @@ -230,6 +230,15 @@ enum qpu_unpack_r4 { #define QPU_COND_MUL_SHIFT 46 #define QPU_COND_MUL_MASK QPU_MASK(48, 46) +#define QPU_BRANCH_COND_SHIFT 52 +#define QPU_BRANCH_COND_MASK QPU_MASK(55, 52) + +#define QPU_BRANCH_REL ((uint64_t)1 << 51) +#define QPU_BRANCH_REG ((uint64_t)1 << 50) + +#define QPU_BRANCH_RADDR_A_SHIFT 45 +#define QPU_BRANCH_RADDR_A_MASK QPU_MASK(49, 45) + #define QPU_SF ((uint64_t)1 << 45) #define QPU_WADDR_ADD_SHIFT 38 @@ -261,4 +270,10 @@ enum qpu_unpack_r4 { #define QPU_OP_ADD_SHIFT 24 #define QPU_OP_ADD_MASK QPU_MASK(28, 24) +#define QPU_LOAD_IMM_SHIFT 0 +#define QPU_LOAD_IMM_MASK QPU_MASK(31, 0) + +#define QPU_BRANCH_TARGET_SHIFT 0 +#define QPU_BRANCH_TARGET_MASK QPU_MASK(31, 0) + #endif /* VC4_QPU_DEFINES_H */ diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index f99eece4cc97..160942a9180e 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -366,7 +366,6 @@ # define SCALER_DISPBKGND_FILL BIT(24) #define SCALER_DISPSTAT0 0x00000048 -#define SCALER_DISPBASE0 0x0000004c # define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30) # define SCALER_DISPSTATX_MODE_SHIFT 30 # define SCALER_DISPSTATX_MODE_DISABLED 0 @@ -375,6 +374,24 @@ # define SCALER_DISPSTATX_MODE_EOF 3 # define SCALER_DISPSTATX_FULL BIT(29) # define SCALER_DISPSTATX_EMPTY BIT(28) +# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12) +# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12 +# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0) +# define SCALER_DISPSTATX_LINE_SHIFT 0 + +#define SCALER_DISPBASE0 0x0000004c +/* Last pixel in the COB (display FIFO memory) 
allocated to this HVS + * channel. Must be 4-pixel aligned (and thus 4 pixels less than the + * next COB base). + */ +# define SCALER_DISPBASEX_TOP_MASK VC4_MASK(31, 16) +# define SCALER_DISPBASEX_TOP_SHIFT 16 +/* First pixel in the COB (display FIFO memory) allocated to this HVS + * channel. Must be 4-pixel aligned. + */ +# define SCALER_DISPBASEX_BASE_MASK VC4_MASK(15, 0) +# define SCALER_DISPBASEX_BASE_SHIFT 0 + #define SCALER_DISPCTRL1 0x00000050 #define SCALER_DISPBKGND1 0x00000054 #define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \ @@ -385,6 +402,9 @@ (x) * (SCALER_DISPSTAT1 - \ SCALER_DISPSTAT0)) #define SCALER_DISPBASE1 0x0000005c +#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \ + (x) * (SCALER_DISPBASE1 - \ + SCALER_DISPBASE0)) #define SCALER_DISPCTRL2 0x00000060 #define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \ (x) * (SCALER_DISPCTRL1 - \ diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 24c2c746e8f3..9ce1d0adf882 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev, uint32_t src_offset = *(uint32_t *)(pkt_u + o); uint32_t *texture_handles_u; void *uniform_data_u; - uint32_t tex; + uint32_t tex, uni; *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; @@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev, } } + /* Fill in the uniform slots that need this shader's + * start-of-uniforms address (used for resetting the uniform + * stream in the presence of control flow). + */ + for (uni = 0; + uni < validated_shader->num_uniform_addr_offsets; + uni++) { + uint32_t o = validated_shader->uniform_addr_offsets[uni]; + ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p; + } + *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p; exec->uniforms_u += validated_shader->uniforms_src_size; diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index f67124b4c534..46527e989ce3 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -39,7 +39,17 @@ #include "vc4_drv.h" #include "vc4_qpu_defines.h" +#define LIVE_REG_COUNT (32 + 32 + 4) + struct vc4_shader_validation_state { + /* Current IP being validated. */ + uint32_t ip; + + /* IP at the end of the BO, do not read shader[max_ip] */ + uint32_t max_ip; + + uint64_t *shader; + struct vc4_texture_sample_info tmu_setup[2]; int tmu_write_count[2]; @@ -49,8 +59,30 @@ struct vc4_shader_validation_state { * * This is used for the validation of direct address memory reads. */ - uint32_t live_min_clamp_offsets[32 + 32 + 4]; - bool live_max_clamp_regs[32 + 32 + 4]; + uint32_t live_min_clamp_offsets[LIVE_REG_COUNT]; + bool live_max_clamp_regs[LIVE_REG_COUNT]; + uint32_t live_immediates[LIVE_REG_COUNT]; + + /* Bitfield of which IPs are used as branch targets. + * + * Used for validation that the uniform stream is updated at the right + * points and clearing the texturing/clamping state. + */ + unsigned long *branch_targets; + + /* Set when entering a basic block, and cleared when the uniform + * address update is found. This is used to make sure that we don't + * read uniforms when the address is undefined. + */ + bool needs_uniform_address_update; + + /* Set when we find a backwards branch. 
If the branch is backwards,
+	 * the target is probably doing an address reset to read uniforms,
+	 * and so we need to be sure that a uniforms address is present in the
+	 * stream, even if the shader didn't need to read uniforms in later
+	 * basic blocks.
+	 */
+	bool needs_uniform_address_for_loop;
 };
 
 static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
 }
 
 static bool
-check_tmu_write(uint64_t inst,
-		struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		struct vc4_shader_validation_state *validation_state,
 		bool is_mul)
 {
+	uint64_t inst = validation_state->shader[validation_state->ip];
 	uint32_t waddr = (is_mul ?
 			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
 			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
 		return false;
 	}
 
-	/* We assert that the the clamped address is the first
+	/* We assert that the clamped address is the first
 	 * argument, and the UBO base address is the second argument.
 	 * This is arbitrary, but simpler than supporting flipping the
 	 * two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
 	/* Since direct uses a RADDR uniform reference, it will get counted in
 	 * check_instruction_reads()
 	 */
-	if (!is_direct)
+	if (!is_direct) {
+		if (validation_state->needs_uniform_address_update) {
+			DRM_ERROR("Texturing with undefined uniform address\n");
+			return false;
+		}
+
 		validated_shader->uniforms_size += 4;
+	}
 
 	if (submit) {
 		if (!record_texture_sample(validated_shader,
@@ -227,23 +265,138 @@ check_tmu_write(uint64_t inst,
 	return true;
 }
 
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+	uint32_t o = validated_shader->num_uniform_addr_offsets;
+	uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+	validated_shader->uniform_addr_offsets =
+		krealloc(validated_shader->uniform_addr_offsets,
+			 (o + 1) *
+			 sizeof(*validated_shader->uniform_addr_offsets),
+			 GFP_KERNEL);
+	if (!validated_shader->uniform_addr_offsets)
+		return false;
+
+	validated_shader->uniform_addr_offsets[o] = num_uniforms;
+	validated_shader->num_uniform_addr_offsets++;
+
+	return true;
+}
+
 static bool
-check_reg_write(uint64_t inst,
-		struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+			       struct vc4_shader_validation_state *validation_state,
+			       bool is_mul)
+{
+	uint64_t inst = validation_state->shader[validation_state->ip];
+	u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+	u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+	u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+	u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+	/* We want our reset to be pointing at whatever uniform follows the
+	 * uniforms base address.
+	 */
+	u32 expected_offset = validated_shader->uniforms_size + 4;
+
+	/* We only support absolute uniform address changes, and we
+	 * require that they be in the current basic block before any
+	 * of its uniform reads.
+	 *
+	 * One could potentially emit more efficient QPU code, by
+	 * noticing that (say) an if statement does uniform control
+	 * flow for all threads and that the if reads the same number
+	 * of uniforms on each side. However, this scheme is easy to
+	 * validate so it's all we allow for now.
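+	 *
+	 * Concretely, the only accepted shape is an unconditional,
+	 * unpacked ADD of a load-immediate result (holding the offset of
+	 * the uniform that follows the new base) to a uniform read,
+	 * i.e. in illustrative QPU mnemonics:
+	 *
+	 *     load_imm rX, <expected_offset>
+	 *     add      unif_addr, rX, unif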
+ */ + + if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) { + DRM_ERROR("uniforms address change must be " + "normal math\n"); + return false; + } + + if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { + DRM_ERROR("Uniform address reset must be an ADD.\n"); + return false; + } + + if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) { + DRM_ERROR("Uniform address reset must be unconditional.\n"); + return false; + } + + if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP && + !(inst & QPU_PM)) { + DRM_ERROR("No packing allowed on uniforms reset\n"); + return false; + } + + if (add_lri == -1) { + DRM_ERROR("First argument of uniform address write must be " + "an immediate value.\n"); + return false; + } + + if (validation_state->live_immediates[add_lri] != expected_offset) { + DRM_ERROR("Resetting uniforms with offset %db instead of %db\n", + validation_state->live_immediates[add_lri], + expected_offset); + return false; + } + + if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && + !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { + DRM_ERROR("Second argument of uniform address write must be " + "a uniform.\n"); + return false; + } + + validation_state->needs_uniform_address_update = false; + validation_state->needs_uniform_address_for_loop = false; + return require_uniform_address_uniform(validated_shader); +} + +static bool +check_reg_write(struct vc4_validated_shader_info *validated_shader, struct vc4_shader_validation_state *validation_state, bool is_mul) { + uint64_t inst = validation_state->shader[validation_state->ip]; uint32_t waddr = (is_mul ? QPU_GET_FIELD(inst, QPU_WADDR_MUL) : QPU_GET_FIELD(inst, QPU_WADDR_ADD)); + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + bool ws = inst & QPU_WS; + bool is_b = is_mul ^ ws; + u32 lri = waddr_to_live_reg_index(waddr, is_b); + + if (lri != -1) { + uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD); + uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL); + + if (sig == QPU_SIG_LOAD_IMM && + QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP && + ((is_mul && cond_mul == QPU_COND_ALWAYS) || + (!is_mul && cond_add == QPU_COND_ALWAYS))) { + validation_state->live_immediates[lri] = + QPU_GET_FIELD(inst, QPU_LOAD_IMM); + } else { + validation_state->live_immediates[lri] = ~0; + } + } switch (waddr) { case QPU_W_UNIFORMS_ADDRESS: - /* XXX: We'll probably need to support this for reladdr, but - * it's definitely a security-related one. 
- */ - DRM_ERROR("uniforms address load unsupported\n"); - return false; + if (is_b) { + DRM_ERROR("relative uniforms address change " + "unsupported\n"); + return false; + } + + return validate_uniform_address_write(validated_shader, + validation_state, + is_mul); case QPU_W_TLB_COLOR_MS: case QPU_W_TLB_COLOR_ALL: @@ -261,7 +414,7 @@ check_reg_write(uint64_t inst, case QPU_W_TMU1_T: case QPU_W_TMU1_R: case QPU_W_TMU1_B: - return check_tmu_write(inst, validated_shader, validation_state, + return check_tmu_write(validated_shader, validation_state, is_mul); case QPU_W_HOST_INT: @@ -294,10 +447,10 @@ check_reg_write(uint64_t inst, } static void -track_live_clamps(uint64_t inst, - struct vc4_validated_shader_info *validated_shader, +track_live_clamps(struct vc4_validated_shader_info *validated_shader, struct vc4_shader_validation_state *validation_state) { + uint64_t inst = validation_state->shader[validation_state->ip]; uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD); uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); @@ -369,10 +522,10 @@ track_live_clamps(uint64_t inst, } static bool -check_instruction_writes(uint64_t inst, - struct vc4_validated_shader_info *validated_shader, +check_instruction_writes(struct vc4_validated_shader_info *validated_shader, struct vc4_shader_validation_state *validation_state) { + uint64_t inst = validation_state->shader[validation_state->ip]; uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); bool ok; @@ -382,20 +535,44 @@ check_instruction_writes(uint64_t inst, return false; } - ok = (check_reg_write(inst, validated_shader, validation_state, - false) && - check_reg_write(inst, validated_shader, validation_state, - true)); + ok = (check_reg_write(validated_shader, validation_state, false) && + check_reg_write(validated_shader, validation_state, true)); - track_live_clamps(inst, validated_shader, validation_state); + track_live_clamps(validated_shader, validation_state); return ok; } static bool -check_instruction_reads(uint64_t inst, - struct vc4_validated_shader_info *validated_shader) +check_branch(uint64_t inst, + struct vc4_validated_shader_info *validated_shader, + struct vc4_shader_validation_state *validation_state, + int ip) +{ + int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET); + uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); + uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); + + if ((int)branch_imm < 0) + validation_state->needs_uniform_address_for_loop = true; + + /* We don't want to have to worry about validation of this, and + * there's no need for it. + */ + if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) { + DRM_ERROR("branch instruction at %d wrote a register.\n", + validation_state->ip); + return false; + } + + return true; +} + +static bool +check_instruction_reads(struct vc4_validated_shader_info *validated_shader, + struct vc4_shader_validation_state *validation_state) { + uint64_t inst = validation_state->shader[validation_state->ip]; uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); @@ -407,40 +584,204 @@ check_instruction_reads(uint64_t inst, * already be OOM. 
*/ validated_shader->uniforms_size += 4; + + if (validation_state->needs_uniform_address_update) { + DRM_ERROR("Uniform read with undefined uniform " + "address\n"); + return false; + } + } + + return true; +} + +/* Make sure that all branches are absolute and point within the shader, and + * note their targets for later. + */ +static bool +vc4_validate_branches(struct vc4_shader_validation_state *validation_state) +{ + uint32_t max_branch_target = 0; + bool found_shader_end = false; + int ip; + int shader_end_ip = 0; + int last_branch = -2; + + for (ip = 0; ip < validation_state->max_ip; ip++) { + uint64_t inst = validation_state->shader[ip]; + int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET); + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + uint32_t after_delay_ip = ip + 4; + uint32_t branch_target_ip; + + if (sig == QPU_SIG_PROG_END) { + shader_end_ip = ip; + found_shader_end = true; + continue; + } + + if (sig != QPU_SIG_BRANCH) + continue; + + if (ip - last_branch < 4) { + DRM_ERROR("Branch at %d during delay slots\n", ip); + return false; + } + last_branch = ip; + + if (inst & QPU_BRANCH_REG) { + DRM_ERROR("branching from register relative " + "not supported\n"); + return false; + } + + if (!(inst & QPU_BRANCH_REL)) { + DRM_ERROR("relative branching required\n"); + return false; + } + + /* The actual branch target is the instruction after the delay + * slots, plus whatever byte offset is in the low 32 bits of + * the instruction. Make sure we're not branching beyond the + * end of the shader object. + */ + if (branch_imm % sizeof(inst) != 0) { + DRM_ERROR("branch target not aligned\n"); + return false; + } + + branch_target_ip = after_delay_ip + (branch_imm >> 3); + if (branch_target_ip >= validation_state->max_ip) { + DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n", + ip, branch_target_ip, + validation_state->max_ip); + return false; + } + set_bit(branch_target_ip, validation_state->branch_targets); + + /* Make sure that the non-branching path is also not outside + * the shader. + */ + if (after_delay_ip >= validation_state->max_ip) { + DRM_ERROR("Branch at %d continues past shader end " + "(%d/%d)\n", + ip, after_delay_ip, validation_state->max_ip); + return false; + } + set_bit(after_delay_ip, validation_state->branch_targets); + max_branch_target = max(max_branch_target, after_delay_ip); + + /* There are two delay slots after program end is signaled + * that are still executed, then we're finished. + */ + if (found_shader_end && ip == shader_end_ip + 2) + break; + } + + if (max_branch_target > shader_end_ip) { + DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); + return false; } return true; } +/* Resets any known state for the shader, used when we may be branched to from + * multiple locations in the program (or at shader start). 
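+ *
+ * (Editor's note: the reset is deliberately conservative. TMU setup,
+ * clamp tracking and known immediates all go back to "unknown", so any
+ * property proved before a branch target has to be re-proved after it.)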
+ */ +static void +reset_validation_state(struct vc4_shader_validation_state *validation_state) +{ + int i; + + for (i = 0; i < 8; i++) + validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0; + + for (i = 0; i < LIVE_REG_COUNT; i++) { + validation_state->live_min_clamp_offsets[i] = ~0; + validation_state->live_max_clamp_regs[i] = false; + validation_state->live_immediates[i] = ~0; + } +} + +static bool +texturing_in_progress(struct vc4_shader_validation_state *validation_state) +{ + return (validation_state->tmu_write_count[0] != 0 || + validation_state->tmu_write_count[1] != 0); +} + +static bool +vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state) +{ + uint32_t ip = validation_state->ip; + + if (!test_bit(ip, validation_state->branch_targets)) + return true; + + if (texturing_in_progress(validation_state)) { + DRM_ERROR("Branch target landed during TMU setup\n"); + return false; + } + + /* Reset our live values tracking, since this instruction may have + * multiple predecessors. + * + * One could potentially do analysis to determine that, for + * example, all predecessors have a live max clamp in the same + * register, but we don't bother with that. + */ + reset_validation_state(validation_state); + + /* Since we've entered a basic block from potentially multiple + * predecessors, we need the uniforms address to be updated before any + * uniforms are read. We require that after any branch point, the next + * uniform to be loaded is a uniform address offset. That uniform's + * offset will be marked by the uniform address register write + * validation, or by a one-off end-of-program check. + */ + validation_state->needs_uniform_address_update = true; + + return true; +} + struct vc4_validated_shader_info * vc4_validate_shader(struct drm_gem_cma_object *shader_obj) { bool found_shader_end = false; int shader_end_ip = 0; - uint32_t ip, max_ip; - uint64_t *shader; - struct vc4_validated_shader_info *validated_shader; + uint32_t ip; + struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; - int i; memset(&validation_state, 0, sizeof(validation_state)); + validation_state.shader = shader_obj->vaddr; + validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t); - for (i = 0; i < 8; i++) - validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0; - for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++) - validation_state.live_min_clamp_offsets[i] = ~0; + reset_validation_state(&validation_state); - shader = shader_obj->vaddr; - max_ip = shader_obj->base.size / sizeof(uint64_t); + validation_state.branch_targets = + kcalloc(BITS_TO_LONGS(validation_state.max_ip), + sizeof(unsigned long), GFP_KERNEL); + if (!validation_state.branch_targets) + goto fail; validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL); if (!validated_shader) - return NULL; + goto fail; + + if (!vc4_validate_branches(&validation_state)) + goto fail; - for (ip = 0; ip < max_ip; ip++) { - uint64_t inst = shader[ip]; + for (ip = 0; ip < validation_state.max_ip; ip++) { + uint64_t inst = validation_state.shader[ip]; uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + validation_state.ip = ip; + + if (!vc4_handle_branch_target(&validation_state)) + goto fail; + switch (sig) { case QPU_SIG_NONE: case QPU_SIG_WAIT_FOR_SCOREBOARD: @@ -450,13 +791,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) case QPU_SIG_LOAD_TMU1: case QPU_SIG_PROG_END: case QPU_SIG_SMALL_IMM: - if
(!check_instruction_writes(inst, validated_shader, + if (!check_instruction_writes(validated_shader, &validation_state)) { DRM_ERROR("Bad write at ip %d\n", ip); goto fail; } - if (!check_instruction_reads(inst, validated_shader)) + if (!check_instruction_reads(validated_shader, + &validation_state)) goto fail; if (sig == QPU_SIG_PROG_END) { @@ -467,13 +809,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) break; case QPU_SIG_LOAD_IMM: - if (!check_instruction_writes(inst, validated_shader, + if (!check_instruction_writes(validated_shader, &validation_state)) { DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); goto fail; } break; + case QPU_SIG_BRANCH: + if (!check_branch(inst, validated_shader, + &validation_state, ip)) + goto fail; + break; default: DRM_ERROR("Unsupported QPU signal %d at " "instruction %d\n", sig, ip); @@ -487,13 +834,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) break; } - if (ip == max_ip) { + if (ip == validation_state.max_ip) { DRM_ERROR("shader failed to terminate before " "shader BO end at %zd\n", shader_obj->base.size); goto fail; } + /* If we did a backwards branch and we haven't emitted a uniforms + * reset since then, we still need the uniforms stream to have the + * uniforms address available so that the backwards branch can do its + * uniforms reset. + * + * We could potentially prove that the backwards branch doesn't + * contain any uses of uniforms until program exit, but that doesn't + * seem to be worth the trouble. + */ + if (validation_state.needs_uniform_address_for_loop) { + if (!require_uniform_address_uniform(validated_shader)) + goto fail; + validated_shader->uniforms_size += 4; + } + /* Again, no chance of integer overflow here because the worst case * scenario is 8 bytes of uniforms plus handles per 8-byte * instruction. 
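Editor's note: the branch checks above pack the QPU's conventions into a few expressions. A standalone sketch of the target computation, under the same assumptions the code itself uses (instructions are 8 bytes, and a branch has three delay slots, so execution resumes at ip + 4):

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the arithmetic in vc4_validate_branches(): branch_imm is a
	 * byte offset applied after the delay slots, and >> 3 converts bytes
	 * to an instruction index.
	 */
	static bool branch_target(uint32_t ip, int32_t branch_imm,
				  uint32_t max_ip, uint32_t *target)
	{
		uint32_t after_delay_ip = ip + 4;

		if (branch_imm % 8 != 0)
			return false;	/* target not instruction aligned */

		*target = after_delay_ip + (branch_imm >> 3);
		return *target < max_ip && after_delay_ip < max_ip;
	}

For example, a branch at ip 10 with branch_imm 16 lands at 10 + 4 + 16/8 = 16.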
@@ -502,9 +864,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) (validated_shader->uniforms_size + 4 * validated_shader->num_texture_samples); + kfree(validation_state.branch_targets); + return validated_shader; fail: + kfree(validation_state.branch_targets); if (validated_shader) { kfree(validated_shader->texture_samples); kfree(validated_shader); diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile index 3f4c7b842028..bfcdea1330e6 100644 --- a/drivers/gpu/drm/vgem/Makefile +++ b/drivers/gpu/drm/vgem/Makefile @@ -1,4 +1,4 @@ ccflags-y := -Iinclude/drm -vgem-y := vgem_drv.o +vgem-y := vgem_drv.o vgem_fence.o obj-$(CONFIG_DRM_VGEM) += vgem.o diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 341f9be3dde6..c15bafb06665 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -42,81 +42,38 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -void vgem_gem_put_pages(struct drm_vgem_gem_object *obj) -{ - drm_gem_put_pages(&obj->base, obj->pages, false, false); - obj->pages = NULL; -} - static void vgem_gem_free_object(struct drm_gem_object *obj) { struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); - drm_gem_free_mmap_offset(obj); - - if (vgem_obj->use_dma_buf && obj->dma_buf) { - dma_buf_put(obj->dma_buf); - obj->dma_buf = NULL; - } - drm_gem_object_release(obj); - - if (vgem_obj->pages) - vgem_gem_put_pages(vgem_obj); - - vgem_obj->pages = NULL; - kfree(vgem_obj); } -int vgem_gem_get_pages(struct drm_vgem_gem_object *obj) -{ - struct page **pages; - - if (obj->pages || obj->use_dma_buf) - return 0; - - pages = drm_gem_get_pages(&obj->base); - if (IS_ERR(pages)) { - return PTR_ERR(pages); - } - - obj->pages = pages; - - return 0; -} - static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_vgem_gem_object *obj = vma->vm_private_data; - loff_t num_pages; - pgoff_t page_offset; - int ret; - /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> - PAGE_SHIFT; - - num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); - - if (page_offset > num_pages) - return VM_FAULT_SIGBUS; - - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, - obj->pages[page_offset]); - switch (ret) { - case 0: - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - case -EBUSY: - return VM_FAULT_RETRY; - case -EFAULT: - case -EINVAL: - return VM_FAULT_SIGBUS; - default: - WARN_ON(1); - return VM_FAULT_SIGBUS; + unsigned long vaddr = (unsigned long)vmf->virtual_address; + struct page *page; + + page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping, + (vaddr - vma->vm_start) >> PAGE_SHIFT); + if (!IS_ERR(page)) { + vmf->page = page; + return 0; + } else switch (PTR_ERR(page)) { + case -ENOSPC: + case -ENOMEM: + return VM_FAULT_OOM; + case -EBUSY: + return VM_FAULT_RETRY; + case -EFAULT: + case -EINVAL: + return VM_FAULT_SIGBUS; + default: + WARN_ON_ONCE(PTR_ERR(page)); + return VM_FAULT_SIGBUS; } } @@ -126,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = { .close = drm_gem_vm_close, }; +static int vgem_open(struct drm_device *dev, struct drm_file *file) +{ + struct vgem_file *vfile; + int ret; + + vfile = kzalloc(sizeof(*vfile), GFP_KERNEL); + if (!vfile) + return -ENOMEM; + + file->driver_priv = vfile; + + ret = vgem_fence_open(vfile); + if (ret) { + kfree(vfile); + return ret; + } + + return 0; +} + +static void vgem_preclose(struct drm_device *dev, struct 
drm_file *file) +{ + struct vgem_file *vfile = file->driver_priv; + + vgem_fence_close(vfile); + kfree(vfile); +} + /* ioctls */ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, @@ -134,57 +119,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, unsigned long size) { struct drm_vgem_gem_object *obj; - struct drm_gem_object *gem_object; - int err; - - size = roundup(size, PAGE_SIZE); + int ret; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) return ERR_PTR(-ENOMEM); - gem_object = &obj->base; - - err = drm_gem_object_init(dev, gem_object, size); - if (err) - goto out; - - err = vgem_gem_get_pages(obj); - if (err) - goto out; - - err = drm_gem_handle_create(file, gem_object, handle); - if (err) - goto handle_out; + ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE)); + if (ret) + goto err_free; - drm_gem_object_unreference_unlocked(gem_object); + ret = drm_gem_handle_create(file, &obj->base, handle); + drm_gem_object_unreference_unlocked(&obj->base); + if (ret) + goto err; - return gem_object; + return &obj->base; -handle_out: - drm_gem_object_release(gem_object); -out: +err_free: kfree(obj); - return ERR_PTR(err); +err: + return ERR_PTR(ret); } static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct drm_gem_object *gem_object; - uint64_t size; - uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + u64 pitch, size; + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); size = args->height * pitch; if (size == 0) return -EINVAL; gem_object = vgem_gem_create(dev, file, &args->handle, size); - - if (IS_ERR(gem_object)) { - DRM_DEBUG_DRIVER("object creation failed\n"); + if (IS_ERR(gem_object)) return PTR_ERR(gem_object); - } args->size = gem_object->size; args->pitch = pitch; @@ -194,26 +165,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) +static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset) { - int ret = 0; struct drm_gem_object *obj; + int ret; obj = drm_gem_object_lookup(file, handle); if (!obj) return -ENOENT; + if (!obj->filp) { + ret = -EINVAL; + goto unref; + } + ret = drm_gem_create_mmap_offset(obj); if (ret) goto unref; - BUG_ON(!obj->filp); - - obj->filp->private_data = obj; - *offset = drm_vma_node_offset_addr(&obj->vma_node); - unref: drm_gem_object_unreference_unlocked(obj); @@ -221,26 +192,134 @@ unref: } static struct drm_ioctl_desc vgem_ioctls[] = { + DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; +static int vgem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long flags = vma->vm_flags; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + /* Keep the WC mmapping set by drm_gem_mmap() but our pages + * are ordinary and not special.
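+ *
+ * (Editor's note: drm_gem_mmap() assumes a PFN mapping and also sets
+ * VM_IO | VM_PFNMAP; vgem backs the mapping with ordinary shmem pages
+ * via its fault handler, so restoring the saved flags drops those two
+ * bits while the write-combining page protection is left in place.)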
+ */ + vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP; + return 0; +} + static const struct file_operations vgem_driver_fops = { .owner = THIS_MODULE, .open = drm_open, - .mmap = drm_gem_mmap, + .mmap = vgem_mmap, .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, .release = drm_release, }; +static int vgem_prime_pin(struct drm_gem_object *obj) +{ + long n_pages = obj->size >> PAGE_SHIFT; + struct page **pages; + + /* Flush the object from the CPU cache so that importers can rely + * on coherent indirect access via the exported dma-address. + */ + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + drm_clflush_pages(pages, n_pages); + drm_gem_put_pages(obj, pages, true, false); + + return 0; +} + +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct sg_table *st; + struct page **pages; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return ERR_CAST(pages); + + st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT); + drm_gem_put_pages(obj, pages, false, false); + + return st; +} + +static void *vgem_prime_vmap(struct drm_gem_object *obj) +{ + long n_pages = obj->size >> PAGE_SHIFT; + struct page **pages; + void *addr; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return NULL; + + addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); + drm_gem_put_pages(obj, pages, false, false); + + return addr; +} + +static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + vunmap(vaddr); +} + +static int vgem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int ret; + + if (obj->size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!obj->filp) + return -ENODEV; + + ret = obj->filp->f_op->mmap(obj->filp, vma); + if (ret) + return ret; + + fput(vma->vm_file); + vma->vm_file = get_file(obj->filp); + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + + return 0; +} + static struct drm_driver vgem_driver = { - .driver_features = DRIVER_GEM, - .gem_free_object = vgem_gem_free_object, + .driver_features = DRIVER_GEM | DRIVER_PRIME, + .open = vgem_open, + .preclose = vgem_preclose, + .gem_free_object_unlocked = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, + .num_ioctls = ARRAY_SIZE(vgem_ioctls), .fops = &vgem_driver_fops, + .dumb_create = vgem_gem_dumb_create, .dumb_map_offset = vgem_gem_dumb_map, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .gem_prime_pin = vgem_prime_pin, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = vgem_prime_get_sg_table, + .gem_prime_vmap = vgem_prime_vmap, + .gem_prime_vunmap = vgem_prime_vunmap, + .gem_prime_mmap = vgem_prime_mmap, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -248,7 +327,7 @@ static struct drm_driver vgem_driver = { .minor = DRIVER_MINOR, }; -struct drm_device *vgem_device; +static struct drm_device *vgem_device; static int __init vgem_init(void) { @@ -260,10 +339,7 @@ static int __init vgem_init(void) goto out; } - drm_dev_set_unique(vgem_device, "vgem"); - ret = drm_dev_register(vgem_device, 0); - if (ret) goto out_unref; @@ -285,5 +361,6 @@ module_init(vgem_init); module_exit(vgem_exit); MODULE_AUTHOR("Red Hat, Inc."); +MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 
e9f92f7ee275..1f8798ad329c 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -32,15 +32,25 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include <uapi/drm/vgem_drm.h> + +struct vgem_file { + struct idr fence_idr; + struct mutex fence_mutex; +}; + #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) struct drm_vgem_gem_object { struct drm_gem_object base; - struct page **pages; - bool use_dma_buf; }; -/* vgem_drv.c */ -extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); -extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); +int vgem_fence_open(struct vgem_file *file); +int vgem_fence_attach_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); +int vgem_fence_signal_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file); +void vgem_fence_close(struct vgem_file *file); #endif diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c new file mode 100644 index 000000000000..5c57c1ffa1f9 --- /dev/null +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -0,0 +1,283 @@ +/* + * Copyright 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software") + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-buf.h> +#include <linux/reservation.h> + +#include "vgem_drv.h" + +#define VGEM_FENCE_TIMEOUT (10*HZ) + +struct vgem_fence { + struct fence base; + struct spinlock lock; + struct timer_list timer; +}; + +static const char *vgem_fence_get_driver_name(struct fence *fence) +{ + return "vgem"; +} + +static const char *vgem_fence_get_timeline_name(struct fence *fence) +{ + return "unbound"; +} + +static bool vgem_fence_signaled(struct fence *fence) +{ + return false; +} + +static bool vgem_fence_enable_signaling(struct fence *fence) +{ + return true; +} + +static void vgem_fence_release(struct fence *base) +{ + struct vgem_fence *fence = container_of(base, typeof(*fence), base); + + del_timer_sync(&fence->timer); + fence_free(&fence->base); +} + +static void vgem_fence_value_str(struct fence *fence, char *str, int size) +{ + snprintf(str, size, "%u", fence->seqno); +} + +static void vgem_fence_timeline_value_str(struct fence *fence, char *str, + int size) +{ + snprintf(str, size, "%u", fence_is_signaled(fence) ?
fence->seqno : 0); +} + +static const struct fence_ops vgem_fence_ops = { + .get_driver_name = vgem_fence_get_driver_name, + .get_timeline_name = vgem_fence_get_timeline_name, + .enable_signaling = vgem_fence_enable_signaling, + .signaled = vgem_fence_signaled, + .wait = fence_default_wait, + .release = vgem_fence_release, + + .fence_value_str = vgem_fence_value_str, + .timeline_value_str = vgem_fence_timeline_value_str, +}; + +static void vgem_fence_timeout(unsigned long data) +{ + struct vgem_fence *fence = (struct vgem_fence *)data; + + fence_signal(&fence->base); +} + +static struct fence *vgem_fence_create(struct vgem_file *vfile, + unsigned int flags) +{ + struct vgem_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return NULL; + + spin_lock_init(&fence->lock); + fence_init(&fence->base, &vgem_fence_ops, &fence->lock, + fence_context_alloc(1), 1); + + setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); + + /* We force the fence to expire within 10s to prevent driver hangs */ + mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT); + + return &fence->base; +} + +static int attach_dmabuf(struct drm_device *dev, + struct drm_gem_object *obj) +{ + struct dma_buf *dmabuf; + + if (obj->dma_buf) + return 0; + + dmabuf = dev->driver->gem_prime_export(dev, obj, 0); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + obj->dma_buf = dmabuf; + drm_gem_object_reference(obj); + return 0; +} + +/* + * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH): + * + * Create and attach a fence to the vGEM handle. This fence is then exposed + * via the dma-buf reservation object and visible to consumers of the exported + * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the + * vGEM buffer is being written to by the client and is exposed as an exclusive + * fence, otherwise the fence indicates the client is currently reading from the + * buffer and all future writes should wait for the client to signal its + * completion. Note that if a conflicting fence is already on the dma-buf (i.e. + * an exclusive fence when adding a read, or any fence when adding a write), + * -EBUSY is reported. Serialisation between operations should be handled + * by waiting upon the dma-buf. + * + * This returns the handle for the new fence that must be signaled within 10 + * seconds (or otherwise it will automatically expire). See + * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL). + * + * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
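+ *
+ * A minimal userspace sketch of the intended flow (illustrative only;
+ * fd and bo_handle are assumed, write_to_buffer() stands in for the
+ * client's actual work, and the structures and ioctl numbers are those
+ * of the vgem_drm.h uapi header included above):
+ *
+ *	struct drm_vgem_fence_attach attach = {
+ *		.handle = bo_handle,
+ *		.flags = VGEM_FENCE_WRITE,
+ *	};
+ *	struct drm_vgem_fence_signal signal = { 0 };
+ *
+ *	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach) == 0) {
+ *		write_to_buffer(fd, bo_handle);
+ *		signal.fence = attach.out_fence;
+ *		ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
+ *	}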
+ */ +int vgem_fence_attach_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_vgem_fence_attach *arg = data; + struct vgem_file *vfile = file->driver_priv; + struct reservation_object *resv; + struct drm_gem_object *obj; + struct fence *fence; + int ret; + + if (arg->flags & ~VGEM_FENCE_WRITE) + return -EINVAL; + + if (arg->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(file, arg->handle); + if (!obj) + return -ENOENT; + + ret = attach_dmabuf(dev, obj); + if (ret) + goto err; + + fence = vgem_fence_create(vfile, arg->flags); + if (!fence) { + ret = -ENOMEM; + goto err; + } + + /* Check for a conflicting fence */ + resv = obj->dma_buf->resv; + if (!reservation_object_test_signaled_rcu(resv, + arg->flags & VGEM_FENCE_WRITE)) { + ret = -EBUSY; + goto err_fence; + } + + /* Expose the fence via the dma-buf */ + ret = 0; + mutex_lock(&resv->lock.base); + if (arg->flags & VGEM_FENCE_WRITE) + reservation_object_add_excl_fence(resv, fence); + else if ((ret = reservation_object_reserve_shared(resv)) == 0) + reservation_object_add_shared_fence(resv, fence); + mutex_unlock(&resv->lock.base); + + /* Record the fence in our idr for later signaling */ + if (ret == 0) { + mutex_lock(&vfile->fence_mutex); + ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL); + mutex_unlock(&vfile->fence_mutex); + if (ret > 0) { + arg->out_fence = ret; + ret = 0; + } + } +err_fence: + if (ret) { + fence_signal(fence); + fence_put(fence); + } +err: + drm_gem_object_unreference_unlocked(obj); + return ret; +} + +/* + * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL): + * + * Signal and consume a fence earlier attached to a vGEM handle using + * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH). + * + * All fences must be signaled within 10s of attachment or otherwise they + * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT). + * + * Signaling a fence indicates to all consumers of the dma-buf that the + * client has completed the operation associated with the fence, and that the + * buffer is then ready for consumption. + * + * If the fence does not exist (or has already been signaled by the client), + * vgem_fence_signal_ioctl returns -ENOENT.
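+ *
+ * (Editor's note: signaling is a consuming operation. The fence is taken
+ * out of the per-file idr before it is signaled, so a second signal on
+ * the same fence handle returns -ENOENT rather than signaling twice.)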
+ */ +int vgem_fence_signal_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct vgem_file *vfile = file->driver_priv; + struct drm_vgem_fence_signal *arg = data; + struct fence *fence; + int ret = 0; + + if (arg->flags) + return -EINVAL; + + mutex_lock(&vfile->fence_mutex); + fence = idr_replace(&vfile->fence_idr, NULL, arg->fence); + mutex_unlock(&vfile->fence_mutex); + if (!fence) + return -ENOENT; + if (IS_ERR(fence)) + return PTR_ERR(fence); + + if (fence_is_signaled(fence)) + ret = -ETIMEDOUT; + + fence_signal(fence); + fence_put(fence); + return ret; +} + +int vgem_fence_open(struct vgem_file *vfile) +{ + mutex_init(&vfile->fence_mutex); + idr_init(&vfile->fence_idr); + + return 0; +} + +static int __vgem_fence_idr_fini(int id, void *p, void *data) +{ + fence_signal(p); + fence_put(p); + return 0; +} + +void vgem_fence_close(struct vgem_file *vfile) +{ + idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile); + idr_destroy(&vfile->fence_idr); +} diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 4f20742e7788..a04ef1c992d9 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev, struct via_file_private *file_priv = file->driver_priv; struct via_memblock *entry, *next; - if (!(file->minor->master && file->master->lock.hw_lock)) + if (!(dev->master && file->master->lock.hw_lock)) return; drm_legacy_idlelock_take(&file->master->lock); diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index 9983eadb81b6..e1afc3d3f8d9 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -1,11 +1,7 @@ config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM help This is the virtual GPU driver for virtio. 
It can be used with diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index d4305da88f44..4e192aa2d021 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -29,8 +29,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_atomic_helper.h> -#define XRES_MIN 320 -#define YRES_MIN 200 +#define XRES_MIN 32 +#define YRES_MIN 32 #define XRES_DEF 1024 #define YRES_DEF 768 @@ -38,138 +38,11 @@ #define XRES_MAX 8192 #define YRES_MAX 8192 -static void -virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_output *output) -{ - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = 0; - virtio_gpu_cursor_ping(vgdev, output); -} - -static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height, - int32_t hot_x, int32_t hot_y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; - struct virtio_gpu_fence *fence = NULL; - int ret = 0; - - if (handle == 0) { - virtio_gpu_hide_cursor(vgdev, output); - return 0; - } - - /* lookup the cursor */ - gobj = drm_gem_object_lookup(file_priv, handle); - if (gobj == NULL) - return -ENOENT; - - qobj = gem_to_virtio_gpu_obj(gobj); - - if (!qobj->hw_res_handle) { - ret = -EINVAL; - goto out; - } - - virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0, - cpu_to_le32(64), - cpu_to_le32(64), - 0, 0, &fence); - ret = virtio_gpu_object_reserve(qobj, false); - if (!ret) { - reservation_object_add_excl_fence(qobj->tbo.resv, - &fence->f); - fence_put(&fence->f); - virtio_gpu_object_unreserve(qobj); - virtio_gpu_object_wait(qobj, false); - } - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle); - output->cursor.hot_x = cpu_to_le32(hot_x); - output->cursor.hot_y = cpu_to_le32(hot_y); - virtio_gpu_cursor_ping(vgdev, output); - ret = 0; - -out: - drm_gem_object_unreference_unlocked(gobj); - return ret; -} - -static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); - output->cursor.pos.x = cpu_to_le32(x); - output->cursor.pos.y = cpu_to_le32(y); - virtio_gpu_cursor_ping(vgdev, output); - return 0; -} - -static int virtio_gpu_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_plane *plane = crtc->primary; - struct virtio_gpu_framebuffer *vgfb; - struct virtio_gpu_object *bo; - unsigned long irqflags; - uint32_t handle; - - plane->fb = fb; - vgfb = to_virtio_gpu_framebuffer(plane->fb); - bo = gem_to_virtio_gpu_obj(vgfb->obj); - handle = bo->hw_res_handle; - - DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle, - bo->dumb ? 
", dumb" : "", - crtc->mode.hdisplay, crtc->mode.vdisplay); - if (bo->dumb) { - virtio_gpu_cmd_transfer_to_host_2d - (vgdev, handle, 0, - cpu_to_le32(crtc->mode.hdisplay), - cpu_to_le32(crtc->mode.vdisplay), - 0, 0, NULL); - } - virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - crtc->mode.hdisplay, - crtc->mode.vdisplay, 0, 0); - virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0, - crtc->mode.hdisplay, - crtc->mode.vdisplay); - - if (event) { - spin_lock_irqsave(&crtc->dev->event_lock, irqflags); - drm_send_vblank_event(crtc->dev, -1, event); - spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags); - } - - return 0; -} - static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { - .cursor_set2 = virtio_gpu_crtc_cursor_set, - .cursor_move = virtio_gpu_crtc_cursor_move, .set_config = drm_atomic_helper_set_config, .destroy = drm_crtc_cleanup, - .page_flip = virtio_gpu_page_flip, + .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -180,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb) struct virtio_gpu_framebuffer *virtio_gpu_fb = to_virtio_gpu_framebuffer(fb); - if (virtio_gpu_fb->obj) - drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj); + drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(virtio_gpu_fb); } @@ -267,6 +139,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&crtc->dev->event_lock, flags); if (crtc->state->event) drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } @@ -341,15 +214,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector, return MODE_BAD; } -static struct drm_encoder* -virtio_gpu_best_encoder(struct drm_connector *connector) -{ - struct virtio_gpu_output *virtio_gpu_output = - drm_connector_to_virtio_gpu_output(connector); - - return &virtio_gpu_output->enc; -} - static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { .mode_set = virtio_gpu_enc_mode_set, .enable = virtio_gpu_enc_enable, @@ -359,7 +223,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { .get_modes = virtio_gpu_conn_get_modes, .mode_valid = virtio_gpu_conn_mode_valid, - .best_encoder = virtio_gpu_best_encoder, }; static enum drm_connector_status virtio_gpu_conn_detect( @@ -406,7 +269,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) struct drm_connector *connector = &output->conn; struct drm_encoder *encoder = &output->enc; struct drm_crtc *crtc = &output->crtc; - struct drm_plane *plane; + struct drm_plane *primary, *cursor; output->index = index; if (index == 0) { @@ -415,13 +278,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) output->info.r.height = cpu_to_le32(YRES_DEF); } - plane = virtio_gpu_plane_init(vgdev, index); - if (IS_ERR(plane)) - return PTR_ERR(plane); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, + primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) + return PTR_ERR(cursor); + drm_crtc_init_with_planes(dev, crtc, primary, cursor, 
&virtio_gpu_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); - plane->crtc = crtc; + primary->crtc = crtc; + cursor->crtc = crtc; drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); @@ -458,14 +325,31 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev, ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); if (ret) { kfree(virtio_gpu_fb); - if (obj) - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference_unlocked(obj); return NULL; } return &virtio_gpu_fb->base; } +static void vgdev_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_planes(dev, state, true); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = { + .atomic_commit_tail = vgdev_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .fb_create = virtio_gpu_user_framebuffer_create, .atomic_check = drm_atomic_helper_check, @@ -477,7 +361,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) int i; drm_mode_config_init(vgdev->ddev); - vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers; /* modes will be validated against the framebuffer size */ vgdev->ddev->mode_config.min_width = XRES_MIN; diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c index 88a39165edd5..7f0e93f87a55 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c @@ -27,16 +27,6 @@ #include "virtgpu_drv.h" -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master) -{ - struct pci_dev *pdev = dev->pdev; - - if (pdev) { - return drm_pci_set_busid(dev, master); - } - return 0; -} - static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) { struct apertures_struct *ap; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 3cc7afa77a35..c13f70cfc461 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, - .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, .open = virtio_gpu_driver_open, @@ -143,7 +142,7 @@ static struct drm_driver driver = { .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, - .gem_free_object = virtio_gpu_gem_free_object, + .gem_free_object_unlocked = virtio_gpu_gem_free_object, .gem_open_object = virtio_gpu_gem_object_open, .gem_close_object = virtio_gpu_gem_object_close, .fops = &virtio_gpu_driver_fops, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 0a54f43f846a..b18ef3111f0c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -33,6 +33,7 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include <drm/drm_atomic.h> 
#include <drm/drm_crtc_helper.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -48,7 +49,6 @@ #define DRIVER_PATCHLEVEL 1 /* virtgpu_drm_bus.c */ -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master); int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); struct virtio_gpu_object { @@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtio_gpu_plane.c */ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index); /* virtio_gpu_ttm.c */ diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 70b44a2345ab..925ca25209df 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = { DRM_FORMAT_ABGR8888, }; +static const uint32_t virtio_gpu_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + static void virtio_gpu_plane_destroy(struct drm_plane *plane) { kfree(plane); @@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, return 0; } -static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void virtio_gpu_primary_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); + struct virtio_gpu_output *output = NULL; struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo; uint32_t handle; + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + if (plane->state->fb) { vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); @@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, if (bo->dumb) { virtio_gpu_cmd_transfer_to_host_2d (vgdev, handle, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - plane->state->crtc_x, plane->state->crtc_y, NULL); + cpu_to_le32(plane->state->src_w >> 16), + cpu_to_le32(plane->state->src_h >> 16), + plane->state->src_x >> 16, + plane->state->src_y >> 16, NULL); } } else { handle = 0; } - DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, + DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle, plane->state->crtc_w, plane->state->crtc_h, - plane->state->crtc_x, plane->state->crtc_y); + plane->state->crtc_x, plane->state->crtc_y, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - plane->state->crtc_w, - plane->state->crtc_h, - plane->state->crtc_x, - plane->state->crtc_y); + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_resource_flush(vgdev, handle, - plane->state->crtc_x, - plane->state->crtc_y, - plane->state->crtc_w, - plane->state->crtc_h); + plane->state->src_x >> 16, + plane->state->src_y >> 16, + plane->state->src_w >> 16, + plane->state->src_h >> 16); } +static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; 
+ struct virtio_gpu_output *output = NULL; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_fence *fence = NULL; + struct virtio_gpu_object *bo = NULL; + uint32_t handle; + int ret = 0; -static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->obj); + handle = bo->hw_res_handle; + } else { + handle = 0; + } + + if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { + /* new cursor -- update & wait */ + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + 0, 0, &fence); + ret = virtio_gpu_object_reserve(bo, false); + if (!ret) { + reservation_object_add_excl_fence(bo->tbo.resv, + &fence->f); + fence_put(&fence->f); + fence = NULL; + virtio_gpu_object_unreserve(bo); + virtio_gpu_object_wait(bo, false); + } + } + + if (plane->state->fb != old_state->fb) { + DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->fb ? plane->state->fb->hot_x : 0, + plane->state->fb ? plane->state->fb->hot_y : 0); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); + output->cursor.resource_id = cpu_to_le32(handle); + if (plane->state->fb) { + output->cursor.hot_x = + cpu_to_le32(plane->state->fb->hot_x); + output->cursor.hot_y = + cpu_to_le32(plane->state->fb->hot_y); + } else { + output->cursor.hot_x = cpu_to_le32(0); + output->cursor.hot_y = cpu_to_le32(0); + } + } else { + DRM_DEBUG("move +%d+%d\n", + plane->state->crtc_x, + plane->state->crtc_y); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); + } + output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x); + output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y); + virtio_gpu_cursor_ping(vgdev, output); +} + +static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { + .atomic_check = virtio_gpu_plane_atomic_check, + .atomic_update = virtio_gpu_primary_plane_update, +}; + +static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { .atomic_check = virtio_gpu_plane_atomic_check, - .atomic_update = virtio_gpu_plane_atomic_update, + .atomic_update = virtio_gpu_cursor_plane_update, }; struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index) { struct drm_device *dev = vgdev->ddev; + const struct drm_plane_helper_funcs *funcs; struct drm_plane *plane; - int ret; + const uint32_t *formats; + int ret, nformats; plane = kzalloc(sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); + if (type == DRM_PLANE_TYPE_CURSOR) { + formats = virtio_gpu_cursor_formats; + nformats = ARRAY_SIZE(virtio_gpu_cursor_formats); + funcs = &virtio_gpu_cursor_helper_funcs; + } else { + formats = virtio_gpu_formats; + nformats = ARRAY_SIZE(virtio_gpu_formats); + funcs = &virtio_gpu_primary_helper_funcs; + } ret = drm_universal_plane_init(dev, plane, 1 << index, &virtio_gpu_plane_funcs, - virtio_gpu_formats, - ARRAY_SIZE(virtio_gpu_formats), - DRM_PLANE_TYPE_PRIMARY, NULL); + formats, nformats, + type, NULL); if (ret) goto err_plane_init; - drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); + drm_plane_helper_add(plane, funcs); return plane; 
err_plane_init: diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index a0580815629f..80482ac5f95d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + int ret; + + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + virtio_gpu_move_null(bo, new_mem); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 67cebb23c940..aa04fb0159a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header) struct vmw_cmdbuf_man *man = header->man; u32 val; - if (sizeof(header->handle) > 4) - val = (header->handle >> 32); - else - val = 0; + val = upper_32_bits(header->handle); vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); - val = (header->handle & 0xFFFFFFFFULL); + val = lower_32_bits(header->handle); val |= header->cb_context & SVGA_CB_CONTEXT_MASK; vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 8d528fcf6e96..e8ae3dc476d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1053,15 +1053,14 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev, struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *vmaster; - if (file_priv->minor->type != DRM_MINOR_LEGACY || - !(flags & DRM_AUTH)) + if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) return NULL; ret = mutex_lock_interruptible(&dev->master_mutex); if (unlikely(ret != 0)) return ERR_PTR(-ERESTARTSYS); - if (file_priv->is_master) { + if (drm_is_current_master(file_priv)) { mutex_unlock(&dev->master_mutex); return NULL; } @@ -1240,8 +1239,7 @@ static int vmw_master_set(struct drm_device *dev, } static void vmw_master_drop(struct drm_device *dev, - struct drm_file *file_priv, - bool from_release) + struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 89fb19443a3f..74304b03f9d4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -32,6 +32,7 @@ #include <drm/drmP.h> #include <drm/vmwgfx_drm.h> #include <drm/drm_hashtab.h> +#include <drm/drm_auth.h> #include <linux/suspend.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_object.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1a1a87cbf109..dc5beff2b4aa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, (sw_context->cmd_bounce_size >> 1)); } - if (sw_context->cmd_bounce != NULL) - vfree(sw_context->cmd_bounce); - + vfree(sw_context->cmd_bounce); sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); if (sw_context->cmd_bounce == NULL) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index e959df6ede83..26ac8e80a478 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -46,7 +46,7 @@ struct vmw_fence_manager { bool 
goal_irq_on; /* Protected by @goal_irq_mutex */ bool seqno_valid; /* Protected by @lock, and may not be set to true without the @goal_irq_mutex held. */ - unsigned ctx; + u64 ctx; }; struct vmw_user_fence { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e29da45a2847..bf28ccc150df 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, return 0; } -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, - u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, + u16 *r, u16 *g, u16 *b, + uint32_t size) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); int i; @@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); } + + return 0; } int vmw_du_connector_dpms(struct drm_connector *connector, int mode) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 57203212c501..ff4803c107bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -195,9 +195,9 @@ struct vmw_display_unit { void vmw_du_cleanup(struct vmw_display_unit *du); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); + uint32_t size); int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c index a18db4d5347c..c5d82a8a2ec9 100644 --- a/drivers/gpu/host1x/cdma.c +++ b/drivers/gpu/host1x/cdma.c @@ -96,12 +96,12 @@ fail: */ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2) { - u32 pos = pb->pos; - u32 *p = (u32 *)((void *)pb->mapped + pos); - WARN_ON(pos == pb->fence); + u32 *p = (u32 *)((void *)pb->mapped + pb->pos); + + WARN_ON(pb->pos == pb->fence); *(p++) = op1; *(p++) = op2; - pb->pos = (pos + 8) & (pb->size_bytes - 1); + pb->pos = (pb->pos + 8) & (pb->size_bytes - 1); } /* @@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma, enum cdma_event event) { for (;;) { + struct push_buffer *pb = &cdma->push_buffer; unsigned int space; - if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY) + switch (event) { + case CDMA_EVENT_SYNC_QUEUE_EMPTY: space = list_empty(&cdma->sync_queue) ? 
1 : 0; - else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) { - struct push_buffer *pb = &cdma->push_buffer; + break; + + case CDMA_EVENT_PUSH_BUFFER_SPACE: space = host1x_pushbuffer_space(pb); - } else { + break; + + default: WARN_ON(1); return -EINVAL; } @@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma, mutex_lock(&cdma->lock); continue; } + cdma->event = event; mutex_unlock(&cdma->lock); down(&cdma->sem); mutex_lock(&cdma->lock); } + return 0; } @@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma) /* Start timer on next pending syncpt */ if (job->timeout) cdma_start_timer_locked(cdma, job); + break; } @@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma) /* Pop push buffer slots */ if (job->num_slots) { struct push_buffer *pb = &cdma->push_buffer; + host1x_pushbuffer_pop(pb, job->num_slots); + if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE) signal = true; } @@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma) void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma, struct device *dev) { - u32 restart_addr; - u32 syncpt_incrs; - struct host1x_job *job = NULL; - u32 syncpt_val; struct host1x *host1x = cdma_to_host1x(cdma); + u32 restart_addr, syncpt_incrs, syncpt_val; + struct host1x_job *job = NULL; syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt); @@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma, syncpt_val += syncpt_incrs; } - /* The following sumbits from the same client may be dependent on the + /* + * The following submits from the same client may be dependent on the * failed submit and therefore they may fail. Force a small timeout - * to make the queue cleanup faster */ + * to make the queue cleanup faster.
+ */ list_for_each_entry_from(job, &cdma->sync_queue, list) if (job->client == cdma->timeout.client) @@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma) err = host1x_pushbuffer_init(&cdma->push_buffer); if (err) return err; + return 0; } @@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job) /* init state on first submit with timeout value */ if (!cdma->timeout.initialized) { int err; + err = host1x_hw_cdma_timeout_init(host1x, cdma, job->syncpt_id); if (err) { @@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job) } } } + if (!cdma->running) host1x_hw_cdma_start(host1x, cdma); @@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2) slots_free = host1x_cdma_wait_locked(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE); } + cdma->slots_free = slots_free - 1; cdma->slots_used++; host1x_pushbuffer_push(pb, op1, op2); diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index b4ae3affb987..8f437d924c10 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put); struct host1x_channel *host1x_channel_request(struct device *dev) { struct host1x *host = dev_get_drvdata(dev->parent); - int max_channels = host->info->nb_channels; + unsigned int max_channels = host->info->nb_channels; struct host1x_channel *channel = NULL; - int index, err; + unsigned long index; + int err; mutex_lock(&host->chlist_mutex); diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index ee3d12b51c50..d9330fcc62ad 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...) va_start(args, fmt); len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); va_end(args); + o->fn(o->ctx, o->buf, len); } @@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo) struct output *o = data; mutex_lock(&ch->reflock); + if (ch->refcount) { mutex_lock(&ch->cdma.lock); + if (show_fifo) host1x_hw_show_channel_fifo(m, ch, o); + host1x_hw_show_channel_cdma(m, ch, o); mutex_unlock(&ch->cdma.lock); } + mutex_unlock(&ch->reflock); return 0; @@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo) static void show_syncpts(struct host1x *m, struct output *o) { - int i; + unsigned int i; + host1x_debug_output(o, "---- syncpts ----\n"); + for (i = 0; i < host1x_syncpt_nb_pts(m); i++) { u32 max = host1x_syncpt_read_max(m->syncpt + i); u32 min = host1x_syncpt_load(m->syncpt + i); + if (!min && !max) continue; - host1x_debug_output(o, "id %d (%s) min %d max %d\n", + + host1x_debug_output(o, "id %u (%s) min %d max %d\n", i, m->syncpt[i].name, min, max); } for (i = 0; i < host1x_syncpt_nb_bases(m); i++) { u32 base_val; + base_val = host1x_syncpt_load_wait_base(m->syncpt + i); if (base_val) - host1x_debug_output(o, "waitbase id %d val %d\n", i, + host1x_debug_output(o, "waitbase id %u val %d\n", i, base_val); } @@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused) .fn = write_to_seqfile, .ctx = s }; + show_all(s->private, &o); + return 0; } @@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused) .fn = write_to_seqfile, .ctx = s }; + show_all_no_fifo(s->private, &o); + return 0; } @@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file) } static const struct file_operations 
host1x_debug_all_fops = { - .open = host1x_debug_open_all, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, + .open = host1x_debug_open_all, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, }; static int host1x_debug_open(struct inode *inode, struct file *file) @@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file) } static const struct file_operations host1x_debug_fops = { - .open = host1x_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, + .open = host1x_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, }; static void host1x_debugfs_init(struct host1x *host1x) @@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x) struct output o = { .fn = write_to_printk }; + show_all(host1x, &o); } @@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x) struct output o = { .fn = write_to_printk }; + show_syncpts(host1x, &o); } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index ff348690df94..a62317af76ad 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r) } static const struct host1x_info host1x01_info = { - .nb_channels = 8, - .nb_pts = 32, - .nb_mlocks = 16, - .nb_bases = 8, - .init = host1x01_init, - .sync_offset = 0x3000, - .dma_mask = DMA_BIT_MASK(32), + .nb_channels = 8, + .nb_pts = 32, + .nb_mlocks = 16, + .nb_bases = 8, + .init = host1x01_init, + .sync_offset = 0x3000, + .dma_mask = DMA_BIT_MASK(32), }; static const struct host1x_info host1x02_info = { @@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = { .dma_mask = DMA_BIT_MASK(34), }; -static struct of_device_id host1x_of_match[] = { +static const struct of_device_id host1x_of_match[] = { { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, }, { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, }, { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index dace124994bb..5220510f39da 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -45,7 +45,7 @@ struct host1x_cdma_ops { void (*start)(struct host1x_cdma *cdma); void (*stop)(struct host1x_cdma *cdma); void (*flush)(struct host1x_cdma *cdma); - int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id); + int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt); void (*timeout_destroy)(struct host1x_cdma *cdma); void (*freeze)(struct host1x_cdma *cdma); void (*resume)(struct host1x_cdma *cdma, u32 getptr); @@ -82,21 +82,21 @@ struct host1x_intr_ops { int (*init_host_sync)(struct host1x *host, u32 cpm, void (*syncpt_thresh_work)(struct work_struct *work)); void (*set_syncpt_threshold)( - struct host1x *host, u32 id, u32 thresh); - void (*enable_syncpt_intr)(struct host1x *host, u32 id); - void (*disable_syncpt_intr)(struct host1x *host, u32 id); + struct host1x *host, unsigned int id, u32 thresh); + void (*enable_syncpt_intr)(struct host1x *host, unsigned int id); + void (*disable_syncpt_intr)(struct host1x *host, unsigned int id); void (*disable_all_syncpt_intrs)(struct host1x *host); int (*free_syncpt_irq)(struct host1x *host); }; struct host1x_info { - int nb_channels; /* host1x: num channels supported */ - int nb_pts; /* host1x: num syncpoints supported */ - int nb_bases; /* host1x: num syncpoints supported */ - int nb_mlocks; /* host1x: number of 
mlocks */ - int (*init)(struct host1x *); /* initialize per SoC ops */ - int sync_offset; - u64 dma_mask; /* mask of addressable memory */ + unsigned int nb_channels; /* host1x: number of channels supported */ + unsigned int nb_pts; /* host1x: number of syncpoints supported */ + unsigned int nb_bases; /* host1x: number of syncpoint bases supported */ + unsigned int nb_mlocks; /* host1x: number of mlocks supported */ + int (*init)(struct host1x *host1x); /* initialize per SoC ops */ + unsigned int sync_offset; /* offset of syncpoint registers */ + u64 dma_mask; /* mask of addressable memory */ }; struct host1x { @@ -109,7 +109,6 @@ struct host1x { struct clk *clk; struct mutex intr_mutex; - struct workqueue_struct *intr_wq; int intr_syncpt_irq; const struct host1x_syncpt_ops *syncpt_op; @@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm, } static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host, - u32 id, u32 thresh) + unsigned int id, + u32 thresh) { host->intr_op->set_syncpt_threshold(host, id, thresh); } static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host, - u32 id) + unsigned int id) { host->intr_op->enable_syncpt_intr(host, id); } static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host, - u32 id) + unsigned int id) { host->intr_op->disable_syncpt_intr(host, id); } @@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host) static inline int host1x_hw_channel_init(struct host1x *host, struct host1x_channel *channel, - int chid) + unsigned int id) { - return host->channel_op->init(channel, host, chid); + return host->channel_op->init(channel, host, id); } static inline int host1x_hw_channel_submit(struct host1x *host, @@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host, static inline int host1x_hw_cdma_timeout_init(struct host1x *host, struct host1x_cdma *cdma, - u32 syncpt_id) + unsigned int syncpt) { - return host->cdma_op->timeout_init(cdma, syncpt_id); + return host->cdma_op->timeout_init(cdma, syncpt); } static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host, diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c index 305ea8f3382d..659c1bbfeeba 100644 --- a/drivers/gpu/host1x/hw/cdma_hw.c +++ b/drivers/gpu/host1x/hw/cdma_hw.c @@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr, { struct host1x *host1x = cdma_to_host1x(cdma); struct push_buffer *pb = &cdma->push_buffer; - u32 i; + unsigned int i; for (i = 0; i < syncpt_incrs; i++) host1x_syncpt_incr(cdma->timeout.syncpt); @@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr, &pb->phys, getptr); getptr = (getptr + 8) & (pb->size_bytes - 1); } + wmb(); } @@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma) struct host1x_channel *ch = cdma_to_channel(cdma); mutex_lock(&cdma->lock); + if (cdma->running) { host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY); host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); cdma->running = false; } + mutex_unlock(&cdma->lock); } @@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr) u32 cmdproc_stop; dev_dbg(host1x->dev, - "resuming channel (id %d, DMAGET restart = 0x%x)\n", + "resuming channel (id %u, DMAGET restart = 0x%x)\n", ch->id, getptr); cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); - cmdproc_stop &= ~(BIT(ch->id)); + 
cmdproc_stop &= ~BIT(ch->id); host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); cdma->torndown = false; @@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr) */ static void cdma_timeout_handler(struct work_struct *work) { + u32 prev_cmdproc, cmdproc_stop, syncpt_val; struct host1x_cdma *cdma; struct host1x *host1x; struct host1x_channel *ch; - u32 syncpt_val; - - u32 prev_cmdproc, cmdproc_stop; - cdma = container_of(to_delayed_work(work), struct host1x_cdma, timeout.wq); host1x = cdma_to_host1x(cdma); @@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work) return; } - dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n", - __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name, - syncpt_val, cdma->timeout.syncpt_val); + dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n", + __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name, + syncpt_val, cdma->timeout.syncpt_val); /* stop HW, resetting channel/module */ host1x_hw_cdma_freeze(host1x, cdma); @@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work) /* * Init timeout resources */ -static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id) +static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt) { INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); cdma->timeout.initialized = true; @@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma) { if (cdma->timeout.initialized) cancel_delayed_work(&cdma->timeout.wq); + cdma->timeout.initialized = false; } diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 946c332c3906..5e8df78b7acd 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo, */ for (i = 0; i < words; i += TRACE_MAX_LENGTH) { u32 num_words = min(words - i, TRACE_MAX_LENGTH); + offset += i * sizeof(u32); trace_host1x_cdma_push_gather(dev_name(dev), bo, @@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job) struct host1x_job_gather *g = &job->gathers[i]; u32 op1 = host1x_opcode_gather(g->words); u32 op2 = g->base + g->offset; + trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff); host1x_cdma_push(cdma, op1, op2); } @@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job) { struct host1x *host = dev_get_drvdata(job->channel->dev->parent); struct host1x_syncpt *sp = host->syncpt + job->syncpt_id; - u32 id, value; + unsigned int id; + u32 value; value = host1x_syncpt_read_max(sp); id = sp->base->id; diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c index cc3f1825c735..7a4a3286e4a7 100644 --- a/drivers/gpu/host1x/hw/debug_hw.c +++ b/drivers/gpu/host1x/hw/debug_hw.c @@ -40,8 +40,7 @@ enum { static unsigned int show_channel_command(struct output *o, u32 val) { - unsigned mask; - unsigned subop; + unsigned int mask, subop; switch (val >> 28) { case HOST1X_OPCODE_SETCLASS: @@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val) val >> 6 & 0x3ff, val >> 16 & 0xfff, mask); return hweight8(mask); - } else { - host1x_debug_output(o, "SETCL(class=%03x)\n", - val >> 6 & 0x3ff); - return 0; } + host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff); + return 0; + case HOST1X_OPCODE_INCR: host1x_debug_output(o, "INCR(offset=%03x, [", val >> 16 & 0xfff); @@ -143,7 +141,8 
@@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) struct host1x_job *job; list_for_each_entry(job, &cdma->sync_queue, list) { - int i; + unsigned int i; + host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n", job, job->syncpt_id, job->syncpt_end, job->first_get, job->timeout, @@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host, cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); - host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev)); + host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) || !ch->cdma.push_buffer.mapped) { @@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host, if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == - HOST1X_UCLASS_WAIT_SYNCPT) + HOST1X_UCLASS_WAIT_SYNCPT) host1x_debug_output(o, "waiting on syncpt %d val %d\n", cbread >> 24, cbread & 0xffffff); else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == - HOST1X_CLASS_HOST1X && - HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == - HOST1X_UCLASS_WAIT_SYNCPT_BASE) { - + HOST1X_CLASS_HOST1X && + HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == + HOST1X_UCLASS_WAIT_SYNCPT_BASE) { base = (cbread >> 16) & 0xff; baseval = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); @@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host, u32 val, rd_ptr, wr_ptr, start, end; unsigned int data_count = 0; - host1x_debug_output(o, "%d: fifo:\n", ch->id); + host1x_debug_output(o, "%u: fifo:\n", ch->id); val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); host1x_debug_output(o, "FIFOSTAT %08x\n", val); @@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host, static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) { - int i; + unsigned int i; host1x_debug_output(o, "---- mlocks ----\n"); + for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { u32 owner = host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) - host1x_debug_output(o, "%d: locked by channel %d\n", + host1x_debug_output(o, "%u: locked by channel %u\n", i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner)); else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) - host1x_debug_output(o, "%d: locked by cpu\n", i); + host1x_debug_output(o, "%u: locked by cpu\n", i); else - host1x_debug_output(o, "%d: unlocked\n", i); + host1x_debug_output(o, "%u: unlocked\n", i); } + host1x_debug_output(o, "\n"); } diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index e1e31e9e67cd..dacb8009a605 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c @@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt) host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); - queue_work(host->intr_wq, &syncpt->intr.work); + schedule_work(&syncpt->intr.work); } static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) { struct host1x *host = dev_id; unsigned long reg; - int i, id; + unsigned int i, id; for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) { reg = host1x_sync_readl(host, @@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) { - u32 i; + 
unsigned int i; for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) { host1x_sync_writel(host, 0xffffffffu, @@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) } } -static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, - void (*syncpt_thresh_work)(struct work_struct *)) +static int +_host1x_intr_init_host_sync(struct host1x *host, u32 cpm, + void (*syncpt_thresh_work)(struct work_struct *)) { - int i, err; + unsigned int i; + int err; host1x_hw_intr_disable_all_syncpt_intrs(host); @@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, } static void _host1x_intr_set_syncpt_threshold(struct host1x *host, - u32 id, u32 thresh) + unsigned int id, + u32 thresh) { host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id)); } -static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id) +static void _host1x_intr_enable_syncpt_intr(struct host1x *host, + unsigned int id) { host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id))); } -static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id) +static void _host1x_intr_disable_syncpt_intr(struct host1x *host, + unsigned int id) { host1x_sync_writel(host, BIT_MASK(id), HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); @@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id) static int _host1x_free_syncpt_irq(struct host1x *host) { + unsigned int i; + devm_free_irq(host->dev, host->intr_syncpt_irq, host); - flush_workqueue(host->intr_wq); + + for (i = 0; i < host->info->nb_pts; i++) + cancel_work_sync(&host->syncpt[i].intr.work); + return 0; } diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c index 56e85395ac24..c93f74fcce72 100644 --- a/drivers/gpu/host1x/hw/syncpt_hw.c +++ b/drivers/gpu/host1x/hw/syncpt_hw.c @@ -26,8 +26,9 @@ */ static void syncpt_restore(struct host1x_syncpt *sp) { + u32 min = host1x_syncpt_read_min(sp); struct host1x *host = sp->host; - int min = host1x_syncpt_read_min(sp); + host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id)); } @@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp) static void syncpt_restore_wait_base(struct host1x_syncpt *sp) { struct host1x *host = sp->host; + host1x_sync_writel(host, sp->base_val, HOST1X_SYNC_SYNCPT_BASE(sp->id)); } @@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp) static void syncpt_read_wait_base(struct host1x_syncpt *sp) { struct host1x *host = sp->host; + sp->base_val = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id)); } @@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp) if (!host1x_syncpt_client_managed(sp) && host1x_syncpt_idle(sp)) return -EINVAL; + host1x_sync_writel(host, BIT_MASK(sp->id), HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset)); wmb(); @@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp) /* remove a wait pointed to by patch_addr */ static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr) { - u32 override = host1x_class_host_wait_syncpt( - HOST1X_SYNCPT_RESERVED, 0); + u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0); *((u32 *)patch_addr) = override; + return 0; } diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c index 2491bf82e30c..8b4fad0ab35d 100644 --- a/drivers/gpu/host1x/intr.c +++ b/drivers/gpu/host1x/intr.c @@ -122,18 +122,20 @@ static void action_submit_complete(struct 
host1x_waitlist *waiter) static void action_wakeup(struct host1x_waitlist *waiter) { wait_queue_head_t *wq = waiter->data; + wake_up(wq); } static void action_wakeup_interruptible(struct host1x_waitlist *waiter) { wait_queue_head_t *wq = waiter->data; + wake_up_interruptible(wq); } typedef void (*action_handler)(struct host1x_waitlist *waiter); -static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = { +static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = { action_submit_complete, action_wakeup, action_wakeup_interruptible, @@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work) host1x_syncpt_load(host->syncpt + id)); } -int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, +int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh, enum host1x_intr_action action, void *data, struct host1x_waitlist *waiter, void **ref) { @@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, return 0; } -void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref) +void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref) { struct host1x_waitlist *waiter = ref; struct host1x_syncpt *syncpt; @@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync) mutex_init(&host->intr_mutex); host->intr_syncpt_irq = irq_sync; - host->intr_wq = create_workqueue("host_syncpt"); - if (!host->intr_wq) - return -ENOMEM; for (id = 0; id < nb_pts; ++id) { struct host1x_syncpt *syncpt = host->syncpt + id; @@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync) INIT_LIST_HEAD(&syncpt->intr.wait_head); snprintf(syncpt->intr.thresh_irq_name, sizeof(syncpt->intr.thresh_irq_name), - "host1x_sp_%02d", id); + "host1x_sp_%02u", id); } host1x_intr_start(host); @@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync) void host1x_intr_deinit(struct host1x *host) { host1x_intr_stop(host); - destroy_workqueue(host->intr_wq); } void host1x_intr_start(struct host1x *host) @@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host) if (!list_empty(&syncpt[id].intr.wait_head)) { /* output diagnostics */ mutex_unlock(&host->intr_mutex); - pr_warn("%s cannot stop syncpt intr id=%d\n", + pr_warn("%s cannot stop syncpt intr id=%u\n", __func__, id); return; } diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h index 2b8adf016a05..1370c2bb75b8 100644 --- a/drivers/gpu/host1x/intr.h +++ b/drivers/gpu/host1x/intr.h @@ -75,7 +75,7 @@ struct host1x_waitlist { * * This is a non-blocking api. */ -int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, +int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh, enum host1x_intr_action action, void *data, struct host1x_waitlist *waiter, void **ref); @@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, * You must call this if you passed non-NULL as ref. 
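 *
 * A minimal sketch of the expected pairing, modelled on
 * host1x_syncpt_wait() in this driver (error handling elided; the wq,
 * waiter and ref declarations are assumed):
 *
 *	err = host1x_intr_add_action(host, id, thresh,
 *				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *				     &wq, waiter, &ref);
 *	...
 *	host1x_intr_put_ref(host, id, ref);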
* @ref the ref returned from host1x_intr_add_action() */ -void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref); +void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref); /* Initialize host1x sync point interrupt */ int host1x_intr_init(struct host1x *host, unsigned int irq_sync); diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index b4515d544039..a91b7c4a6110 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host, if (host1x_syncpt_is_expired(sp, wait->thresh)) { dev_dbg(host->dev, - "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n", + "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n", wait->syncpt_id, sp->name, wait->thresh, host1x_syncpt_read_min(sp)); @@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) for (i = 0; i < job->num_gathers; i++) { struct host1x_job_gather *g = &job->gathers[i]; + size += g->words * sizeof(u32); } @@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev) bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host)); for (i = 0; i < job->num_waitchk; i++) { u32 syncpt_id = job->waitchk[i].syncpt_id; + if (syncpt_id < host1x_syncpt_nb_pts(host)) set_bit(syncpt_id, waitchk_mask); } @@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; + host1x_bo_unpin(unpin->bo, unpin->sgt); host1x_bo_put(unpin->bo); } + job->num_unpins = 0; if (job->gather_copy_size) dma_free_wc(job->channel->dev, job->gather_copy_size, - job->gather_copy_mapped, job->gather_copy); + job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index 6b7fdc1e2ed0..95589328ad52 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, return NULL; } - name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id, + name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id, dev ? 
dev_name(dev) : NULL); if (!name) return NULL; @@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max); void host1x_syncpt_restore(struct host1x *host) { struct host1x_syncpt *sp_base = host->syncpt; - u32 i; + unsigned int i; for (i = 0; i < host1x_syncpt_nb_pts(host); i++) host1x_hw_syncpt_restore(host, sp_base + i); + for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_hw_syncpt_restore_wait_base(host, sp_base + i); + wmb(); } @@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host) void host1x_syncpt_save(struct host1x *host) { struct host1x_syncpt *sp_base = host->syncpt; - u32 i; + unsigned int i; for (i = 0; i < host1x_syncpt_nb_pts(host); i++) { if (host1x_syncpt_client_managed(sp_base + i)) @@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host) u32 host1x_syncpt_load(struct host1x_syncpt *sp) { u32 val; + val = host1x_hw_syncpt_load(sp->host, sp); trace_host1x_syncpt_load_min(sp->id, val); @@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp) */ u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp) { - u32 val; host1x_hw_syncpt_load_wait_base(sp->host, sp); - val = sp->base_val; - return val; + + return sp->base_val; } /* @@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr); static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh) { host1x_hw_syncpt_load(sp->host, sp); + return host1x_syncpt_is_expired(sp, thresh); } @@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh) * Main entrypoint for syncpoint value waits. */ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, - u32 *value) + u32 *value) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); void *ref; @@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, if (host1x_syncpt_is_expired(sp, thresh)) { if (value) *value = host1x_syncpt_load(sp); + return 0; } @@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, if (host1x_syncpt_is_expired(sp, thresh)) { if (value) *value = val; + goto done; } @@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, /* wait for the syncpoint, or timeout, or signal */ while (timeout) { long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout); - int remain = wait_event_interruptible_timeout(wq, + int remain; + + remain = wait_event_interruptible_timeout(wq, syncpt_load_min_is_expired(sp, thresh), check); if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) { if (value) *value = host1x_syncpt_load(sp); + err = 0; + break; } + if (remain < 0) { err = remain; break; } + timeout -= check; + if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) { dev_warn(sp->host->dev, - "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n", + "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n", current->comm, sp->id, sp->name, thresh, timeout); host1x_debug_dump_syncpts(sp->host); + if (check_count == MAX_STUCK_CHECK_COUNT) host1x_debug_dump(sp->host); + check_count++; } } + host1x_intr_put_ref(sp->host, sp->id, ref); done: @@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh) { u32 current_val; u32 future_val; + smp_rmb(); + current_val = (u32)atomic_read(&sp->min_val); future_val = (u32)atomic_read(&sp->max_val); @@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host) { struct host1x_syncpt_base *bases; struct host1x_syncpt *syncpt; - int i; + unsigned int i; - syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * 
host->info->nb_pts, + syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt), GFP_KERNEL); if (!syncpt) return -ENOMEM; - bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases, + bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases), GFP_KERNEL); if (!bases) return -ENOMEM; @@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev, unsigned long flags) { struct host1x *host = dev_get_drvdata(dev->parent); + return host1x_syncpt_alloc(host, dev, flags); } EXPORT_SYMBOL(host1x_syncpt_request); @@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free); void host1x_syncpt_deinit(struct host1x *host) { - int i; struct host1x_syncpt *sp = host->syncpt; + unsigned int i; + for (i = 0; i < host->info->nb_pts; i++, sp++) kfree(sp->name); } @@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host) /* * Read max. It indicates how many operations there are in queue, either in * channel or in a software thread. - * */ + */ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp) { smp_rmb(); + return (u32)atomic_read(&sp->max_val); } EXPORT_SYMBOL(host1x_syncpt_read_max); @@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max); u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) { smp_rmb(); + return (u32)atomic_read(&sp->min_val); } EXPORT_SYMBOL(host1x_syncpt_read_min); @@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp) } EXPORT_SYMBOL(host1x_syncpt_read); -int host1x_syncpt_nb_pts(struct host1x *host) +unsigned int host1x_syncpt_nb_pts(struct host1x *host) { return host->info->nb_pts; } -int host1x_syncpt_nb_bases(struct host1x *host) +unsigned int host1x_syncpt_nb_bases(struct host1x *host) { return host->info->nb_bases; } -int host1x_syncpt_nb_mlocks(struct host1x *host) +unsigned int host1x_syncpt_nb_mlocks(struct host1x *host) { return host->info->nb_mlocks; } -struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id) +struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id) { if (host->info->nb_pts < id) return NULL; + return host->syncpt + id; } EXPORT_SYMBOL(host1x_syncpt_get); diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h index 9056465ecd3f..f719205105ac 100644 --- a/drivers/gpu/host1x/syncpt.h +++ b/drivers/gpu/host1x/syncpt.h @@ -37,7 +37,7 @@ struct host1x_syncpt_base { }; struct host1x_syncpt { - int id; + unsigned int id; atomic_t min_val; atomic_t max_val; u32 base_val; @@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host); void host1x_syncpt_deinit(struct host1x *host); /* Return number of sync point supported. */ -int host1x_syncpt_nb_pts(struct host1x *host); +unsigned int host1x_syncpt_nb_pts(struct host1x *host); /* Return number of wait bases supported. */ -int host1x_syncpt_nb_bases(struct host1x *host); +unsigned int host1x_syncpt_nb_bases(struct host1x *host); /* Return number of mlocks supported. */ -int host1x_syncpt_nb_mlocks(struct host1x *host); +unsigned int host1x_syncpt_nb_mlocks(struct host1x *host); /* * Check sync point sanity. 
If max is larger than min, there have too many diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index 2f29780e7c68..659475c1e44a 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand, static int ipu_bus_format_to_map(u32 fmt) { switch (fmt) { + default: + WARN_ON(1); + /* fall-through */ case MEDIA_BUS_FMT_RGB888_1X24: return IPU_DC_MAP_RGB24; case MEDIA_BUS_FMT_RGB565_1X16: @@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt) return IPU_DC_MAP_LVDS666; case MEDIA_BUS_FMT_BGR888_1X24: return IPU_DC_MAP_BGR24; - default: - return -EINVAL; } } @@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced, dc->di = ipu_di_get_num(di); map = ipu_bus_format_to_map(bus_format); - if (map < 0) { - dev_dbg(priv->dev, "IPU_DISP: No MAP\n"); - return map; - } /* * In interlaced mode we need more counters to create the asymmetric diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index 359268e3a166..a8d87ddd8a17 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig) dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n", di->id, sig->mode.hactive, sig->mode.vactive); - if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0)) - return -EINVAL; - dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n", clk_get_rate(di->clk_ipu), clk_get_rate(di->clk_di), diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c index 837b1ec22800..42705bb5aaa3 100644 --- a/drivers/gpu/ipu-v3/ipu-dmfc.c +++ b/drivers/gpu/ipu-v3/ipu-dmfc.c @@ -45,17 +45,6 @@ #define DMFC_DP_CHAN_6B_24 16 #define DMFC_DP_CHAN_6F_29 24 -#define DMFC_FIFO_SIZE_64 (3 << 3) -#define DMFC_FIFO_SIZE_128 (2 << 3) -#define DMFC_FIFO_SIZE_256 (1 << 3) -#define DMFC_FIFO_SIZE_512 (0 << 3) - -#define DMFC_SEGMENT(x) ((x & 0x7) << 0) -#define DMFC_BURSTSIZE_128 (0 << 6) -#define DMFC_BURSTSIZE_64 (1 << 6) -#define DMFC_BURSTSIZE_32 (2 << 6) -#define DMFC_BURSTSIZE_16 (3 << 6) - struct dmfc_channel_data { int ipu_channel; unsigned long channel_reg; @@ -104,9 +93,6 @@ struct ipu_dmfc_priv; struct dmfc_channel { unsigned slots; - unsigned slotmask; - unsigned segment; - int burstsize; struct ipu_soc *ipu; struct ipu_dmfc_priv *priv; const struct dmfc_channel_data *data; @@ -117,7 +103,6 @@ struct ipu_dmfc_priv { struct device *dev; struct dmfc_channel channels[DMFC_NUM_CHANNELS]; struct mutex mutex; - unsigned long bandwidth_per_slot; void __iomem *base; int use_count; }; @@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc) } EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel); -static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots, - int segment, int burstsize) -{ - struct ipu_dmfc_priv *priv = dmfc->priv; - u32 val, field; - - dev_dbg(priv->dev, - "dmfc: using %d slots starting from segment %d for IPU channel %d\n", - slots, segment, dmfc->data->ipu_channel); - - switch (slots) { - case 1: - field = DMFC_FIFO_SIZE_64; - break; - case 2: - field = DMFC_FIFO_SIZE_128; - break; - case 4: - field = DMFC_FIFO_SIZE_256; - break; - case 8: - field = DMFC_FIFO_SIZE_512; - break; - default: - return -EINVAL; - } - - switch (burstsize) { - case 16: - field |= DMFC_BURSTSIZE_16; - break; - case 32: - field |= DMFC_BURSTSIZE_32; - break; - case 64: - field |= DMFC_BURSTSIZE_64; - 
break; - case 128: - field |= DMFC_BURSTSIZE_128; - break; - } - - field |= DMFC_SEGMENT(segment); - - val = readl(priv->base + dmfc->data->channel_reg); - - val &= ~(0xff << dmfc->data->shift); - val |= field << dmfc->data->shift; - - writel(val, priv->base + dmfc->data->channel_reg); - - dmfc->slots = slots; - dmfc->segment = segment; - dmfc->burstsize = burstsize; - dmfc->slotmask = ((1 << slots) - 1) << segment; - - return 0; -} - -static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv, - unsigned long bandwidth) -{ - int slots = 1; - - while (slots * priv->bandwidth_per_slot < bandwidth) - slots *= 2; - - return slots; -} - -static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots) -{ - unsigned slotmask_need, slotmask_used = 0; - int i, segment = 0; - - slotmask_need = (1 << slots) - 1; - - for (i = 0; i < DMFC_NUM_CHANNELS; i++) - slotmask_used |= priv->channels[i].slotmask; - - while (slotmask_need <= 0xff) { - if (!(slotmask_used & slotmask_need)) - return segment; - - slotmask_need <<= 1; - segment++; - } - - return -EBUSY; -} - -void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc) -{ - struct ipu_dmfc_priv *priv = dmfc->priv; - int i; - - dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n", - dmfc->slots, dmfc->segment); - - mutex_lock(&priv->mutex); - - if (!dmfc->slots) - goto out; - - dmfc->slotmask = 0; - dmfc->slots = 0; - dmfc->segment = 0; - - for (i = 0; i < DMFC_NUM_CHANNELS; i++) - priv->channels[i].slotmask = 0; - - for (i = 0; i < DMFC_NUM_CHANNELS; i++) { - if (priv->channels[i].slots > 0) { - priv->channels[i].segment = - dmfc_find_slots(priv, priv->channels[i].slots); - priv->channels[i].slotmask = - ((1 << priv->channels[i].slots) - 1) << - priv->channels[i].segment; - } - } - - for (i = 0; i < DMFC_NUM_CHANNELS; i++) { - if (priv->channels[i].slots > 0) - ipu_dmfc_setup_channel(&priv->channels[i], - priv->channels[i].slots, - priv->channels[i].segment, - priv->channels[i].burstsize); - } -out: - mutex_unlock(&priv->mutex); -} -EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth); - -int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc, - unsigned long bandwidth_pixel_per_second, int burstsize) -{ - struct ipu_dmfc_priv *priv = dmfc->priv; - int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second); - int segment = -1, ret = 0; - - dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n", - bandwidth_pixel_per_second / 1000000, - dmfc->data->ipu_channel); - - ipu_dmfc_free_bandwidth(dmfc); - - mutex_lock(&priv->mutex); - - if (slots > 8) { - ret = -EBUSY; - goto out; - } - - /* For the MEM_BG channel, first try to allocate twice the slots */ - if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC) - segment = dmfc_find_slots(priv, slots * 2); - else if (slots < 2) - /* Always allocate at least 128*4 bytes (2 slots) */ - slots = 2; - - if (segment >= 0) - slots *= 2; - else - segment = dmfc_find_slots(priv, slots); - if (segment < 0) { - ret = -EBUSY; - goto out; - } - - ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize); - -out: - mutex_unlock(&priv->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth); - void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width) { struct ipu_dmfc_priv *priv = dmfc->priv; @@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get); void ipu_dmfc_put(struct dmfc_channel *dmfc) { - ipu_dmfc_free_bandwidth(dmfc); } EXPORT_SYMBOL_GPL(ipu_dmfc_put); @@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long 
base, priv->channels[i].priv = priv; priv->channels[i].ipu = ipu; priv->channels[i].data = &dmfcdata[i]; - } - - writel(0x0, priv->base + DMFC_WR_CHAN); - writel(0x0, priv->base + DMFC_DP_CHAN); - /* - * We have a total bandwidth of clkrate * 4pixel divided - * into 8 slots. - */ - priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8; - - dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n", - priv->bandwidth_per_slot / 1000000); + if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC || + dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC || + dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC) + priv->channels[i].slots = 2; + } + writel(0x00000050, priv->base + DMFC_WR_CHAN); + writel(0x00005654, priv->base + DMFC_DP_CHAN); writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF); writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF); writel(0x00000003, priv->base + DMFC_GENERAL1); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cbd7c986d926..5f962bfcb43c 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -30,6 +30,7 @@ #define pr_fmt(fmt) "vga_switcheroo: " fmt +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/debugfs.h> #include <linux/fb.h> @@ -51,9 +52,9 @@ * * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs. * * muxless: Dual GPUs but only one of them is connected to outputs. - * The other one is merely used to offload rendering, its results - * are copied over PCIe into the framebuffer. On Linux this is - * supported with DRI PRIME. + * The other one is merely used to offload rendering, its results + * are copied over PCIe into the framebuffer. On Linux this is + * supported with DRI PRIME. * * Hybrid graphics started to appear in the late Naughties and were initially * all muxed. Newer laptops moved to a muxless architecture for cost reasons. @@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev, * * Register vga client (GPU). Enable vga_switcheroo if another GPU and a * handler have already registered. The power state of the client is assumed - * to be ON. + * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called + * to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client); * @id: client identifier * * Register audio client (audio device on a GPU). The power state of the - * client is assumed to be ON. + * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer() + * shall be called to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -376,6 +379,33 @@ find_active_client(struct list_head *head) } /** + * vga_switcheroo_client_probe_defer() - whether to defer probing a given client + * @pdev: client pci device + * + * Determine whether any prerequisites are not fulfilled to probe a given + * client. Drivers shall invoke this early on in their ->probe callback + * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not + * register the client ere thou hast called this. + * + * Return: %true if probing should be deferred, otherwise %false. + */ +bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) +{ + if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { + /* + * apple-gmux is needed on pre-retina MacBook Pro + * to probe the panel if pdev is the inactive GPU. 
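+	 *
+	 * A client driver's probe would typically act on this as in the
+	 * following hypothetical sketch (pdev being the device passed to
+	 * its ->probe callback), per the kernel-doc above:
+	 *
+	 *	if (vga_switcheroo_client_probe_defer(pdev))
+	 *		return -EPROBE_DEFER;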
+ */ + if (apple_gmux_present() && pdev != vga_default_device() && + !vgasr_priv.handler_flags) + return true; + } + + return false; +} +EXPORT_SYMBOL(vga_switcheroo_client_probe_defer); + +/** * vga_switcheroo_get_client_state() - obtain power state of a given client * @pdev: client pci device * @@ -530,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc); * * OFF: Power off the device not in use. * * ON: Power on the device not in use. * * IGD: Switch to the integrated graphics device. - * Power on the integrated GPU if necessary, power off the discrete GPU. - * Prerequisite is that no user space processes (e.g. Xorg, alsactl) - * have opened device files of the GPUs or the audio client. If the - * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/ - * and /dev/snd/controlC1 to identify processes blocking the switch. + * Power on the integrated GPU if necessary, power off the discrete GPU. + * Prerequisite is that no user space processes (e.g. Xorg, alsactl) + * have opened device files of the GPUs or the audio client. If the + * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/ + * and /dev/snd/controlC1 to identify processes blocking the switch. * * DIS: Switch to the discrete graphics device. * * DIGD: Delayed switch to the integrated graphics device. - * This will perform the switch once the last user space process has - * closed the device files of the GPUs and the audio client. + * This will perform the switch once the last user space process has + * closed the device files of the GPUs and the audio client. * * DDIS: Delayed switch to the discrete graphics device. * * MIGD: Mux-only switch to the integrated graphics device. - * Does not remap console or change the power state of either gpu. - * If the integrated GPU is currently off, the screen will turn black. - * If it is on, the screen will show whatever happens to be in VRAM. - * Either way, the user has to blindly enter the command to switch back. + * Does not remap console or change the power state of either gpu. + * If the integrated GPU is currently off, the screen will turn black. + * If it is on, the screen will show whatever happens to be in VRAM. + * Either way, the user has to blindly enter the command to switch back. * * MDIS: Mux-only switch to the discrete graphics device. * * For GPUs whose power state is controlled by the driver's runtime pm, diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 5646ca4b95de..78ac4811bd3c 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -388,6 +388,21 @@ config HID_LCPOWER ---help--- Support for LC-Power RC1000MCE RF remote control. +config HID_LED + tristate "Simple RGB LED support" + depends on HID + depends on LEDS_CLASS + ---help--- + Support for simple RGB LED devices. Currently supported are: + - Riso Kagaku Webmail Notifier + - Dream Cheeky Webmail Notifier and Friends Alert + - ThingM blink(1) + - Delcom Visual Signal Indicator Generation 2 + - Greynut Luxafor + + To compile this driver as a module, choose M here: the + module will be called hid-led. + config HID_LENOVO tristate "Lenovo / Thinkpad devices" depends on HID @@ -819,11 +834,11 @@ config HID_THINGM tristate "ThingM blink(1) USB RGB LED" depends on HID depends on LEDS_CLASS + select HID_LED ---help--- - Support for the ThingM blink(1) USB RGB LED. This driver registers a - Linux LED class instance, plus additional sysfs attributes to control - RGB colors, fade time and playing. The device is exposed through hidraw - to access other functions. 
+ Support for the ThingM blink(1) USB RGB LED. This driver has been + merged into the generic hid led driver. Config symbol HID_THINGM + just selects HID_LED and will be removed soon. config HID_THRUSTMASTER tristate "ThrustMaster devices support" @@ -936,6 +951,14 @@ config HID_SENSOR_CUSTOM_SENSOR standard sensors. Select this config option for custom/generic sensor support. +config HID_ALPS + tristate "Alps HID device support" + depends on HID + ---help--- + Support for Alps I2C HID touchpads and StickPointer. + Say Y here if you have an Alps touchpad over i2c-hid or usbhid + and want support for its special functionalities. + endmenu endif # HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index a2fb562de748..fc4b2aa47f2e 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -21,6 +21,7 @@ hid-wiimote-y := hid-wiimote-core.o hid-wiimote-modules.o hid-wiimote-$(CONFIG_DEBUG_FS) += hid-wiimote-debug.o obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o +obj-$(CONFIG_HID_ALPS) += hid-alps.o obj-$(CONFIG_HID_ACRUX) += hid-axff.o obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o @@ -90,12 +91,12 @@ obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o -obj-$(CONFIG_HID_THINGM) += hid-thingm.o obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o obj-$(CONFIG_HID_TIVO) += hid-tivo.o obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o +obj-$(CONFIG_HID_LED) += hid-led.o obj-$(CONFIG_HID_XINMO) += hid-xinmo.o obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c new file mode 100644 index 000000000000..048befde295a --- /dev/null +++ b/drivers/hid/hid-alps.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2016 Masaki Ota <masaki.ota@jp.alps.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version.
+ */ + +#include <linux/kernel.h> +#include <linux/hid.h> +#include <linux/input.h> +#include <linux/input/mt.h> +#include <linux/module.h> +#include <asm/unaligned.h> +#include "hid-ids.h" + +/* ALPS Device Product ID */ +#define HID_PRODUCT_ID_T3_BTNLESS 0xD0C0 +#define HID_PRODUCT_ID_COSMO 0x1202 +#define HID_PRODUCT_ID_U1_PTP_1 0x1207 +#define HID_PRODUCT_ID_U1 0x1209 +#define HID_PRODUCT_ID_U1_PTP_2 0x120A +#define HID_PRODUCT_ID_U1_DUAL 0x120B +#define HID_PRODUCT_ID_T4_BTNLESS 0x120C + +#define DEV_SINGLEPOINT 0x01 +#define DEV_DUALPOINT 0x02 + +#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ +#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ +#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* SP Absolute data ReportID */ + +#define U1_FEATURE_REPORT_LEN 0x08 /* Feature Report Length */ +#define U1_FEATURE_REPORT_LEN_ALL 0x0A +#define U1_CMD_REGISTER_READ 0xD1 +#define U1_CMD_REGISTER_WRITE 0xD2 + +#define U1_DEVTYPE_SP_SUPPORT 0x10 /* SP Support */ +#define U1_DISABLE_DEV 0x01 +#define U1_TP_ABS_MODE 0x02 +#define U1_SP_ABS_MODE 0x80 + +#define ADDRESS_U1_DEV_CTRL_1 0x00800040 +#define ADDRESS_U1_DEVICE_TYP 0x00800043 +#define ADDRESS_U1_NUM_SENS_X 0x00800047 +#define ADDRESS_U1_NUM_SENS_Y 0x00800048 +#define ADDRESS_U1_PITCH_SENS_X 0x00800049 +#define ADDRESS_U1_PITCH_SENS_Y 0x0080004A +#define ADDRESS_U1_RESO_DWN_ABS 0x0080004E +#define ADDRESS_U1_PAD_BTN 0x00800052 +#define ADDRESS_U1_SP_BTN 0x0080009F + +#define MAX_TOUCHES 5 + +/** + * struct u1_dev + * + * @input: pointer to the kernel input device + * @input2: pointer to the kernel input2 device + * @hdev: pointer to the struct hid_device + * + * @dev_ctrl: device control parameter + * @dev_type: device type + * @sen_line_num_x: number of sensor line of X + * @sen_line_num_y: number of sensor line of Y + * @pitch_x: sensor pitch of X + * @pitch_y: sensor pitch of Y + * @resolution: resolution + * @btn_info: button information + * @sp_btn_info: stick button information + * @x_active_len_mm: active area length of X (mm) + * @y_active_len_mm: active area length of Y (mm) + * @x_max: maximum x coordinate value + * @y_max: maximum y coordinate value + * @btn_cnt: number of buttons + * @sp_btn_cnt: number of stick buttons + */ +struct u1_dev { + struct input_dev *input; + struct input_dev *input2; + struct hid_device *hdev; + + u8 dev_ctrl; + u8 dev_type; + u8 sen_line_num_x; + u8 sen_line_num_y; + u8 pitch_x; + u8 pitch_y; + u8 resolution; + u8 btn_info; + u8 sp_btn_info; + u32 x_active_len_mm; + u32 y_active_len_mm; + u32 x_max; + u32 y_max; + u32 btn_cnt; + u32 sp_btn_cnt; +}; + +static int u1_read_write_register(struct hid_device *hdev, u32 address, + u8 *read_val, u8 write_val, bool read_flag) +{ + int ret, i; + u8 check_sum; + u8 *input; + u8 *readbuf; + + input = kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL); + if (!input) + return -ENOMEM; + + input[0] = U1_FEATURE_REPORT_ID; + if (read_flag) { + input[1] = U1_CMD_REGISTER_READ; + input[6] = 0x00; + } else { + input[1] = U1_CMD_REGISTER_WRITE; + input[6] = write_val; + } + + put_unaligned_le32(address, input + 2); + + /* Calculate the checksum */ + check_sum = U1_FEATURE_REPORT_LEN_ALL; + for (i = 0; i < U1_FEATURE_REPORT_LEN - 1; i++) + check_sum += input[i]; + + input[7] = check_sum; + ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, input, + U1_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + + if (ret < 0) { + dev_err(&hdev->dev, "failed to read command (%d)\n", ret); + goto exit; + } + + if (read_flag) { + readbuf =
kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL); + if (!readbuf) { + kfree(input); + return -ENOMEM; + } + + ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, readbuf, + U1_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + + if (ret < 0) { + dev_err(&hdev->dev, "failed read register (%d)\n", ret); + goto exit; + } + + *read_val = readbuf[6]; + + kfree(readbuf); + } + + ret = 0; + +exit: + kfree(input); + return ret; +} + +static int alps_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + unsigned int x, y, z; + int i; + short sp_x, sp_y; + struct u1_dev *hdata = hid_get_drvdata(hdev); + + switch (data[0]) { + case U1_MOUSE_REPORT_ID: + break; + case U1_FEATURE_REPORT_ID: + break; + case U1_ABSOLUTE_REPORT_ID: + for (i = 0; i < MAX_TOUCHES; i++) { + u8 *contact = &data[i * 5]; + + x = get_unaligned_le16(contact + 3); + y = get_unaligned_le16(contact + 5); + z = contact[7] & 0x7F; + + input_mt_slot(hdata->input, i); + + if (z != 0) { + input_mt_report_slot_state(hdata->input, + MT_TOOL_FINGER, 1); + } else { + input_mt_report_slot_state(hdata->input, + MT_TOOL_FINGER, 0); + break; + } + + input_report_abs(hdata->input, ABS_MT_POSITION_X, x); + input_report_abs(hdata->input, ABS_MT_POSITION_Y, y); + input_report_abs(hdata->input, ABS_MT_PRESSURE, z); + + } + + input_mt_sync_frame(hdata->input); + + input_report_key(hdata->input, BTN_LEFT, + data[1] & 0x1); + input_report_key(hdata->input, BTN_RIGHT, + (data[1] & 0x2)); + input_report_key(hdata->input, BTN_MIDDLE, + (data[1] & 0x4)); + + input_sync(hdata->input); + + return 1; + + case U1_SP_ABSOLUTE_REPORT_ID: + sp_x = get_unaligned_le16(data+2); + sp_y = get_unaligned_le16(data+4); + + sp_x = sp_x / 8; + sp_y = sp_y / 8; + + input_report_rel(hdata->input2, REL_X, sp_x); + input_report_rel(hdata->input2, REL_Y, sp_y); + + input_report_key(hdata->input2, BTN_LEFT, + data[1] & 0x1); + input_report_key(hdata->input2, BTN_RIGHT, + (data[1] & 0x2)); + input_report_key(hdata->input2, BTN_MIDDLE, + (data[1] & 0x4)); + + input_sync(hdata->input2); + + return 1; + } + + return 0; +} + +#ifdef CONFIG_PM +static int alps_post_reset(struct hid_device *hdev) +{ + return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, U1_TP_ABS_MODE, false); +} + +static int alps_post_resume(struct hid_device *hdev) +{ + return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, U1_TP_ABS_MODE, false); +} +#endif /* CONFIG_PM */ + +static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) +{ + struct u1_dev *data = hid_get_drvdata(hdev); + struct input_dev *input = hi->input, *input2; + struct u1_dev devInfo; + int ret; + int res_x, res_y, i; + + data->input = input; + + hid_dbg(hdev, "Opening low level driver\n"); + ret = hid_hw_open(hdev); + if (ret) + return ret; + + /* Allow incoming hid reports */ + hid_device_io_start(hdev); + + /* Device initialization */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + &devInfo.dev_ctrl, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret); + goto exit; + } + + devInfo.dev_ctrl &= ~U1_DISABLE_DEV; + devInfo.dev_ctrl |= U1_TP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, devInfo.dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X, + &devInfo.sen_line_num_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret); + 
goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y, + &devInfo.sen_line_num_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X, + &devInfo.pitch_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y, + &devInfo.pitch_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS, + &devInfo.resolution, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN, + &devInfo.btn_info, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret); + goto exit; + } + + /* Check StickPointer device */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP, + &devInfo.dev_type, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret); + goto exit; + } + + devInfo.x_active_len_mm = + (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10; + devInfo.y_active_len_mm = + (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10; + + devInfo.x_max = + (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1); + devInfo.y_max = + (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1); + + __set_bit(EV_ABS, input->evbit); + input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0); + + if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) { + res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm; + res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm; + + input_abs_set_res(input, ABS_MT_POSITION_X, res_x); + input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); + } + + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0); + + input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER); + + __set_bit(EV_KEY, input->evbit); + if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) { + devInfo.btn_cnt = (devInfo.btn_info & 0x0F); + } else { + /* Button pad */ + devInfo.btn_cnt = 1; + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + } + + for (i = 0; i < devInfo.btn_cnt; i++) + __set_bit(BTN_LEFT + i, input->keybit); + + + /* Stick device initialization */ + if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) { + + input2 = input_allocate_device(); + if (!input2) { + input_free_device(input2); + goto exit; + } + + data->input2 = input2; + + devInfo.dev_ctrl |= U1_SP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, devInfo.dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed SP mode (%d)\n", ret); + input_free_device(input2); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN, + &devInfo.sp_btn_info, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret); + input_free_device(input2); + goto exit; + } + + input2->phys = input->phys; + input2->name = "DualPoint Stick"; + input2->id.bustype = BUS_I2C; + input2->id.vendor = input->id.vendor; + input2->id.product = input->id.product; + input2->id.version = input->id.version; + input2->dev.parent = input->dev.parent; + + __set_bit(EV_KEY, input2->evbit); + devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F); + for (i = 0; i < devInfo.sp_btn_cnt; i++) + __set_bit(BTN_LEFT + 
i, input2->keybit); + + __set_bit(EV_REL, input2->evbit); + __set_bit(REL_X, input2->relbit); + __set_bit(REL_Y, input2->relbit); + __set_bit(INPUT_PROP_POINTER, input2->propbit); + __set_bit(INPUT_PROP_POINTING_STICK, input2->propbit); + + if (input_register_device(data->input2)) { + input_free_device(input2); + goto exit; + } + } + +exit: + hid_device_io_stop(hdev); + hid_hw_close(hdev); + return ret; +} + +static int alps_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + return -1; +} + +static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct u1_dev *data = NULL; + int ret; + + data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->hdev = hdev; + hid_set_drvdata(hdev, data); + + hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + return ret; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + return ret; + } + + return 0; +} + +static void alps_remove(struct hid_device *hdev) +{ + hid_hw_stop(hdev); +} + +static const struct hid_device_id alps_id[] = { + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, + USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, + { } +}; +MODULE_DEVICE_TABLE(hid, alps_id); + +static struct hid_driver alps_driver = { + .name = "hid-alps", + .id_table = alps_id, + .probe = alps_probe, + .remove = alps_remove, + .raw_event = alps_raw_event, + .input_mapping = alps_input_mapping, + .input_configured = alps_input_configured, +#ifdef CONFIG_PM + .resume = alps_post_resume, + .reset_resume = alps_post_reset, +#endif +}; + +module_hid_driver(alps_driver); + +MODULE_AUTHOR("Masaki Ota <masaki.ota@jp.alps.com>"); +MODULE_DESCRIPTION("ALPS HID driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 884d82f9190e..2e046082210f 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -474,6 +474,8 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), + .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8ea3a26360e9..08f53c7fd513 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1772,6 +1772,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, @@ -1851,6 +1852,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) }, @@ -1877,8 +1879,11 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, @@ -1962,6 +1967,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, @@ -2008,6 +2014,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, #if IS_ENABLED(CONFIG_HID_ROCCAT) { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, @@ -2348,8 +2355,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, @@ -2486,7 +2491,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, - { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, { } }; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 
3eec09a134cb..4ed9a4fdfea7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -70,6 +70,9 @@ #define USB_VENDOR_ID_ALPS 0x0433 #define USB_DEVICE_ID_IBM_GAMEPAD 0x1101 +#define USB_VENDOR_ID_ALPS_JP 0x044E +#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B + #define USB_VENDOR_ID_ANTON 0x1130 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 @@ -142,6 +145,7 @@ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 @@ -296,6 +300,9 @@ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a +#define USB_VENDOR_ID_DELCOM 0x0fc5 +#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 @@ -334,6 +341,8 @@ #define USB_DEVICE_ID_ELECOM_BM084 0x0061 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 +#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 +#define USB_DEVICE_ID_DREAM_CHEEKY_FA 0x000a #define USB_VENDOR_ID_ELITEGROUP 0x03fc #define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8 @@ -680,6 +689,7 @@ #define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002 #define USB_DEVICE_ID_PICK16F1454 0x0042 #define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7 +#define USB_DEVICE_ID_LUXAFOR 0xf372 #define USB_VENDOR_ID_MICROSOFT 0x045e #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c new file mode 100644 index 000000000000..d8d55f37b4f5 --- /dev/null +++ b/drivers/hid/hid-led.c @@ -0,0 +1,523 @@ +/* + * Simple USB RGB LED driver + * + * Copyright 2016 Heiner Kallweit <hkallweit1@gmail.com> + * Based on drivers/hid/hid-thingm.c and + * drivers/usb/misc/usbled.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2. 
+ */ + +#include <linux/hid.h> +#include <linux/hidraw.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/mutex.h> + +#include "hid-ids.h" + +enum hidled_report_type { + RAW_REQUEST, + OUTPUT_REPORT +}; + +enum hidled_type { + RISO_KAGAKU, + DREAM_CHEEKY, + THINGM, + DELCOM, + LUXAFOR, +}; + +static unsigned const char riso_kagaku_tbl[] = { +/* R+2G+4B -> riso kagaku color index */ + [0] = 0, /* black */ + [1] = 2, /* red */ + [2] = 1, /* green */ + [3] = 5, /* yellow */ + [4] = 3, /* blue */ + [5] = 6, /* magenta */ + [6] = 4, /* cyan */ + [7] = 7 /* white */ +}; + +#define RISO_KAGAKU_IX(r, g, b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)] + +union delcom_packet { + __u8 data[8]; + struct { + __u8 major_cmd; + __u8 minor_cmd; + __u8 data_lsb; + __u8 data_msb; + } tx; + struct { + __u8 cmd; + } rx; + struct { + __le16 family_code; + __le16 security_code; + __u8 fw_version; + } fw; +}; + +#define DELCOM_GREEN_LED 0 +#define DELCOM_RED_LED 1 +#define DELCOM_BLUE_LED 2 + +struct hidled_device; +struct hidled_rgb; + +struct hidled_config { + enum hidled_type type; + const char *name; + const char *short_name; + enum led_brightness max_brightness; + int num_leds; + size_t report_size; + enum hidled_report_type report_type; + int (*init)(struct hidled_device *ldev); + int (*write)(struct led_classdev *cdev, enum led_brightness br); +}; + +struct hidled_led { + struct led_classdev cdev; + struct hidled_rgb *rgb; + char name[32]; +}; + +struct hidled_rgb { + struct hidled_device *ldev; + struct hidled_led red; + struct hidled_led green; + struct hidled_led blue; + u8 num; +}; + +struct hidled_device { + const struct hidled_config *config; + struct hid_device *hdev; + struct hidled_rgb *rgb; + struct mutex lock; +}; + +#define MAX_REPORT_SIZE 16 + +#define to_hidled_led(arg) container_of(arg, struct hidled_led, cdev) + +static bool riso_kagaku_switch_green_blue; +module_param(riso_kagaku_switch_green_blue, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(riso_kagaku_switch_green_blue, + "switch green and blue RGB component for Riso Kagaku devices"); + +static int hidled_send(struct hidled_device *ldev, __u8 *buf) +{ + int ret; + + mutex_lock(&ldev->lock); + + if (ldev->config->report_type == RAW_REQUEST) + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); + else if (ldev->config->report_type == OUTPUT_REPORT) + ret = hid_hw_output_report(ldev->hdev, buf, + ldev->config->report_size); + else + ret = -EINVAL; + + mutex_unlock(&ldev->lock); + + if (ret < 0) + return ret; + + return ret == ldev->config->report_size ? 0 : -EMSGSIZE; +} + +/* reading data is supported for report type RAW_REQUEST only */ +static int hidled_recv(struct hidled_device *ldev, __u8 *buf) +{ + int ret; + + if (ldev->config->report_type != RAW_REQUEST) + return -EINVAL; + + mutex_lock(&ldev->lock); + + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); + if (ret < 0) + goto err; + + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); +err: + mutex_unlock(&ldev->lock); + + return ret < 0 ? 
ret : 0; +} + +static u8 riso_kagaku_index(struct hidled_rgb *rgb) +{ + enum led_brightness r, g, b; + + r = rgb->red.cdev.brightness; + g = rgb->green.cdev.brightness; + b = rgb->blue.cdev.brightness; + + if (riso_kagaku_switch_green_blue) + return RISO_KAGAKU_IX(r, b, g); + else + return RISO_KAGAKU_IX(r, g, b); +} + +static int riso_kagaku_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + struct hidled_rgb *rgb = led->rgb; + __u8 buf[MAX_REPORT_SIZE] = {}; + + buf[1] = riso_kagaku_index(rgb); + + return hidled_send(rgb->ldev, buf); +} + +static int dream_cheeky_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + struct hidled_rgb *rgb = led->rgb; + __u8 buf[MAX_REPORT_SIZE] = {}; + + buf[1] = rgb->red.cdev.brightness; + buf[2] = rgb->green.cdev.brightness; + buf[3] = rgb->blue.cdev.brightness; + buf[7] = 0x1a; + buf[8] = 0x05; + + return hidled_send(rgb->ldev, buf); +} + +static int dream_cheeky_init(struct hidled_device *ldev) +{ + __u8 buf[MAX_REPORT_SIZE] = {}; + + /* Dream Cheeky magic */ + buf[1] = 0x1f; + buf[2] = 0x02; + buf[4] = 0x5f; + buf[7] = 0x1a; + buf[8] = 0x03; + + return hidled_send(ldev, buf); +} + +static int _thingm_write(struct led_classdev *cdev, enum led_brightness br, + u8 offset) +{ + struct hidled_led *led = to_hidled_led(cdev); + __u8 buf[MAX_REPORT_SIZE] = { 1, 'c' }; + + buf[2] = led->rgb->red.cdev.brightness; + buf[3] = led->rgb->green.cdev.brightness; + buf[4] = led->rgb->blue.cdev.brightness; + buf[7] = led->rgb->num + offset; + + return hidled_send(led->rgb->ldev, buf); +} + +static int thingm_write_v1(struct led_classdev *cdev, enum led_brightness br) +{ + return _thingm_write(cdev, br, 0); +} + +static int thingm_write(struct led_classdev *cdev, enum led_brightness br) +{ + return _thingm_write(cdev, br, 1); +} + +static const struct hidled_config hidled_config_thingm_v1 = { + .name = "ThingM blink(1) v1", + .short_name = "thingm", + .max_brightness = 255, + .num_leds = 1, + .report_size = 9, + .report_type = RAW_REQUEST, + .write = thingm_write_v1, +}; + +static int thingm_init(struct hidled_device *ldev) +{ + __u8 buf[MAX_REPORT_SIZE] = { 1, 'v' }; + int ret; + + ret = hidled_recv(ldev, buf); + if (ret) + return ret; + + /* Check for firmware major version 1 */ + if (buf[3] == '1') + ldev->config = &hidled_config_thingm_v1; + + return 0; +} + +static inline int delcom_get_lednum(const struct hidled_led *led) +{ + if (led == &led->rgb->red) + return DELCOM_RED_LED; + else if (led == &led->rgb->green) + return DELCOM_GREEN_LED; + else + return DELCOM_BLUE_LED; +} + +static int delcom_enable_led(struct hidled_led *led) +{ + union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 12 }; + + dp.tx.data_lsb = 1 << delcom_get_lednum(led); + dp.tx.data_msb = 0; + + return hidled_send(led->rgb->ldev, dp.data); +} + +static int delcom_set_pwm(struct hidled_led *led) +{ + union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 34 }; + + dp.tx.data_lsb = delcom_get_lednum(led); + dp.tx.data_msb = led->cdev.brightness; + + return hidled_send(led->rgb->ldev, dp.data); +} + +static int delcom_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + int ret; + + /* + * enable LED + * We can't do this in the init function already because the device + * is internally reset later. 
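The delcom_packet union above overlays per-command layouts on one 8-byte report. A hedged sketch of how the two write commands used here are assembled; the command numbers (major 101, minor 12 for enable, minor 34 for PWM) come from the hunk, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

union delcom_packet {
        uint8_t data[8];
        struct {
                uint8_t major_cmd;      /* 101: write command */
                uint8_t minor_cmd;      /* 12: LED enable, 34: PWM duty */
                uint8_t data_lsb;
                uint8_t data_msb;
        } tx;
};

#define DELCOM_RED_LED 1

int main(void)
{
        /* enable the red LED: bit mask in data_lsb */
        union delcom_packet on = { .tx = { 101, 12, 1 << DELCOM_RED_LED, 0 } };
        /* set its duty cycle: LED number in data_lsb, level in data_msb */
        union delcom_packet pwm = { .tx = { 101, 34, DELCOM_RED_LED, 80 } };

        /* dp.data is what the driver hands to hidled_send() */
        printf("enable %02x %02x %02x %02x\n",
               on.data[0], on.data[1], on.data[2], on.data[3]);
        printf("pwm    %02x %02x %02x %02x\n",
               pwm.data[0], pwm.data[1], pwm.data[2], pwm.data[3]);
        return 0;
}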
+ */ + ret = delcom_enable_led(led); + if (ret) + return ret; + + return delcom_set_pwm(led); +} + +static int delcom_init(struct hidled_device *ldev) +{ + union delcom_packet dp = { .rx.cmd = 104 }; + int ret; + + ret = hidled_recv(ldev, dp.data); + if (ret) + return ret; + /* + * Several Delcom devices share the same USB VID/PID + * Check for family id 2 for Visual Signal Indicator + */ + return le16_to_cpu(dp.fw.family_code) == 2 ? 0 : -ENODEV; +} + +static int luxafor_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + __u8 buf[MAX_REPORT_SIZE] = { [1] = 1 }; + + buf[2] = led->rgb->num + 1; + buf[3] = led->rgb->red.cdev.brightness; + buf[4] = led->rgb->green.cdev.brightness; + buf[5] = led->rgb->blue.cdev.brightness; + + return hidled_send(led->rgb->ldev, buf); +} + +static const struct hidled_config hidled_configs[] = { + { + .type = RISO_KAGAKU, + .name = "Riso Kagaku Webmail Notifier", + .short_name = "riso_kagaku", + .max_brightness = 1, + .num_leds = 1, + .report_size = 6, + .report_type = OUTPUT_REPORT, + .write = riso_kagaku_write, + }, + { + .type = DREAM_CHEEKY, + .name = "Dream Cheeky Webmail Notifier", + .short_name = "dream_cheeky", + .max_brightness = 31, + .num_leds = 1, + .report_size = 9, + .report_type = RAW_REQUEST, + .init = dream_cheeky_init, + .write = dream_cheeky_write, + }, + { + .type = THINGM, + .name = "ThingM blink(1)", + .short_name = "thingm", + .max_brightness = 255, + .num_leds = 2, + .report_size = 9, + .report_type = RAW_REQUEST, + .init = thingm_init, + .write = thingm_write, + }, + { + .type = DELCOM, + .name = "Delcom Visual Signal Indicator G2", + .short_name = "delcom", + .max_brightness = 100, + .num_leds = 1, + .report_size = 8, + .report_type = RAW_REQUEST, + .init = delcom_init, + .write = delcom_write, + }, + { + .type = LUXAFOR, + .name = "Greynut Luxafor", + .short_name = "luxafor", + .max_brightness = 255, + .num_leds = 6, + .report_size = 9, + .report_type = OUTPUT_REPORT, + .write = luxafor_write, + }, +}; + +static int hidled_init_led(struct hidled_led *led, const char *color_name, + struct hidled_rgb *rgb, unsigned int minor) +{ + const struct hidled_config *config = rgb->ldev->config; + + if (config->num_leds > 1) + snprintf(led->name, sizeof(led->name), "%s%u:%s:led%u", + config->short_name, minor, color_name, rgb->num); + else + snprintf(led->name, sizeof(led->name), "%s%u:%s", + config->short_name, minor, color_name); + led->cdev.name = led->name; + led->cdev.max_brightness = config->max_brightness; + led->cdev.brightness_set_blocking = config->write; + led->cdev.flags = LED_HW_PLUGGABLE; + led->rgb = rgb; + + return devm_led_classdev_register(&rgb->ldev->hdev->dev, &led->cdev); +} + +static int hidled_init_rgb(struct hidled_rgb *rgb, unsigned int minor) +{ + int ret; + + /* Register the red diode */ + ret = hidled_init_led(&rgb->red, "red", rgb, minor); + if (ret) + return ret; + + /* Register the green diode */ + ret = hidled_init_led(&rgb->green, "green", rgb, minor); + if (ret) + return ret; + + /* Register the blue diode */ + return hidled_init_led(&rgb->blue, "blue", rgb, minor); +} + +static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct hidled_device *ldev; + unsigned int minor; + int ret, i; + + ldev = devm_kzalloc(&hdev->dev, sizeof(*ldev), GFP_KERNEL); + if (!ldev) + return -ENOMEM; + + ret = hid_parse(hdev); + if (ret) + return ret; + + ldev->hdev = hdev; + mutex_init(&ldev->lock); + + for (i = 0; !ldev->config && i < 
ARRAY_SIZE(hidled_configs); i++) + if (hidled_configs[i].type == id->driver_data) + ldev->config = &hidled_configs[i]; + + if (!ldev->config) + return -EINVAL; + + if (ldev->config->init) { + ret = ldev->config->init(ldev); + if (ret) + return ret; + } + + ldev->rgb = devm_kcalloc(&hdev->dev, ldev->config->num_leds, + sizeof(struct hidled_rgb), GFP_KERNEL); + if (!ldev->rgb) + return -ENOMEM; + + ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + if (ret) + return ret; + + minor = ((struct hidraw *) hdev->hidraw)->minor; + + for (i = 0; i < ldev->config->num_leds; i++) { + ldev->rgb[i].ldev = ldev; + ldev->rgb[i].num = i; + ret = hidled_init_rgb(&ldev->rgb[i], minor); + if (ret) { + hid_hw_stop(hdev); + return ret; + } + } + + hid_info(hdev, "%s initialized\n", ldev->config->name); + + return 0; +} + +static const struct hid_device_id hidled_table[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, + USB_DEVICE_ID_RI_KA_WEBMAIL), .driver_data = RISO_KAGAKU }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, + USB_DEVICE_ID_DREAM_CHEEKY_WN), .driver_data = DREAM_CHEEKY }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, + USB_DEVICE_ID_DREAM_CHEEKY_FA), .driver_data = DREAM_CHEEKY }, + { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, + USB_DEVICE_ID_BLINK1), .driver_data = THINGM }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, + USB_DEVICE_ID_DELCOM_VISUAL_IND), .driver_data = DELCOM }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, + USB_DEVICE_ID_LUXAFOR), .driver_data = LUXAFOR }, + { } +}; +MODULE_DEVICE_TABLE(hid, hidled_table); + +static struct hid_driver hidled_driver = { + .name = "hid-led", + .probe = hidled_probe, + .id_table = hidled_table, +}; + +module_hid_driver(hidled_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Heiner Kallweit <hkallweit1@gmail.com>"); +MODULE_DESCRIPTION("Simple USB RGB LED driver"); diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c deleted file mode 100644 index 9ad9c6ec5bba..000000000000 --- a/drivers/hid/hid-thingm.c +++ /dev/null @@ -1,263 +0,0 @@ -/* - * ThingM blink(1) USB RGB LED driver - * - * Copyright 2013-2014 Savoir-faire Linux Inc. - * Vivien Didelot <vivien.didelot@savoirfairelinux.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2. 
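hidled_probe above ties the id table's driver_data to an entry in hidled_configs, so per-device quirks (report size, report type, brightness range, init hook) live in data rather than in code paths. A reduced, hypothetical sketch of that table-driven lookup:

#include <stddef.h>
#include <stdio.h>

enum led_type { RISO_KAGAKU, DREAM_CHEEKY, THINGM, DELCOM, LUXAFOR };

struct led_config {
        enum led_type type;
        const char *name;
        size_t report_size;
};

static const struct led_config configs[] = {
        { RISO_KAGAKU,  "Riso Kagaku Webmail Notifier",      6 },
        { DREAM_CHEEKY, "Dream Cheeky Webmail Notifier",     9 },
        { DELCOM,       "Delcom Visual Signal Indicator G2", 8 },
};

/* mirrors the probe loop: stop at the first entry whose type matches */
static const struct led_config *lookup(enum led_type type)
{
        size_t i;

        for (i = 0; i < sizeof(configs) / sizeof(configs[0]); i++)
                if (configs[i].type == type)
                        return &configs[i];
        return NULL;    /* probe returns -EINVAL in this case */
}

int main(void)
{
        const struct led_config *c = lookup(DELCOM);

        if (c)
                printf("%s: %zu-byte reports\n", c->name, c->report_size);
        return 0;
}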
- */ - -#include <linux/hid.h> -#include <linux/hidraw.h> -#include <linux/leds.h> -#include <linux/module.h> -#include <linux/mutex.h> - -#include "hid-ids.h" - -#define REPORT_ID 1 -#define REPORT_SIZE 9 - -/* Firmware major number of supported devices */ -#define THINGM_MAJOR_MK1 '1' -#define THINGM_MAJOR_MK2 '2' - -struct thingm_fwinfo { - char major; - unsigned numrgb; - unsigned first; -}; - -static const struct thingm_fwinfo thingm_fwinfo[] = { - { - .major = THINGM_MAJOR_MK1, - .numrgb = 1, - .first = 0, - }, { - .major = THINGM_MAJOR_MK2, - .numrgb = 2, - .first = 1, - } -}; - -/* A red, green or blue channel, part of an RGB chip */ -struct thingm_led { - struct thingm_rgb *rgb; - struct led_classdev ldev; - char name[32]; -}; - -/* Basically a WS2812 5050 RGB LED chip */ -struct thingm_rgb { - struct thingm_device *tdev; - struct thingm_led red; - struct thingm_led green; - struct thingm_led blue; - u8 num; -}; - -struct thingm_device { - struct hid_device *hdev; - struct { - char major; - char minor; - } version; - const struct thingm_fwinfo *fwinfo; - struct mutex lock; - struct thingm_rgb *rgb; -}; - -static int thingm_send(struct thingm_device *tdev, u8 buf[REPORT_SIZE]) -{ - int ret; - - hid_dbg(tdev->hdev, "-> %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n", - buf[0], buf[1], buf[2], buf[3], buf[4], - buf[5], buf[6], buf[7], buf[8]); - - mutex_lock(&tdev->lock); - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - - mutex_unlock(&tdev->lock); - - return ret < 0 ? ret : 0; -} - -static int thingm_recv(struct thingm_device *tdev, u8 buf[REPORT_SIZE]) -{ - int ret; - - /* - * A read consists of two operations: sending the read command - * and the actual read from the device. Use the mutex to protect - * the full sequence of both operations. 
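The deleted thingm_recv, like hidled_recv in the new core, implements a register read as a SET_REPORT (submit the command) followed by a GET_REPORT (collect the reply), with one mutex held across both steps so concurrent readers cannot interleave. A minimal sketch of that pattern, assuming a caller-supplied lock and buffer:

#include <linux/hid.h>
#include <linux/mutex.h>

/* Sketch only: a two-step feature-report read, serialized by one lock. */
static int feature_read(struct hid_device *hdev, struct mutex *lock,
                        u8 *buf, size_t len)
{
        int ret;

        mutex_lock(lock);

        /* step 1: push the command bytes out as a feature report */
        ret = hid_hw_raw_request(hdev, buf[0], buf, len,
                                 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
        if (ret < 0)
                goto out;

        /* step 2: read the device's reply back into the same buffer */
        ret = hid_hw_raw_request(hdev, buf[0], buf, len,
                                 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
out:
        mutex_unlock(lock);
        return ret < 0 ? ret : 0;
}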
- */ - mutex_lock(&tdev->lock); - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) - goto err; - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); - if (ret < 0) - goto err; - - ret = 0; - - hid_dbg(tdev->hdev, "<- %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n", - buf[0], buf[1], buf[2], buf[3], buf[4], - buf[5], buf[6], buf[7], buf[8]); -err: - mutex_unlock(&tdev->lock); - return ret; -} - -static int thingm_version(struct thingm_device *tdev) -{ - u8 buf[REPORT_SIZE] = { REPORT_ID, 'v', 0, 0, 0, 0, 0, 0, 0 }; - int err; - - err = thingm_recv(tdev, buf); - if (err) - return err; - - tdev->version.major = buf[3]; - tdev->version.minor = buf[4]; - - return 0; -} - -static int thingm_write_color(struct thingm_rgb *rgb) -{ - u8 buf[REPORT_SIZE] = { REPORT_ID, 'c', 0, 0, 0, 0, 0, rgb->num, 0 }; - - buf[2] = rgb->red.ldev.brightness; - buf[3] = rgb->green.ldev.brightness; - buf[4] = rgb->blue.ldev.brightness; - - return thingm_send(rgb->tdev, buf); -} - -static int thingm_led_set(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct thingm_led *led = container_of(ldev, struct thingm_led, ldev); - - return thingm_write_color(led->rgb); -} - -static int thingm_init_led(struct thingm_led *led, const char *color_name, - struct thingm_rgb *rgb, int minor) -{ - snprintf(led->name, sizeof(led->name), "thingm%d:%s:led%d", - minor, color_name, rgb->num); - led->ldev.name = led->name; - led->ldev.max_brightness = 255; - led->ldev.brightness_set_blocking = thingm_led_set; - led->ldev.flags = LED_HW_PLUGGABLE; - led->rgb = rgb; - return devm_led_classdev_register(&rgb->tdev->hdev->dev, &led->ldev); -} - -static int thingm_init_rgb(struct thingm_rgb *rgb) -{ - const int minor = ((struct hidraw *) rgb->tdev->hdev->hidraw)->minor; - int err; - - /* Register the red diode */ - err = thingm_init_led(&rgb->red, "red", rgb, minor); - if (err) - return err; - - /* Register the green diode */ - err = thingm_init_led(&rgb->green, "green", rgb, minor); - if (err) - return err; - - /* Register the blue diode */ - return thingm_init_led(&rgb->blue, "blue", rgb, minor); -} - -static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id) -{ - struct thingm_device *tdev; - int i, err; - - tdev = devm_kzalloc(&hdev->dev, sizeof(struct thingm_device), - GFP_KERNEL); - if (!tdev) - return -ENOMEM; - - tdev->hdev = hdev; - hid_set_drvdata(hdev, tdev); - - err = hid_parse(hdev); - if (err) - return err; - - mutex_init(&tdev->lock); - - err = thingm_version(tdev); - if (err) - return err; - - hid_dbg(hdev, "firmware version: %c.%c\n", - tdev->version.major, tdev->version.minor); - - for (i = 0; i < ARRAY_SIZE(thingm_fwinfo) && !tdev->fwinfo; ++i) - if (thingm_fwinfo[i].major == tdev->version.major) - tdev->fwinfo = &thingm_fwinfo[i]; - - if (!tdev->fwinfo) { - hid_err(hdev, "unsupported firmware %c\n", tdev->version.major); - return -ENODEV; - } - - tdev->rgb = devm_kzalloc(&hdev->dev, - sizeof(struct thingm_rgb) * tdev->fwinfo->numrgb, - GFP_KERNEL); - if (!tdev->rgb) - return -ENOMEM; - - err = hid_hw_start(hdev, HID_CONNECT_HIDRAW); - if (err) - return err; - - for (i = 0; i < tdev->fwinfo->numrgb; ++i) { - struct thingm_rgb *rgb = tdev->rgb + i; - - rgb->tdev = tdev; - rgb->num = tdev->fwinfo->first + i; - err = thingm_init_rgb(rgb); - if (err) { - hid_hw_stop(hdev); - return err; - } - } - - return 0; -} - -static const struct hid_device_id 
thingm_table[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, - { } -}; -MODULE_DEVICE_TABLE(hid, thingm_table); - -static struct hid_driver thingm_driver = { - .name = "thingm", - .probe = thingm_probe, - .id_table = thingm_table, -}; - -module_hid_driver(thingm_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Vivien Didelot <vivien.didelot@savoirfairelinux.com>"); -MODULE_DESCRIPTION("ThingM blink(1) USB RGB LED driver"); diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 2e021ba8ff05..b3ec4f2de875 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -1020,6 +1020,7 @@ static int i2c_hid_probe(struct i2c_client *client, pm_runtime_get_noresume(&client->dev); pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); + device_enable_async_suspend(&client->dev); ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) @@ -1106,6 +1107,14 @@ static int i2c_hid_remove(struct i2c_client *client) return 0; } +static void i2c_hid_shutdown(struct i2c_client *client) +{ + struct i2c_hid *ihid = i2c_get_clientdata(client); + + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); + free_irq(client->irq, ihid); +} + #ifdef CONFIG_PM_SLEEP static int i2c_hid_suspend(struct device *dev) { @@ -1230,7 +1239,7 @@ static struct i2c_driver i2c_hid_driver = { .probe = i2c_hid_probe, .remove = i2c_hid_remove, - + .shutdown = i2c_hid_shutdown, .id_table = i2c_hid_id_table, }; diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 16b6f11a0700..99ec3ff7563b 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -51,10 +51,26 @@ struct uhid_device { u32 report_id; u32 report_type; struct uhid_event report_buf; + struct work_struct worker; }; static struct miscdevice uhid_misc; +static void uhid_device_add_worker(struct work_struct *work) +{ + struct uhid_device *uhid = container_of(work, struct uhid_device, worker); + int ret; + + ret = hid_add_device(uhid->hid); + if (ret) { + hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); + + hid_destroy_device(uhid->hid); + uhid->hid = NULL; + uhid->running = false; + } +} + static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; @@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid, uhid->hid = hid; uhid->running = true; - ret = hid_add_device(hid); - if (ret) { - hid_err(hid, "Cannot register HID device\n"); - goto err_hid; - } + /* Adding a HID device is done through a worker, to allow HID drivers + * that use feature requests during .probe to work; otherwise they would + * be blocked on devlock, which is held by uhid_char_write. 
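The uhid rework above defers hid_add_device() to a work item so a HID driver's .probe can issue feature requests without deadlocking on the lock the writer holds. A minimal sketch of that defer-then-flush shape (names are illustrative, not uhid's API):

#include <linux/hid.h>
#include <linux/workqueue.h>

struct my_dev {
        struct hid_device *hid;
        struct work_struct worker;
        bool running;
};

static void my_add_worker(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev, worker);

        /* runs without the writer's lock, so .probe may talk to us */
        if (hid_add_device(d->hid)) {
                hid_destroy_device(d->hid);
                d->hid = NULL;
                d->running = false;
        }
}

/* creation path: publish state first, then defer the blocking part */
static void my_create(struct my_dev *d)
{
        INIT_WORK(&d->worker, my_add_worker);
        d->running = true;
        schedule_work(&d->worker);
}

/* teardown must flush the worker before destroying the device */
static void my_destroy(struct my_dev *d)
{
        cancel_work_sync(&d->worker);
        if (d->hid)
                hid_destroy_device(d->hid);
}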
+ */ + schedule_work(&uhid->worker); return 0; -err_hid: - hid_destroy_device(hid); - uhid->hid = NULL; - uhid->running = false; err_free: kfree(uhid->rd_data); uhid->rd_data = NULL; @@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid) uhid->running = false; wake_up_interruptible(&uhid->report_wait); + cancel_work_sync(&uhid->worker); + hid_destroy_device(uhid->hid); kfree(uhid->rd_data); @@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file) init_waitqueue_head(&uhid->waitq); init_waitqueue_head(&uhid->report_wait); uhid->running = false; + INIT_WORK(&uhid->worker, uhid_device_add_worker); file->private_data = uhid; nonseekable_open(inode, file); diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index a7f886961830..fc1e65a263a4 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -30,6 +30,7 @@ #define ADT7411_REG_CFG1 0x18 #define ADT7411_CFG1_START_MONITOR (1 << 0) +#define ADT7411_CFG1_RESERVED_BIT1 (1 << 1) #define ADT7411_CFG1_RESERVED_BIT3 (1 << 3) #define ADT7411_REG_CFG2 0x19 @@ -37,6 +38,9 @@ #define ADT7411_REG_CFG3 0x1a #define ADT7411_CFG3_ADC_CLK_225 (1 << 0) +#define ADT7411_CFG3_RESERVED_BIT1 (1 << 1) +#define ADT7411_CFG3_RESERVED_BIT2 (1 << 2) +#define ADT7411_CFG3_RESERVED_BIT3 (1 << 3) #define ADT7411_CFG3_REF_VDD (1 << 4) #define ADT7411_REG_DEVICE_ID 0x4d @@ -280,6 +284,45 @@ static int adt7411_detect(struct i2c_client *client, return 0; } +static int adt7411_init_device(struct adt7411_data *data) +{ + int ret; + u8 val; + + ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG3); + if (ret < 0) + return ret; + + /* + * We must only write zero to bit 1 and bit 2 and only one to bit 3 + * according to the datasheet. + */ + val = ret; + val &= ~(ADT7411_CFG3_RESERVED_BIT1 | ADT7411_CFG3_RESERVED_BIT2); + val |= ADT7411_CFG3_RESERVED_BIT3; + + ret = i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG3, val); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG1); + if (ret < 0) + return ret; + + /* + * We must only write zero to bit 1 and only one to bit 3 according to + * the datasheet. 
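The adt7411 init above is a classic read-modify-write against registers with reserved bits: force the must-be-zero bits clear and the must-be-one bit set, and only then enable the function you actually want. A sketch of the pattern with illustrative masks (not taken from any datasheet):

#include <linux/bitops.h>
#include <linux/i2c.h>

#define CFG_REG         0x18                    /* hypothetical register */
#define CFG_MUST_BE_0   (BIT(1) | BIT(2))       /* reserved: write as 0 */
#define CFG_MUST_BE_1   BIT(3)                  /* reserved: write as 1 */
#define CFG_ENABLE      BIT(0)

static int cfg_init(struct i2c_client *client)
{
        int ret = i2c_smbus_read_byte_data(client, CFG_REG);
        u8 val;

        if (ret < 0)
                return ret;

        val = ret;
        val &= ~CFG_MUST_BE_0;          /* clear must-be-zero reserved bits */
        val |= CFG_MUST_BE_1;           /* keep the must-be-one bit set */
        val |= CFG_ENABLE;              /* only now enable the function */

        return i2c_smbus_write_byte_data(client, CFG_REG, val);
}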
+ */ + val = ret; + val &= ~ADT7411_CFG1_RESERVED_BIT1; + val |= ADT7411_CFG1_RESERVED_BIT3; + + /* enable monitoring */ + val |= ADT7411_CFG1_START_MONITOR; + + return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val); +} + static int adt7411_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -297,10 +340,7 @@ static int adt7411_probe(struct i2c_client *client, mutex_init(&data->device_lock); mutex_init(&data->update_lock); - /* According to the datasheet, we must only write 1 to bit 3 */ - ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, - ADT7411_CFG1_RESERVED_BIT3 - | ADT7411_CFG1_START_MONITOR, 1); + ret = adt7411_init_device(data); if (ret < 0) return ret; diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c index 2b2ff67026be..48633e541dc3 100644 --- a/drivers/hwmon/ftsteutates.c +++ b/drivers/hwmon/ftsteutates.c @@ -242,7 +242,7 @@ static int fts_wd_set_resolution(struct fts_data *data, } if (resolution == seconds) - set_bit(1, (unsigned long *)&ret); + ret |= BIT(1); else ret &= ~BIT(1); diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index b550ba5fa58a..89449871bca7 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c @@ -110,24 +110,24 @@ static int iio_hwmon_probe(struct platform_device *pdev) switch (type) { case IIO_VOLTAGE: - a->dev_attr.attr.name = kasprintf(GFP_KERNEL, - "in%d_input", - in_i++); + a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, + "in%d_input", + in_i++); break; case IIO_TEMP: - a->dev_attr.attr.name = kasprintf(GFP_KERNEL, - "temp%d_input", - temp_i++); + a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, + "temp%d_input", + temp_i++); break; case IIO_CURRENT: - a->dev_attr.attr.name = kasprintf(GFP_KERNEL, - "curr%d_input", - curr_i++); + a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, + "curr%d_input", + curr_i++); break; case IIO_HUMIDITYRELATIVE: - a->dev_attr.attr.name = kasprintf(GFP_KERNEL, - "humidity%d_input", - humidity_i++); + a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, + "humidity%d_input", + humidity_i++); break; default: ret = -EINVAL; diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 547a9c87c68c..92f9d4bbf597 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -220,7 +220,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) struct device *dev = &client->dev; struct device *hwmon_dev; struct lm75_data *data; - int status; + int status, err; u8 set_mask, clr_mask; int new; enum lm75_type kind = id->driver_data; @@ -331,7 +331,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) if (status != new) i2c_smbus_write_byte_data(client, LM75_REG_CONF, new); - devm_add_action(dev, lm75_remove, data); + err = devm_add_action_or_reset(dev, lm75_remove, data); + if (err) + return err; dev_dbg(dev, "Config %02x\n", new); diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 1e8237478b2f..496e771b363f 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -529,7 +529,7 @@ static int lm90_update_limits(struct device *dev) return val; data->temp_hyst = val; - lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH); + val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH); if (val < 0) return val; data->temp11[REMOTE_LOW] = val << 8; @@ -1551,9 +1551,7 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) if (config != data->config_orig) /* Only write if changed */ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config); - 
devm_add_action(&client->dev, lm90_restore_conf, data); - - return 0; + return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data); } static bool lm90_is_tripped(struct i2c_client *client, u16 *status) @@ -1640,7 +1638,9 @@ static int lm90_probe(struct i2c_client *client, return err; } - devm_add_action(dev, lm90_regulator_disable, regulator); + err = devm_add_action_or_reset(dev, lm90_regulator_disable, regulator); + if (err) + return err; data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL); if (!data) @@ -1696,7 +1696,9 @@ static int lm90_probe(struct i2c_client *client, err = device_create_file(dev, &dev_attr_pec); if (err) return err; - devm_add_action(dev, lm90_remove_pec, dev); + err = devm_add_action_or_reset(dev, lm90_remove_pec, dev); + if (err) + return err; } hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c index b73a48832732..6ea99cd6ae79 100644 --- a/drivers/hwmon/sht3x.c +++ b/drivers/hwmon/sht3x.c @@ -720,7 +720,7 @@ static int sht3x_probe(struct i2c_client *client, data->setup.blocking_io = false; data->setup.high_precision = true; data->mode = 0; - data->last_update = 0; + data->last_update = jiffies - msecs_to_jiffies(3000); data->client = client; crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL); diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index a942a2574a4d..8479ac5eb853 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -227,7 +227,9 @@ static int tmp102_probe(struct i2c_client *client, tmp102->config_orig = regval; - devm_add_action(dev, tmp102_restore_config, tmp102); + err = devm_add_action_or_reset(dev, tmp102_restore_config, tmp102); + if (err) + return err; regval &= ~TMP102_CONFIG_CLEAR; regval |= TMP102_CONFIG_SET; diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index c752447fbac7..fa6880b8060a 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -98,6 +98,7 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev) } regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(regmap)) return PTR_ERR(regmap); diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d83ab82672e4..2de4cad9c5ed 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); static int etm_count; static struct etm_drvdata *etmdrvdata[NR_CPUS]; +static enum cpuhp_state hp_online; + /* * Memory mapped writes to clear os lock are not supported on some processors * and OS lock must be unlocked before any memory mapped access on such @@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) /* * Configure the ETM only if the CPU is online. If it isn't online - * hw configuration will take place when 'CPU_STARTING' is received - * in @etm_cpu_callback. + * hw configuration will take place on the local CPU during bring up. 
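The coresight hunks here convert a raw CPU notifier to the hotplug state machine: a fixed CPUHP_AP_..._STARTING slot for the callbacks that must run on the hotplugged CPU itself, plus a dynamic CPUHP_AP_ONLINE_DYN slot whose returned state id must be stored for later removal. A trimmed sketch of that registration shape, mirroring the hunk (callbacks stubbed out):

#include <linux/cpuhotplug.h>

static enum cpuhp_state hp_online;

static int my_starting_cpu(unsigned int cpu) { return 0; }
static int my_dying_cpu(unsigned int cpu) { return 0; }
static int my_online_cpu(unsigned int cpu) { return 0; }

static int my_register_hotplug(void)
{
        int ret;

        /* fixed slot: callbacks run on the CPU coming up or going down */
        cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
                                  "AP_ARM_CORESIGHT_STARTING",
                                  my_starting_cpu, my_dying_cpu);

        /* dynamic slot: the allocated state id comes back in ret */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "AP_ARM_CORESIGHT_ONLINE",
                                        my_online_cpu, NULL);
        if (ret < 0)
                return ret;
        hp_online = ret;
        return 0;
}

static void my_unregister_hotplug(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
        if (hp_online)
                cpuhp_remove_state_nocalls(hp_online);
}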
*/ if (cpu_online(drvdata->cpu)) { ret = smp_call_function_single(drvdata->cpu, @@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = { .source_ops = &etm_source_ops, }; -static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int etm_online_cpu(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - if (!etmdrvdata[cpu]) - goto out; + return 0; - switch (action & (~CPU_TASKS_FROZEN)) { - case CPU_STARTING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (!etmdrvdata[cpu]->os_unlock) { - etm_os_unlock(etmdrvdata[cpu]); - etmdrvdata[cpu]->os_unlock = true; - } - - if (local_read(&etmdrvdata[cpu]->mode)) - etm_enable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) + coresight_enable(etmdrvdata[cpu]->csdev); + return 0; +} - case CPU_ONLINE: - if (etmdrvdata[cpu]->boot_enable && - !etmdrvdata[cpu]->sticky_enable) - coresight_enable(etmdrvdata[cpu]->csdev); - break; +static int etm_starting_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; - case CPU_DYING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (local_read(&etmdrvdata[cpu]->mode)) - etm_disable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + spin_lock(&etmdrvdata[cpu]->spinlock); + if (!etmdrvdata[cpu]->os_unlock) { + etm_os_unlock(etmdrvdata[cpu]); + etmdrvdata[cpu]->os_unlock = true; } -out: - return NOTIFY_OK; + + if (local_read(&etmdrvdata[cpu]->mode)) + etm_enable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; } -static struct notifier_block etm_cpu_notifier = { - .notifier_call = etm_cpu_callback, -}; +static int etm_dying_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (local_read(&etmdrvdata[cpu]->mode)) + etm_disable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; +} static bool etm_arch_supported(u8 arch) { @@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm_count++) - register_hotcpu_notifier(&etm_cpu_notifier); - + if (!etm_count++) { + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, + "AP_ARM_CORESIGHT_STARTING", + etm_starting_cpu, etm_dying_cpu); + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "AP_ARM_CORESIGHT_ONLINE", + etm_online_cpu, NULL); + if (ret < 0) + goto err_arch_supported; + hp_online = ret; + } put_online_cpus(); if (etm_arch_supported(drvdata->arch) == false) { @@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); - if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; @@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - if (--etm_count == 0) - unregister_hotcpu_notifier(&etm_cpu_notifier); + if (--etm_count == 0) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + if (hp_online) + cpuhp_remove_state_nocalls(hp_online); + } return ret; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 462f0dc15757..1a5e0d14c1dd 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -48,6 +48,8 @@ static int 
etm4_count; static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; static void etm4_set_default(struct etmv4_config *config); +static enum cpuhp_state hp_online; + static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { /* Writing any value to ETMOSLAR unlocks the trace registers */ @@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config) config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; } -static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int etm4_online_cpu(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - if (!etmdrvdata[cpu]) - goto out; - - switch (action & (~CPU_TASKS_FROZEN)) { - case CPU_STARTING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (!etmdrvdata[cpu]->os_unlock) { - etm4_os_unlock(etmdrvdata[cpu]); - etmdrvdata[cpu]->os_unlock = true; - } - - if (local_read(&etmdrvdata[cpu]->mode)) - etm4_enable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + return 0; - case CPU_ONLINE: - if (etmdrvdata[cpu]->boot_enable && - !etmdrvdata[cpu]->sticky_enable) - coresight_enable(etmdrvdata[cpu]->csdev); - break; + if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) + coresight_enable(etmdrvdata[cpu]->csdev); + return 0; +} - case CPU_DYING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (local_read(&etmdrvdata[cpu]->mode)) - etm4_disable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; +static int etm4_starting_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (!etmdrvdata[cpu]->os_unlock) { + etm4_os_unlock(etmdrvdata[cpu]); + etmdrvdata[cpu]->os_unlock = true; } -out: - return NOTIFY_OK; + + if (local_read(&etmdrvdata[cpu]->mode)) + etm4_enable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; } -static struct notifier_block etm4_cpu_notifier = { - .notifier_call = etm4_cpu_callback, -}; +static int etm4_dying_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (local_read(&etmdrvdata[cpu]->mode)) + etm4_disable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; +} static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) { @@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm4_count++) - register_hotcpu_notifier(&etm4_cpu_notifier); + if (!etm4_count++) { + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING, + "AP_ARM_CORESIGHT4_STARTING", + etm4_starting_cpu, etm4_dying_cpu); + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "AP_ARM_CORESIGHT4_ONLINE", + etm4_online_cpu, NULL); + if (ret < 0) + goto err_arch_supported; + hp_online = ret; + } put_online_cpus(); @@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - if (--etm4_count == 0) - unregister_hotcpu_notifier(&etm4_cpu_notifier); + if (--etm4_count == 0) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING); + if (hp_online) + cpuhp_remove_state_nocalls(hp_online); + } return ret; } diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index 75dd6d041241..11e2a1fc10e9 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -71,7 +71,7 @@ static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req) if (rc) goto exit; - rc = 
be64_to_cpu(msg.params[1]); + rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) { rc = i2c_opal_translate_error(rc); goto exit; diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index 70f0a2754c13..004243bd84db 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -695,7 +695,7 @@ static const struct ide_port_info cmd640_port_info __initconst = { .pio_mask = ATA_PIO5, }; -static int cmd640x_init_one(unsigned long base, unsigned long ctl) +static int __init cmd640x_init_one(unsigned long base, unsigned long ctl) { if (!request_region(base, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index f94baadbf424..0ceae5cbd89a 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -1012,7 +1012,7 @@ static int init_chipset_hpt366(struct pci_dev *dev) pci_read_config_dword(dev, 0x40, &itr1); /* Detect PCI clock by looking at cmd_high_time. */ - switch((itr1 >> 8) & 0x07) { + switch ((itr1 >> 8) & 0x0f) { case 0x09: pci_clk = 40; break; diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 12fa04997dcc..9ecf4e35adcd 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -2052,12 +2052,12 @@ static int __init idetape_init(void) error = driver_register(&idetape_driver.gen_driver); if (error) - goto out_free_driver; + goto out_free_chrdev; return 0; -out_free_driver: - driver_unregister(&idetape_driver.gen_driver); +out_free_chrdev: + unregister_chrdev(IDETAPE_MAJOR, "ht"); out_free_class: class_destroy(idetape_sysfs_class); out: diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 7f0434f7e486..0c5d3a99468e 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -707,6 +707,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr; *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN; } + break; case controller_un_ata6: case controller_k2_ata6: { /* 100Mhz cell */ diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 9b2ef248788d..67ec58f9ef99 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1055,7 +1055,7 @@ static const struct idle_cpu idle_cpu_dnv = { static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem), - ICPU(INTEL_FAM6_WESTMERE2, idle_cpu_nehalem), + ICPU(INTEL_FAM6_NEHALEM_G, idle_cpu_nehalem), ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem), ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem), ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem), diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c index b637f1af842e..997e3e97f573 100644 --- a/drivers/input/keyboard/clps711x-keypad.c +++ b/drivers/input/keyboard/clps711x-keypad.c @@ -101,7 +101,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev) return -ENOMEM; priv->syscon = - syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1"); + syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1"); if (IS_ERR(priv->syscon)) return PTR_ERR(priv->syscon); @@ -181,7 +181,7 @@ static int clps711x_keypad_remove(struct platform_device *pdev) } static const struct of_device_id clps711x_keypad_of_match[] = { - { .compatible = "cirrus,clps711x-keypad", }, + { .compatible = "cirrus,ep7209-keypad", }, { } }; MODULE_DEVICE_TABLE(of, clps711x_keypad_of_match); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 
ad0860383cb3..d432ca828472 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -89,8 +89,8 @@ config MSM_IOMMU bool "MSM IOMMU Support" depends on ARM depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST - depends on BROKEN select IOMMU_API + select IOMMU_IO_PGTABLE_ARMV7S help Support for the IOMMUs found on certain Qualcomm SOCs. These IOMMUs allow virtualization of the address space used by most @@ -111,6 +111,7 @@ config AMD_IOMMU select PCI_PRI select PCI_PASID select IOMMU_API + select IOMMU_IOVA depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in @@ -343,4 +344,22 @@ config MTK_IOMMU If unsure, say N here. +config MTK_IOMMU_V1 + bool "MTK IOMMU Version 1 (M4U gen1) Support" + depends on ARM + depends on ARCH_MEDIATEK || COMPILE_TEST + select ARM_DMA_USE_IOMMU + select IOMMU_API + select MEMORY + select MTK_SMI + select COMMON_CLK_MT2701_MMSYS + select COMMON_CLK_MT2701_IMGSYS + select COMMON_CLK_MT2701_VDECSYS + help + Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is + a Multimedia Memory Management Unit. This option enables remapping of + DMA memory accesses for the multimedia subsystem. + + If unsure, say N here. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index c6edb31bf8c6..195f7b997d8e 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o -obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o +obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o @@ -18,6 +18,7 @@ obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o +obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 634f636393d5..33c177ba93be 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -21,6 +21,7 @@ #include <linux/pci.h> #include <linux/acpi.h> #include <linux/amba/bus.h> +#include <linux/platform_device.h> #include <linux/pci-ats.h> #include <linux/bitmap.h> #include <linux/slab.h> @@ -38,6 +39,7 @@ #include <linux/dma-contiguous.h> #include <linux/irqdomain.h> #include <linux/percpu.h> +#include <linux/iova.h> #include <asm/irq_remapping.h> #include <asm/io_apic.h> #include <asm/apic.h> @@ -56,6 +58,17 @@ #define LOOP_TIMEOUT 100000 +/* IO virtual address start page frame number */ +#define IOVA_START_PFN (1) +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) + +/* Reserved IOVA ranges */ +#define MSI_RANGE_START (0xfee00000) +#define MSI_RANGE_END (0xfeefffff) +#define HT_RANGE_START (0xfd00000000ULL) +#define HT_RANGE_END (0xffffffffffULL) + /* * This bitmap is used to advertise the page sizes our hardware support * to the IOMMU core, which will then use this information to split @@ -76,6 +89,25 @@ LIST_HEAD(ioapic_map); LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); +#define FLUSH_QUEUE_SIZE 256 + +struct flush_queue_entry { + unsigned long 
iova_pfn; + unsigned long pages; + struct dma_ops_domain *dma_dom; +}; + +struct flush_queue { + spinlock_t lock; + unsigned next; + struct flush_queue_entry *entries; +}; + +DEFINE_PER_CPU(struct flush_queue, flush_queue); + +static atomic_t queue_timer_on; +static struct timer_list queue_timer; + /* * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. @@ -121,44 +153,19 @@ static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); /* - * For dynamic growth the aperture size is split into ranges of 128MB of - * DMA address space each. This struct represents one such range. - */ -struct aperture_range { - - spinlock_t bitmap_lock; - - /* address allocation bitmap */ - unsigned long *bitmap; - unsigned long offset; - unsigned long next_bit; - - /* - * Array of PTE pages for the aperture. In this array we save all the - * leaf pages of the domain page table used for the aperture. This way - * we don't need to walk the page table to find a specific PTE. We can - * just calculate its address in constant time. - */ - u64 *pte_pages[64]; -}; - -/* * Data container for a dma_ops specific protection domain */ struct dma_ops_domain { /* generic protection domain information */ struct protection_domain domain; - /* size of the aperture for the mappings */ - unsigned long aperture_size; - - /* aperture index we start searching for free addresses */ - u32 __percpu *next_index; - - /* address space relevant data */ - struct aperture_range *aperture[APERTURE_MAX_RANGES]; + /* IOVA RB-Tree */ + struct iova_domain iovad; }; +static struct iova_domain reserved_iova_ranges; +static struct lock_class_key reserved_rbtree_key; + /**************************************************************************** * * Helper functions @@ -224,6 +231,12 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom) return container_of(dom, struct protection_domain, domain); } +static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain) +{ + BUG_ON(domain->flags != PD_DMA_OPS_MASK); + return container_of(domain, struct dma_ops_domain, domain); +} + static struct iommu_dev_data *alloc_dev_data(u16 devid) { struct iommu_dev_data *dev_data; @@ -391,43 +404,6 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum) } /* - * This function actually applies the mapping to the page table of the - * dma_ops domain. - */ -static void alloc_unity_mapping(struct dma_ops_domain *dma_dom, - struct unity_map_entry *e) -{ - u64 addr; - - for (addr = e->address_start; addr < e->address_end; - addr += PAGE_SIZE) { - if (addr < dma_dom->aperture_size) - __set_bit(addr >> PAGE_SHIFT, - dma_dom->aperture[0]->bitmap); - } -} - -/* - * Inits the unity mappings required for a specific device - */ -static void init_unity_mappings_for_device(struct device *dev, - struct dma_ops_domain *dma_dom) -{ - struct unity_map_entry *e; - int devid; - - devid = get_device_id(dev); - if (devid < 0) - return; - - list_for_each_entry(e, &amd_iommu_unity_map, list) { - if (!(devid >= e->devid_start && devid <= e->devid_end)) - continue; - alloc_unity_mapping(dma_dom, e); - } -} - -/* * This function checks if the driver got a valid device from the caller to * avoid dereferencing invalid pointers. 
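The per-CPU flush_queue introduced above batches freed IOVA ranges so the IOTLB need not be flushed on every unmap; a full queue forces a flush before new entries are parked. A loose sketch of the enqueue side (the driver's timer and drain logic are omitted, and the entries array is assumed to be allocated at init):

#include <linux/percpu.h>
#include <linux/spinlock.h>

#define FLUSH_QUEUE_SIZE 256

struct flush_queue_entry {
        unsigned long iova_pfn;
        unsigned long pages;
};

struct flush_queue {
        spinlock_t lock;
        unsigned next;
        struct flush_queue_entry *entries;      /* FLUSH_QUEUE_SIZE slots */
};

static DEFINE_PER_CPU(struct flush_queue, flush_queue);

static void flush_all_and_drain(void) { /* IOMMU TLB flush goes here */ }

/* park a freed IOVA range until the next TLB flush makes reuse safe */
static void queue_iova_free(unsigned long pfn, unsigned long pages)
{
        struct flush_queue *queue = get_cpu_ptr(&flush_queue);
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        if (queue->next == FLUSH_QUEUE_SIZE) {
                /* queue full: flush now so parked ranges become reusable */
                flush_all_and_drain();
                queue->next = 0;
        }
        queue->entries[queue->next].iova_pfn = pfn;
        queue->entries[queue->next].pages = pages;
        queue->next++;
        spin_unlock_irqrestore(&queue->lock, flags);
        put_cpu_ptr(&flush_queue);
}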
*/ @@ -454,22 +430,12 @@ static bool check_device(struct device *dev) static void init_iommu_group(struct device *dev) { - struct dma_ops_domain *dma_domain; - struct iommu_domain *domain; struct iommu_group *group; group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return; - domain = iommu_group_default_domain(group); - if (!domain) - goto out; - - dma_domain = to_pdomain(domain)->priv; - - init_unity_mappings_for_device(dev, dma_domain); -out: iommu_group_put(group); } @@ -1220,7 +1186,7 @@ static void domain_flush_complete(struct protection_domain *domain) int i; for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) + if (domain && !domain->dev_iommu[i]) continue; /* @@ -1397,8 +1363,9 @@ static u64 *fetch_pte(struct protection_domain *domain, static int iommu_map_page(struct protection_domain *dom, unsigned long bus_addr, unsigned long phys_addr, + unsigned long page_size, int prot, - unsigned long page_size) + gfp_t gfp) { u64 __pte, *pte; int i, count; @@ -1410,7 +1377,7 @@ static int iommu_map_page(struct protection_domain *dom, return -EINVAL; count = PAGE_SIZE_PTE_COUNT(page_size); - pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); + pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp); if (!pte) return -ENOMEM; @@ -1474,320 +1441,37 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, /**************************************************************************** * * The next functions belong to the address allocator for the dma_ops - * interface functions. They work like the allocators in the other IOMMU - * drivers. Its basically a bitmap which marks the allocated pages in - * the aperture. Maybe it could be enhanced in the future to a more - * efficient allocator. + * interface functions. * ****************************************************************************/ -/* - * The address allocator core functions. - * - * called with domain->lock held - */ -/* - * Used to reserve address ranges in the aperture (e.g. for exclusion - * ranges. - */ -static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, - unsigned long start_page, - unsigned int pages) +static unsigned long dma_ops_alloc_iova(struct device *dev, + struct dma_ops_domain *dma_dom, + unsigned int pages, u64 dma_mask) { - unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT; - - if (start_page + pages > last_page) - pages = last_page - start_page; - - for (i = start_page; i < start_page + pages; ++i) { - int index = i / APERTURE_RANGE_PAGES; - int page = i % APERTURE_RANGE_PAGES; - __set_bit(page, dom->aperture[index]->bitmap); - } -} + unsigned long pfn = 0; -/* - * This function is used to add a new aperture range to an existing - * aperture in case of dma_ops domain allocation or address allocation - * failure. 
- */ -static int alloc_new_range(struct dma_ops_domain *dma_dom, - bool populate, gfp_t gfp) -{ - int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - unsigned long i, old_size, pte_pgsize; - struct aperture_range *range; - struct amd_iommu *iommu; - unsigned long flags; + pages = __roundup_pow_of_two(pages); -#ifdef CONFIG_IOMMU_STRESS - populate = false; -#endif + if (dma_mask > DMA_BIT_MASK(32)) + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(DMA_BIT_MASK(32))); - if (index >= APERTURE_MAX_RANGES) - return -ENOMEM; - - range = kzalloc(sizeof(struct aperture_range), gfp); - if (!range) - return -ENOMEM; - - range->bitmap = (void *)get_zeroed_page(gfp); - if (!range->bitmap) - goto out_free; - - range->offset = dma_dom->aperture_size; - - spin_lock_init(&range->bitmap_lock); - - if (populate) { - unsigned long address = dma_dom->aperture_size; - int i, num_ptes = APERTURE_RANGE_PAGES / 512; - u64 *pte, *pte_page; - - for (i = 0; i < num_ptes; ++i) { - pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, - &pte_page, gfp); - if (!pte) - goto out_free; - - range->pte_pages[i] = pte_page; - - address += APERTURE_RANGE_SIZE / 64; - } - } + if (!pfn) + pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask)); - spin_lock_irqsave(&dma_dom->domain.lock, flags); - - /* First take the bitmap_lock and then publish the range */ - spin_lock(&range->bitmap_lock); - - old_size = dma_dom->aperture_size; - dma_dom->aperture[index] = range; - dma_dom->aperture_size += APERTURE_RANGE_SIZE; - - /* Reserve address range used for MSI messages */ - if (old_size < MSI_ADDR_BASE_LO && - dma_dom->aperture_size > MSI_ADDR_BASE_LO) { - unsigned long spage; - int pages; - - pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE); - spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT; - - dma_ops_reserve_addresses(dma_dom, spage, pages); - } - - /* Initialize the exclusion range if necessary */ - for_each_iommu(iommu) { - if (iommu->exclusion_start && - iommu->exclusion_start >= dma_dom->aperture[index]->offset - && iommu->exclusion_start < dma_dom->aperture_size) { - unsigned long startpage; - int pages = iommu_num_pages(iommu->exclusion_start, - iommu->exclusion_length, - PAGE_SIZE); - startpage = iommu->exclusion_start >> PAGE_SHIFT; - dma_ops_reserve_addresses(dma_dom, startpage, pages); - } - } - - /* - * Check for areas already mapped as present in the new aperture - * range and mark those pages as reserved in the allocator. Such - * mappings may already exist as a result of requested unity - * mappings for devices. 
- */ - for (i = dma_dom->aperture[index]->offset; - i < dma_dom->aperture_size; - i += pte_pgsize) { - u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - continue; - - dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, - pte_pgsize >> 12); - } - - update_domain(&dma_dom->domain); - - spin_unlock(&range->bitmap_lock); - - spin_unlock_irqrestore(&dma_dom->domain.lock, flags); - - return 0; - -out_free: - update_domain(&dma_dom->domain); - - free_page((unsigned long)range->bitmap); - - kfree(range); - - return -ENOMEM; + return (pfn << PAGE_SHIFT); } -static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom, - struct aperture_range *range, - unsigned long pages, - unsigned long dma_mask, - unsigned long boundary_size, - unsigned long align_mask, - bool trylock) +static void dma_ops_free_iova(struct dma_ops_domain *dma_dom, + unsigned long address, + unsigned int pages) { - unsigned long offset, limit, flags; - dma_addr_t address; - bool flush = false; - - offset = range->offset >> PAGE_SHIFT; - limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, - dma_mask >> PAGE_SHIFT); - - if (trylock) { - if (!spin_trylock_irqsave(&range->bitmap_lock, flags)) - return -1; - } else { - spin_lock_irqsave(&range->bitmap_lock, flags); - } - - address = iommu_area_alloc(range->bitmap, limit, range->next_bit, - pages, offset, boundary_size, align_mask); - if (address == -1) { - /* Nothing found, retry one time */ - address = iommu_area_alloc(range->bitmap, limit, - 0, pages, offset, boundary_size, - align_mask); - flush = true; - } - - if (address != -1) - range->next_bit = address + pages; - - spin_unlock_irqrestore(&range->bitmap_lock, flags); - - if (flush) { - domain_flush_tlb(&dom->domain); - domain_flush_complete(&dom->domain); - } - - return address; -} - -static unsigned long dma_ops_area_alloc(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 dma_mask) -{ - unsigned long boundary_size, mask; - unsigned long address = -1; - bool first = true; - u32 start, i; - - preempt_disable(); - - mask = dma_get_seg_boundary(dev); - -again: - start = this_cpu_read(*dom->next_index); - - /* Sanity check - is it really necessary? */ - if (unlikely(start > APERTURE_MAX_RANGES)) { - start = 0; - this_cpu_write(*dom->next_index, 0); - } - - boundary_size = mask + 1 ? 
ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : - 1UL << (BITS_PER_LONG - PAGE_SHIFT); - - for (i = 0; i < APERTURE_MAX_RANGES; ++i) { - struct aperture_range *range; - int index; - - index = (start + i) % APERTURE_MAX_RANGES; - - range = dom->aperture[index]; - - if (!range || range->offset >= dma_mask) - continue; - - address = dma_ops_aperture_alloc(dom, range, pages, - dma_mask, boundary_size, - align_mask, first); - if (address != -1) { - address = range->offset + (address << PAGE_SHIFT); - this_cpu_write(*dom->next_index, index); - break; - } - } - - if (address == -1 && first) { - first = false; - goto again; - } - - preempt_enable(); - - return address; -} - -static unsigned long dma_ops_alloc_addresses(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 dma_mask) -{ - unsigned long address = -1; - - while (address == -1) { - address = dma_ops_area_alloc(dev, dom, pages, - align_mask, dma_mask); - - if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC)) - break; - } - - if (unlikely(address == -1)) - address = DMA_ERROR_CODE; - - WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); - - return address; -} - -/* - * The address free function. - * - * called with domain->lock held - */ -static void dma_ops_free_addresses(struct dma_ops_domain *dom, - unsigned long address, - unsigned int pages) -{ - unsigned i = address >> APERTURE_RANGE_SHIFT; - struct aperture_range *range = dom->aperture[i]; - unsigned long flags; - - BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL); - -#ifdef CONFIG_IOMMU_STRESS - if (i < 4) - return; -#endif - - if (amd_iommu_unmap_flush) { - domain_flush_tlb(&dom->domain); - domain_flush_complete(&dom->domain); - } - - address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; - - spin_lock_irqsave(&range->bitmap_lock, flags); - if (address + pages > range->next_bit) - range->next_bit = address + pages; - bitmap_clear(range->bitmap, address, pages); - spin_unlock_irqrestore(&range->bitmap_lock, flags); + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + free_iova_fast(&dma_dom->iovad, address, pages); } /**************************************************************************** @@ -1961,44 +1645,18 @@ static void free_gcr3_table(struct protection_domain *domain) */ static void dma_ops_domain_free(struct dma_ops_domain *dom) { - int i; - if (!dom) return; - free_percpu(dom->next_index); - del_domain_from_list(&dom->domain); - free_pagetable(&dom->domain); + put_iova_domain(&dom->iovad); - for (i = 0; i < APERTURE_MAX_RANGES; ++i) { - if (!dom->aperture[i]) - continue; - free_page((unsigned long)dom->aperture[i]->bitmap); - kfree(dom->aperture[i]); - } + free_pagetable(&dom->domain); kfree(dom); } -static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom, - int max_apertures) -{ - int ret, i, apertures; - - apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - ret = 0; - - for (i = apertures; i < max_apertures; ++i) { - ret = alloc_new_range(dma_dom, false, GFP_KERNEL); - if (ret) - break; - } - - return ret; -} - /* * Allocates a new protection domain usable for the dma_ops functions. 
* It also initializes the page table and the address allocator data @@ -2007,7 +1665,6 @@ static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom, static struct dma_ops_domain *dma_ops_domain_alloc(void) { struct dma_ops_domain *dma_dom; - int cpu; dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); if (!dma_dom) @@ -2016,30 +1673,19 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) if (protection_domain_init(&dma_dom->domain)) goto free_dma_dom; - dma_dom->next_index = alloc_percpu(u32); - if (!dma_dom->next_index) - goto free_dma_dom; - - dma_dom->domain.mode = PAGE_MODE_2_LEVEL; + dma_dom->domain.mode = PAGE_MODE_3_LEVEL; dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); dma_dom->domain.flags = PD_DMA_OPS_MASK; - dma_dom->domain.priv = dma_dom; if (!dma_dom->domain.pt_root) goto free_dma_dom; - add_domain_to_list(&dma_dom->domain); + init_iova_domain(&dma_dom->iovad, PAGE_SIZE, + IOVA_START_PFN, DMA_32BIT_PFN); - if (alloc_new_range(dma_dom, true, GFP_KERNEL)) - goto free_dma_dom; - - /* - * mark the first page as allocated so we never return 0 as - * a valid dma-address. So we can use 0 as error value - */ - dma_dom->aperture[0]->bitmap[0] = 1; + /* Initialize reserved ranges */ + copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); - for_each_possible_cpu(cpu) - *per_cpu_ptr(dma_dom->next_index, cpu) = 0; + add_domain_to_list(&dma_dom->domain); return dma_dom; @@ -2482,6 +2128,92 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) * *****************************************************************************/ +static void __queue_flush(struct flush_queue *queue) +{ + struct protection_domain *domain; + unsigned long flags; + int idx; + + /* First flush TLB of all known domains */ + spin_lock_irqsave(&amd_iommu_pd_lock, flags); + list_for_each_entry(domain, &amd_iommu_pd_list, list) + domain_flush_tlb(domain); + spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); + + /* Wait until flushes have completed */ + domain_flush_complete(NULL); + + for (idx = 0; idx < queue->next; ++idx) { + struct flush_queue_entry *entry; + + entry = queue->entries + idx; + + free_iova_fast(&entry->dma_dom->iovad, + entry->iova_pfn, + entry->pages); + + /* Not really necessary, just to make sure we catch any bugs */ + entry->dma_dom = NULL; + } + + queue->next = 0; +} + +static void queue_flush_all(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + unsigned long flags; + + queue = per_cpu_ptr(&flush_queue, cpu); + spin_lock_irqsave(&queue->lock, flags); + if (queue->next > 0) + __queue_flush(queue); + spin_unlock_irqrestore(&queue->lock, flags); + } +} + +static void queue_flush_timeout(unsigned long unused) +{ + atomic_set(&queue_timer_on, 0); + queue_flush_all(); +} + +static void queue_add(struct dma_ops_domain *dma_dom, + unsigned long address, unsigned long pages) +{ + struct flush_queue_entry *entry; + struct flush_queue *queue; + unsigned long flags; + int idx; + + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + queue = get_cpu_ptr(&flush_queue); + spin_lock_irqsave(&queue->lock, flags); + + if (queue->next == FLUSH_QUEUE_SIZE) + __queue_flush(queue); + + idx = queue->next++; + entry = queue->entries + idx; + + entry->iova_pfn = address; + entry->pages = pages; + entry->dma_dom = dma_dom; + + spin_unlock_irqrestore(&queue->lock, flags); + + if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0) + mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10)); + 
put_cpu_ptr(&flush_queue); +} + + /* * In the dma_ops path we only have the struct device. This function * finds the corresponding IOMMU, the protection domain and the @@ -2492,16 +2224,11 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) static struct protection_domain *get_domain(struct device *dev) { struct protection_domain *domain; - struct iommu_domain *io_domain; if (!check_device(dev)) return ERR_PTR(-EINVAL); - io_domain = iommu_get_domain_for_dev(dev); - if (!io_domain) - return NULL; - - domain = to_pdomain(io_domain); + domain = get_dev_data(dev)->domain; if (!dma_ops_domain(domain)) return ERR_PTR(-EBUSY); @@ -2512,8 +2239,15 @@ static void update_device_table(struct protection_domain *domain) { struct iommu_dev_data *dev_data; - list_for_each_entry(dev_data, &domain->dev_list, list) + list_for_each_entry(dev_data, &domain->dev_list, list) { set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); + + if (dev_data->devid == dev_data->alias) + continue; + + /* There is an alias, update device table entry for it */ + set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled); + } } static void update_domain(struct protection_domain *domain) @@ -2529,94 +2263,17 @@ static void update_domain(struct protection_domain *domain) domain->updated = false; } -/* - * This function fetches the PTE for a given address in the aperture - */ -static u64* dma_ops_get_pte(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte, *pte_page; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return NULL; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) { - pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, - GFP_ATOMIC); - aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; - } else - pte += PM_LEVEL_INDEX(0, address); - - update_domain(&dom->domain); - - return pte; -} - -/* - * This is the generic map function. It maps one 4kb page at paddr to - * the given address in the DMA address space for the domain. - */ -static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, - unsigned long address, - phys_addr_t paddr, - int direction) +static int dir2prot(enum dma_data_direction direction) { - u64 *pte, __pte; - - WARN_ON(address > dom->aperture_size); - - paddr &= PAGE_MASK; - - pte = dma_ops_get_pte(dom, address); - if (!pte) - return DMA_ERROR_CODE; - - __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; - if (direction == DMA_TO_DEVICE) - __pte |= IOMMU_PTE_IR; + return IOMMU_PROT_IR; else if (direction == DMA_FROM_DEVICE) - __pte |= IOMMU_PTE_IW; + return IOMMU_PROT_IW; else if (direction == DMA_BIDIRECTIONAL) - __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; - - WARN_ON_ONCE(*pte); - - *pte = __pte; - - return (dma_addr_t)address; -} - -/* - * The generic unmapping function for on page in the DMA address space. - */ -static void dma_ops_domain_unmap(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte; - - if (address >= dom->aperture_size) - return; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) - return; - - pte += PM_LEVEL_INDEX(0, address); - - WARN_ON_ONCE(!*pte); - - *pte = 0ULL; + return IOMMU_PROT_IW | IOMMU_PROT_IR; + else + return 0; } - /* * This function contains common code for mapping of a physically * contiguous memory region into DMA address space. 
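The queue_add()/__queue_flush() machinery introduced above implements lazy IOTLB invalidation: IOVA ranges freed on unmap are parked in a per-CPU queue and returned to the allocator only once a single domain-wide flush has covered the whole batch, with a 10ms timer bounding how long a stale translation can outlive its mapping. A condensed single-queue model of the pattern; ex_flush_all_tlbs() and ex_release_range() are stand-ins for domain_flush_tlb()/domain_flush_complete() and free_iova_fast(), not real driver functions:

    #include <linux/spinlock.h>

    #define EX_QUEUE_SIZE 256

    struct ex_flush_entry { unsigned long pfn, pages; };

    static DEFINE_SPINLOCK(ex_queue_lock);
    static unsigned int ex_queue_next;
    static struct ex_flush_entry ex_queue[EX_QUEUE_SIZE];

    static void ex_flush_all_tlbs(void);                /* stand-in */
    static void ex_release_range(unsigned long pfn, unsigned long pages);

    static void ex_queue_drain(void)    /* called with ex_queue_lock held */
    {
        unsigned int i;

        ex_flush_all_tlbs();            /* one flush amortized over the batch */
        for (i = 0; i < ex_queue_next; i++)
            ex_release_range(ex_queue[i].pfn, ex_queue[i].pages);
        ex_queue_next = 0;
    }

    static void ex_queue_add(unsigned long pfn, unsigned long pages)
    {
        unsigned long flags;

        spin_lock_irqsave(&ex_queue_lock, flags);
        if (ex_queue_next == EX_QUEUE_SIZE)
            ex_queue_drain();           /* queue full: drain synchronously */
        ex_queue[ex_queue_next++] = (struct ex_flush_entry){ pfn, pages };
        spin_unlock_irqrestore(&ex_queue_lock, flags);
        /* the driver additionally arms a 10ms timer so an idle CPU still
         * drains its queue promptly */
    }
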
It is used by all @@ -2627,32 +2284,29 @@ static dma_addr_t __map_single(struct device *dev, struct dma_ops_domain *dma_dom, phys_addr_t paddr, size_t size, - int dir, - bool align, + enum dma_data_direction direction, u64 dma_mask) { dma_addr_t offset = paddr & ~PAGE_MASK; dma_addr_t address, start, ret; unsigned int pages; - unsigned long align_mask = 0; + int prot = 0; int i; pages = iommu_num_pages(paddr, size, PAGE_SIZE); paddr &= PAGE_MASK; - if (align) - align_mask = (1UL << get_order(size)) - 1; - - address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, - dma_mask); - + address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); if (address == DMA_ERROR_CODE) goto out; + prot = dir2prot(direction); + start = address; for (i = 0; i < pages; ++i) { - ret = dma_ops_domain_map(dma_dom, start, paddr, dir); - if (ret == DMA_ERROR_CODE) + ret = iommu_map_page(&dma_dom->domain, start, paddr, + PAGE_SIZE, prot, GFP_ATOMIC); + if (ret) goto out_unmap; paddr += PAGE_SIZE; @@ -2672,10 +2326,13 @@ out_unmap: for (--i; i >= 0; --i) { start -= PAGE_SIZE; - dma_ops_domain_unmap(dma_dom, start); + iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); } - dma_ops_free_addresses(dma_dom, address, pages); + domain_flush_tlb(&dma_dom->domain); + domain_flush_complete(&dma_dom->domain); + + dma_ops_free_iova(dma_dom, address, pages); return DMA_ERROR_CODE; } @@ -2693,21 +2350,23 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_addr_t i, start; unsigned int pages; - if ((dma_addr == DMA_ERROR_CODE) || - (dma_addr + size > dma_dom->aperture_size)) - return; - flush_addr = dma_addr; pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); dma_addr &= PAGE_MASK; start = dma_addr; for (i = 0; i < pages; ++i) { - dma_ops_domain_unmap(dma_dom, start); + iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); start += PAGE_SIZE; } - dma_ops_free_addresses(dma_dom, dma_addr, pages); + if (amd_iommu_unmap_flush) { + dma_ops_free_iova(dma_dom, dma_addr, pages); + domain_flush_tlb(&dma_dom->domain); + domain_flush_complete(&dma_dom->domain); + } else { + queue_add(dma_dom, dma_addr, pages); + } } /* @@ -2720,6 +2379,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, { phys_addr_t paddr = page_to_phys(page) + offset; struct protection_domain *domain; + struct dma_ops_domain *dma_dom; u64 dma_mask; domain = get_domain(dev); @@ -2729,9 +2389,9 @@ static dma_addr_t map_page(struct device *dev, struct page *page, return DMA_ERROR_CODE; dma_mask = *dev->dma_mask; + dma_dom = to_dma_ops_domain(domain); - return __map_single(dev, domain->priv, paddr, size, dir, false, - dma_mask); + return __map_single(dev, dma_dom, paddr, size, dir, dma_mask); } /* @@ -2741,12 +2401,41 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { struct protection_domain *domain; + struct dma_ops_domain *dma_dom; domain = get_domain(dev); if (IS_ERR(domain)) return; - __unmap_single(domain->priv, dma_addr, size, dir); + dma_dom = to_dma_ops_domain(domain); + + __unmap_single(dma_dom, dma_addr, size, dir); +} + +static int sg_num_pages(struct device *dev, + struct scatterlist *sglist, + int nelems) +{ + unsigned long mask, boundary_size; + struct scatterlist *s; + int i, npages = 0; + + mask = dma_get_seg_boundary(dev); + boundary_size = mask + 1 ? 
ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : + 1UL << (BITS_PER_LONG - PAGE_SHIFT); + + for_each_sg(sglist, s, nelems, i) { + int p, n; + + s->dma_address = npages << PAGE_SHIFT; + p = npages % boundary_size; + n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); + if (p + n > boundary_size) + npages += boundary_size - p; + npages += n; + } + + return npages; } /* @@ -2754,46 +2443,79 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, * lists). */ static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, + int nelems, enum dma_data_direction direction, struct dma_attrs *attrs) { + int mapped_pages = 0, npages = 0, prot = 0, i; struct protection_domain *domain; - int i; + struct dma_ops_domain *dma_dom; struct scatterlist *s; - phys_addr_t paddr; - int mapped_elems = 0; + unsigned long address; u64 dma_mask; domain = get_domain(dev); if (IS_ERR(domain)) return 0; + dma_dom = to_dma_ops_domain(domain); dma_mask = *dev->dma_mask; + npages = sg_num_pages(dev, sglist, nelems); + + address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); + if (address == DMA_ERROR_CODE) + goto out_err; + + prot = dir2prot(direction); + + /* Map all sg entries */ for_each_sg(sglist, s, nelems, i) { - paddr = sg_phys(s); + int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - s->dma_address = __map_single(dev, domain->priv, - paddr, s->length, dir, false, - dma_mask); + for (j = 0; j < pages; ++j) { + unsigned long bus_addr, phys_addr; + int ret; - if (s->dma_address) { - s->dma_length = s->length; - mapped_elems++; - } else - goto unmap; + bus_addr = address + s->dma_address + (j << PAGE_SHIFT); + phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); + ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); + if (ret) + goto out_unmap; + + mapped_pages += 1; + } } - return mapped_elems; + /* Everything is mapped - write the right values into s->dma_address */ + for_each_sg(sglist, s, nelems, i) { + s->dma_address += address + s->offset; + s->dma_length = s->length; + } + + return nelems; + +out_unmap: + pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n", + dev_name(dev), npages); -unmap: - for_each_sg(sglist, s, mapped_elems, i) { - if (s->dma_address) - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; + for_each_sg(sglist, s, nelems, i) { + int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); + + for (j = 0; j < pages; ++j) { + unsigned long bus_addr; + + bus_addr = address + s->dma_address + (j << PAGE_SHIFT); + iommu_unmap_page(domain, bus_addr, PAGE_SIZE); + + if (--mapped_pages) + goto out_free_iova; + } } +out_free_iova: + free_iova_fast(&dma_dom->iovad, address, npages); + +out_err: return 0; } @@ -2806,18 +2528,19 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, struct dma_attrs *attrs) { struct protection_domain *domain; - struct scatterlist *s; - int i; + struct dma_ops_domain *dma_dom; + unsigned long startaddr; + int npages = 2; domain = get_domain(dev); if (IS_ERR(domain)) return; - for_each_sg(sglist, s, nelems, i) { - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; - } + startaddr = sg_dma_address(sglist) & PAGE_MASK; + dma_dom = to_dma_ops_domain(domain); + npages = sg_num_pages(dev, sglist, nelems); + + __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir); } /* @@ -2829,6 +2552,7 @@ static void 
*alloc_coherent(struct device *dev, size_t size, { u64 dma_mask = dev->coherent_dma_mask; struct protection_domain *domain; + struct dma_ops_domain *dma_dom; struct page *page; domain = get_domain(dev); @@ -2839,6 +2563,7 @@ static void *alloc_coherent(struct device *dev, size_t size, } else if (IS_ERR(domain)) return NULL; + dma_dom = to_dma_ops_domain(domain); size = PAGE_ALIGN(size); dma_mask = dev->coherent_dma_mask; flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); @@ -2858,8 +2583,8 @@ static void *alloc_coherent(struct device *dev, size_t size, if (!dma_mask) dma_mask = *dev->dma_mask; - *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), - size, DMA_BIDIRECTIONAL, true, dma_mask); + *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), + size, DMA_BIDIRECTIONAL, dma_mask); if (*dma_addr == DMA_ERROR_CODE) goto out_free; @@ -2882,6 +2607,7 @@ static void free_coherent(struct device *dev, size_t size, struct dma_attrs *attrs) { struct protection_domain *domain; + struct dma_ops_domain *dma_dom; struct page *page; page = virt_to_page(virt_addr); @@ -2891,7 +2617,9 @@ static void free_coherent(struct device *dev, size_t size, if (IS_ERR(domain)) goto free_mem; - __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); + dma_dom = to_dma_ops_domain(domain); + + __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL); free_mem: if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) @@ -2907,48 +2635,92 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) return check_device(dev); } -static int set_dma_mask(struct device *dev, u64 mask) +static struct dma_map_ops amd_iommu_dma_ops = { + .alloc = alloc_coherent, + .free = free_coherent, + .map_page = map_page, + .unmap_page = unmap_page, + .map_sg = map_sg, + .unmap_sg = unmap_sg, + .dma_supported = amd_iommu_dma_supported, +}; + +static int init_reserved_iova_ranges(void) { - struct protection_domain *domain; - int max_apertures = 1; + struct pci_dev *pdev = NULL; + struct iova *val; - domain = get_domain(dev); - if (IS_ERR(domain)) - return PTR_ERR(domain); + init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, + IOVA_START_PFN, DMA_32BIT_PFN); - if (mask == DMA_BIT_MASK(64)) - max_apertures = 8; - else if (mask > DMA_BIT_MASK(32)) - max_apertures = 4; + lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock, + &reserved_rbtree_key); + + /* MSI memory range */ + val = reserve_iova(&reserved_iova_ranges, + IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END)); + if (!val) { + pr_err("Reserving MSI range failed\n"); + return -ENOMEM; + } + + /* HT memory range */ + val = reserve_iova(&reserved_iova_ranges, + IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END)); + if (!val) { + pr_err("Reserving HT range failed\n"); + return -ENOMEM; + } /* - * To prevent lock contention it doesn't make sense to allocate more - * apertures than online cpus + * Memory used for PCI resources + * FIXME: Check whether we can reserve the PCI-hole completely */ - if (max_apertures > num_online_cpus()) - max_apertures = num_online_cpus(); + for_each_pci_dev(pdev) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; ++i) { + struct resource *r = &pdev->resource[i]; + + if (!(r->flags & IORESOURCE_MEM)) + continue; - if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures)) - dev_err(dev, "Can't allocate %d iommu apertures\n", - max_apertures); + val = reserve_iova(&reserved_iova_ranges, + IOVA_PFN(r->start), + IOVA_PFN(r->end)); + if (!val) { + pr_err("Reserve pci-resource range failed\n"); + return -ENOMEM; + } 
+ } + } return 0; } -static struct dma_map_ops amd_iommu_dma_ops = { - .alloc = alloc_coherent, - .free = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, - .set_dma_mask = set_dma_mask, -}; - int __init amd_iommu_init_api(void) { - int err = 0; + int ret, cpu, err = 0; + + ret = iova_cache_get(); + if (ret) + return ret; + + ret = init_reserved_iova_ranges(); + if (ret) + return ret; + + for_each_possible_cpu(cpu) { + struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); + + queue->entries = kzalloc(FLUSH_QUEUE_SIZE * + sizeof(*queue->entries), + GFP_KERNEL); + if (!queue->entries) + goto out_put_iova; + + spin_lock_init(&queue->lock); + } err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) @@ -2958,11 +2730,26 @@ int __init amd_iommu_init_api(void) if (err) return err; #endif + err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops); + if (err) + return err; return 0; + +out_put_iova: + for_each_possible_cpu(cpu) { + struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); + + kfree(queue->entries); + } + + return -ENOMEM; } int __init amd_iommu_init_dma_ops(void) { + setup_timer(&queue_timer, queue_flush_timeout, 0); + atomic_set(&queue_timer_on, 0); + swiotlb = iommu_pass_through ? 1 : 0; iommu_detected = 1; @@ -2981,6 +2768,7 @@ int __init amd_iommu_init_dma_ops(void) pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n"); return 0; + } /***************************************************************************** @@ -3103,9 +2891,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; - - if (!dom) - return; + struct dma_ops_domain *dma_dom; domain = to_pdomain(dom); @@ -3114,13 +2900,31 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) BUG_ON(domain->dev_cnt != 0); - if (domain->mode != PAGE_MODE_NONE) - free_pagetable(domain); + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + /* + * First make sure the domain is no longer referenced from the + * flush queue + */ + queue_flush_all(); - if (domain->flags & PD_IOMMUV2_MASK) - free_gcr3_table(domain); + /* Now release the domain */ + dma_dom = to_dma_ops_domain(domain); + dma_ops_domain_free(dma_dom); + break; + default: + if (domain->mode != PAGE_MODE_NONE) + free_pagetable(domain); + + if (domain->flags & PD_IOMMUV2_MASK) + free_gcr3_table(domain); - protection_domain_free(domain); + protection_domain_free(domain); + break; + } } static void amd_iommu_detach_device(struct iommu_domain *dom, @@ -3190,7 +2994,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, prot |= IOMMU_PROT_IW; mutex_lock(&domain->api_lock); - ret = iommu_map_page(domain, iova, paddr, prot, page_size); + ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL); mutex_unlock(&domain->api_lock); return ret; @@ -3292,6 +3096,19 @@ static void amd_iommu_put_dm_regions(struct device *dev, kfree(entry); } +static void amd_iommu_apply_dm_region(struct device *dev, + struct iommu_domain *domain, + struct iommu_dm_region *region) +{ + struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); + unsigned long start, end; + + start = IOVA_PFN(region->start); + end = IOVA_PFN(region->start + region->length); + + WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); +} + static const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, 
.domain_alloc = amd_iommu_domain_alloc, @@ -3307,6 +3124,7 @@ static const struct iommu_ops amd_iommu_ops = { .device_group = amd_iommu_device_group, .get_dm_regions = amd_iommu_get_dm_regions, .put_dm_regions = amd_iommu_put_dm_regions, + .apply_dm_region = amd_iommu_apply_dm_region, .pgsize_bitmap = AMD_IOMMU_PGSIZES, }; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 590956ac704e..caf5e3822715 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -421,7 +421,6 @@ struct protection_domain { bool updated; /* complete domain flush required */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ - void *priv; /* private data */ }; /* diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index fbdaf81ae925..594849a3a9be 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -960,7 +960,7 @@ static int __init amd_iommu_v2_init(void) spin_lock_init(&state_lock); ret = -ENOMEM; - iommu_wq = create_workqueue("amd_iommu_v2"); + iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0); if (iommu_wq == NULL) goto out; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5f6b3bcab078..ce801170d5f2 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2687,6 +2687,8 @@ static int __init arm_smmu_init(void) if (ret) return ret; + pci_request_acs(); + return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 9345a3fcb706..4f49fe29f202 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -987,8 +987,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, * handler seeing a half-initialised domain state. 
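The arm-smmu hunks that follow convert IRQ setup to the device-managed API; the payoff shows up in the error paths, where the out_free_irqs unwinding label disappears entirely because devm_request_irq() ties each IRQ to the device's lifetime. A minimal sketch of the pattern, with illustrative ex_* names:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    static irqreturn_t ex_fault_handler(int irq, void *dev_id)
    {
        return IRQ_HANDLED;             /* fault decoding elided */
    }

    static int ex_probe_irq(struct device *dev, unsigned int irq)
    {
        /* released automatically on probe failure or driver detach, so
         * no free_irq() is needed on any error path; devm_free_irq()
         * remains available where teardown ordering matters, as in the
         * remove path below */
        return devm_request_irq(dev, irq, ex_fault_handler, IRQF_SHARED,
                                "ex-fault", dev);
    }
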
*/ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, - "arm-smmu-context-fault", domain); + ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault, + IRQF_SHARED, "arm-smmu-context-fault", domain); if (ret < 0) { dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", cfg->irptndx, irq); @@ -1028,7 +1028,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) if (cfg->irptndx != INVALID_IRPTNDX) { irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - free_irq(irq, domain); + devm_free_irq(smmu->dev, irq, domain); } free_io_pgtable_ops(smmu_domain->pgtbl_ops); @@ -1986,15 +1986,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } for (i = 0; i < smmu->num_global_irqs; ++i) { - err = request_irq(smmu->irqs[i], - arm_smmu_global_fault, - IRQF_SHARED, - "arm-smmu global fault", - smmu); + err = devm_request_irq(smmu->dev, smmu->irqs[i], + arm_smmu_global_fault, + IRQF_SHARED, + "arm-smmu global fault", + smmu); if (err) { dev_err(dev, "failed to request global IRQ %d (%u)\n", i, smmu->irqs[i]); - goto out_free_irqs; + goto out_put_masters; } } @@ -2006,10 +2006,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) arm_smmu_device_reset(smmu); return 0; -out_free_irqs: - while (i--) - free_irq(smmu->irqs[i], smmu); - out_put_masters: for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { struct arm_smmu_master *master @@ -2050,7 +2046,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) dev_err(dev, "removing device with active domains!\n"); for (i = 0; i < smmu->num_global_irqs; ++i) - free_irq(smmu->irqs[i], smmu); + devm_free_irq(smmu->dev, smmu->irqs[i], smmu); /* Turn the thing off */ writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); @@ -2096,8 +2092,10 @@ static int __init arm_smmu_init(void) #endif #ifdef CONFIG_PCI - if (!iommu_present(&pci_bus_type)) + if (!iommu_present(&pci_bus_type)) { + pci_request_acs(); bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + } #endif return 0; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 6a86b5d1defa..58470f5ced04 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -241,8 +241,20 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, if (!dmar_match_pci_path(info, scope->bus, path, level)) continue; - if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^ - (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) { + /* + * We expect devices with endpoint scope to have normal PCI + * headers, and devices with bridge scope to have bridge PCI + * headers. However PCI NTB devices may be listed in the + * DMAR table with bridge scope, even though they have a + * normal PCI header. NTB devices are identified by class + * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch + * for this special case. 
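The class test added here relies on how PCI packs its class code: pci_dev->class carries the 24-bit value (base class << 16 | sub-class << 8 | programming interface), so shifting right by 8 leaves the 16-bit base/sub-class pair that the PCI_CLASS_* constants name. An illustrative predicate:

    #include <linux/pci.h>

    /* NTB-style device: normal header, but class 0680h (PCI_CLASS_BRIDGE_OTHER) */
    static bool ex_is_ntb_like(const struct pci_dev *pdev)
    {
        return pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
               (pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER;
    }
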
+ */ + if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && + info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) || + (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE && + (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL && + info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) { pr_warn("Device scope type does not match for %s\n", pci_name(info->dev)); return -EINVAL; @@ -1155,8 +1167,6 @@ static int qi_check_fault(struct intel_iommu *iommu, int index) (unsigned long long)qi->desc[index].high); memcpy(&qi->desc[index], &qi->desc[wait_index], sizeof(struct qi_desc)); - __iommu_flush_cache(iommu, &qi->desc[index], - sizeof(struct qi_desc)); writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); return -EINVAL; } @@ -1231,9 +1241,6 @@ restart: hw[wait_index] = wait_desc; - __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); - __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); - qi->free_head = (qi->free_head + 2) % QI_LENGTH; qi->free_cnt -= 2; @@ -1871,10 +1878,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) /* * All PCI devices managed by this unit should have been destroyed. */ - if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { for_each_active_dev_scope(dmaru->devices, dmaru->devices_cnt, i, dev) return -EBUSY; + } ret = dmar_ir_hotplug(dmaru, false); if (ret == 0) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 5ecc86cb74c8..33dcc29ec200 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -54,6 +54,10 @@ typedef u32 sysmmu_pte_t; #define lv2ent_small(pent) ((*(pent) & 2) == 2) #define lv2ent_large(pent) ((*(pent) & 3) == 1) +#ifdef CONFIG_BIG_ENDIAN +#warning "revisit driver if we can enable big-endian ptes" +#endif + /* * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces * v5.0 introduced support for 36bit physical address space by shifting @@ -322,14 +326,27 @@ static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd) __sysmmu_tlb_invalidate(data); } +static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data) +{ + BUG_ON(clk_prepare_enable(data->clk_master)); + BUG_ON(clk_prepare_enable(data->clk)); + BUG_ON(clk_prepare_enable(data->pclk)); + BUG_ON(clk_prepare_enable(data->aclk)); +} + +static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data) +{ + clk_disable_unprepare(data->aclk); + clk_disable_unprepare(data->pclk); + clk_disable_unprepare(data->clk); + clk_disable_unprepare(data->clk_master); +} + static void __sysmmu_get_version(struct sysmmu_drvdata *data) { u32 ver; - clk_enable(data->clk_master); - clk_enable(data->clk); - clk_enable(data->pclk); - clk_enable(data->aclk); + __sysmmu_enable_clocks(data); ver = readl(data->sfrbase + REG_MMU_VERSION); @@ -342,10 +359,7 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data) dev_dbg(data->sysmmu, "hardware version: %d.%d\n", MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); - clk_disable(data->aclk); - clk_disable(data->pclk); - clk_disable(data->clk); - clk_disable(data->clk_master); + __sysmmu_disable_clocks(data); } static void show_fault_information(struct sysmmu_drvdata *data, @@ -427,10 +441,7 @@ static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data) writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); writel(0, data->sfrbase + REG_MMU_CFG); - clk_disable(data->aclk); - clk_disable(data->pclk); - clk_disable(data->clk); - 
clk_disable(data->clk_master); + __sysmmu_disable_clocks(data); } static bool __sysmmu_disable(struct sysmmu_drvdata *data) @@ -475,10 +486,7 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data) static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data) { - clk_enable(data->clk_master); - clk_enable(data->clk); - clk_enable(data->pclk); - clk_enable(data->aclk); + __sysmmu_enable_clocks(data); writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); @@ -488,6 +496,12 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data) writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); + /* + * SYSMMU driver keeps master's clock enabled only for the short + * time, while accessing the registers. For performing address + * translation during DMA transaction it relies on the client + * driver to enable it. + */ clk_disable(data->clk_master); } @@ -524,16 +538,15 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, { unsigned long flags; - clk_enable(data->clk_master); spin_lock_irqsave(&data->lock, flags); - if (is_sysmmu_active(data)) { - if (data->version >= MAKE_MMU_VER(3, 3)) - __sysmmu_tlb_invalidate_entry(data, iova, 1); + if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) { + clk_enable(data->clk_master); + __sysmmu_tlb_invalidate_entry(data, iova, 1); + clk_disable(data->clk_master); } spin_unlock_irqrestore(&data->lock, flags); - clk_disable(data->clk_master); } static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, @@ -572,6 +585,8 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, spin_unlock_irqrestore(&data->lock, flags); } +static struct iommu_ops exynos_iommu_ops; + static int __init exynos_sysmmu_probe(struct platform_device *pdev) { int irq, ret; @@ -602,37 +617,22 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) } data->clk = devm_clk_get(dev, "sysmmu"); - if (!IS_ERR(data->clk)) { - ret = clk_prepare(data->clk); - if (ret) { - dev_err(dev, "Failed to prepare clk\n"); - return ret; - } - } else { + if (PTR_ERR(data->clk) == -ENOENT) data->clk = NULL; - } + else if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); data->aclk = devm_clk_get(dev, "aclk"); - if (!IS_ERR(data->aclk)) { - ret = clk_prepare(data->aclk); - if (ret) { - dev_err(dev, "Failed to prepare aclk\n"); - return ret; - } - } else { + if (PTR_ERR(data->aclk) == -ENOENT) data->aclk = NULL; - } + else if (IS_ERR(data->aclk)) + return PTR_ERR(data->aclk); data->pclk = devm_clk_get(dev, "pclk"); - if (!IS_ERR(data->pclk)) { - ret = clk_prepare(data->pclk); - if (ret) { - dev_err(dev, "Failed to prepare pclk\n"); - return ret; - } - } else { + if (PTR_ERR(data->pclk) == -ENOENT) data->pclk = NULL; - } + else if (IS_ERR(data->pclk)) + return PTR_ERR(data->pclk); if (!data->clk && (!data->aclk || !data->pclk)) { dev_err(dev, "Failed to get device clock(s)!\n"); @@ -640,15 +640,10 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) } data->clk_master = devm_clk_get(dev, "master"); - if (!IS_ERR(data->clk_master)) { - ret = clk_prepare(data->clk_master); - if (ret) { - dev_err(dev, "Failed to prepare master's clk\n"); - return ret; - } - } else { + if (PTR_ERR(data->clk_master) == -ENOENT) data->clk_master = NULL; - } + else if (IS_ERR(data->clk_master)) + return PTR_ERR(data->clk_master); data->sysmmu = dev; spin_lock_init(&data->lock); @@ -665,6 +660,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) pm_runtime_enable(dev); + of_iommu_set_ops(dev->of_node, 
&exynos_iommu_ops); + return 0; } @@ -709,6 +706,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = { .name = "exynos-sysmmu", .of_match_table = sysmmu_of_match, .pm = &sysmmu_pm_ops, + .suppress_bind_attrs = true, } }; @@ -716,7 +714,7 @@ static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) { dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent), DMA_TO_DEVICE); - *ent = val; + *ent = cpu_to_le32(val); dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent), DMA_TO_DEVICE); } @@ -1357,7 +1355,6 @@ static int __init exynos_iommu_of_setup(struct device_node *np) if (!dma_dev) dma_dev = &pdev->dev; - of_iommu_set_ops(np, &exynos_iommu_ops); return 0; } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 323dac9900ba..afbaa2c69a59 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1672,7 +1672,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) return -ENOMEM; } - size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **); + size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **); iommu->domains = kzalloc(size, GFP_KERNEL); if (iommu->domains) { @@ -1737,7 +1737,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) static void free_dmar_iommu(struct intel_iommu *iommu) { if ((iommu->domains) && (iommu->domain_ids)) { - int elems = (cap_ndoms(iommu->cap) >> 8) + 1; + int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; int i; for (i = 0; i < elems; i++) @@ -2076,7 +2076,7 @@ out_unlock: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); - return 0; + return ret; } struct domain_context_mapping_data { @@ -4272,10 +4272,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) if (!atsru) return 0; - if (!atsru->include_all && atsru->devices && atsru->devices_cnt) + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, i, dev) return -EBUSY; + } return 0; } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a1ed1b73fed4..f5c90e1366ce 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -576,7 +576,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, return 0; found_translation: - iova &= (ARM_LPAE_GRANULE(data) - 1); + iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3000051f48b4..b06d93594436 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -34,8 +34,7 @@ #include <trace/events/iommu.h> static struct kset *iommu_group_kset; -static struct ida iommu_group_ida; -static struct mutex iommu_group_mutex; +static DEFINE_IDA(iommu_group_ida); struct iommu_callback_data { const struct iommu_ops *ops; @@ -144,9 +143,7 @@ static void iommu_group_release(struct kobject *kobj) if (group->iommu_data_release) group->iommu_data_release(group->iommu_data); - mutex_lock(&iommu_group_mutex); - ida_remove(&iommu_group_ida, group->id); - mutex_unlock(&iommu_group_mutex); + ida_simple_remove(&iommu_group_ida, group->id); if (group->default_domain) iommu_domain_free(group->default_domain); @@ -186,26 +183,17 @@ struct iommu_group *iommu_group_alloc(void) INIT_LIST_HEAD(&group->devices); BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); - mutex_lock(&iommu_group_mutex); - -again: - if (unlikely(0 == ida_pre_get(&iommu_group_ida, 
GFP_KERNEL))) { + ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { kfree(group); - mutex_unlock(&iommu_group_mutex); - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } - - if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id)) - goto again; - - mutex_unlock(&iommu_group_mutex); + group->id = ret; ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, NULL, "%d", group->id); if (ret) { - mutex_lock(&iommu_group_mutex); - ida_remove(&iommu_group_ida, group->id); - mutex_unlock(&iommu_group_mutex); + ida_simple_remove(&iommu_group_ida, group->id); kfree(group); return ERR_PTR(ret); } @@ -348,6 +336,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, list_for_each_entry(entry, &mappings, list) { dma_addr_t start, end, addr; + if (domain->ops->apply_dm_region) + domain->ops->apply_dm_region(dev, domain, entry); + start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); @@ -1483,9 +1474,6 @@ static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", NULL, kernel_kobj); - ida_init(&iommu_group_ida); - mutex_init(&iommu_group_mutex); - BUG_ON(!iommu_group_kset); return 0; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index e321fa517a45..b09692bb5b0a 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -27,32 +27,35 @@ #include <linux/slab.h> #include <linux/iommu.h> #include <linux/clk.h> +#include <linux/err.h> +#include <linux/of_iommu.h> #include <asm/cacheflush.h> #include <asm/sizes.h> #include "msm_iommu_hw-8xxx.h" #include "msm_iommu.h" +#include "io-pgtable.h" #define MRC(reg, processor, op1, crn, crm, op2) \ __asm__ __volatile__ ( \ " mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ : "=r" (reg)) -#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) -#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) - /* bitmap of the page sizes currently supported */ #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) -static int msm_iommu_tex_class[4]; - DEFINE_SPINLOCK(msm_iommu_lock); +static LIST_HEAD(qcom_iommu_devices); +static struct iommu_ops msm_iommu_ops; struct msm_priv { - unsigned long *pgtable; struct list_head list_attached; struct iommu_domain domain; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops *iop; + struct device *dev; + spinlock_t pgtlock; /* pagetable lock */ }; static struct msm_priv *to_msm_priv(struct iommu_domain *dom) @@ -60,67 +63,183 @@ static struct msm_priv *to_msm_priv(struct iommu_domain *dom) return container_of(dom, struct msm_priv, domain); } -static int __enable_clocks(struct msm_iommu_drvdata *drvdata) +static int __enable_clocks(struct msm_iommu_dev *iommu) { int ret; - ret = clk_enable(drvdata->pclk); + ret = clk_enable(iommu->pclk); if (ret) goto fail; - if (drvdata->clk) { - ret = clk_enable(drvdata->clk); + if (iommu->clk) { + ret = clk_enable(iommu->clk); if (ret) - clk_disable(drvdata->pclk); + clk_disable(iommu->pclk); } fail: return ret; } -static void __disable_clocks(struct msm_iommu_drvdata *drvdata) +static void __disable_clocks(struct msm_iommu_dev *iommu) { - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); + if (iommu->clk) + clk_disable(iommu->clk); + clk_disable(iommu->pclk); } -static int __flush_iotlb(struct iommu_domain *domain) +static void msm_iommu_reset(void __iomem *base, int ncb) { - struct msm_priv *priv = to_msm_priv(domain); - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - int ret = 0; 
-#ifndef CONFIG_IOMMU_PGTABLES_L2 - unsigned long *fl_table = priv->pgtable; - int i; - - if (!list_empty(&priv->list_attached)) { - dmac_flush_range(fl_table, fl_table + SZ_16K); - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { - void *sl_table = __va(fl_table[i] & - FL_BASE_MASK); - dmac_flush_range(sl_table, sl_table + SZ_4K); - } + int ctx; + + SET_RPUE(base, 0); + SET_RPUEIE(base, 0); + SET_ESRRESTORE(base, 0); + SET_TBE(base, 0); + SET_CR(base, 0); + SET_SPDMBE(base, 0); + SET_TESTBUSCR(base, 0); + SET_TLBRSW(base, 0); + SET_GLOBAL_TLBIALL(base, 0); + SET_RPU_ACR(base, 0); + SET_TLBLKCRWE(base, 1); + + for (ctx = 0; ctx < ncb; ctx++) { + SET_BPRCOSH(base, ctx, 0); + SET_BPRCISH(base, ctx, 0); + SET_BPRCNSH(base, ctx, 0); + SET_BPSHCFG(base, ctx, 0); + SET_BPMTCFG(base, ctx, 0); + SET_ACTLR(base, ctx, 0); + SET_SCTLR(base, ctx, 0); + SET_FSRRESTORE(base, ctx, 0); + SET_TTBR0(base, ctx, 0); + SET_TTBR1(base, ctx, 0); + SET_TTBCR(base, ctx, 0); + SET_BFBCR(base, ctx, 0); + SET_PAR(base, ctx, 0); + SET_FAR(base, ctx, 0); + SET_CTX_TLBIALL(base, ctx, 0); + SET_TLBFLPTER(base, ctx, 0); + SET_TLBSLPTER(base, ctx, 0); + SET_TLBLKCR(base, ctx, 0); + SET_CONTEXTIDR(base, ctx, 0); } -#endif +} - list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { +static void __flush_iotlb(void *cookie) +{ + struct msm_priv *priv = cookie; + struct msm_iommu_dev *iommu = NULL; + struct msm_iommu_ctx_dev *master; + int ret = 0; + + list_for_each_entry(iommu, &priv->list_attached, dom_node) { + ret = __enable_clocks(iommu); + if (ret) + goto fail; - BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); + list_for_each_entry(master, &iommu->ctx_list, list) + SET_CTX_TLBIALL(iommu->base, master->num, 0); - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); - BUG_ON(!iommu_drvdata); + __disable_clocks(iommu); + } +fail: + return; +} - ret = __enable_clocks(iommu_drvdata); +static void __flush_iotlb_range(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) +{ + struct msm_priv *priv = cookie; + struct msm_iommu_dev *iommu = NULL; + struct msm_iommu_ctx_dev *master; + int ret = 0; + int temp_size; + + list_for_each_entry(iommu, &priv->list_attached, dom_node) { + ret = __enable_clocks(iommu); if (ret) goto fail; - SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); - __disable_clocks(iommu_drvdata); + list_for_each_entry(master, &iommu->ctx_list, list) { + temp_size = size; + do { + iova &= TLBIVA_VA; + iova |= GET_CONTEXTIDR_ASID(iommu->base, + master->num); + SET_TLBIVA(iommu->base, master->num, iova); + iova += granule; + } while (temp_size -= granule); + } + + __disable_clocks(iommu); } + fail: - return ret; + return; +} + +static void __flush_iotlb_sync(void *cookie) +{ + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care of when the iommu client does a writel before + * kick-starting the other master. 
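The empty tlb_sync callback below leans on ordering the client already provides: writel() implies a write barrier ahead of its MMIO store, so by the time a client kick-starts its master through a doorbell register, earlier TLB-invalidate writes issued through the same accessors are ordered before it. Sketched from the client's side, under the assumption that the doorbell really is written with writel() and not the _relaxed variant:

    #include <linux/io.h>

    static void ex_kick_master(void __iomem *doorbell, u32 job)
    {
        /* writel() = barrier + store: every prior write, including the
         * IOMMU's TLB-invalidate registers, is ordered ahead of this;
         * writel_relaxed() would give no such guarantee */
        writel(job, doorbell);
    }
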
+ */ +} + +static const struct iommu_gather_ops msm_iommu_gather_ops = { + .tlb_flush_all = __flush_iotlb, + .tlb_add_flush = __flush_iotlb_range, + .tlb_sync = __flush_iotlb_sync, +}; + +static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) +{ + int idx; + + do { + idx = find_next_zero_bit(map, end, start); + if (idx == end) + return -ENOSPC; + } while (test_and_set_bit(idx, map)); + + return idx; +} + +static void msm_iommu_free_ctx(unsigned long *map, int idx) +{ + clear_bit(idx, map); +} + +static void config_mids(struct msm_iommu_dev *iommu, + struct msm_iommu_ctx_dev *master) +{ + int mid, ctx, i; + + for (i = 0; i < master->num_mids; i++) { + mid = master->mids[i]; + ctx = master->num; + + SET_M2VCBR_N(iommu->base, mid, 0); + SET_CBACR_N(iommu->base, ctx, 0); + + /* Set VMID = 0 */ + SET_VMID(iommu->base, mid, 0); + + /* Set the context number for that MID to this context */ + SET_CBNDX(iommu->base, mid, ctx); + + /* Set MID associated with this context bank to 0*/ + SET_CBVMID(iommu->base, ctx, 0); + + /* Set the ASID for TLB tagging for this context */ + SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx); + + /* Set security bit override to be Non-secure */ + SET_NSCFG(iommu->base, mid, 3); + } } static void __reset_context(void __iomem *base, int ctx) @@ -143,15 +262,17 @@ static void __reset_context(void __iomem *base, int ctx) SET_TLBFLPTER(base, ctx, 0); SET_TLBSLPTER(base, ctx, 0); SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); } -static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) +static void __program_context(void __iomem *base, int ctx, + struct msm_priv *priv) { - unsigned int prrr, nmrr; __reset_context(base, ctx); + /* Turn on TEX Remap */ + SET_TRE(base, ctx, 1); + SET_AFE(base, ctx, 1); + /* Set up HTW mode */ /* TLB miss configuration: perform HTW on miss */ SET_TLBMCFG(base, ctx, 0x3); @@ -159,8 +280,13 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) /* V2P configuration: HTW for access */ SET_V2PCFG(base, ctx, 0x3); - SET_TTBCR(base, ctx, 0); - SET_TTBR0_PA(base, ctx, (pgtable >> 14)); + SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr); + SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]); + SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]); + + /* Set prrr and nmrr */ + SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr); + SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr); /* Invalidate the TLB for this context */ SET_CTX_TLBIALL(base, ctx, 0); @@ -179,38 +305,9 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) SET_RCOSH(base, ctx, 1); SET_RCNSH(base, ctx, 1); - /* Turn on TEX Remap */ - SET_TRE(base, ctx, 1); - - /* Set TEX remap attributes */ - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - SET_PRRR(base, ctx, prrr); - SET_NMRR(base, ctx, nmrr); - /* Turn on BFB prefetch */ SET_BFBDFE(base, ctx, 1); -#ifdef CONFIG_IOMMU_PGTABLES_L2 - /* Configure page tables as inner-cacheable and shareable to reduce - * the TLB miss penalty. 
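From this point the msm driver stops hand-rolling ARMv7 short-descriptor PTEs and delegates page-table management to the io-pgtable library, as msm_iommu_domain_config() below shows. A reduced sketch of that handoff, assuming the same format, quirks and 32-bit address sizes the patch selects:

    #include <linux/sizes.h>
    #include "io-pgtable.h"

    static struct io_pgtable_ops *ex_alloc_pgtable(struct io_pgtable_cfg *cfg,
                                const struct iommu_gather_ops *tlb,
                                struct device *dev, void *cookie)
    {
        *cfg = (struct io_pgtable_cfg) {
            /* hardware may TLB-cache invalid entries: invalidate on map too */
            .quirks         = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
            .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
            .ias            = 32,   /* input (bus) address bits */
            .oas            = 32,   /* output (physical) address bits */
            .tlb            = tlb,
            .iommu_dev      = dev,
        };

        /* on success all mapping work reduces to ops->map()/ops->unmap()
         * under the driver's own lock; NULL means allocation failed */
        return alloc_io_pgtable_ops(ARM_V7S, cfg, cookie);
    }
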
- */ - SET_TTBR0_SH(base, ctx, 1); - SET_TTBR1_SH(base, ctx, 1); - - SET_TTBR0_NOS(base, ctx, 1); - SET_TTBR1_NOS(base, ctx, 1); - - SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR0_IRGNL(base, ctx, 1); - - SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR1_IRGNL(base, ctx, 1); - - SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */ - SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */ -#endif - /* Enable the MMU */ SET_M(base, ctx, 1); } @@ -227,13 +324,6 @@ static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) goto fail_nomem; INIT_LIST_HEAD(&priv->list_attached); - priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, - get_order(SZ_16K)); - - if (!priv->pgtable) - goto fail_nomem; - - memset(priv->pgtable, 0, SZ_16K); priv->domain.geometry.aperture_start = 0; priv->domain.geometry.aperture_end = (1ULL << 32) - 1; @@ -250,304 +340,137 @@ static void msm_iommu_domain_free(struct iommu_domain *domain) { struct msm_priv *priv; unsigned long flags; - unsigned long *fl_table; - int i; spin_lock_irqsave(&msm_iommu_lock, flags); priv = to_msm_priv(domain); + kfree(priv); + spin_unlock_irqrestore(&msm_iommu_lock, flags); +} - fl_table = priv->pgtable; - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) - free_page((unsigned long) __va(((fl_table[i]) & - FL_BASE_MASK))); +static int msm_iommu_domain_config(struct msm_priv *priv) +{ + spin_lock_init(&priv->pgtlock); + + priv->cfg = (struct io_pgtable_cfg) { + .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP, + .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, + .ias = 32, + .oas = 32, + .tlb = &msm_iommu_gather_ops, + .iommu_dev = priv->dev, + }; + + priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv); + if (!priv->iop) { + dev_err(priv->dev, "Failed to allocate pgtable\n"); + return -EINVAL; + } - free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); - priv->pgtable = NULL; + msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap; - kfree(priv); - spin_unlock_irqrestore(&msm_iommu_lock, flags); + return 0; } static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - struct msm_iommu_ctx_drvdata *tmp_drvdata; int ret = 0; unsigned long flags; + struct msm_iommu_dev *iommu; + struct msm_priv *priv = to_msm_priv(domain); + struct msm_iommu_ctx_dev *master; - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = to_msm_priv(domain); - - if (!dev) { - ret = -EINVAL; - goto fail; - } - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) { - ret = -EINVAL; - goto fail; - } - - if (!list_empty(&ctx_drvdata->attached_elm)) { - ret = -EBUSY; - goto fail; - } + priv->dev = dev; + msm_iommu_domain_config(priv); - list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) - if (tmp_drvdata == ctx_drvdata) { - ret = -EBUSY; - goto fail; + spin_lock_irqsave(&msm_iommu_lock, flags); + list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { + master = list_first_entry(&iommu->ctx_list, + struct msm_iommu_ctx_dev, + list); + if (master->of_node == dev->of_node) { + ret = __enable_clocks(iommu); + if (ret) + goto fail; + + list_for_each_entry(master, &iommu->ctx_list, list) { + if (master->num) { + dev_err(dev, "domain already attached"); + ret = -EEXIST; + goto fail; + } + master->num = + 
msm_iommu_alloc_ctx(iommu->context_map, + 0, iommu->ncb); + if (IS_ERR_VALUE(master->num)) { + ret = -ENODEV; + goto fail; + } + config_mids(iommu, master); + __program_context(iommu->base, master->num, + priv); + } + __disable_clocks(iommu); + list_add(&iommu->dom_node, &priv->list_attached); } - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - __program_context(iommu_drvdata->base, ctx_dev->num, - __pa(priv->pgtable)); - - __disable_clocks(iommu_drvdata); - list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); - ret = __flush_iotlb(domain); + } fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); + return ret; } static void msm_iommu_detach_dev(struct iommu_domain *domain, struct device *dev) { - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; + struct msm_iommu_dev *iommu; + struct msm_iommu_ctx_dev *master; int ret; - spin_lock_irqsave(&msm_iommu_lock, flags); - priv = to_msm_priv(domain); - - if (!dev) - goto fail; - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) - goto fail; - - ret = __flush_iotlb(domain); - if (ret) - goto fail; - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; + free_io_pgtable_ops(priv->iop); - __reset_context(iommu_drvdata->base, ctx_dev->num); - __disable_clocks(iommu_drvdata); - list_del_init(&ctx_drvdata->attached_elm); + spin_lock_irqsave(&msm_iommu_lock, flags); + list_for_each_entry(iommu, &priv->list_attached, dom_node) { + ret = __enable_clocks(iommu); + if (ret) + goto fail; + list_for_each_entry(master, &iommu->ctx_list, list) { + msm_iommu_free_ctx(iommu->context_map, master->num); + __reset_context(iommu->base, master->num); + } + __disable_clocks(iommu); + } fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); } -static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, +static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t pa, size_t len, int prot) { - struct msm_priv *priv; + struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - unsigned int pgprot; - int ret = 0, tex, sh; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; - tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; - - if (tex < 0 || tex > NUM_TEX_CLASS - 1) { - ret = -EINVAL; - goto fail; - } - - priv = to_msm_priv(domain); - - fl_table = priv->pgtable; - - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad size: %d\n", len); - ret = -EINVAL; - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - ret = -EINVAL; - goto fail; - } - - if (len == SZ_16M || len == SZ_1M) { - pgprot = sh ? FL_SHARED : 0; - pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? FL_TEX0 : 0; - } else { - pgprot = sh ? SL_SHARED : 0; - pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? 
SL_TEX0 : 0; - } - - fl_offset = FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (len == SZ_16M) { - int i = 0; - for (i = 0; i < 16; i++) - *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | - FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | - FL_SHARED | FL_NG | pgprot; - } - - if (len == SZ_1M) - *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | - FL_TYPE_SECT | FL_SHARED | pgprot; - - /* Need a 2nd level table */ - if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { - unsigned long *sl; - sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, - get_order(SZ_4K)); - - if (!sl) { - pr_debug("Could not allocate second level table\n"); - ret = -ENOMEM; - goto fail; - } - - memset(sl, 0, SZ_4K); - *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); - } - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - - if (len == SZ_4K) - *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | - SL_SHARED | SL_TYPE_SMALL | pgprot; - - if (len == SZ_64K) { - int i; + int ret; - for (i = 0; i < 16; i++) - *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | - SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; - } + spin_lock_irqsave(&priv->pgtlock, flags); + ret = priv->iop->map(priv->iop, iova, pa, len, prot); + spin_unlock_irqrestore(&priv->pgtlock, flags); - ret = __flush_iotlb(domain); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; } -static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, - size_t len) +static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t len) { - struct msm_priv *priv; + struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - int i, ret = 0; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = to_msm_priv(domain); - fl_table = priv->pgtable; + spin_lock_irqsave(&priv->pgtlock, flags); + len = priv->iop->unmap(priv->iop, iova, len); + spin_unlock_irqrestore(&priv->pgtlock, flags); - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad length: %d\n", len); - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - goto fail; - } - - fl_offset = FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (*fl_pte == 0) { - pr_debug("First level PTE is 0\n"); - goto fail; - } - - /* Unmap supersection */ - if (len == SZ_16M) - for (i = 0; i < 16; i++) - *(fl_pte+i) = 0; - - if (len == SZ_1M) - *fl_pte = 0; - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - if (len == SZ_64K) { - for (i = 0; i < 16; i++) - *(sl_pte+i) = 0; - } - - if (len == SZ_4K) - *sl_pte = 0; - - if (len == SZ_4K || len == SZ_64K) { - int used = 0; - - for (i = 0; i < NUM_SL_PTE; i++) - if (sl_table[i]) - used = 1; - if (!used) { - free_page((unsigned long)sl_table); - *fl_pte = 0; - } - } - - ret = __flush_iotlb(domain); - -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - - /* the IOMMU API requires us to return how many bytes were unmapped */ - len = ret ? 
0 : len; return len; } @@ -555,47 +478,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t va) { struct msm_priv *priv; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_iommu_dev *iommu; + struct msm_iommu_ctx_dev *master; unsigned int par; unsigned long flags; - void __iomem *base; phys_addr_t ret = 0; - int ctx; spin_lock_irqsave(&msm_iommu_lock, flags); priv = to_msm_priv(domain); - if (list_empty(&priv->list_attached)) - goto fail; + iommu = list_first_entry(&priv->list_attached, + struct msm_iommu_dev, dom_node); - ctx_drvdata = list_entry(priv->list_attached.next, - struct msm_iommu_ctx_drvdata, attached_elm); - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); + if (list_empty(&iommu->ctx_list)) + goto fail; - base = iommu_drvdata->base; - ctx = ctx_drvdata->num; + master = list_first_entry(&iommu->ctx_list, + struct msm_iommu_ctx_dev, list); + if (!master) + goto fail; - ret = __enable_clocks(iommu_drvdata); + ret = __enable_clocks(iommu); if (ret) goto fail; /* Invalidate context TLB */ - SET_CTX_TLBIALL(base, ctx, 0); - SET_V2PPR(base, ctx, va & V2Pxx_VA); + SET_CTX_TLBIALL(iommu->base, master->num, 0); + SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA); - par = GET_PAR(base, ctx); + par = GET_PAR(iommu->base, master->num); /* We are dealing with a supersection */ - if (GET_NOFAULT_SS(base, ctx)) + if (GET_NOFAULT_SS(iommu->base, master->num)) ret = (par & 0xFF000000) | (va & 0x00FFFFFF); else /* Upper 20 bits from PAR, lower 12 from VA */ ret = (par & 0xFFFFF000) | (va & 0x00000FFF); - if (GET_FAULT(base, ctx)) + if (GET_FAULT(iommu->base, master->num)) ret = 0; - __disable_clocks(iommu_drvdata); + __disable_clocks(iommu); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); return ret; @@ -629,49 +551,92 @@ static void print_ctx_regs(void __iomem *base, int ctx) GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); pr_err("SCTLR = %08x ACTLR = %08x\n", GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); - pr_err("PRRR = %08x NMRR = %08x\n", - GET_PRRR(base, ctx), GET_NMRR(base, ctx)); +} + +static void insert_iommu_master(struct device *dev, + struct msm_iommu_dev **iommu, + struct of_phandle_args *spec) +{ + struct msm_iommu_ctx_dev *master = dev->archdata.iommu; + int sid; + + if (list_empty(&(*iommu)->ctx_list)) { + master = kzalloc(sizeof(*master), GFP_ATOMIC); + master->of_node = dev->of_node; + list_add(&master->list, &(*iommu)->ctx_list); + dev->archdata.iommu = master; + } + + for (sid = 0; sid < master->num_mids; sid++) + if (master->mids[sid] == spec->args[0]) { + dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n", + sid); + return; + } + + master->mids[master->num_mids++] = spec->args[0]; +} + +static int qcom_iommu_of_xlate(struct device *dev, + struct of_phandle_args *spec) +{ + struct msm_iommu_dev *iommu; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&msm_iommu_lock, flags); + list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) + if (iommu->dev->of_node == spec->np) + break; + + if (!iommu || iommu->dev->of_node != spec->np) { + ret = -ENODEV; + goto fail; + } + + insert_iommu_master(dev, &iommu, spec); +fail: + spin_unlock_irqrestore(&msm_iommu_lock, flags); + + return ret; } irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) { - struct msm_iommu_drvdata *drvdata = dev_id; - void __iomem *base; + struct msm_iommu_dev *iommu = dev_id; unsigned int fsr; int i, ret; spin_lock(&msm_iommu_lock); - if (!drvdata) { + if (!iommu) { pr_err("Invalid 
device ID in context interrupt handler\n"); goto fail; } - base = drvdata->base; - pr_err("Unexpected IOMMU page fault!\n"); - pr_err("base = %08x\n", (unsigned int) base); + pr_err("base = %08x\n", (unsigned int)iommu->base); - ret = __enable_clocks(drvdata); + ret = __enable_clocks(iommu); if (ret) goto fail; - for (i = 0; i < drvdata->ncb; i++) { - fsr = GET_FSR(base, i); + for (i = 0; i < iommu->ncb; i++) { + fsr = GET_FSR(iommu->base, i); if (fsr) { pr_err("Fault occurred in context %d.\n", i); pr_err("Interesting registers:\n"); - print_ctx_regs(base, i); - SET_FSR(base, i, 0x4000000F); + print_ctx_regs(iommu->base, i); + SET_FSR(iommu->base, i, 0x4000000F); } } - __disable_clocks(drvdata); + __disable_clocks(iommu); fail: spin_unlock(&msm_iommu_lock); return 0; } -static const struct iommu_ops msm_iommu_ops = { +static struct iommu_ops msm_iommu_ops = { .capable = msm_iommu_capable, .domain_alloc = msm_iommu_domain_alloc, .domain_free = msm_iommu_domain_free, @@ -682,54 +647,163 @@ static const struct iommu_ops msm_iommu_ops = { .map_sg = default_iommu_map_sg, .iova_to_phys = msm_iommu_iova_to_phys, .pgsize_bitmap = MSM_IOMMU_PGSIZES, + .of_xlate = qcom_iommu_of_xlate, }; -static int __init get_tex_class(int icp, int ocp, int mt, int nos) +static int msm_iommu_probe(struct platform_device *pdev) { - int i = 0; - unsigned int prrr = 0; - unsigned int nmrr = 0; - int c_icp, c_ocp, c_mt, c_nos; - - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - - for (i = 0; i < NUM_TEX_CLASS; i++) { - c_nos = PRRR_NOS(prrr, i); - c_mt = PRRR_MT(prrr, i); - c_icp = NMRR_ICP(nmrr, i); - c_ocp = NMRR_OCP(nmrr, i); - - if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) - return i; + struct resource *r; + struct msm_iommu_dev *iommu; + int ret, par, val; + + iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENODEV; + + iommu->dev = &pdev->dev; + INIT_LIST_HEAD(&iommu->ctx_list); + + iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk"); + if (IS_ERR(iommu->pclk)) { + dev_err(iommu->dev, "could not get smmu_pclk\n"); + return PTR_ERR(iommu->pclk); + } + + ret = clk_prepare(iommu->pclk); + if (ret) { + dev_err(iommu->dev, "could not prepare smmu_pclk\n"); + return ret; + } + + iommu->clk = devm_clk_get(iommu->dev, "iommu_clk"); + if (IS_ERR(iommu->clk)) { + dev_err(iommu->dev, "could not get iommu_clk\n"); + clk_unprepare(iommu->pclk); + return PTR_ERR(iommu->clk); + } + + ret = clk_prepare(iommu->clk); + if (ret) { + dev_err(iommu->dev, "could not prepare iommu_clk\n"); + clk_unprepare(iommu->pclk); + return ret; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iommu->base = devm_ioremap_resource(iommu->dev, r); + if (IS_ERR(iommu->base)) { + dev_err(iommu->dev, "could not get iommu base\n"); + ret = PTR_ERR(iommu->base); + goto fail; } - return -ENODEV; + iommu->irq = platform_get_irq(pdev, 0); + if (iommu->irq < 0) { + dev_err(iommu->dev, "could not get iommu irq\n"); + ret = -ENODEV; + goto fail; + } + + ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val); + if (ret) { + dev_err(iommu->dev, "could not get ncb\n"); + goto fail; + } + iommu->ncb = val; + + msm_iommu_reset(iommu->base, iommu->ncb); + SET_M(iommu->base, 0, 1); + SET_PAR(iommu->base, 0, 0); + SET_V2PCFG(iommu->base, 0, 1); + SET_V2PPR(iommu->base, 0, 0); + par = GET_PAR(iommu->base, 0); + SET_V2PCFG(iommu->base, 0, 0); + SET_M(iommu->base, 0, 0); + + if (!par) { + pr_err("Invalid PAR value detected\n"); + ret = -ENODEV; + goto fail; + } + + ret = 
devm_request_threaded_irq(iommu->dev, iommu->irq, NULL, + msm_iommu_fault_handler, + IRQF_ONESHOT | IRQF_SHARED, + "msm_iommu_secure_irpt_handler", + iommu); + if (ret) { + pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret); + goto fail; + } + + list_add(&iommu->dev_node, &qcom_iommu_devices); + of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops); + + pr_info("device mapped at %p, irq %d with %d ctx banks\n", + iommu->base, iommu->irq, iommu->ncb); + + return ret; +fail: + clk_unprepare(iommu->clk); + clk_unprepare(iommu->pclk); + return ret; +} + +static const struct of_device_id msm_iommu_dt_match[] = { + { .compatible = "qcom,apq8064-iommu" }, + {} +}; + +static int msm_iommu_remove(struct platform_device *pdev) +{ + struct msm_iommu_dev *iommu = platform_get_drvdata(pdev); + + clk_unprepare(iommu->clk); + clk_unprepare(iommu->pclk); + return 0; } -static void __init setup_iommu_tex_classes(void) +static struct platform_driver msm_iommu_driver = { + .driver = { + .name = "msm_iommu", + .of_match_table = msm_iommu_dt_match, + }, + .probe = msm_iommu_probe, + .remove = msm_iommu_remove, +}; + +static int __init msm_iommu_driver_init(void) { - msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = - get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); + int ret; - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = - get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); + ret = platform_driver_register(&msm_iommu_driver); + if (ret != 0) + pr_err("Failed to register IOMMU driver\n"); - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = - get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); + return ret; +} - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = - get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); +static void __exit msm_iommu_driver_exit(void) +{ + platform_driver_unregister(&msm_iommu_driver); } +subsys_initcall(msm_iommu_driver_init); +module_exit(msm_iommu_driver_exit); + static int __init msm_iommu_init(void) { - setup_iommu_tex_classes(); bus_set_iommu(&platform_bus_type, &msm_iommu_ops); return 0; } -subsys_initcall(msm_iommu_init); +static int __init msm_iommu_of_setup(struct device_node *np) +{ + msm_iommu_init(); + return 0; +} + +IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h index 5c7c955e6d25..4ca25d50d679 100644 --- a/drivers/iommu/msm_iommu.h +++ b/drivers/iommu/msm_iommu.h @@ -42,74 +42,53 @@ */ #define MAX_NUM_MIDS 32 +/* Maximum number of context banks that can be present in IOMMU */ +#define IOMMU_MAX_CBS 128 + /** * struct msm_iommu_dev - a single IOMMU hardware instance - * name Human-readable name given to this IOMMU HW instance * ncb Number of context banks present on this IOMMU HW instance + * dev: IOMMU device + * irq: Interrupt number + * clk: The bus clock for this IOMMU hardware instance + * pclk: The clock for the IOMMU bus interconnect + * dev_node: list head in qcom_iommu_device_list + * dom_node: list head for domain + * ctx_list: list of 'struct msm_iommu_ctx_dev' + * context_map: Bitmap to track allocated context banks */ struct msm_iommu_dev { - const char *name; + void __iomem *base; int ncb; + struct device *dev; + int irq; + struct clk *clk; + struct clk *pclk; + struct list_head dev_node; + struct list_head dom_node; + struct list_head ctx_list; + DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); }; /** * struct msm_iommu_ctx_dev - an IOMMU context bank instance - * name 
Human-readable name given to this context bank + * of_node node ptr of client device * num Index of this context bank within the hardware * mids List of Machine IDs that are to be mapped into this context * bank, terminated by -1. The MID is a set of signals on the * AXI bus that identifies the function associated with a specific * memory request. (See ARM spec). + * num_mids Total number of mids + * node list head in ctx_list */ struct msm_iommu_ctx_dev { - const char *name; + struct device_node *of_node; int num; int mids[MAX_NUM_MIDS]; + int num_mids; + struct list_head list; }; - -/** - * struct msm_iommu_drvdata - A single IOMMU hardware instance - * @base: IOMMU config port base address (VA) - * @ncb The number of contexts on this IOMMU - * @irq: Interrupt number - * @clk: The bus clock for this IOMMU hardware instance - * @pclk: The clock for the IOMMU bus interconnect - * - * A msm_iommu_drvdata holds the global driver data about a single piece - * of an IOMMU hardware instance. - */ -struct msm_iommu_drvdata { - void __iomem *base; - int irq; - int ncb; - struct clk *clk; - struct clk *pclk; -}; - -/** - * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance - * @num: Hardware context number of this context - * @pdev: Platform device associated wit this HW instance - * @attached_elm: List element for domains to track which devices are - * attached to them - * - * A msm_iommu_ctx_drvdata holds the driver data for a single context bank - * within each IOMMU hardware instance - */ -struct msm_iommu_ctx_drvdata { - int num; - struct platform_device *pdev; - struct list_head attached_elm; -}; - -/* - * Look up an IOMMU context device by its context name. NULL if none found. - * Useful for testing and drivers that do not yet fully have IOMMU stuff in - * their platform devices. - */ -struct device *msm_iommu_get_ctx(const char *ctx_name); - /* * Interrupt handler for the IOMMU context fault interrupt. Hooking the * interrupt is not supported in the API yet, but this will print an error diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c deleted file mode 100644 index 4b09e815accf..000000000000 --- a/drivers/iommu/msm_iommu_dev.c +++ /dev/null @@ -1,381 +0,0 @@ -/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/iommu.h> -#include <linux/interrupt.h> -#include <linux/err.h> -#include <linux/slab.h> - -#include "msm_iommu_hw-8xxx.h" -#include "msm_iommu.h" - -struct iommu_ctx_iter_data { - /* input */ - const char *name; - - /* output */ - struct device *dev; -}; - -static struct platform_device *msm_iommu_root_dev; - -static int each_iommu_ctx(struct device *dev, void *data) -{ - struct iommu_ctx_iter_data *res = data; - struct msm_iommu_ctx_dev *c = dev->platform_data; - - if (!res || !c || !c->name || !res->name) - return -EINVAL; - - if (!strcmp(res->name, c->name)) { - res->dev = dev; - return 1; - } - return 0; -} - -static int each_iommu(struct device *dev, void *data) -{ - return device_for_each_child(dev, data, each_iommu_ctx); -} - -struct device *msm_iommu_get_ctx(const char *ctx_name) -{ - struct iommu_ctx_iter_data r; - int found; - - if (!msm_iommu_root_dev) { - pr_err("No root IOMMU device.\n"); - goto fail; - } - - r.name = ctx_name; - found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu); - - if (!found) { - pr_err("Could not find context <%s>\n", ctx_name); - goto fail; - } - - return r.dev; -fail: - return NULL; -} -EXPORT_SYMBOL(msm_iommu_get_ctx); - -static void msm_iommu_reset(void __iomem *base, int ncb) -{ - int ctx; - - SET_RPUE(base, 0); - SET_RPUEIE(base, 0); - SET_ESRRESTORE(base, 0); - SET_TBE(base, 0); - SET_CR(base, 0); - SET_SPDMBE(base, 0); - SET_TESTBUSCR(base, 0); - SET_TLBRSW(base, 0); - SET_GLOBAL_TLBIALL(base, 0); - SET_RPU_ACR(base, 0); - SET_TLBLKCRWE(base, 1); - - for (ctx = 0; ctx < ncb; ctx++) { - SET_BPRCOSH(base, ctx, 0); - SET_BPRCISH(base, ctx, 0); - SET_BPRCNSH(base, ctx, 0); - SET_BPSHCFG(base, ctx, 0); - SET_BPMTCFG(base, ctx, 0); - SET_ACTLR(base, ctx, 0); - SET_SCTLR(base, ctx, 0); - SET_FSRRESTORE(base, ctx, 0); - SET_TTBR0(base, ctx, 0); - SET_TTBR1(base, ctx, 0); - SET_TTBCR(base, ctx, 0); - SET_BFBCR(base, ctx, 0); - SET_PAR(base, ctx, 0); - SET_FAR(base, ctx, 0); - SET_CTX_TLBIALL(base, ctx, 0); - SET_TLBFLPTER(base, ctx, 0); - SET_TLBSLPTER(base, ctx, 0); - SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); - SET_CONTEXTIDR(base, ctx, 0); - } -} - -static int msm_iommu_probe(struct platform_device *pdev) -{ - struct resource *r; - struct clk *iommu_clk; - struct clk *iommu_pclk; - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev); - void __iomem *regs_base; - int ret, irq, par; - - if (pdev->id == -1) { - msm_iommu_root_dev = pdev; - return 0; - } - - drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); - - if (!drvdata) { - ret = -ENOMEM; - goto fail; - } - - if (!iommu_dev) { - ret = -ENODEV; - goto fail; - } - - iommu_pclk = clk_get(NULL, "smmu_pclk"); - if (IS_ERR(iommu_pclk)) { - ret = -ENODEV; - goto fail; - } - - ret = clk_prepare_enable(iommu_pclk); - if (ret) - goto fail_enable; - - iommu_clk = clk_get(&pdev->dev, "iommu_clk"); - - if (!IS_ERR(iommu_clk)) { - if (clk_get_rate(iommu_clk) == 0) - clk_set_rate(iommu_clk, 1); - - ret = clk_prepare_enable(iommu_clk); - if (ret) { - clk_put(iommu_clk); - goto fail_pclk; - } - } else - iommu_clk = NULL; - - r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); - regs_base = devm_ioremap_resource(&pdev->dev, r); - if (IS_ERR(regs_base)) { - ret = PTR_ERR(regs_base); - goto fail_clk; 
- } - - irq = platform_get_irq_byname(pdev, "secure_irq"); - if (irq < 0) { - ret = -ENODEV; - goto fail_clk; - } - - msm_iommu_reset(regs_base, iommu_dev->ncb); - - SET_M(regs_base, 0, 1); - SET_PAR(regs_base, 0, 0); - SET_V2PCFG(regs_base, 0, 1); - SET_V2PPR(regs_base, 0, 0); - par = GET_PAR(regs_base, 0); - SET_V2PCFG(regs_base, 0, 0); - SET_M(regs_base, 0, 0); - - if (!par) { - pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); - ret = -ENODEV; - goto fail_clk; - } - - ret = request_irq(irq, msm_iommu_fault_handler, 0, - "msm_iommu_secure_irpt_handler", drvdata); - if (ret) { - pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); - goto fail_clk; - } - - - drvdata->pclk = iommu_pclk; - drvdata->clk = iommu_clk; - drvdata->base = regs_base; - drvdata->irq = irq; - drvdata->ncb = iommu_dev->ncb; - - pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", - iommu_dev->name, regs_base, irq, iommu_dev->ncb); - - platform_set_drvdata(pdev, drvdata); - - clk_disable(iommu_clk); - - clk_disable(iommu_pclk); - - return 0; -fail_clk: - if (iommu_clk) { - clk_disable(iommu_clk); - clk_put(iommu_clk); - } -fail_pclk: - clk_disable_unprepare(iommu_pclk); -fail_enable: - clk_put(iommu_pclk); -fail: - kfree(drvdata); - return ret; -} - -static int msm_iommu_remove(struct platform_device *pdev) -{ - struct msm_iommu_drvdata *drv = NULL; - - drv = platform_get_drvdata(pdev); - if (drv) { - if (drv->clk) { - clk_unprepare(drv->clk); - clk_put(drv->clk); - } - clk_unprepare(drv->pclk); - clk_put(drv->pclk); - memset(drv, 0, sizeof(*drv)); - kfree(drv); - } - return 0; -} - -static int msm_iommu_ctx_probe(struct platform_device *pdev) -{ - struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev); - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - int i, ret; - - if (!c || !pdev->dev.parent) - return -EINVAL; - - drvdata = dev_get_drvdata(pdev->dev.parent); - if (!drvdata) - return -ENODEV; - - ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL); - if (!ctx_drvdata) - return -ENOMEM; - - ctx_drvdata->num = c->num; - ctx_drvdata->pdev = pdev; - - INIT_LIST_HEAD(&ctx_drvdata->attached_elm); - platform_set_drvdata(pdev, ctx_drvdata); - - ret = clk_prepare_enable(drvdata->pclk); - if (ret) - goto fail; - - if (drvdata->clk) { - ret = clk_prepare_enable(drvdata->clk); - if (ret) { - clk_disable_unprepare(drvdata->pclk); - goto fail; - } - } - - /* Program the M2V tables for this context */ - for (i = 0; i < MAX_NUM_MIDS; i++) { - int mid = c->mids[i]; - if (mid == -1) - break; - - SET_M2VCBR_N(drvdata->base, mid, 0); - SET_CBACR_N(drvdata->base, c->num, 0); - - /* Set VMID = 0 */ - SET_VMID(drvdata->base, mid, 0); - - /* Set the context number for that MID to this context */ - SET_CBNDX(drvdata->base, mid, c->num); - - /* Set MID associated with this context bank to 0*/ - SET_CBVMID(drvdata->base, c->num, 0); - - /* Set the ASID for TLB tagging for this context */ - SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num); - - /* Set security bit override to be Non-secure */ - SET_NSCFG(drvdata->base, mid, 3); - } - - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); - - dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); - return 0; -fail: - kfree(ctx_drvdata); - return ret; -} - -static int msm_iommu_ctx_remove(struct platform_device *pdev) -{ - struct msm_iommu_ctx_drvdata *drv = NULL; - drv = platform_get_drvdata(pdev); - if (drv) { - memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata)); - kfree(drv); - } - return 0; -} - -static 
struct platform_driver msm_iommu_driver = { - .driver = { - .name = "msm_iommu", - }, - .probe = msm_iommu_probe, - .remove = msm_iommu_remove, -}; - -static struct platform_driver msm_iommu_ctx_driver = { - .driver = { - .name = "msm_iommu_ctx", - }, - .probe = msm_iommu_ctx_probe, - .remove = msm_iommu_ctx_remove, -}; - -static struct platform_driver * const drivers[] = { - &msm_iommu_driver, - &msm_iommu_ctx_driver, -}; - -static int __init msm_iommu_driver_init(void) -{ - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); -} - -static void __exit msm_iommu_driver_exit(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} - -subsys_initcall(msm_iommu_driver_init); -module_exit(msm_iommu_driver_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c3043d8754e3..b12c12d74c33 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -34,7 +34,7 @@ #include <dt-bindings/memory/mt8173-larb-port.h> #include <soc/mediatek/smi.h> -#include "io-pgtable.h" +#include "mtk_iommu.h" #define REG_MMU_PT_BASE_ADDR 0x000 @@ -93,20 +93,6 @@ #define MTK_PROTECT_PA_ALIGN 128 -struct mtk_iommu_suspend_reg { - u32 standard_axi_mode; - u32 dcm_dis; - u32 ctrl_reg; - u32 int_control0; - u32 int_main_control; -}; - -struct mtk_iommu_client_priv { - struct list_head client; - unsigned int mtk_m4u_id; - struct device *m4udev; -}; - struct mtk_iommu_domain { spinlock_t pgtlock; /* lock for page table */ @@ -116,19 +102,6 @@ struct mtk_iommu_domain { struct iommu_domain domain; }; -struct mtk_iommu_data { - void __iomem *base; - int irq; - struct device *dev; - struct clk *bclk; - phys_addr_t protect_base; /* protect memory base */ - struct mtk_iommu_suspend_reg reg; - struct mtk_iommu_domain *m4u_dom; - struct iommu_group *m4u_group; - struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ - bool enable_4GB; -}; - static struct iommu_ops mtk_iommu_ops; static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) @@ -455,7 +428,6 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) if (!dev->archdata.iommu) { /* Get the m4u device */ m4updev = of_find_device_by_node(args->np); - of_node_put(args->np); if (WARN_ON(!m4updev)) return -EINVAL; @@ -552,25 +524,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return 0; } -static int compare_of(struct device *dev, void *data) -{ - return dev->of_node == data; -} - -static int mtk_iommu_bind(struct device *dev) -{ - struct mtk_iommu_data *data = dev_get_drvdata(dev); - - return component_bind_all(dev, &data->smi_imu); -} - -static void mtk_iommu_unbind(struct device *dev) -{ - struct mtk_iommu_data *data = dev_get_drvdata(dev); - - component_unbind_all(dev, &data->smi_imu); -} - static const struct component_master_ops mtk_iommu_com_ops = { .bind = mtk_iommu_bind, .unbind = mtk_iommu_unbind, diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h new file mode 100644 index 000000000000..9ed0a8462ccf --- /dev/null +++ b/drivers/iommu/mtk_iommu.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2015-2016 MediaTek Inc. + * Author: Honghui Zhang <honghui.zhang@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MTK_IOMMU_H_ +#define _MTK_IOMMU_H_ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <soc/mediatek/smi.h> + +#include "io-pgtable.h" + +struct mtk_iommu_suspend_reg { + u32 standard_axi_mode; + u32 dcm_dis; + u32 ctrl_reg; + u32 int_control0; + u32 int_main_control; +}; + +struct mtk_iommu_client_priv { + struct list_head client; + unsigned int mtk_m4u_id; + struct device *m4udev; +}; + +struct mtk_iommu_domain; + +struct mtk_iommu_data { + void __iomem *base; + int irq; + struct device *dev; + struct clk *bclk; + phys_addr_t protect_base; /* protect memory base */ + struct mtk_iommu_suspend_reg reg; + struct mtk_iommu_domain *m4u_dom; + struct iommu_group *m4u_group; + struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ + bool enable_4GB; +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int mtk_iommu_bind(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + + return component_bind_all(dev, &data->smi_imu); +} + +static void mtk_iommu_unbind(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + + component_unbind_all(dev, &data->smi_imu); +} + +#endif diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c new file mode 100644 index 000000000000..b8aeb0768483 --- /dev/null +++ b/drivers/iommu/mtk_iommu_v1.c @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2015-2016 MediaTek Inc. + * Author: Honghui Zhang <honghui.zhang@mediatek.com> + * + * Based on driver/iommu/mtk_iommu.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/bootmem.h> +#include <linux/bug.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/device.h> +#include <linux/dma-iommu.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> +#include <linux/kmemleak.h> +#include <linux/list.h> +#include <linux/of_address.h> +#include <linux/of_iommu.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <asm/barrier.h> +#include <asm/dma-iommu.h> +#include <linux/module.h> +#include <dt-bindings/memory/mt2701-larb-port.h> +#include <soc/mediatek/smi.h> +#include "mtk_iommu.h" + +#define REG_MMU_PT_BASE_ADDR 0x000 + +#define F_ALL_INVLD 0x2 +#define F_MMU_INV_RANGE 0x1 +#define F_INVLD_EN0 BIT(0) +#define F_INVLD_EN1 BIT(1) + +#define F_MMU_FAULT_VA_MSK 0xfffff000 +#define MTK_PROTECT_PA_ALIGN 128 + +#define REG_MMU_CTRL_REG 0x210 +#define F_MMU_CTRL_COHERENT_EN BIT(8) +#define REG_MMU_IVRP_PADDR 0x214 +#define REG_MMU_INT_CONTROL 0x220 +#define F_INT_TRANSLATION_FAULT BIT(0) +#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) +#define F_INT_INVALID_PA_FAULT BIT(2) +#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) +#define F_INT_TABLE_WALK_FAULT BIT(4) +#define F_INT_TLB_MISS_FAULT BIT(5) +#define F_INT_PFH_DMA_FIFO_OVERFLOW BIT(6) +#define F_INT_MISS_DMA_FIFO_OVERFLOW BIT(7) + +#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5) +#define F_INT_CLR_BIT BIT(12) + +#define REG_MMU_FAULT_ST 0x224 +#define REG_MMU_FAULT_VA 0x228 +#define REG_MMU_INVLD_PA 0x22C +#define REG_MMU_INT_ID 0x388 +#define REG_MMU_INVALIDATE 0x5c0 +#define REG_MMU_INVLD_START_A 0x5c4 +#define REG_MMU_INVLD_END_A 0x5c8 + +#define REG_MMU_INV_SEL 0x5d8 +#define REG_MMU_STANDARD_AXI_MODE 0x5e8 + +#define REG_MMU_DCM 0x5f0 +#define F_MMU_DCM_ON BIT(1) +#define REG_MMU_CPE_DONE 0x60c +#define F_DESC_VALID 0x2 +#define F_DESC_NONSEC BIT(3) +#define MT2701_M4U_TF_LARB(TF) (6 - (((TF) >> 13) & 0x7)) +#define MT2701_M4U_TF_PORT(TF) (((TF) >> 8) & 0xF) +/* MTK generation one iommu HW only supports 4K mappings */ +#define MT2701_IOMMU_PAGE_SHIFT 12 +#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT) + +/* + * The MTK m4u supports a 4GB iova address space and only 4K page + * mappings, so the pagetable size must be exactly 4M.
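+ * + * (Editorial note, not part of the original patch: the 4M figure is just + * the arithmetic 4GB / 4KB = 1M pagetable entries, and 1M entries * 4 + * bytes per entry = 4M; hence the M2701_IOMMU_PGT_SIZE define below.)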
+ */ +#define M2701_IOMMU_PGT_SIZE SZ_4M + +struct mtk_iommu_domain { + spinlock_t pgtlock; /* lock for page table */ + struct iommu_domain domain; + u32 *pgt_va; + dma_addr_t pgt_pa; + struct mtk_iommu_data *data; +}; + +static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct mtk_iommu_domain, domain); +} + +static const int mt2701_m4u_in_larb[] = { + LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, + LARB2_PORT_OFFSET, LARB3_PORT_OFFSET +}; + +static inline int mt2701_m4u_to_larb(int id) +{ + int i; + + for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--) + if ((id) >= mt2701_m4u_in_larb[i]) + return i; + + return 0; +} + +static inline int mt2701_m4u_to_port(int id) +{ + int larb = mt2701_m4u_to_larb(id); + + return id - mt2701_m4u_in_larb[larb]; +} + +static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) +{ + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, + data->base + REG_MMU_INV_SEL); + writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); + wmb(); /* Make sure the tlb flush all done */ +} + +static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data, + unsigned long iova, size_t size) +{ + int ret; + u32 tmp; + + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, + data->base + REG_MMU_INV_SEL); + writel_relaxed(iova & F_MMU_FAULT_VA_MSK, + data->base + REG_MMU_INVLD_START_A); + writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK, + data->base + REG_MMU_INVLD_END_A); + writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); + + ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, + tmp, tmp != 0, 10, 100000); + if (ret) { + dev_warn(data->dev, + "Partial TLB flush timed out, falling back to full flush\n"); + mtk_iommu_tlb_flush_all(data); + } + /* Clear the CPE status */ + writel_relaxed(0, data->base + REG_MMU_CPE_DONE); +} + +static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) +{ + struct mtk_iommu_data *data = dev_id; + struct mtk_iommu_domain *dom = data->m4u_dom; + u32 int_state, regval, fault_iova, fault_pa; + unsigned int fault_larb, fault_port; + + /* Read error information from registers */ + int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST); + fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); + + fault_iova &= F_MMU_FAULT_VA_MSK; + fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); + regval = readl_relaxed(data->base + REG_MMU_INT_ID); + fault_larb = MT2701_M4U_TF_LARB(regval); + fault_port = MT2701_M4U_TF_PORT(regval); + + /* + * MTK v1 iommu HW could not determine whether the fault is read or + * write fault, report as read fault. 
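+ * + * (Editorial note, not part of the original patch: this is why the call + * below always passes IOMMU_FAULT_READ to report_iommu_fault().)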
+ */ + if (report_iommu_fault(&dom->domain, data->dev, fault_iova, + IOMMU_FAULT_READ)) + dev_err_ratelimited(data->dev, + "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n", + int_state, fault_iova, fault_pa, + fault_larb, fault_port); + + /* Interrupt clear */ + regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL); + regval |= F_INT_CLR_BIT; + writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL); + + mtk_iommu_tlb_flush_all(data); + + return IRQ_HANDLED; +} + +static void mtk_iommu_config(struct mtk_iommu_data *data, + struct device *dev, bool enable) +{ + struct mtk_iommu_client_priv *head, *cur, *next; + struct mtk_smi_larb_iommu *larb_mmu; + unsigned int larbid, portid; + + head = dev->archdata.iommu; + list_for_each_entry_safe(cur, next, &head->client, client) { + larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id); + portid = mt2701_m4u_to_port(cur->mtk_m4u_id); + larb_mmu = &data->smi_imu.larb_imu[larbid]; + + dev_dbg(dev, "%s iommu port: %d\n", + enable ? "enable" : "disable", portid); + + if (enable) + larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); + else + larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); + } +} + +static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) +{ + struct mtk_iommu_domain *dom = data->m4u_dom; + + spin_lock_init(&dom->pgtlock); + + dom->pgt_va = dma_zalloc_coherent(data->dev, + M2701_IOMMU_PGT_SIZE, + &dom->pgt_pa, GFP_KERNEL); + if (!dom->pgt_va) + return -ENOMEM; + + writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR); + + dom->data = data; + + return 0; +} + +static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) +{ + struct mtk_iommu_domain *dom; + + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + dom = kzalloc(sizeof(*dom), GFP_KERNEL); + if (!dom) + return NULL; + + return &dom->domain; +} + +static void mtk_iommu_domain_free(struct iommu_domain *domain) +{ + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_data *data = dom->data; + + dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE, + dom->pgt_va, dom->pgt_pa); + kfree(to_mtk_domain(domain)); +} + +static int mtk_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_client_priv *priv = dev->archdata.iommu; + struct mtk_iommu_data *data; + int ret; + + if (!priv) + return -ENODEV; + + data = dev_get_drvdata(priv->m4udev); + if (!data->m4u_dom) { + data->m4u_dom = dom; + ret = mtk_iommu_domain_finalise(data); + if (ret) { + data->m4u_dom = NULL; + return ret; + } + } + + mtk_iommu_config(data, dev, true); + return 0; +} + +static void mtk_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct mtk_iommu_client_priv *priv = dev->archdata.iommu; + struct mtk_iommu_data *data; + + if (!priv) + return; + + data = dev_get_drvdata(priv->m4udev); + mtk_iommu_config(data, dev, false); +} + +static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; + unsigned long flags; + unsigned int i; + u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); + u32 pabase = (u32)paddr; + int map_size = 0; + + spin_lock_irqsave(&dom->pgtlock, flags); + for (i = 0; i < page_num; i++) { + if (pgt_base_iova[i]) { + memset(pgt_base_iova, 0, i * sizeof(u32)); + break; + } + pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC; + pabase += MT2701_IOMMU_PAGE_SIZE; + 
map_size += MT2701_IOMMU_PAGE_SIZE; + } + + spin_unlock_irqrestore(&dom->pgtlock, flags); + + mtk_iommu_tlb_flush_range(dom->data, iova, size); + + return map_size == size ? 0 : -EEXIST; +} + +static size_t mtk_iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + unsigned long flags; + u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); + unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; + + spin_lock_irqsave(&dom->pgtlock, flags); + memset(pgt_base_iova, 0, page_num * sizeof(u32)); + spin_unlock_irqrestore(&dom->pgtlock, flags); + + mtk_iommu_tlb_flush_range(dom->data, iova, size); + + return size; +} + +static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct mtk_iommu_domain *dom = to_mtk_domain(domain); + unsigned long flags; + phys_addr_t pa; + + spin_lock_irqsave(&dom->pgtlock, flags); + pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT)); + pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1)); + spin_unlock_irqrestore(&dom->pgtlock, flags); + + return pa; +} + +/* + * MTK generation one iommu HW supports only one iommu domain; all client + * devices share the same iova address space. + */ +static int mtk_iommu_create_mapping(struct device *dev, + struct of_phandle_args *args) +{ + struct mtk_iommu_client_priv *head, *priv, *next; + struct platform_device *m4updev; + struct dma_iommu_mapping *mtk_mapping; + struct device *m4udev; + int ret; + + if (args->args_count != 1) { + dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n", + args->args_count); + return -EINVAL; + } + + if (!dev->archdata.iommu) { + /* Get the m4u device */ + m4updev = of_find_device_by_node(args->np); + if (WARN_ON(!m4updev)) + return -EINVAL; + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOMEM; + + dev->archdata.iommu = head; + INIT_LIST_HEAD(&head->client); + head->m4udev = &m4updev->dev; + } else { + head = dev->archdata.iommu; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_free_mem; + } + priv->mtk_m4u_id = args->args[0]; + list_add_tail(&priv->client, &head->client); + + m4udev = head->m4udev; + mtk_mapping = m4udev->archdata.iommu; + if (!mtk_mapping) { + /* The MTK iommu supports a 4GB iova address space.
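(Editorial note, not part of the original patch: hence the arm_iommu_create_mapping() call just below covers the range 0 .. 1ULL << 32.)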
*/ + mtk_mapping = arm_iommu_create_mapping(&platform_bus_type, + 0, 1ULL << 32); + if (IS_ERR(mtk_mapping)) { + ret = PTR_ERR(mtk_mapping); + goto err_free_mem; + } + m4udev->archdata.iommu = mtk_mapping; + } + + ret = arm_iommu_attach_device(dev, mtk_mapping); + if (ret) + goto err_release_mapping; + + return 0; + +err_release_mapping: + arm_iommu_release_mapping(mtk_mapping); + m4udev->archdata.iommu = NULL; +err_free_mem: + list_for_each_entry_safe(priv, next, &head->client, client) + kfree(priv); + kfree(head); + dev->archdata.iommu = NULL; + return ret; +} + +static int mtk_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + struct of_phandle_args iommu_spec; + struct of_phandle_iterator it; + int err; + + of_for_each_phandle(&it, err, dev->of_node, "iommus", + "#iommu-cells", 0) { + int count = of_phandle_iterator_args(&it, iommu_spec.args, + MAX_PHANDLE_ARGS); + iommu_spec.np = of_node_get(it.node); + iommu_spec.args_count = count; + + mtk_iommu_create_mapping(dev, &iommu_spec); + of_node_put(iommu_spec.np); + } + + if (!dev->archdata.iommu) /* Not an iommu client device */ + return -ENODEV; + + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + iommu_group_put(group); + return 0; +} + +static void mtk_iommu_remove_device(struct device *dev) +{ + struct mtk_iommu_client_priv *head, *cur, *next; + + head = dev->archdata.iommu; + if (!head) + return; + + list_for_each_entry_safe(cur, next, &head->client, client) { + list_del(&cur->client); + kfree(cur); + } + kfree(head); + dev->archdata.iommu = NULL; + + iommu_group_remove_device(dev); +} + +static struct iommu_group *mtk_iommu_device_group(struct device *dev) +{ + struct mtk_iommu_data *data; + struct mtk_iommu_client_priv *priv; + + priv = dev->archdata.iommu; + if (!priv) + return ERR_PTR(-ENODEV); + + /* All the client devices are in the same m4u iommu-group */ + data = dev_get_drvdata(priv->m4udev); + if (!data->m4u_group) { + data->m4u_group = iommu_group_alloc(); + if (IS_ERR(data->m4u_group)) + dev_err(dev, "Failed to allocate M4U IOMMU group\n"); + } + return data->m4u_group; +} + +static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) +{ + u32 regval; + int ret; + + ret = clk_prepare_enable(data->bclk); + if (ret) { + dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret); + return ret; + } + + regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2); + writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); + + regval = F_INT_TRANSLATION_FAULT | + F_INT_MAIN_MULTI_HIT_FAULT | + F_INT_INVALID_PA_FAULT | + F_INT_ENTRY_REPLACEMENT_FAULT | + F_INT_TABLE_WALK_FAULT | + F_INT_TLB_MISS_FAULT | + F_INT_PFH_DMA_FIFO_OVERFLOW | + F_INT_MISS_DMA_FIFO_OVERFLOW; + writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL); + + /* Protect memory; HW will write here on a translation fault */ + writel_relaxed(data->protect_base, + data->base + REG_MMU_IVRP_PADDR); + + writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM); + + if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, + dev_name(data->dev), (void *)data)) { + writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); + clk_disable_unprepare(data->bclk); + dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq); + return -ENODEV; + } + + return 0; +} + +static struct iommu_ops mtk_iommu_ops = { + .domain_alloc = mtk_iommu_domain_alloc, + .domain_free = mtk_iommu_domain_free, + .attach_dev = mtk_iommu_attach_device, + .detach_dev = mtk_iommu_detach_device, + .map = mtk_iommu_map, + .unmap =
mtk_iommu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = mtk_iommu_iova_to_phys, + .add_device = mtk_iommu_add_device, + .remove_device = mtk_iommu_remove_device, + .device_group = mtk_iommu_device_group, + .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, +}; + +static const struct of_device_id mtk_iommu_of_ids[] = { + { .compatible = "mediatek,mt2701-m4u", }, + {} +}; + +static const struct component_master_ops mtk_iommu_com_ops = { + .bind = mtk_iommu_bind, + .unbind = mtk_iommu_unbind, +}; + +static int mtk_iommu_probe(struct platform_device *pdev) +{ + struct mtk_iommu_data *data; + struct device *dev = &pdev->dev; + struct resource *res; + struct component_match *match = NULL; + struct of_phandle_args larb_spec; + struct of_phandle_iterator it; + void *protect; + int larb_nr, ret, err; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = dev; + + /* Protect memory. HW will access here while translation fault.*/ + protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, + GFP_KERNEL | GFP_DMA); + if (!protect) + return -ENOMEM; + data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(dev, res); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + data->bclk = devm_clk_get(dev, "bclk"); + if (IS_ERR(data->bclk)) + return PTR_ERR(data->bclk); + + larb_nr = 0; + of_for_each_phandle(&it, err, dev->of_node, + "mediatek,larbs", NULL, 0) { + struct platform_device *plarbdev; + int count = of_phandle_iterator_args(&it, larb_spec.args, + MAX_PHANDLE_ARGS); + + if (count) + continue; + + larb_spec.np = of_node_get(it.node); + if (!of_device_is_available(larb_spec.np)) + continue; + + plarbdev = of_find_device_by_node(larb_spec.np); + of_node_put(larb_spec.np); + if (!plarbdev) { + plarbdev = of_platform_device_create( + larb_spec.np, NULL, + platform_bus_type.dev_root); + if (!plarbdev) + return -EPROBE_DEFER; + } + + data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev; + component_match_add(dev, &match, compare_of, larb_spec.np); + larb_nr++; + } + + data->smi_imu.larb_nr = larb_nr; + + platform_set_drvdata(pdev, data); + + ret = mtk_iommu_hw_init(data); + if (ret) + return ret; + + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); + + return component_master_add_with_match(dev, &mtk_iommu_com_ops, match); +} + +static int mtk_iommu_remove(struct platform_device *pdev) +{ + struct mtk_iommu_data *data = platform_get_drvdata(pdev); + + if (iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, NULL); + + clk_disable_unprepare(data->bclk); + devm_free_irq(&pdev->dev, data->irq, data); + component_master_del(&pdev->dev, &mtk_iommu_com_ops); + return 0; +} + +static int __maybe_unused mtk_iommu_suspend(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + struct mtk_iommu_suspend_reg *reg = &data->reg; + void __iomem *base = data->base; + + reg->standard_axi_mode = readl_relaxed(base + + REG_MMU_STANDARD_AXI_MODE); + reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM); + reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); + reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL); + return 0; +} + +static int __maybe_unused mtk_iommu_resume(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + struct mtk_iommu_suspend_reg 
*reg = &data->reg; + void __iomem *base = data->base; + + writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR); + writel_relaxed(reg->standard_axi_mode, + base + REG_MMU_STANDARD_AXI_MODE); + writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM); + writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); + writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL); + writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR); + return 0; +} + +static const struct dev_pm_ops mtk_iommu_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) +}; + +static struct platform_driver mtk_iommu_driver = { + .probe = mtk_iommu_probe, + .remove = mtk_iommu_remove, + .driver = { + .name = "mtk-iommu", + .of_match_table = mtk_iommu_of_ids, + .pm = &mtk_iommu_pm_ops, + } +}; + +static int __init m4u_init(void) +{ + return platform_driver_register(&mtk_iommu_driver); +} + +static void __exit m4u_exit(void) +{ + return platform_driver_unregister(&mtk_iommu_driver); +} + +subsys_initcall(m4u_init); +module_exit(m4u_exit); + +MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations"); +MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index af499aea0a1a..57f23eaaa2f9 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -174,7 +174,7 @@ err_put_node: return NULL; } -void __init of_iommu_init(void) +static int __init of_iommu_init(void) { struct device_node *np; const struct of_device_id *match, *matches = &__iommu_of_table; @@ -186,4 +186,7 @@ void __init of_iommu_init(void) pr_err("Failed to initialise IOMMU %s\n", of_node_full_name(np)); } + + return 0; } +postcore_initcall_sync(of_iommu_init); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 25b4627cb57f..9afcbf79f0b0 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -4,11 +4,10 @@ * published by the Free Software Foundation. 
*/ -#include <asm/cacheflush.h> -#include <asm/pgtable.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/dma-iommu.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -77,7 +76,9 @@ struct rk_iommu_domain { struct list_head iommus; + struct platform_device *pdev; u32 *dt; /* page directory table */ + dma_addr_t dt_dma; spinlock_t iommus_lock; /* lock for iommus list */ spinlock_t dt_lock; /* lock for modifying page directory table */ @@ -93,14 +94,12 @@ struct rk_iommu { struct iommu_domain *domain; /* domain to which iommu is attached */ }; -static inline void rk_table_flush(u32 *va, unsigned int count) +static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma, + unsigned int count) { - phys_addr_t pa_start = virt_to_phys(va); - phys_addr_t pa_end = virt_to_phys(va + count); - size_t size = pa_end - pa_start; + size_t size = count * sizeof(u32); /* count of u32 entry */ - __cpuc_flush_dcache_area(va, size); - outer_flush_range(pa_start, pa_end); + dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE); } static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom) @@ -183,10 +182,9 @@ static inline bool rk_dte_is_pt_valid(u32 dte) return dte & RK_DTE_PT_VALID; } -static u32 rk_mk_dte(u32 *pt) +static inline u32 rk_mk_dte(dma_addr_t pt_dma) { - phys_addr_t pt_phys = virt_to_phys(pt); - return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; + return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; } /* @@ -603,13 +601,16 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, dma_addr_t iova) { + struct device *dev = &rk_domain->pdev->dev; u32 *page_table, *dte_addr; - u32 dte; + u32 dte_index, dte; phys_addr_t pt_phys; + dma_addr_t pt_dma; assert_spin_locked(&rk_domain->dt_lock); - dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; + dte_index = rk_iova_dte_index(iova); + dte_addr = &rk_domain->dt[dte_index]; dte = *dte_addr; if (rk_dte_is_pt_valid(dte)) goto done; @@ -618,19 +619,27 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, if (!page_table) return ERR_PTR(-ENOMEM); - dte = rk_mk_dte(page_table); - *dte_addr = dte; + pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pt_dma)) { + dev_err(dev, "DMA mapping error while allocating page table\n"); + free_page((unsigned long)page_table); + return ERR_PTR(-ENOMEM); + } - rk_table_flush(page_table, NUM_PT_ENTRIES); - rk_table_flush(dte_addr, 1); + dte = rk_mk_dte(pt_dma); + *dte_addr = dte; + rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES); + rk_table_flush(rk_domain, + rk_domain->dt_dma + dte_index * sizeof(u32), 1); done: pt_phys = rk_dte_pt_address(dte); return (u32 *)phys_to_virt(pt_phys); } static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, - u32 *pte_addr, dma_addr_t iova, size_t size) + u32 *pte_addr, dma_addr_t pte_dma, + size_t size) { unsigned int pte_count; unsigned int pte_total = size / SPAGE_SIZE; @@ -645,14 +654,14 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, pte_addr[pte_count] = rk_mk_pte_invalid(pte); } - rk_table_flush(pte_addr, pte_count); + rk_table_flush(rk_domain, pte_dma, pte_count); return pte_count * SPAGE_SIZE; } static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, - dma_addr_t iova, phys_addr_t paddr, size_t size, - int prot) + dma_addr_t pte_dma, 
dma_addr_t iova, + phys_addr_t paddr, size_t size, int prot) { unsigned int pte_count; unsigned int pte_total = size / SPAGE_SIZE; @@ -671,7 +680,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, paddr += SPAGE_SIZE; } - rk_table_flush(pte_addr, pte_count); + rk_table_flush(rk_domain, pte_dma, pte_total); /* * Zap the first and last iova to evict from iotlb any previously @@ -684,7 +693,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, return 0; unwind: /* Unmap the range of iovas that we just mapped */ - rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); + rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, + pte_count * SPAGE_SIZE); iova += pte_count * SPAGE_SIZE; page_phys = rk_pte_page_address(pte_addr[pte_count]); @@ -699,8 +709,9 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; - dma_addr_t iova = (dma_addr_t)_iova; + dma_addr_t pte_dma, iova = (dma_addr_t)_iova; u32 *page_table, *pte_addr; + u32 dte_index, pte_index; int ret; spin_lock_irqsave(&rk_domain->dt_lock, flags); @@ -718,8 +729,13 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, return PTR_ERR(page_table); } - pte_addr = &page_table[rk_iova_pte_index(iova)]; - ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); + dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; + pte_index = rk_iova_pte_index(iova); + pte_addr = &page_table[pte_index]; + pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32); + ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, + paddr, size, prot); + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); return ret; @@ -730,7 +746,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; - dma_addr_t iova = (dma_addr_t)_iova; + dma_addr_t pte_dma, iova = (dma_addr_t)_iova; phys_addr_t pt_phys; u32 dte; u32 *pte_addr; @@ -754,7 +770,8 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, pt_phys = rk_dte_pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); - unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); + pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); spin_unlock_irqrestore(&rk_domain->dt_lock, flags); @@ -787,7 +804,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; int ret, i; - phys_addr_t dte_addr; /* * Allow 'virtual devices' (e.g., drm) to attach to domain. 
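[Editor's note, not part of the diff] The rockchip hunks above thread a pte_dma DMA address alongside each pte_addr CPU pointer so that rk_table_flush() can publish page-table updates with dma_sync_single_for_device() instead of the removed __cpuc_flush_dcache_area()/outer_flush_range() calls. A minimal sketch of the address arithmetic, assuming (as the patch does) that the level-2 table was mapped with dma_map_single() so that rk_dte_pt_address(dte) is usable as a DMA address too; the helper name pte_slot() is hypothetical, while rk_dte_pt_address() and rk_iova_pte_index() are the driver's own:

static u32 *pte_slot(u32 dte, dma_addr_t iova, dma_addr_t *pte_dma)
{
	phys_addr_t pt_phys = rk_dte_pt_address(dte);	/* level-2 table base */
	u32 pte_index = rk_iova_pte_index(iova);	/* iova bits 21..12 */

	/* DMA address of the one u32 PTE that will be written and synced */
	*pte_dma = pt_phys + pte_index * sizeof(u32);
	return (u32 *)phys_to_virt(pt_phys) + pte_index;
}

This mirrors the computation rk_iommu_unmap() now performs inline before calling rk_iommu_unmap_iova().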
@@ -807,14 +823,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, iommu->domain = domain; - ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, + ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu); if (ret) return ret; - dte_addr = virt_to_phys(rk_domain->dt); for (i = 0; i < iommu->num_mmu; i++) { - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, + rk_domain->dt_dma); rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); } @@ -860,7 +876,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, } rk_iommu_disable_stall(iommu); - devm_free_irq(dev, iommu->irq, iommu); + devm_free_irq(iommu->dev, iommu->irq, iommu); iommu->domain = NULL; @@ -870,14 +886,30 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) { struct rk_iommu_domain *rk_domain; + struct platform_device *pdev; + struct device *iommu_dev; - if (type != IOMMU_DOMAIN_UNMANAGED) + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) return NULL; - rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); - if (!rk_domain) + /* Register a pdev per domain so the DMA API has a struct device + * to work with, even when a virtual master has no iommu slave + * device of its own + */ + pdev = platform_device_register_simple("rk_iommu_domain", + PLATFORM_DEVID_AUTO, NULL, 0); + if (IS_ERR(pdev)) return NULL; + rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL); + if (!rk_domain) + goto err_unreg_pdev; + + rk_domain->pdev = pdev; + + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&rk_domain->domain)) + goto err_unreg_pdev; + /* * rk32xx iommus use a 2 level pagetable. * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
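[Editor's note, not part of the diff] Two 1024-entry levels of 4-byte entries over 4KB pages cover exactly 1024 * 1024 * 4KB = 4GB, so a 32-bit iova splits 10/10/12: dte index = iova[31:22], pte index = iova[21:12], page offset = iova[11:0]. A hedged sketch of that split (the helper names below are illustrative; the driver's own accessors are rk_iova_dte_index() and rk_iova_pte_index()):

/* Example: iova 0x12345678 -> dte 0x048, pte 0x345, offset 0x678 */
static inline u32 dte_index_of(dma_addr_t iova) { return (iova >> 22) & 0x3ff; }
static inline u32 pte_index_of(dma_addr_t iova) { return (iova >> 12) & 0x3ff; }
static inline u32 page_off_of(dma_addr_t iova) { return iova & 0xfff; }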
@@ -885,18 +917,36 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) */ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); if (!rk_domain->dt) - goto err_dt; + goto err_put_cookie; + + iommu_dev = &pdev->dev; + rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt, + SPAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) { + dev_err(iommu_dev, "DMA map error for DT\n"); + goto err_free_dt; + } - rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); + rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES); spin_lock_init(&rk_domain->iommus_lock); spin_lock_init(&rk_domain->dt_lock); INIT_LIST_HEAD(&rk_domain->iommus); + rk_domain->domain.geometry.aperture_start = 0; + rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); + rk_domain->domain.geometry.force_aperture = true; + return &rk_domain->domain; -err_dt: - kfree(rk_domain); +err_free_dt: + free_page((unsigned long)rk_domain->dt); +err_put_cookie: + if (type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&rk_domain->domain); +err_unreg_pdev: + platform_device_unregister(pdev); + return NULL; } @@ -912,12 +962,20 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (rk_dte_is_pt_valid(dte)) { phys_addr_t pt_phys = rk_dte_pt_address(dte); u32 *page_table = phys_to_virt(pt_phys); + dma_unmap_single(&rk_domain->pdev->dev, pt_phys, + SPAGE_SIZE, DMA_TO_DEVICE); free_page((unsigned long)page_table); } } + dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma, + SPAGE_SIZE, DMA_TO_DEVICE); free_page((unsigned long)rk_domain->dt); - kfree(rk_domain); + + if (domain->type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&rk_domain->domain); + + platform_device_unregister(rk_domain->pdev); } static bool rk_iommu_is_dev_iommu_master(struct device *dev) @@ -1022,17 +1080,43 @@ static const struct iommu_ops rk_iommu_ops = { .detach_dev = rk_iommu_detach_device, .map = rk_iommu_map, .unmap = rk_iommu_unmap, + .map_sg = default_iommu_map_sg, .add_device = rk_iommu_add_device, .remove_device = rk_iommu_remove_device, .iova_to_phys = rk_iommu_iova_to_phys, .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, }; +static int rk_iommu_domain_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + + /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */ + arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false); + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + return 0; +} + +static struct platform_driver rk_iommu_domain_driver = { + .probe = rk_iommu_domain_probe, + .driver = { + .name = "rk_iommu_domain", + }, +}; + static int rk_iommu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rk_iommu *iommu; struct resource *res; + int num_res = pdev->num_resources; int i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); @@ -1042,12 +1126,13 @@ static int rk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; - iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu, + + iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res, GFP_KERNEL); if (!iommu->bases) return -ENOMEM; - for (i = 0; i < pdev->num_resources; i++) { + for (i = 0; i < num_res; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) continue; @@ -1103,11 +1188,19 @@ static int __init 
rk_iommu_init(void) if (ret) return ret; - return platform_driver_register(&rk_iommu_driver); + ret = platform_driver_register(&rk_iommu_domain_driver); + if (ret) + return ret; + + ret = platform_driver_register(&rk_iommu_driver); + if (ret) + platform_driver_unregister(&rk_iommu_domain_driver); + return ret; } static void __exit rk_iommu_exit(void) { platform_driver_unregister(&rk_iommu_driver); + platform_driver_unregister(&rk_iommu_domain_driver); } subsys_initcall(rk_iommu_init); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 5495a5ba8039..7f8728984f44 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -21,9 +21,9 @@ config ARM_GIC_MAX_NR config ARM_GIC_V2M bool - depends on ARM_GIC - depends on PCI && PCI_MSI - select PCI_MSI_IRQ_DOMAIN + depends on PCI + select ARM_GIC + select PCI_MSI config GIC_NON_BANKED bool @@ -37,7 +37,8 @@ config ARM_GIC_V3 config ARM_GIC_V3_ITS bool - select PCI_MSI_IRQ_DOMAIN + depends on PCI + depends on PCI_MSI config ARM_NVIC bool @@ -62,13 +63,13 @@ config ARM_VIC_NR config ARMADA_370_XP_IRQ bool select GENERIC_IRQ_CHIP - select PCI_MSI_IRQ_DOMAIN if PCI_MSI + select PCI_MSI if PCI config ALPINE_MSI bool - depends on PCI && PCI_MSI + depends on PCI + select PCI_MSI select GENERIC_IRQ_CHIP - select PCI_MSI_IRQ_DOMAIN config ATMEL_AIC_IRQ bool @@ -117,7 +118,6 @@ config HISILICON_IRQ_MBIGEN bool select ARM_GIC_V3 select ARM_GIC_V3_ITS - select GENERIC_MSI_IRQ_DOMAIN config IMGPDC_IRQ bool @@ -250,12 +250,10 @@ config IRQ_MXS config MVEBU_ODMI bool - select GENERIC_MSI_IRQ_DOMAIN config LS_SCFG_MSI def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE depends on PCI && PCI_MSI - select PCI_MSI_IRQ_DOMAIN config PARTITION_PERCPU bool diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 7c42b1d13faf..8bcee65a0b8c 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask, ARMADA_370_XP_SW_TRIG_INT_OFFS); } -static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int armada_xp_mpic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); - } - - return NOTIFY_OK; + armada_xp_mpic_perf_init(); + armada_xp_mpic_smp_cpu_init(); + return 0; } -static struct notifier_block armada_370_xp_mpic_cpu_notifier = { - .notifier_call = armada_xp_mpic_secondary_init, - .priority = 100, -}; - -static int mpic_cascaded_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int mpic_cascaded_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { - armada_xp_mpic_perf_init(); - enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); - } - - return NOTIFY_OK; + armada_xp_mpic_perf_init(); + enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + return 0; } - -static struct notifier_block mpic_cascaded_cpu_notifier = { - .notifier_call = mpic_cascaded_secondary_init, - .priority = 100, -}; -#endif /* CONFIG_SMP */ +#endif static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { .map = armada_370_xp_mpic_irq_map, @@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, set_handle_irq(armada_370_xp_handle_irq); #ifdef CONFIG_SMP set_smp_cross_call(armada_mpic_send_doorbell); - register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); + 
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING, + "AP_IRQ_ARMADA_XP_STARTING", + armada_xp_mpic_starting_cpu, NULL); #endif } else { #ifdef CONFIG_SMP - register_cpu_notifier(&mpic_cascaded_cpu_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING, + "AP_IRQ_ARMADA_CASC_STARTING", + mpic_cascaded_starting_cpu, NULL); #endif irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index df1949c0aa23..d96b2c947e74 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, } } -/* Unmasks the IPI on the CPU when it's online. */ -static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int bcm2836_cpu_starting(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0; - unsigned int mailbox = 0; - - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu); - else if (action == CPU_DYING) - bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu); - - return NOTIFY_OK; + bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, + cpu); + return 0; } -static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { - .notifier_call = bcm2836_arm_irqchip_cpu_notify, - .priority = 100, -}; +static int bcm2836_cpu_dying(unsigned int cpu) +{ + bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, + cpu); + return 0; +} #ifdef CONFIG_ARM static int __init bcm2836_smp_boot_secondary(unsigned int cpu, @@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void) { #ifdef CONFIG_SMP /* Unmask IPIs to the boot CPU. */ - bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier, - CPU_STARTING, - (void *)(uintptr_t)smp_processor_id()); - register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); + cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING, + "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting, + bcm2836_cpu_dying); set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index 2223b3f15d68..f913f4db7ae1 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -234,5 +234,5 @@ static int __init clps711x_intc_init_dt(struct device_node *np, return _clps711x_intc_init(np, res.start, resource_size(&res)); } -IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt); +IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt); #endif diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2c5ba0e704bf..6fc56c3466b0 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -538,23 +538,13 @@ static void gic_cpu_init(void) } #ifdef CONFIG_SMP -static int gic_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) + +static int gic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - gic_cpu_init(); - return NOTIFY_OK; + gic_cpu_init(); + return 0; } -/* - * Notifier for enabling the GIC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers. 
- */ -static struct notifier_block gic_cpu_notifier = { - .notifier_call = gic_secondary_init, - .priority = 100, -}; - static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id) { @@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) static void gic_smp_init(void) { set_smp_cross_call(gic_raise_softirq); - register_cpu_notifier(&gic_cpu_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING, + "AP_IRQ_GICV3_STARTING", gic_starting_cpu, + NULL); } static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1de07eb5839c..c2cab572c511 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, return -EINVAL; } -#ifdef CONFIG_SMP -static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int gic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - gic_cpu_init(&gic_data[0]); - return NOTIFY_OK; + gic_cpu_init(&gic_data[0]); + return 0; } -/* - * Notifier for enabling the GIC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers. - */ -static struct notifier_block gic_cpu_notifier = { - .notifier_call = gic_secondary_init, - .priority = 100, -}; -#endif - static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -1177,8 +1164,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, gic_cpu_map[i] = 0xff; #ifdef CONFIG_SMP set_smp_cross_call(gic_raise_softirq); - register_cpu_notifier(&gic_cpu_notifier); #endif + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "AP_IRQ_GIC_STARTING", + gic_starting_cpu, NULL); set_handle_irq(gic_handle_irq); if (static_key_true(&supports_deactivate)) pr_info("GIC: Using split EOI/Deactivate mode\n"); diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 9e25d8ce08e5..021b0e0833c1 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, return ret; } -#ifdef CONFIG_SMP -static int hip04_irq_secondary_init(struct notifier_block *nfb, - unsigned long action, - void *hcpu) +static int hip04_irq_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - hip04_irq_cpu_init(&hip04_data); - return NOTIFY_OK; + hip04_irq_cpu_init(&hip04_data); + return 0; } -/* - * Notifier for enabling the INTC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers. 
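The gic, gic-v3, armada-370-xp, bcm2836 and hip04 hunks in this range all apply the same transformation: a CPU notifier that filtered on CPU_STARTING/CPU_STARTING_FROZEN becomes a plain callback registered with the hotplug state machine, and the "arbitrarily high priority" trick gives way to an explicitly ordered state. A generic before/after sketch; the foo names and the state constant are placeholders, not from this patch:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* stand-in for the driver's per-cpu init work */
static void foo_cpu_init(void) { }

/* old style: one multiplexed callback that must decode the action */
static int foo_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		foo_cpu_init();
	return NOTIFY_OK;
}

/* new style: invoked exactly at the STARTING step on the new CPU */
static int foo_starting_cpu(unsigned int cpu)
{
	foo_cpu_init();
	return 0;
}

static int __init foo_irqchip_init(void)
{
	/* CPUHP_AP_FOO_STARTING stands in for a real cpuhp_state value */
	return cpuhp_setup_state_nocalls(CPUHP_AP_FOO_STARTING,
					 "AP_FOO_STARTING",
					 foo_starting_cpu, NULL);
}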
- */ -static struct notifier_block hip04_irq_cpu_notifier = { - .notifier_call = hip04_irq_secondary_init, - .priority = 100, -}; -#endif - static const struct irq_domain_ops hip04_irq_domain_ops = { .map = hip04_irq_domain_map, .xlate = hip04_irq_domain_xlate, @@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent) #ifdef CONFIG_SMP set_smp_cross_call(hip04_raise_softirq); - register_cpu_notifier(&hip04_irq_cpu_notifier); #endif set_handle_irq(hip04_handle_irq); hip04_irq_dist_init(&hip04_data); - hip04_irq_cpu_init(&hip04_data); - + cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING", + hip04_irq_starting_cpu, NULL); return 0; } IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init); diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c index dfb8bd390125..b2a98c7b521b 100644 --- a/drivers/leds/leds-powernv.c +++ b/drivers/leds/leds-powernv.c @@ -118,7 +118,7 @@ static int powernv_led_set(struct powernv_led_data *powernv_led, goto out_token; } - rc = be64_to_cpu(msg.params[1]); + rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n", __func__, rc); diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 938467fb82be..22f0634dd3fa 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = { .resume = ledtrig_cpu_syscore_resume, }; -static int ledtrig_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int ledtrig_online_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - ledtrig_cpu(CPU_LED_START); - break; - case CPU_DYING: - ledtrig_cpu(CPU_LED_STOP); - break; - } - - return NOTIFY_OK; + ledtrig_cpu(CPU_LED_START); + return 0; } - -static struct notifier_block ledtrig_cpu_nb = { - .notifier_call = ledtrig_cpu_notify, -}; +static int ledtrig_prepare_down_cpu(unsigned int cpu) +{ + ledtrig_cpu(CPU_LED_STOP); + return 0; +} static int __init ledtrig_cpu_init(void) { int cpu; + int ret; /* Supports up to 9999 cpu cores */ BUILD_BUG_ON(CONFIG_NR_CPUS > 9999); @@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void) } register_syscore_ops(&ledtrig_cpu_syscore_ops); - register_cpu_notifier(&ledtrig_cpu_nb); + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING", + ledtrig_online_cpu, ledtrig_prepare_down_cpu); + if (ret < 0) + pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n", + ret); pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n"); diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index d531f804455d..d6f72c826c1c 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -38,6 +38,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/slab.h> +#include <linux/memblock.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -99,6 +100,7 @@ static DEFINE_MUTEX(smu_mutex); static struct smu_device *smu; static DEFINE_MUTEX(smu_part_access); static int smu_irq_inited; +static unsigned long smu_cmdbuf_abs; static void smu_i2c_retry(unsigned long data); @@ -479,8 +481,13 @@ int __init smu_init (void) printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR); + /* + * SMU based G5s need some memory below 2Gb. Thankfully this is + * called at a time where memblock is still available. 
+ */ + smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); if (smu_cmdbuf_abs == 0) { - printk(KERN_ERR "SMU: Command buffer not allocated !\n"); + printk(KERN_ERR "SMU: Command buffer allocation failed !\n"); ret = -EINVAL; goto fail_np; } diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 5305923752d2..97c372908e78 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -123,4 +123,13 @@ config XGENE_SLIMPRO_MBOX It is used to send short messages between ARM64-bit cores and the SLIMpro Management Engine, primarily for PM. Say Y here if you want to use the APM X-Gene SLIMpro IPCM support. + +config BCM_PDC_MBOX + tristate "Broadcom PDC Mailbox" + depends on ARM64 || COMPILE_TEST + default ARCH_BCM_IPROC + help + Mailbox implementation for the Broadcom PDC ring manager, + which provides access to various offload engines on Broadcom + SoCs. Say Y here if you want to use the Broadcom PDC. endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 0be3e742bb7d..66c38e300dfc 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o + +obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c new file mode 100644 index 000000000000..cbe0c1ee4ba9 --- /dev/null +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -0,0 +1,1531 @@ +/* + * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. + */ + +/* + * Broadcom PDC Mailbox Driver + * The PDC provides a ring based programming interface to one or more hardware + * offload engines. For example, the PDC driver works with both SPU-M and SPU2 + * cryptographic offload hardware. In some chips the PDC is referred to as MDE. + * + * The PDC driver registers with the Linux mailbox framework as a mailbox + * controller, once for each PDC instance. Ring 0 for each PDC is registered as + * a mailbox channel. The PDC driver uses interrupts to determine when data + * transfers to and from an offload engine are complete. The PDC driver uses + * threaded IRQs so that response messages are handled outside of interrupt + * context. + * + * The PDC driver allows multiple messages to be pending in the descriptor + * rings. The tx_msg_start descriptor index indicates where the last message + * starts. The txin_numd value at this index indicates how many descriptor + * indexes make up the message. Similar state is kept on the receive side. When + * an rx interrupt indicates a response is ready, the PDC driver processes numd + * descriptors from the tx and rx ring, thus processing one response at a time. 
+ */ + +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/mailbox_controller.h> +#include <linux/mailbox/brcm-message.h> +#include <linux/scatterlist.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> + +#define PDC_SUCCESS 0 + +#define RING_ENTRY_SIZE sizeof(struct dma64dd) + +/* # entries in PDC dma ring */ +#define PDC_RING_ENTRIES 128 +#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE) +/* Rings are 8k aligned */ +#define RING_ALIGN_ORDER 13 +#define RING_ALIGN BIT(RING_ALIGN_ORDER) + +#define RX_BUF_ALIGN_ORDER 5 +#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER) + +/* descriptor bumping macros */ +#define XXD(x, max_mask) ((x) & (max_mask)) +#define TXD(x, max_mask) XXD((x), (max_mask)) +#define RXD(x, max_mask) XXD((x), (max_mask)) +#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask)) +#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask)) +#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask)) +#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask)) +#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask)) +#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask)) + +/* Length of BCM header at start of SPU msg, in bytes */ +#define BCM_HDR_LEN 8 + +/* + * PDC driver reserves ringset 0 on each SPU for its own use. The driver does + * not currently support use of multiple ringsets on a single PDC engine. + */ +#define PDC_RINGSET 0 + +/* + * Interrupt mask and status definitions. 
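The descriptor-bumping macros above depend on PDC_RING_ENTRIES being a power of two: wrapping an index is a single AND with entries - 1, and the head/tail difference masked the same way counts in-flight descriptors correctly even after the tail wraps past zero. A self-contained illustration of that arithmetic (plain userspace C, example values only; the driver's macros carry the mask as a parameter):

#include <assert.h>

#define ENTRIES	128			/* must be a power of two */
#define MASK	(ENTRIES - 1)		/* 0x7f */
#define NEXT(i)		(((i) + 1) & MASK)
#define ACTIVE(h, t)	(((t) - (h)) & MASK)

int main(void)
{
	unsigned int head = 126, tail = 126;	/* ring empty */

	tail = NEXT(tail);			/* 127 */
	tail = NEXT(tail);			/* wraps to 0 */
	assert(ACTIVE(head, tail) == 2);	/* still two in flight */
	return 0;
}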
Enable interrupts for tx and rx on + * ring 0 + */ +#define PDC_XMTINT_0 (24 + PDC_RINGSET) +#define PDC_RCVINT_0 (16 + PDC_RINGSET) +#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0) +#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0) +#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0) +#define PDC_LAZY_FRAMECOUNT 1 +#define PDC_LAZY_TIMEOUT 10000 +#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24)) +#define PDC_INTMASK_OFFSET 0x24 +#define PDC_INTSTATUS_OFFSET 0x20 +#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET) + +/* + * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata + * before frame + */ +#define PDC_SPU2_RESP_HDR_LEN 17 +#define PDC_CKSUM_CTRL BIT(27) +#define PDC_CKSUM_CTRL_OFFSET 0x400 + +#define PDC_SPUM_RESP_HDR_LEN 32 + +/* + * Sets the following bits for write to transmit control reg: + * 0 - XmtEn - enable activity on the tx channel + * 11 - PtyChkDisable - parity check is disabled + * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory + */ +#define PDC_TX_CTL 0x000C0801 + +/* + * Sets the following bits for write to receive control reg: + * 0 - RcvEn - enable activity on the rx channel + * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf + * 9 - SepRxHdrDescEn - place start of new frames only in descriptors + * that have StartOfFrame set + * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all + * remaining bytes in current frame, report error + * in rx frame status for current frame + * 11 - PtyChkDisable - parity check is disabled + * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory + */ +#define PDC_RX_CTL 0x000C0E01 + +#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1) + +/* descriptor flags */ +#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */ +#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */ +#define D64_CTRL1_EOF BIT(30) /* end of frame */ +#define D64_CTRL1_SOF BIT(31) /* start of frame */ + +#define RX_STATUS_OVERFLOW 0x00800000 +#define RX_STATUS_LEN 0x0000FFFF + +#define PDC_TXREGS_OFFSET 0x200 +#define PDC_RXREGS_OFFSET 0x220 + +/* Maximum size buffer the DMA engine can handle */ +#define PDC_DMA_BUF_MAX 16384 + +struct pdc_dma_map { + void *ctx; /* opaque context associated with frame */ +}; + +/* dma descriptor */ +struct dma64dd { + u32 ctrl1; /* misc control bits */ + u32 ctrl2; /* buffer count and address extension */ + u32 addrlow; /* memory address of the data buffer, bits 31:0 */ + u32 addrhigh; /* memory address of the data buffer, bits 63:32 */ +}; + +/* dma registers per channel (xmt or rcv) */ +struct dma64_regs { + u32 control; /* enable, et al */ + u32 ptr; /* last descriptor posted to chip */ + u32 addrlow; /* descriptor ring base address low 32-bits */ + u32 addrhigh; /* descriptor ring base address bits 63:32 */ + u32 status0; /* last rx descriptor written by hw */ + u32 status1; /* driver does not use */ +}; + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* dma registers. matches hw layout. 
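One detail worth pulling out of the receive-control comment above: RcvOffset occupies bits 7:1, so a status-region byte count can be folded into the register by shifting it left one bit, leaving RcvEn in bit 0 untouched. pdc_hw_init() later in this file programs the register exactly that way, as PDC_RX_CTL + (rx_status_len << 1). Worked example with the SPU-M header length:

unsigned int rx_status_len = 32;	/* PDC_SPUM_RESP_HDR_LEN */
unsigned int rcv_ctl = 0x000C0E01 + (rx_status_len << 1);	/* PDC_RX_CTL */
/* rcv_ctl == 0x000C0E41; (rcv_ctl >> 1) & 0x7f recovers the 32 */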
*/ +struct dma64 { + struct dma64_regs dmaxmt; /* dma tx */ + u32 PAD[2]; + struct dma64_regs dmarcv; /* dma rx */ + u32 PAD[2]; +}; + +/* PDC registers */ +struct pdc_regs { + u32 devcontrol; /* 0x000 */ + u32 devstatus; /* 0x004 */ + u32 PAD; + u32 biststatus; /* 0x00c */ + u32 PAD[4]; + u32 intstatus; /* 0x020 */ + u32 intmask; /* 0x024 */ + u32 gptimer; /* 0x028 */ + + u32 PAD; + u32 intrcvlazy_0; /* 0x030 */ + u32 intrcvlazy_1; /* 0x034 */ + u32 intrcvlazy_2; /* 0x038 */ + u32 intrcvlazy_3; /* 0x03c */ + + u32 PAD[48]; + u32 removed_intrecvlazy; /* 0x100 */ + u32 flowctlthresh; /* 0x104 */ + u32 wrrthresh; /* 0x108 */ + u32 gmac_idle_cnt_thresh; /* 0x10c */ + + u32 PAD[4]; + u32 ifioaccessaddr; /* 0x120 */ + u32 ifioaccessbyte; /* 0x124 */ + u32 ifioaccessdata; /* 0x128 */ + + u32 PAD[21]; + u32 phyaccess; /* 0x180 */ + u32 PAD; + u32 phycontrol; /* 0x188 */ + u32 txqctl; /* 0x18c */ + u32 rxqctl; /* 0x190 */ + u32 gpioselect; /* 0x194 */ + u32 gpio_output_en; /* 0x198 */ + u32 PAD; /* 0x19c */ + u32 txq_rxq_mem_ctl; /* 0x1a0 */ + u32 memory_ecc_status; /* 0x1a4 */ + u32 serdes_ctl; /* 0x1a8 */ + u32 serdes_status0; /* 0x1ac */ + u32 serdes_status1; /* 0x1b0 */ + u32 PAD[11]; /* 0x1b4-1dc */ + u32 clk_ctl_st; /* 0x1e0 */ + u32 hw_war; /* 0x1e4 */ + u32 pwrctl; /* 0x1e8 */ + u32 PAD[5]; + +#define PDC_NUM_DMA_RINGS 4 + struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */ + + /* more registers follow, but we don't use them */ +}; + +/* structure for allocating/freeing DMA rings */ +struct pdc_ring_alloc { + dma_addr_t dmabase; /* DMA address of start of ring */ + void *vbase; /* base kernel virtual address of ring */ + u32 size; /* ring allocation size in bytes */ +}; + +/* PDC state structure */ +struct pdc_state { + /* synchronize access to this PDC state structure */ + spinlock_t pdc_lock; + + /* Index of the PDC whose state is in this structure instance */ + u8 pdc_idx; + + /* Platform device for this PDC instance */ + struct platform_device *pdev; + + /* + * Each PDC instance has a mailbox controller. PDC receives request + * messages through mailboxes, and sends response messages through the + * mailbox framework. + */ + struct mbox_controller mbc; + + unsigned int pdc_irq; + + /* + * Last interrupt status read from PDC device. Saved in interrupt + * handler so the handler can clear the interrupt in the device, + * and the interrupt thread called later can know which interrupt + * bits are active. + */ + unsigned long intstatus; + + /* Number of bytes of receive status prior to each rx frame */ + u32 rx_status_len; + /* Whether a BCM header is prepended to each frame */ + bool use_bcm_hdr; + /* Sum of length of BCM header and rx status header */ + u32 pdc_resp_hdr_len; + + /* The base virtual address of DMA hw registers */ + void __iomem *pdc_reg_vbase; + + /* Pool for allocation of DMA rings */ + struct dma_pool *ring_pool; + + /* Pool for allocation of metadata buffers for response messages */ + struct dma_pool *rx_buf_pool; + + /* + * The base virtual address of DMA tx/rx descriptor rings. Corresponding + * DMA address and size of ring allocation. 
+ */ + struct pdc_ring_alloc tx_ring_alloc; + struct pdc_ring_alloc rx_ring_alloc; + + struct pdc_regs *regs; /* start of PDC registers */ + + struct dma64_regs *txregs_64; /* dma tx engine registers */ + struct dma64_regs *rxregs_64; /* dma rx engine registers */ + + /* + * Arrays of PDC_RING_ENTRIES descriptors + * To use multiple ringsets, this needs to be extended + */ + struct dma64dd *txd_64; /* tx descriptor ring */ + struct dma64dd *rxd_64; /* rx descriptor ring */ + + /* descriptor ring sizes */ + u32 ntxd; /* # tx descriptors */ + u32 nrxd; /* # rx descriptors */ + u32 nrxpost; /* # rx buffers to keep posted */ + u32 ntxpost; /* max number of tx buffers that can be posted */ + + /* + * Index of next tx descriptor to reclaim. That is, the descriptor + * index of the oldest tx buffer for which the host has yet to process + * the corresponding response. + */ + u32 txin; + + /* + * Index of the first transmit descriptor for the sequence of + * message fragments currently under construction. Used to build up + * the txin_numd count for a message. Updated to txout when the host + * starts a new sequence of tx buffers for a new message. + */ + u32 tx_msg_start; + + /* Index of next tx descriptor to post. */ + u32 txout; + + /* + * Number of tx descriptors associated with the message that starts + * at this tx descriptor index. + */ + u32 txin_numd[PDC_RING_ENTRIES]; + + /* + * Index of next rx descriptor to reclaim. This is the index of + * the next descriptor whose data has yet to be processed by the host. + */ + u32 rxin; + + /* + * Index of the first receive descriptor for the sequence of + * message fragments currently under construction. Used to build up + * the rxin_numd count for a message. Updated to rxout when the host + * starts a new sequence of rx buffers for a new message. + */ + u32 rx_msg_start; + + /* + * Saved value of current hardware rx descriptor index. + * The last rx buffer written by the hw is the index previous to + * this one. + */ + u32 last_rx_curr; + + /* Index of next rx descriptor to post. */ + u32 rxout; + + /* + * opaque context associated with frame that starts at each + * rx ring index. + */ + void *rxp_ctx[PDC_RING_ENTRIES]; + + /* + * Scatterlists used to form request and reply frames beginning at a + * given ring index. Retained in order to unmap each sg after reply + * is processed + */ + struct scatterlist *src_sg[PDC_RING_ENTRIES]; + struct scatterlist *dst_sg[PDC_RING_ENTRIES]; + + /* + * Number of rx descriptors associated with the message that starts + * at this descriptor index. Not set for every index. For example, + * if descriptor index i points to a scatterlist with 4 entries, then + * the next three descriptor indexes don't have a value set. 
+ */ + u32 rxin_numd[PDC_RING_ENTRIES]; + + void *resp_hdr[PDC_RING_ENTRIES]; + dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES]; + + struct dentry *debugfs_stats; /* debug FS stats file for this PDC */ + + /* counters */ + u32 pdc_requests; /* number of request messages submitted */ + u32 pdc_replies; /* number of reply messages received */ + u32 txnobuf; /* count of tx ring full */ + u32 rxnobuf; /* count of rx ring full */ + u32 rx_oflow; /* count of rx overflows */ +}; + +/* Global variables */ + +struct pdc_globals { + /* Actual number of SPUs in hardware, as reported by device tree */ + u32 num_spu; +}; + +static struct pdc_globals pdcg; + +/* top level debug FS directory for PDC driver */ +static struct dentry *debugfs_dir; + +static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct pdc_state *pdcs; + char *buf; + ssize_t ret, out_offset, out_count; + + out_count = 512; + + buf = kmalloc(out_count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pdcs = filp->private_data; + out_offset = 0; + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "SPU %u stats:\n", pdcs->pdc_idx); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "PDC requests............%u\n", + pdcs->pdc_requests); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "PDC responses...........%u\n", + pdcs->pdc_replies); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "Tx err ring full........%u\n", + pdcs->txnobuf); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "Rx err ring full........%u\n", + pdcs->rxnobuf); + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "Receive overflow........%u\n", + pdcs->rx_oflow); + + if (out_offset > out_count) + out_offset = out_count; + + ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); + kfree(buf); + return ret; +} + +static const struct file_operations pdc_debugfs_stats = { + .owner = THIS_MODULE, + .open = simple_open, + .read = pdc_debugfs_read, +}; + +/** + * pdc_setup_debugfs() - Create the debug FS directories. If the top-level + * directory has not yet been created, create it now. Create a stats file in + * this directory for a SPU. + * @pdcs: PDC state structure + */ +void pdc_setup_debugfs(struct pdc_state *pdcs) +{ + char spu_stats_name[16]; + + if (!debugfs_initialized()) + return; + + snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx); + if (!debugfs_dir) + debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR, + debugfs_dir, pdcs, + &pdc_debugfs_stats); +} + +void pdc_free_debugfs(void) +{ + if (debugfs_dir && simple_empty(debugfs_dir)) { + debugfs_remove_recursive(debugfs_dir); + debugfs_dir = NULL; + } +} + +/** + * pdc_build_rxd() - Build DMA descriptor to receive SPU result. + * @pdcs: PDC state for SPU that will generate result + * @dma_addr: DMA address of buffer that descriptor is being built for + * @buf_len: Length of the receive buffer, in bytes + * @flags: Flags to be stored in descriptor + */ +static inline void +pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr, + u32 buf_len, u32 flags) +{ + struct device *dev = &pdcs->pdev->dev; + + dev_dbg(dev, + "Writing rx descriptor for PDC %u at index %u with length %u. 
flags %#x\n", + pdcs->pdc_idx, pdcs->rxout, buf_len, flags); + + iowrite32(lower_32_bits(dma_addr), + (void *)&pdcs->rxd_64[pdcs->rxout].addrlow); + iowrite32(upper_32_bits(dma_addr), + (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh); + iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1); + iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2); + /* bump ring index and return */ + pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost); +} + +/** + * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to + * hardware. + * @pdcs: PDC state for the SPU that will process this request + * @dma_addr: DMA address of packet to be transmitted + * @buf_len: Length of tx buffer, in bytes + * @flags: Flags to be stored in descriptor + */ +static inline void +pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len, + u32 flags) +{ + struct device *dev = &pdcs->pdev->dev; + + dev_dbg(dev, + "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n", + pdcs->pdc_idx, pdcs->txout, buf_len, flags); + + iowrite32(lower_32_bits(dma_addr), + (void *)&pdcs->txd_64[pdcs->txout].addrlow); + iowrite32(upper_32_bits(dma_addr), + (void *)&pdcs->txd_64[pdcs->txout].addrhigh); + iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1); + iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2); + + /* bump ring index and return */ + pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost); +} + +/** + * pdc_receive() - Receive a response message from a given SPU. + * @pdcs: PDC state for the SPU to receive from + * @mssg: mailbox message to be returned to client + * + * When the return code indicates success, the response message is available in + * the receive buffers provided prior to submission of the request. + * + * Input: + * pdcs - PDC state structure for the SPU to be polled + * mssg - mailbox message to be returned to client. This function sets the + * context pointer on the message to help the client associate the + * response with a request. + * + * Return: PDC_SUCCESS if one or more receive descriptors was processed + * -EAGAIN indicates that no response message is available + * -EIO an error occurred + */ +static int +pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg) +{ + struct device *dev = &pdcs->pdev->dev; + u32 len, rx_status; + u32 num_frags; + int i; + u8 *resp_hdr; /* virtual addr of start of resp message DMA header */ + u32 frags_rdy; /* number of fragments ready to read */ + u32 rx_idx; /* ring index of start of receive frame */ + dma_addr_t resp_hdr_daddr; + + spin_lock(&pdcs->pdc_lock); + + /* + * return if a complete response message is not yet ready. + * rxin_numd[rxin] is the number of fragments in the next msg + * to read. 
+ */ + frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost); + if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) { + /* See if the hw has written more fragments than we know */ + pdcs->last_rx_curr = + (ioread32((void *)&pdcs->rxregs_64->status0) & + CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE; + frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, + pdcs->nrxpost); + if ((frags_rdy == 0) || + (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) { + /* No response ready */ + spin_unlock(&pdcs->pdc_lock); + return -EAGAIN; + } + /* can't read descriptors/data until write index is read */ + rmb(); + } + + num_frags = pdcs->txin_numd[pdcs->txin]; + dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin], + sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE); + + for (i = 0; i < num_frags; i++) + pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost); + + dev_dbg(dev, "PDC %u reclaimed %d tx descriptors", + pdcs->pdc_idx, num_frags); + + rx_idx = pdcs->rxin; + num_frags = pdcs->rxin_numd[rx_idx]; + /* Return opaque context with result */ + mssg->ctx = pdcs->rxp_ctx[rx_idx]; + pdcs->rxp_ctx[rx_idx] = NULL; + resp_hdr = pdcs->resp_hdr[rx_idx]; + resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx]; + dma_unmap_sg(dev, pdcs->dst_sg[rx_idx], + sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE); + + for (i = 0; i < num_frags; i++) + pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost); + + spin_unlock(&pdcs->pdc_lock); + + dev_dbg(dev, "PDC %u reclaimed %d rx descriptors", + pdcs->pdc_idx, num_frags); + + dev_dbg(dev, + "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n", + pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin, + pdcs->rxout, pdcs->last_rx_curr); + + if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) { + /* + * For SPU-M, get length of response msg and rx overflow status. + */ + rx_status = *((u32 *)resp_hdr); + len = rx_status & RX_STATUS_LEN; + dev_dbg(dev, + "SPU response length %u bytes", len); + if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) { + if (rx_status & RX_STATUS_OVERFLOW) { + dev_err_ratelimited(dev, + "crypto receive overflow"); + pdcs->rx_oflow++; + } else { + dev_info_ratelimited(dev, "crypto rx len = 0"); + } + return -EIO; + } + } + + dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr); + + pdcs->pdc_replies++; + /* if we read one or more rx descriptors, claim success */ + if (num_frags > 0) + return PDC_SUCCESS; + else + return -EIO; +} + +/** + * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit + * descriptors for a given SPU. The scatterlist buffers contain the data for a + * SPU request message. + * @pdcs: PDC state for the SPU that will process this request + * @sg: Scatterlist whose buffers contain part of the SPU request + * + * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors + * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length. + * + * Return: PDC_SUCCESS if successful + * < 0 otherwise + */ +static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) +{ + u32 flags = 0; + u32 eot; + u32 tx_avail; + + /* + * Num descriptors needed. Conservatively assume we need a descriptor + * for every entry in sg. 
+ */ + u32 num_desc; + u32 desc_w = 0; /* Number of tx descriptors written */ + u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */ + dma_addr_t databufptr; /* DMA address to put in descriptor */ + + num_desc = (u32)sg_nents(sg); + + /* check whether enough tx descriptors are available */ + tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout, + pdcs->ntxpost); + if (unlikely(num_desc > tx_avail)) { + pdcs->txnobuf++; + return -ENOSPC; + } + + /* build tx descriptors */ + if (pdcs->tx_msg_start == pdcs->txout) { + /* Start of frame */ + pdcs->txin_numd[pdcs->tx_msg_start] = 0; + pdcs->src_sg[pdcs->txout] = sg; + flags = D64_CTRL1_SOF; + } + + while (sg) { + if (unlikely(pdcs->txout == (pdcs->ntxd - 1))) + eot = D64_CTRL1_EOT; + else + eot = 0; + + /* + * If sg buffer larger than PDC limit, split across + * multiple descriptors + */ + bufcnt = sg_dma_len(sg); + databufptr = sg_dma_address(sg); + while (bufcnt > PDC_DMA_BUF_MAX) { + pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX, + flags | eot); + desc_w++; + bufcnt -= PDC_DMA_BUF_MAX; + databufptr += PDC_DMA_BUF_MAX; + if (unlikely(pdcs->txout == (pdcs->ntxd - 1))) + eot = D64_CTRL1_EOT; + else + eot = 0; + } + sg = sg_next(sg); + if (!sg) + /* Writing last descriptor for frame */ + flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC); + pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot); + desc_w++; + /* Clear start of frame after first descriptor */ + flags &= ~D64_CTRL1_SOF; + } + pdcs->txin_numd[pdcs->tx_msg_start] += desc_w; + + return PDC_SUCCESS; +} + +/** + * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx + * ring. + * @pdcs: PDC state for SPU to process the request + * + * Sets the index of the last descriptor written in both the rx and tx ring. + * + * Return: PDC_SUCCESS + */ +static int pdc_tx_list_final(struct pdc_state *pdcs) +{ + /* + * write barrier to ensure all register writes are complete + * before chip starts to process new request + */ + wmb(); + iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr); + iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr); + pdcs->pdc_requests++; + + return PDC_SUCCESS; +} + +/** + * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC. + * @pdcs: PDC state for SPU handling request + * @dst_sg: scatterlist providing rx buffers for response to be returned to + * mailbox client + * @ctx: Opaque context for this request + * + * Posts a single receive descriptor to hold the metadata that precedes a + * response. For example, with SPU-M, the metadata is a 32-byte DMA header and + * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and + * rx to indicate the start of a new message. + * + * Return: PDC_SUCCESS if successful + * < 0 if an error (e.g., rx ring is full) + */ +static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg, + void *ctx) +{ + u32 flags = 0; + u32 rx_avail; + u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */ + dma_addr_t daddr; + void *vaddr; + + rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout, + pdcs->nrxpost); + if (unlikely(rx_pkt_cnt > rx_avail)) { + pdcs->rxnobuf++; + return -ENOSPC; + } + + /* allocate a buffer for the dma rx status */ + vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr); + if (!vaddr) + return -ENOMEM; + + /* + * Update msg_start indexes for both tx and rx to indicate the start + * of a new sequence of descriptor indexes that contain the fragments + * of the same message. 
+ */ + pdcs->rx_msg_start = pdcs->rxout; + pdcs->tx_msg_start = pdcs->txout; + + /* This is always the first descriptor in the receive sequence */ + flags = D64_CTRL1_SOF; + pdcs->rxin_numd[pdcs->rx_msg_start] = 1; + + if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) + flags |= D64_CTRL1_EOT; + + pdcs->rxp_ctx[pdcs->rxout] = ctx; + pdcs->dst_sg[pdcs->rxout] = dst_sg; + pdcs->resp_hdr[pdcs->rxout] = vaddr; + pdcs->resp_hdr_daddr[pdcs->rxout] = daddr; + pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags); + return PDC_SUCCESS; +} + +/** + * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive + * descriptors for a given SPU. The caller must have already DMA mapped the + * scatterlist. + * @pdcs: PDC state for the SPU whose receive ring gets the buffers + * @sg: Scatterlist whose buffers are added to the receive ring + * + * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX, + * multiple receive descriptors are written, each with a buffer <= + * PDC_DMA_BUF_MAX. + * + * Return: PDC_SUCCESS if successful + * < 0 otherwise (e.g., receive ring is full) + */ +static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) +{ + u32 flags = 0; + u32 rx_avail; + + /* + * Num descriptors needed. Conservatively assume we need a descriptor + * for every entry from our starting point in the scatterlist. + */ + u32 num_desc; + u32 desc_w = 0; /* Number of rx descriptors written */ + u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */ + dma_addr_t databufptr; /* DMA address to put in descriptor */ + + num_desc = (u32)sg_nents(sg); + + rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout, + pdcs->nrxpost); + if (unlikely(num_desc > rx_avail)) { + pdcs->rxnobuf++; + return -ENOSPC; + } + + while (sg) { + if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) + flags = D64_CTRL1_EOT; + else + flags = 0; + + /* + * If sg buffer larger than PDC limit, split across + * multiple descriptors + */ + bufcnt = sg_dma_len(sg); + databufptr = sg_dma_address(sg); + while (bufcnt > PDC_DMA_BUF_MAX) { + pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags); + desc_w++; + bufcnt -= PDC_DMA_BUF_MAX; + databufptr += PDC_DMA_BUF_MAX; + if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) + flags = D64_CTRL1_EOT; + else + flags = 0; + } + pdc_build_rxd(pdcs, databufptr, bufcnt, flags); + desc_w++; + sg = sg_next(sg); + } + pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w; + + return PDC_SUCCESS; +} + +/** + * pdc_irq_handler() - Interrupt handler called in interrupt context. + * @irq: Interrupt number that has fired + * @cookie: PDC state for DMA engine that generated the interrupt + * + * We have to clear the device interrupt status flags here. So cache the + * status for later use in the thread function. Other than that, just return + * WAKE_THREAD to invoke the thread function. 
+ * + * Return: IRQ_WAKE_THREAD if interrupt is ours + * IRQ_NONE otherwise + */ +static irqreturn_t pdc_irq_handler(int irq, void *cookie) +{ + struct pdc_state *pdcs = cookie; + u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); + + if (intstatus & PDC_XMTINTEN_0) + set_bit(PDC_XMTINT_0, &pdcs->intstatus); + if (intstatus & PDC_RCVINTEN_0) + set_bit(PDC_RCVINT_0, &pdcs->intstatus); + + /* Clear interrupt flags in device */ + iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); + + /* Wakeup IRQ thread */ + if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK)) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +/** + * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has + * completed or data is available to receive. + * @irq: Interrupt number + * @cookie: PDC state for PDC that generated the interrupt + * + * On DMA tx complete, notify the mailbox client. On DMA rx complete, process + * as many SPU response messages as are available and send each to the mailbox + * client. + * + * Return: IRQ_HANDLED if we recognized and handled the interrupt + * IRQ_NONE otherwise + */ +static irqreturn_t pdc_irq_thread(int irq, void *cookie) +{ + struct pdc_state *pdcs = cookie; + struct mbox_controller *mbc; + struct mbox_chan *chan; + bool tx_int; + bool rx_int; + int rx_status; + struct brcm_message mssg; + + tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus); + rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus); + + if (pdcs && (tx_int || rx_int)) { + dev_dbg(&pdcs->pdev->dev, + "%s() got irq %d with tx_int %s, rx_int %s", + __func__, irq, + tx_int ? "set" : "clear", rx_int ? "set" : "clear"); + + mbc = &pdcs->mbc; + chan = &mbc->chans[0]; + + if (tx_int) { + dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__); + /* only one frame in flight at a time */ + mbox_chan_txdone(chan, PDC_SUCCESS); + } + if (rx_int) { + while (1) { + /* Could be many frames ready */ + memset(&mssg, 0, sizeof(mssg)); + mssg.type = BRCM_MESSAGE_SPU; + rx_status = pdc_receive(pdcs, &mssg); + if (rx_status >= 0) { + dev_dbg(&pdcs->pdev->dev, + "%s(): invoking client rx cb", + __func__); + mbox_chan_received_data(chan, &mssg); + } else { + dev_dbg(&pdcs->pdev->dev, + "%s(): no SPU response available", + __func__); + break; + } + } + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/** + * pdc_ring_init() - Allocate DMA rings and initialize constant fields of + * descriptors in one ringset. 
+ * @pdcs: PDC instance state + * @ringset: index of ringset being used + * + * Return: PDC_SUCCESS if ring initialized + * < 0 otherwise + */ +static int pdc_ring_init(struct pdc_state *pdcs, int ringset) +{ + int i; + int err = PDC_SUCCESS; + struct dma64 *dma_reg; + struct device *dev = &pdcs->pdev->dev; + struct pdc_ring_alloc tx; + struct pdc_ring_alloc rx; + + /* Allocate tx ring */ + tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase); + if (!tx.vbase) { + err = -ENOMEM; + goto done; + } + + /* Allocate rx ring */ + rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase); + if (!rx.vbase) { + err = -ENOMEM; + goto fail_dealloc; + } + + dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase); + dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase); + dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase); + dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase); + + /* lock after ring allocation to avoid scheduling while atomic */ + spin_lock(&pdcs->pdc_lock); + + memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx)); + memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx)); + + pdcs->rxin = 0; + pdcs->rx_msg_start = 0; + pdcs->last_rx_curr = 0; + pdcs->rxout = 0; + pdcs->txin = 0; + pdcs->tx_msg_start = 0; + pdcs->txout = 0; + + /* Set descriptor array base addresses */ + pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase; + pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase; + + /* Tell device the base DMA address of each ring */ + dma_reg = &pdcs->regs->dmaregs[ringset]; + iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase), + (void *)&dma_reg->dmaxmt.addrlow); + iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase), + (void *)&dma_reg->dmaxmt.addrhigh); + + iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase), + (void *)&dma_reg->dmarcv.addrlow); + iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase), + (void *)&dma_reg->dmarcv.addrhigh); + + /* Initialize descriptors */ + for (i = 0; i < PDC_RING_ENTRIES; i++) { + /* Every tx descriptor can be used for start of frame. */ + if (i != pdcs->ntxpost) { + iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF, + (void *)&pdcs->txd_64[i].ctrl1); + } else { + /* Last descriptor in ringset. Set End of Table. */ + iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF | + D64_CTRL1_EOT, + (void *)&pdcs->txd_64[i].ctrl1); + } + + /* Every rx descriptor can be used for start of frame */ + if (i != pdcs->nrxpost) { + iowrite32(D64_CTRL1_SOF, + (void *)&pdcs->rxd_64[i].ctrl1); + } else { + /* Last descriptor in ringset. Set End of Table. */ + iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT, + (void *)&pdcs->rxd_64[i].ctrl1); + } + } + spin_unlock(&pdcs->pdc_lock); + return PDC_SUCCESS; + +fail_dealloc: + dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase); +done: + return err; +} + +static void pdc_ring_free(struct pdc_state *pdcs) +{ + if (pdcs->tx_ring_alloc.vbase) { + dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase, + pdcs->tx_ring_alloc.dmabase); + pdcs->tx_ring_alloc.vbase = NULL; + } + + if (pdcs->rx_ring_alloc.vbase) { + dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase, + pdcs->rx_ring_alloc.dmabase); + pdcs->rx_ring_alloc.vbase = NULL; + } +} + +/** + * pdc_send_data() - mailbox send_data function + * @chan: The mailbox channel on which the data is sent. The channel + * corresponds to a DMA ringset. + * @data: The mailbox message to be sent. The message must be a + * brcm_message structure. + * + * This function is registered as the send_data function for the mailbox + * controller. 
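For context on how pdc_send_data() below is reached: a consumer goes through the generic mailbox client API, binds the single channel this controller exports, and hands over a brcm_message whose spu scatterlists the PDC then maps and posts. A hedged sketch of that client-side sequence; the function name, error handling, and channel-lifetime choices here are illustrative, not taken from this patch:

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static int example_spu_submit(struct mbox_client *cl, struct scatterlist *src,
			      struct scatterlist *dst, void *ctx)
{
	struct mbox_chan *chan;
	struct brcm_message msg;
	int err;

	chan = mbox_request_channel(cl, 0);	/* ring 0, the only channel */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	memset(&msg, 0, sizeof(msg));
	msg.type = BRCM_MESSAGE_SPU;	/* only type pdc_send_data() accepts */
	msg.spu.src = src;
	msg.spu.dst = dst;
	msg.ctx = ctx;		/* echoed back on the rx callback */

	err = mbox_send_message(chan, &msg);	/* lands in pdc_send_data() */
	mbox_free_channel(chan);
	return err < 0 ? err : 0;
}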
From the destination scatterlist in the mailbox message, it + * creates a sequence of receive descriptors in the rx ring. From the source + * scatterlist, it creates a sequence of transmit descriptors in the tx ring. + * After creating the descriptors, it writes the rx ptr and tx ptr registers to + * initiate the DMA transfer. + * + * This function does the DMA map and unmap of the src and dst scatterlists in + * the mailbox message. + * + * Return: 0 if successful + * -ENOTSUPP if the mailbox message is a type this driver does not + * support + * < 0 if an error + */ +static int pdc_send_data(struct mbox_chan *chan, void *data) +{ + struct pdc_state *pdcs = chan->con_priv; + struct device *dev = &pdcs->pdev->dev; + struct brcm_message *mssg = data; + int err = PDC_SUCCESS; + int src_nent; + int dst_nent; + int nent; + + if (mssg->type != BRCM_MESSAGE_SPU) + return -ENOTSUPP; + + src_nent = sg_nents(mssg->spu.src); + if (src_nent) { + nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE); + if (nent == 0) + return -EIO; + } + + dst_nent = sg_nents(mssg->spu.dst); + if (dst_nent) { + nent = dma_map_sg(dev, mssg->spu.dst, dst_nent, + DMA_FROM_DEVICE); + if (nent == 0) { + dma_unmap_sg(dev, mssg->spu.src, src_nent, + DMA_TO_DEVICE); + return -EIO; + } + } + + spin_lock(&pdcs->pdc_lock); + + /* Create rx descriptors to catch SPU response */ + err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx); + err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst); + + /* Create tx descriptors to submit SPU request */ + err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src); + err |= pdc_tx_list_final(pdcs); /* initiate transfer */ + + spin_unlock(&pdcs->pdc_lock); + + if (err) + dev_err(&pdcs->pdev->dev, + "%s failed with error %d", __func__, err); + + return err; +} + +static int pdc_startup(struct mbox_chan *chan) +{ + return pdc_ring_init(chan->con_priv, PDC_RINGSET); +} + +static void pdc_shutdown(struct mbox_chan *chan) +{ + struct pdc_state *pdcs = chan->con_priv; + + if (pdcs) + dev_dbg(&pdcs->pdev->dev, + "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx); + + pdc_ring_free(pdcs); +} + +/** + * pdc_hw_init() - Use the given initialization parameters to initialize the + * state for one of the PDCs. 
+ * @pdcs: state of the PDC + */ +static +void pdc_hw_init(struct pdc_state *pdcs) +{ + struct platform_device *pdev; + struct device *dev; + struct dma64 *dma_reg; + int ringset = PDC_RINGSET; + + pdev = pdcs->pdev; + dev = &pdev->dev; + + dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx); + dev_dbg(dev, "state structure: %p", + pdcs); + dev_dbg(dev, " - base virtual addr of hw regs %p", + pdcs->pdc_reg_vbase); + + /* initialize data structures */ + pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase; + pdcs->txregs_64 = (struct dma64_regs *) + (void *)(((u8 *)pdcs->pdc_reg_vbase) + + PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset)); + pdcs->rxregs_64 = (struct dma64_regs *) + (void *)(((u8 *)pdcs->pdc_reg_vbase) + + PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset)); + + pdcs->ntxd = PDC_RING_ENTRIES; + pdcs->nrxd = PDC_RING_ENTRIES; + pdcs->ntxpost = PDC_RING_ENTRIES - 1; + pdcs->nrxpost = PDC_RING_ENTRIES - 1; + pdcs->regs->intmask = 0; + + dma_reg = &pdcs->regs->dmaregs[ringset]; + iowrite32(0, (void *)&dma_reg->dmaxmt.ptr); + iowrite32(0, (void *)&dma_reg->dmarcv.ptr); + + iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control); + + iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1), + (void *)&dma_reg->dmarcv.control); + + if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN) + iowrite32(PDC_CKSUM_CTRL, + pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET); +} + +/** + * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata + * header returned with each response message. + * @pdcs: PDC state structure + * + * The metadata is not returned to the mailbox client. So the PDC driver + * manages these buffers. + * + * Return: PDC_SUCCESS + * -ENOMEM if pool creation fails + */ +static int pdc_rx_buf_pool_create(struct pdc_state *pdcs) +{ + struct platform_device *pdev; + struct device *dev; + + pdev = pdcs->pdev; + dev = &pdev->dev; + + pdcs->pdc_resp_hdr_len = pdcs->rx_status_len; + if (pdcs->use_bcm_hdr) + pdcs->pdc_resp_hdr_len += BCM_HDR_LEN; + + pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev, + pdcs->pdc_resp_hdr_len, + RX_BUF_ALIGN, 0); + if (!pdcs->rx_buf_pool) + return -ENOMEM; + + return PDC_SUCCESS; +} + +/** + * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and + * specify a threaded IRQ handler for deferred handling of interrupts outside of + * interrupt context. + * @pdcs: PDC state + * + * Set the interrupt mask for transmit and receive done. + * Set the lazy interrupt frame count to generate an interrupt for just one pkt. 
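The interrupt setup documented above writes PDC_LAZY_INT into the ring 0 coalescing register (PDC_RCVLAZY0_OFFSET). From its definition earlier in this file, PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24), the frame count sits in the top byte and the timeout in the low 24 bits, so a frame count of one means every received frame raises an interrupt:

/* PDC_LAZY_INT == 10000 | (1 << 24) == 0x01002710 */
unsigned int lazy = 0x01002710;
unsigned int framecount = lazy >> 24;		/* 1 frame */
unsigned int timeout = lazy & 0x00ffffff;	/* 10000 */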
+ * + * Return: PDC_SUCCESS + * <0 if threaded irq request fails + */ +static int pdc_interrupts_init(struct pdc_state *pdcs) +{ + struct platform_device *pdev = pdcs->pdev; + struct device *dev = &pdev->dev; + struct device_node *dn = pdev->dev.of_node; + int err; + + pdcs->intstatus = 0; + + /* interrupt configuration */ + iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET); + iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET); + + /* read irq from device tree */ + pdcs->pdc_irq = irq_of_parse_and_map(dn, 0); + dev_dbg(dev, "pdc device %s irq %u for pdcs %p", + dev_name(dev), pdcs->pdc_irq, pdcs); + err = devm_request_threaded_irq(dev, pdcs->pdc_irq, + pdc_irq_handler, + pdc_irq_thread, 0, dev_name(dev), pdcs); + if (err) { + dev_err(dev, "threaded tx IRQ %u request failed with err %d\n", + pdcs->pdc_irq, err); + return err; + } + return PDC_SUCCESS; +} + +static const struct mbox_chan_ops pdc_mbox_chan_ops = { + .send_data = pdc_send_data, + .startup = pdc_startup, + .shutdown = pdc_shutdown +}; + +/** + * pdc_mb_init() - Initialize the mailbox controller. + * @pdcs: PDC state + * + * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel + * driver only uses one ringset and thus one mb channel. PDC uses the transmit + * complete interrupt to determine when a mailbox message has successfully been + * transmitted. + * + * Return: 0 on success + * < 0 if there is an allocation or registration failure + */ +static int pdc_mb_init(struct pdc_state *pdcs) +{ + struct device *dev = &pdcs->pdev->dev; + struct mbox_controller *mbc; + int chan_index; + int err; + + mbc = &pdcs->mbc; + mbc->dev = dev; + mbc->ops = &pdc_mbox_chan_ops; + mbc->num_chans = 1; + mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans), + GFP_KERNEL); + if (!mbc->chans) + return -ENOMEM; + + mbc->txdone_irq = true; + mbc->txdone_poll = false; + for (chan_index = 0; chan_index < mbc->num_chans; chan_index++) + mbc->chans[chan_index].con_priv = pdcs; + + /* Register mailbox controller */ + err = mbox_controller_register(mbc); + if (err) { + dev_crit(dev, + "Failed to register PDC mailbox controller. Error %d.", + err); + return err; + } + return 0; +} + +/** + * pdc_dt_read() - Read application-specific data from device tree. + * @pdev: Platform device + * @pdcs: PDC state + * + * Reads the number of bytes of receive status that precede each received frame. + * Reads whether transmit and received frames should be preceded by an 8-byte + * BCM header. + * + * Return: 0 if successful + * -ENODEV if device not available + */ +static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs) +{ + struct device *dev = &pdev->dev; + struct device_node *dn = pdev->dev.of_node; + int err; + + err = of_property_read_u32(dn, "brcm,rx-status-len", + &pdcs->rx_status_len); + if (err < 0) + dev_err(dev, + "%s failed to get DMA receive status length from device tree", + __func__); + + pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr"); + + return 0; +} + +/** + * pdc_probe() - Probe function for PDC driver. + * @pdev: PDC platform device + * + * Reserve and map register regions defined in device tree. + * Allocate and initialize tx and rx DMA rings. + * Initialize a mailbox controller for each PDC. 
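pdc_dt_read() and the resource lookups in pdc_probe() together imply a binding along the lines of the node below. Only the compatible string and the two brcm,* properties come from this patch; the unit address, register size, interrupt specifier and property values are invented for illustration.

pdc0: iproc-pdc@612c0000 {
	compatible = "brcm,iproc-pdc-mbox";
	reg = <0x612c0000 0x445>;	/* mapped with devm_ioremap_resource() */
	interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;	/* slot 0 for irq_of_parse_and_map() */
	#mbox-cells = <1>;
	brcm,rx-status-len = <32>;	/* status bytes preceding each rx frame */
	brcm,use-bcm-hdr;		/* frames carry an 8-byte BCM header */
};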
+ * + * Return: 0 if successful + * < 0 if an error + */ +static int pdc_probe(struct platform_device *pdev) +{ + int err = 0; + struct device *dev = &pdev->dev; + struct resource *pdc_regs; + struct pdc_state *pdcs; + + /* PDC state for one SPU */ + pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL); + if (!pdcs) { + err = -ENOMEM; + goto cleanup; + } + + spin_lock_init(&pdcs->pdc_lock); + pdcs->pdev = pdev; + platform_set_drvdata(pdev, pdcs); + pdcs->pdc_idx = pdcg.num_spu; + pdcg.num_spu++; + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (err) { + dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err); + goto cleanup; + } + + /* Create DMA pool for tx ring */ + pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE, + RING_ALIGN, 0); + if (!pdcs->ring_pool) { + err = -ENOMEM; + goto cleanup; + } + + err = pdc_dt_read(pdev, pdcs); + if (err) + goto cleanup_ring_pool; + + pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pdc_regs) { + err = -ENODEV; + goto cleanup_ring_pool; + } + dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa", + &pdc_regs->start, &pdc_regs->end); + + pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs); + if (IS_ERR(pdcs->pdc_reg_vbase)) { + err = PTR_ERR(pdcs->pdc_reg_vbase); + dev_err(&pdev->dev, "Failed to map registers: %d\n", err); + goto cleanup_ring_pool; + } + + /* create rx buffer pool after dt read to know how big buffers are */ + err = pdc_rx_buf_pool_create(pdcs); + if (err) + goto cleanup_ring_pool; + + pdc_hw_init(pdcs); + + err = pdc_interrupts_init(pdcs); + if (err) + goto cleanup_buf_pool; + + /* Initialize mailbox controller */ + err = pdc_mb_init(pdcs); + if (err) + goto cleanup_buf_pool; + + pdcs->debugfs_stats = NULL; + pdc_setup_debugfs(pdcs); + + dev_dbg(dev, "pdc_probe() successful"); + return PDC_SUCCESS; + +cleanup_buf_pool: + dma_pool_destroy(pdcs->rx_buf_pool); + +cleanup_ring_pool: + dma_pool_destroy(pdcs->ring_pool); + +cleanup: + return err; +} + +static int pdc_remove(struct platform_device *pdev) +{ + struct pdc_state *pdcs = platform_get_drvdata(pdev); + + pdc_free_debugfs(); + + mbox_controller_unregister(&pdcs->mbc); + + dma_pool_destroy(pdcs->rx_buf_pool); + dma_pool_destroy(pdcs->ring_pool); + return 0; +} + +static const struct of_device_id pdc_mbox_of_match[] = { + {.compatible = "brcm,iproc-pdc-mbox"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pdc_mbox_of_match); + +static struct platform_driver pdc_mbox_driver = { + .probe = pdc_probe, + .remove = pdc_remove, + .driver = { + .name = "brcm-iproc-pdc-mbox", + .of_match_table = of_match_ptr(pdc_mbox_of_match), + }, +}; +module_platform_driver(pdc_mbox_driver); + +MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom PDC mailbox driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 58d04726cdd7..9ca96e9db6bf 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -133,6 +133,7 @@ static ssize_t mbox_test_message_write(struct file *filp, out: kfree(tdev->signal); kfree(tdev->message); + tdev->signal = NULL; return ret < 0 ? 
ret : count; } diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index f80acb36ff07..2dbed87094d7 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c @@ -58,29 +58,29 @@ static ATOMIC_NOTIFIER_HEAD(ipc_notifier); static inline void set_destination(int source, int mbox) { - __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox)); - __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox)); + writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox)); + writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox)); } static inline void clear_destination(int source, int mbox) { - __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox)); - __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox)); + writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox)); + writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox)); } static void __ipc_send(int mbox, u32 *data) { int i; for (i = 0; i < 7; i++) - __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i)); - __raw_writel(0x1, ipc_base + IPCMxSEND(mbox)); + writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i)); + writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox)); } static u32 __ipc_rcv(int mbox, u32 *data) { int i; for (i = 0; i < 7; i++) - data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i)); + data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i)); return data[1]; } @@ -112,15 +112,15 @@ static irqreturn_t ipc_handler(int irq, void *dev) u32 irq_stat; u32 data[7]; - irq_stat = __raw_readl(ipc_base + IPCMMIS(1)); + irq_stat = readl_relaxed(ipc_base + IPCMMIS(1)); if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) { - __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); + writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); complete(&ipc_completion); } if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) { __ipc_rcv(IPC_RX_MBOX, data); atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1); - __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX)); + writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX)); } return IRQ_HANDLED; @@ -146,7 +146,7 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id) if (ipc_base == NULL) return -ENOMEM; - __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); + writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); ipc_irq = adev->irq[0]; ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL); @@ -154,20 +154,20 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id) goto err; /* Init slow mailbox */ - __raw_writel(CHAN_MASK(A9_SOURCE), - ipc_base + IPCMxSOURCE(IPC_TX_MBOX)); - __raw_writel(CHAN_MASK(M3_SOURCE), - ipc_base + IPCMxDSET(IPC_TX_MBOX)); - __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), - ipc_base + IPCMxMSET(IPC_TX_MBOX)); + writel_relaxed(CHAN_MASK(A9_SOURCE), + ipc_base + IPCMxSOURCE(IPC_TX_MBOX)); + writel_relaxed(CHAN_MASK(M3_SOURCE), + ipc_base + IPCMxDSET(IPC_TX_MBOX)); + writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), + ipc_base + IPCMxMSET(IPC_TX_MBOX)); /* Init receive mailbox */ - __raw_writel(CHAN_MASK(M3_SOURCE), - ipc_base + IPCMxSOURCE(IPC_RX_MBOX)); - __raw_writel(CHAN_MASK(A9_SOURCE), - ipc_base + IPCMxDSET(IPC_RX_MBOX)); - __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), - ipc_base + IPCMxMSET(IPC_RX_MBOX)); + writel_relaxed(CHAN_MASK(M3_SOURCE), + ipc_base + IPCMxSOURCE(IPC_RX_MBOX)); + writel_relaxed(CHAN_MASK(A9_SOURCE), + ipc_base + IPCMxDSET(IPC_RX_MBOX)); + writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), + ipc_base + IPCMxMSET(IPC_RX_MBOX)); return 
0; err: diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 6d35dd4e9efb..4788b0b989a9 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -142,7 +142,7 @@ static int linear_iterate_devices(struct dm_target *ti, } static long linear_direct_access(struct dm_target *ti, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct linear_c *lc = ti->private; struct block_device *bdev = lc->dev->bdev; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 731e1f5bd895..ce2a910709f7 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2303,7 +2303,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) } static long origin_direct_access(struct dm_target *ti, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { DMWARN("device does not support dax."); return -EIO; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 01bb9cf2a8c2..83f1d4667195 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -309,7 +309,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) } static long stripe_direct_access(struct dm_target *ti, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct stripe_c *sc = ti->private; uint32_t stripe; diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 6eecd6b36f76..710ae28fd618 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -149,7 +149,7 @@ static void io_err_release_clone_rq(struct request *clone) } static long io_err_direct_access(struct dm_target *ti, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { return -EIO; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ceb69fc0b10b..25d1d97154a8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -906,7 +906,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); static long dm_blk_direct_access(struct block_device *bdev, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_table *map; diff --git a/drivers/md/md.c b/drivers/md/md.c index 1f123f5a29da..2c3ab6f5e6be 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2482,8 +2482,7 @@ static int add_bound_rdev(struct md_rdev *rdev) if (add_journal) mddev_resume(mddev); if (err) { - unbind_rdev_from_array(rdev); - export_rdev(rdev); + md_kick_rdev_from_array(rdev); return err; } } @@ -2600,6 +2599,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) else err = -EBUSY; } else if (cmd_match(buf, "remove")) { + if (rdev->mddev->pers) { + clear_bit(Blocked, &rdev->flags); + remove_and_add_spares(rdev->mddev, rdev); + } if (rdev->raid_disk >= 0) err = -EBUSY; else { @@ -3176,8 +3179,7 @@ int md_rdev_init(struct md_rdev *rdev) rdev->data_offset = 0; rdev->new_data_offset = 0; rdev->sb_events = 0; - rdev->last_read_error.tv_sec = 0; - rdev->last_read_error.tv_nsec = 0; + rdev->last_read_error = 0; rdev->sb_loaded = 0; rdev->bb_page = NULL; atomic_set(&rdev->nr_pending, 0); @@ -3583,6 +3585,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->to_remove = &md_redundancy_group; } + module_put(oldpers->owner); + rdev_for_each(rdev, mddev) { if (rdev->raid_disk < 0) continue; @@ -3940,6 +3944,8 @@ array_state_store(struct mddev *mddev, const 
char *buf, size_t len) } else err = -EBUSY; } + if (!err) + sysfs_notify_dirent_safe(mddev->sysfs_state); spin_unlock(&mddev->lock); return err ?: len; } @@ -4191,7 +4197,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len) return err; if (mddev->pers) { err = update_size(mddev, sectors); - md_update_sb(mddev, 1); + if (err == 0) + md_update_sb(mddev, 1); } else { if (mddev->dev_sectors == 0 || mddev->dev_sectors > sectors) @@ -7813,6 +7820,7 @@ void md_do_sync(struct md_thread *thread) if (ret) goto skip; + set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) @@ -8151,18 +8159,11 @@ void md_do_sync(struct md_thread *thread) } } skip: - if (mddev_is_clustered(mddev) && - ret == 0) { - /* set CHANGE_PENDING here since maybe another - * update is needed, so other nodes are informed */ - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS)); - md_wakeup_thread(mddev->thread); - wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); - md_cluster_ops->resync_finish(mddev); - } else - set_bit(MD_CHANGE_DEVS, &mddev->flags); + /* set CHANGE_PENDING here since maybe another update is needed, + * so other nodes are informed. It should be harmless for normal + * raid */ + set_mask_bits(&mddev->flags, 0, + BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS)); spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -8188,15 +8189,34 @@ static int remove_and_add_spares(struct mddev *mddev, struct md_rdev *rdev; int spares = 0; int removed = 0; + bool remove_some = false; - rdev_for_each(rdev, mddev) + rdev_for_each(rdev, mddev) { + if ((this == NULL || rdev == this) && + rdev->raid_disk >= 0 && + !test_bit(Blocked, &rdev->flags) && + test_bit(Faulty, &rdev->flags) && + atomic_read(&rdev->nr_pending)==0) { + /* Faulty non-Blocked devices with nr_pending == 0 + * never get nr_pending incremented, + * never get Faulty cleared, and never get Blocked set. 
+ * So we can synchronize_rcu now rather than once per device + */ + remove_some = true; + set_bit(RemoveSynchronized, &rdev->flags); + } + } + + if (remove_some) + synchronize_rcu(); + rdev_for_each(rdev, mddev) { if ((this == NULL || rdev == this) && rdev->raid_disk >= 0 && !test_bit(Blocked, &rdev->flags) && - (test_bit(Faulty, &rdev->flags) || + ((test_bit(RemoveSynchronized, &rdev->flags) || (!test_bit(In_sync, &rdev->flags) && !test_bit(Journal, &rdev->flags))) && - atomic_read(&rdev->nr_pending)==0) { + atomic_read(&rdev->nr_pending)==0)) { if (mddev->pers->hot_remove_disk( mddev, rdev) == 0) { sysfs_unlink_rdev(mddev, rdev); @@ -8204,6 +8224,10 @@ static int remove_and_add_spares(struct mddev *mddev, removed++; } } + if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) + clear_bit(RemoveSynchronized, &rdev->flags); + } + if (removed && mddev->kobj.sd) sysfs_notify(&mddev->kobj, NULL, "degraded"); @@ -8506,6 +8530,11 @@ void md_reap_sync_thread(struct mddev *mddev) rdev->saved_raid_disk = -1; md_update_sb(mddev, 1); + /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can + * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by + * clustered raid */ + if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) + md_cluster_ops->resync_finish(mddev); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); @@ -8803,6 +8832,7 @@ EXPORT_SYMBOL(md_reload_sb); * at boot time. */ +static DEFINE_MUTEX(detected_devices_mutex); static LIST_HEAD(all_detected_devices); struct detected_devices_node { struct list_head list; @@ -8816,7 +8846,9 @@ void md_autodetect_dev(dev_t dev) node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); if (node_detected_dev) { node_detected_dev->dev = dev; + mutex_lock(&detected_devices_mutex); list_add_tail(&node_detected_dev->list, &all_detected_devices); + mutex_unlock(&detected_devices_mutex); } else { printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); @@ -8835,6 +8867,7 @@ static void autostart_arrays(int part) printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); + mutex_lock(&detected_devices_mutex); while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { i_scanned++; node_detected_dev = list_entry(all_detected_devices.next, @@ -8853,6 +8886,7 @@ static void autostart_arrays(int part) list_add(&rdev->same_set, &pending_raid_disks); i_passed++; } + mutex_unlock(&detected_devices_mutex); printk(KERN_INFO "md: Scanned %d and added %d devices.\n", i_scanned, i_passed); diff --git a/drivers/md/md.h b/drivers/md/md.h index b4f335245bd6..20c667579ede 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -99,7 +99,7 @@ struct md_rdev { atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ - struct timespec last_read_error; /* monotonic time since our + time64_t last_read_error; /* monotonic time since our * last read error */ atomic_t corrected_errors; /* number of corrected read errors, @@ -163,6 +163,11 @@ enum flag_bits { * than other devices in the array */ ClusterRemove, + RemoveSynchronized, /* synchronize_rcu() was called after + * this device was known to be faulty, + * so it is safe to remove without + * another synchronize_rcu() call. 
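In miniature, the contract the new flag encodes: once an object is marked dead and one shared grace period has elapsed, its reference count can never rise again, so each individual removal may skip its private synchronize_rcu(). The sketch below uses hypothetical demo_* names rather than md's rdev machinery, and demo_free() stands in for md's separately deferred teardown.

/* Readers pin an object only while it is not marked dead:
 *	rcu_read_lock();
 *	o = rcu_dereference(slot);
 *	if (o && !test_bit(OBJ_DEAD, &o->flags))
 *		atomic_inc(&o->refs);
 *	rcu_read_unlock();
 */
#define OBJ_DEAD 0

struct demo_obj {
	unsigned long flags;
	atomic_t refs;			/* analogue of rdev->nr_pending */
};

static struct demo_obj __rcu *slot;	/* analogue of conf->mirrors[i].rdev */
static void demo_free(struct demo_obj *o);

static int demo_remove(struct demo_obj *o, bool batch_synchronized)
{
	rcu_assign_pointer(slot, NULL);

	if (!batch_synchronized) {
		synchronize_rcu();	/* wait out readers of the old pointer */
		if (atomic_read(&o->refs)) {
			/* a reader took a reference: lost the race, retry */
			rcu_assign_pointer(slot, o);
			return -EBUSY;
		}
	}
	/* With OBJ_DEAD set and one grace period already spent in the
	 * caller, refs cannot rise, so no second grace period is needed. */
	demo_free(o);
	return 0;
}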
+ */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, @@ -204,6 +209,9 @@ struct mddev { #define MD_RELOAD_SB 7 /* Reload the superblock because another node * updated it. */ +#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node + * already took resync lock, need to + * release the lock */ int suspended; atomic_t active_io; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 72ea98e89e57..4974682842ae 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -43,7 +43,8 @@ static int multipath_map (struct mpconf *conf) rcu_read_lock(); for (i = 0; i < disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && test_bit(In_sync, &rdev->flags)) { + if (rdev && test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return i; @@ -141,17 +142,19 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) return; } -static void multipath_status (struct seq_file *seq, struct mddev *mddev) +static void multipath_status(struct seq_file *seq, struct mddev *mddev) { struct mpconf *conf = mddev->private; int i; seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); - for (i = 0; i < conf->raid_disks; i++) - seq_printf (seq, "%s", - conf->multipaths[i].rdev && - test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_"); + rcu_read_lock(); + for (i = 0; i < conf->raid_disks; i++) { + struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); + seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); + } + rcu_read_unlock(); seq_printf (seq, "]"); } @@ -295,12 +298,14 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev) goto abort; } p->rdev = NULL; - synchronize_rcu(); - if (atomic_read(&rdev->nr_pending)) { - /* lost the race, try later */ - err = -EBUSY; - p->rdev = rdev; - goto abort; + if (!test_bit(RemoveSynchronized, &rdev->flags)) { + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + goto abort; + } } err = md_integrity_register(mddev); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4e6da4497553..46168ef2e279 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -319,14 +319,13 @@ static void raid1_end_read_request(struct bio *bio) { int uptodate = !bio->bi_error; struct r1bio *r1_bio = bio->bi_private; - int mirror; struct r1conf *conf = r1_bio->mddev->private; + struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; - mirror = r1_bio->read_disk; /* * this branch is our 'one mirror IO has finished' event handler: */ - update_head_pos(mirror, r1_bio); + update_head_pos(r1_bio->read_disk, r1_bio); if (uptodate) set_bit(R1BIO_Uptodate, &r1_bio->state); @@ -339,14 +338,14 @@ static void raid1_end_read_request(struct bio *bio) spin_lock_irqsave(&conf->device_lock, flags); if (r1_bio->mddev->degraded == conf->raid_disks || (r1_bio->mddev->degraded == conf->raid_disks-1 && - test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) + test_bit(In_sync, &rdev->flags))) uptodate = 1; spin_unlock_irqrestore(&conf->device_lock, flags); } if (uptodate) { raid_end_bio_io(r1_bio); - rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); } else { /* * oops, read error: @@ -356,7 +355,7 @@ static void raid1_end_read_request(struct bio *bio) KERN_ERR "md/raid1:%s: %s: " "rescheduling 
sector %llu\n", mdname(conf->mddev), - bdevname(conf->mirrors[mirror].rdev->bdev, + bdevname(rdev->bdev, b), (unsigned long long)r1_bio->sector); set_bit(R1BIO_ReadError, &r1_bio->state); @@ -403,20 +402,18 @@ static void r1_bio_write_done(struct r1bio *r1_bio) static void raid1_end_write_request(struct bio *bio) { struct r1bio *r1_bio = bio->bi_private; - int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); + int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); struct r1conf *conf = r1_bio->mddev->private; struct bio *to_put = NULL; - - mirror = find_bio_disk(r1_bio, bio); + int mirror = find_bio_disk(r1_bio, bio); + struct md_rdev *rdev = conf->mirrors[mirror].rdev; /* * 'one mirror IO has finished' event handler: */ if (bio->bi_error) { - set_bit(WriteErrorSeen, - &conf->mirrors[mirror].rdev->flags); - if (!test_and_set_bit(WantReplacement, - &conf->mirrors[mirror].rdev->flags)) + set_bit(WriteErrorSeen, &rdev->flags); + if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & conf->mddev->recovery); @@ -445,13 +442,12 @@ static void raid1_end_write_request(struct bio *bio) * before rdev->recovery_offset, but for simplicity we don't * check this here. */ - if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) && - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)) + if (test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) set_bit(R1BIO_Uptodate, &r1_bio->state); /* Maybe we can clear some bad blocks. */ - if (is_badblock(conf->mirrors[mirror].rdev, - r1_bio->sector, r1_bio->sectors, + if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, &first_bad, &bad_sectors)) { r1_bio->bios[mirror] = IO_MADE_GOOD; set_bit(R1BIO_MadeGood, &r1_bio->state); @@ -459,7 +455,7 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) + if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); /* @@ -483,8 +479,7 @@ static void raid1_end_write_request(struct bio *bio) } } if (r1_bio->bios[mirror] == NULL) - rdev_dec_pending(conf->mirrors[mirror].rdev, - conf->mddev); + rdev_dec_pending(rdev, conf->mddev); /* * Let's see if all mirrored write operations have finished @@ -689,13 +684,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if (!rdev) goto retry; atomic_inc(&rdev->nr_pending); - if (test_bit(Faulty, &rdev->flags)) { - /* cannot risk returning a device that failed - * before we inc'ed nr_pending - */ - rdev_dec_pending(rdev, conf->mddev); - goto retry; - } sectors = best_good_sectors; if (conf->mirrors[best_disk].next_seq_sect != this_sector) @@ -1666,13 +1654,16 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) goto abort; } p->rdev = NULL; - synchronize_rcu(); - if (atomic_read(&rdev->nr_pending)) { - /* lost the race, try later */ - err = -EBUSY; - p->rdev = rdev; - goto abort; - } else if (conf->mirrors[conf->raid_disks + number].rdev) { + if (!test_bit(RemoveSynchronized, &rdev->flags)) { + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + p->rdev = rdev; + goto abort; + } + } + if (conf->mirrors[conf->raid_disks + number].rdev) { /* We just removed a device that is being replaced. * Move down the replacement. We drain all IO before * doing this to avoid confusion. 
@@ -1719,11 +1710,9 @@ static void end_sync_write(struct bio *bio) struct r1bio *r1_bio = bio->bi_private; struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; - int mirror=0; sector_t first_bad; int bad_sectors; - - mirror = find_bio_disk(r1_bio, bio); + struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { sector_t sync_blocks = 0; @@ -1736,16 +1725,12 @@ static void end_sync_write(struct bio *bio) s += sync_blocks; sectors_to_go -= sync_blocks; } while (sectors_to_go > 0); - set_bit(WriteErrorSeen, - &conf->mirrors[mirror].rdev->flags); - if (!test_and_set_bit(WantReplacement, - &conf->mirrors[mirror].rdev->flags)) + set_bit(WriteErrorSeen, &rdev->flags); + if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & mddev->recovery); set_bit(R1BIO_WriteError, &r1_bio->state); - } else if (is_badblock(conf->mirrors[mirror].rdev, - r1_bio->sector, - r1_bio->sectors, + } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, &first_bad, &bad_sectors) && !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, r1_bio->sector, @@ -2072,29 +2057,30 @@ static void fix_read_error(struct r1conf *conf, int read_disk, s = PAGE_SIZE >> 9; do { - /* Note: no rcu protection needed here - * as this is synchronous in the raid1d thread - * which is the thread that might remove - * a device. If raid1d ever becomes multi-threaded.... - */ sector_t first_bad; int bad_sectors; - rdev = conf->mirrors[d].rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && (test_bit(In_sync, &rdev->flags) || (!test_bit(Faulty, &rdev->flags) && rdev->recovery_offset >= sect + s)) && is_badblock(rdev, sect, s, - &first_bad, &bad_sectors) == 0 && - sync_page_io(rdev, sect, s<<9, + &first_bad, &bad_sectors) == 0) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + if (sync_page_io(rdev, sect, s<<9, conf->tmppage, REQ_OP_READ, 0, false)) - success = 1; - else { - d++; - if (d == conf->raid_disks * 2) - d = 0; - } + success = 1; + rdev_dec_pending(rdev, mddev); + if (success) + break; + } else + rcu_read_unlock(); + d++; + if (d == conf->raid_disks * 2) + d = 0; } while (!success && d != read_disk); if (!success) { @@ -2110,11 +2096,17 @@ static void fix_read_error(struct r1conf *conf, int read_disk, if (d==0) d = conf->raid_disks * 2; d--; - rdev = conf->mirrors[d].rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && - !test_bit(Faulty, &rdev->flags)) + !test_bit(Faulty, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); r1_sync_page_io(rdev, sect, s, conf->tmppage, WRITE); + rdev_dec_pending(rdev, mddev); + } else + rcu_read_unlock(); } d = start; while (d != read_disk) { @@ -2122,9 +2114,12 @@ static void fix_read_error(struct r1conf *conf, int read_disk, if (d==0) d = conf->raid_disks * 2; d--; - rdev = conf->mirrors[d].rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); if (r1_sync_page_io(rdev, sect, s, conf->tmppage, READ)) { atomic_add(s, &rdev->corrected_errors); @@ -2133,10 +2128,12 @@ static void fix_read_error(struct r1conf *conf, int read_disk, "(%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)(sect + - rdev->data_offset), + rdev->data_offset), bdevname(rdev->bdev, b)); } - } + rdev_dec_pending(rdev, mddev); + } else + rcu_read_unlock(); } sectors -= s; sect += s; @@ -2534,6 +2531,13 @@ static sector_t 
raid1_sync_request(struct mddev *mddev, sector_t sector_nr, return sync_blocks; } + /* + * If there is non-resync activity waiting for a turn, then let it + * through before starting on this new sync request. + */ + if (conf->nr_waiting) + schedule_timeout_uninterruptible(1); + /* we are incrementing sector_nr below. To be safe, we check against * sector_nr + two times RESYNC_SECTORS */ diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 26ae74fd0d01..ed29fc899f06 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -707,7 +707,6 @@ static struct md_rdev *read_balance(struct r10conf *conf, raid10_find_phys(conf, r10_bio); rcu_read_lock(); -retry: sectors = r10_bio->sectors; best_slot = -1; best_rdev = NULL; @@ -804,13 +803,6 @@ retry: if (slot >= 0) { atomic_inc(&rdev->nr_pending); - if (test_bit(Faulty, &rdev->flags)) { - /* Cannot risk returning a device that failed - * before we inc'ed nr_pending - */ - rdev_dec_pending(rdev, conf->mddev); - goto retry; - } r10_bio->read_slot = slot; } else rdev = NULL; @@ -913,7 +905,7 @@ static void raise_barrier(struct r10conf *conf, int force) /* Now wait for all pending IO to complete */ wait_event_lock_irq(conf->wait_barrier, - !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH, conf->resync_lock); spin_unlock_irq(&conf->resync_lock); @@ -944,23 +936,23 @@ static void wait_barrier(struct r10conf *conf) */ wait_event_lock_irq(conf->wait_barrier, !conf->barrier || - (conf->nr_pending && + (atomic_read(&conf->nr_pending) && current->bio_list && !bio_list_empty(current->bio_list)), conf->resync_lock); conf->nr_waiting--; + if (!conf->nr_waiting) + wake_up(&conf->wait_barrier); } - conf->nr_pending++; + atomic_inc(&conf->nr_pending); spin_unlock_irq(&conf->resync_lock); } static void allow_barrier(struct r10conf *conf) { - unsigned long flags; - spin_lock_irqsave(&conf->resync_lock, flags); - conf->nr_pending--; - spin_unlock_irqrestore(&conf->resync_lock, flags); - wake_up(&conf->wait_barrier); + if ((atomic_dec_and_test(&conf->nr_pending)) || + (conf->array_freeze_pending)) + wake_up(&conf->wait_barrier); } static void freeze_array(struct r10conf *conf, int extra) @@ -978,13 +970,15 @@ static void freeze_array(struct r10conf *conf, int extra) * we continue. */ spin_lock_irq(&conf->resync_lock); + conf->array_freeze_pending++; conf->barrier++; conf->nr_waiting++; wait_event_lock_irq_cmd(conf->wait_barrier, - conf->nr_pending == conf->nr_queued+extra, + atomic_read(&conf->nr_pending) == conf->nr_queued+extra, conf->resync_lock, flush_pending_writes(conf)); + conf->array_freeze_pending--; spin_unlock_irq(&conf->resync_lock); } @@ -1499,10 +1493,12 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev) } seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, conf->geo.raid_disks - mddev->degraded); - for (i = 0; i < conf->geo.raid_disks; i++) - seq_printf(seq, "%s", - conf->mirrors[i].rdev && - test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); + rcu_read_lock(); + for (i = 0; i < conf->geo.raid_disks; i++) { + struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); + seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ?
"U" : "_"); + } + rcu_read_unlock(); seq_printf(seq, "]"); } @@ -1600,7 +1596,7 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) static void print_conf(struct r10conf *conf) { int i; - struct raid10_info *tmp; + struct md_rdev *rdev; printk(KERN_DEBUG "RAID10 conf printout:\n"); if (!conf) { @@ -1610,14 +1606,16 @@ static void print_conf(struct r10conf *conf) printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, conf->geo.raid_disks); + /* This is only called with ->reconfix_mutex held, so + * rcu protection of rdev is not needed */ for (i = 0; i < conf->geo.raid_disks; i++) { char b[BDEVNAME_SIZE]; - tmp = conf->mirrors + i; - if (tmp->rdev) + rdev = conf->mirrors[i].rdev; + if (rdev) printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", - i, !test_bit(In_sync, &tmp->rdev->flags), - !test_bit(Faulty, &tmp->rdev->flags), - bdevname(tmp->rdev->bdev,b)); + i, !test_bit(In_sync, &rdev->flags), + !test_bit(Faulty, &rdev->flags), + bdevname(rdev->bdev,b)); } } @@ -1766,7 +1764,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) err = -EBUSY; goto abort; } - /* Only remove faulty devices if recovery + /* Only remove non-faulty devices if recovery * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && @@ -1778,13 +1776,16 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) goto abort; } *rdevp = NULL; - synchronize_rcu(); - if (atomic_read(&rdev->nr_pending)) { - /* lost the race, try later */ - err = -EBUSY; - *rdevp = rdev; - goto abort; - } else if (p->replacement) { + if (!test_bit(RemoveSynchronized, &rdev->flags)) { + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + *rdevp = rdev; + goto abort; + } + } + if (p->replacement) { /* We must have just cleared 'rdev' */ p->rdev = p->replacement; clear_bit(Replacement, &p->replacement->flags); @@ -2171,21 +2172,20 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) */ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) { - struct timespec cur_time_mon; + long cur_time_mon; unsigned long hours_since_last; unsigned int read_errors = atomic_read(&rdev->read_errors); - ktime_get_ts(&cur_time_mon); + cur_time_mon = ktime_get_seconds(); - if (rdev->last_read_error.tv_sec == 0 && - rdev->last_read_error.tv_nsec == 0) { + if (rdev->last_read_error == 0) { /* first time we've seen a read error */ rdev->last_read_error = cur_time_mon; return; } - hours_since_last = (cur_time_mon.tv_sec - - rdev->last_read_error.tv_sec) / 3600; + hours_since_last = (long)(cur_time_mon - + rdev->last_read_error) / 3600; rdev->last_read_error = cur_time_mon; @@ -2264,7 +2264,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 printk(KERN_NOTICE "md/raid10:%s: %s: Failing raid device\n", mdname(mddev), b); - md_error(mddev, conf->mirrors[d].rdev); + md_error(mddev, rdev); r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; return; } @@ -2287,6 +2287,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, &first_bad, &bad_sectors) == 0) { atomic_inc(&rdev->nr_pending); @@ -2340,6 +2341,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 d = r10_bio->devs[sl].devnum; rdev = 
rcu_dereference(conf->mirrors[d].rdev); if (!rdev || + test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) continue; @@ -2379,6 +2381,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 d = r10_bio->devs[sl].devnum; rdev = rcu_dereference(conf->mirrors[d].rdev); if (!rdev || + test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) continue; @@ -2876,11 +2879,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, /* Completed a full sync so the replacements * are now fully recovered. */ - for (i = 0; i < conf->geo.raid_disks; i++) - if (conf->mirrors[i].replacement) - conf->mirrors[i].replacement - ->recovery_offset - = MaxSector; + rcu_read_lock(); + for (i = 0; i < conf->geo.raid_disks; i++) { + struct md_rdev *rdev = + rcu_dereference(conf->mirrors[i].replacement); + if (rdev) + rdev->recovery_offset = MaxSector; + } + rcu_read_unlock(); } conf->fullsync = 0; } @@ -2911,6 +2917,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, max_sector > (sector_nr | chunk_mask)) max_sector = (sector_nr | chunk_mask) + 1; + /* + * If there is non-resync activity waiting for a turn, then let it + * through before starting on this new sync request. + */ + if (conf->nr_waiting) + schedule_timeout_uninterruptible(1); + /* Again, very different code for resync and recovery. * Both must result in an r10bio with a list of bios that * have bi_end_io, bi_sector, bi_bdev set, @@ -2939,14 +2952,20 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, int must_sync; int any_working; struct raid10_info *mirror = &conf->mirrors[i]; + struct md_rdev *mrdev, *mreplace; - if ((mirror->rdev == NULL || - test_bit(In_sync, &mirror->rdev->flags)) - && - (mirror->replacement == NULL || - test_bit(Faulty, - &mirror->replacement->flags))) + rcu_read_lock(); + mrdev = rcu_dereference(mirror->rdev); + mreplace = rcu_dereference(mirror->replacement); + + if ((mrdev == NULL || + test_bit(Faulty, &mrdev->flags) || + test_bit(In_sync, &mrdev->flags)) && + (mreplace == NULL || + test_bit(Faulty, &mreplace->flags))) { + rcu_read_unlock(); continue; + } still_degraded = 0; /* want to reconstruct this device */ @@ -2956,8 +2975,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, /* last stripe is not complete - don't * try to recover this sector.
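The locking idiom repeated throughout these raid1/raid10 hunks, distilled: dereference the mirror under rcu_read_lock(), pin the device by bumping nr_pending so the remove path backs off, and only then drop the RCU lock so the subsequent I/O may sleep. demo_slot and the function below are hypothetical; the md calls are the ones used in the hunks above.

struct demo_slot {
	struct md_rdev __rcu *rdev;	/* stand-in for conf->mirrors[d] */
};

static bool demo_try_page_io(struct demo_slot *slot, struct mddev *mddev,
			     sector_t sect, int size, struct page *page)
{
	struct md_rdev *rdev;
	bool ok;

	rcu_read_lock();
	rdev = rcu_dereference(slot->rdev);
	if (!rdev || test_bit(Faulty, &rdev->flags)) {
		rcu_read_unlock();
		return false;
	}
	atomic_inc(&rdev->nr_pending);	/* pinned: the remove path sees this */
	rcu_read_unlock();		/* safe to sleep from here on */

	ok = sync_page_io(rdev, sect, size, page, REQ_OP_READ, 0, false);
	rdev_dec_pending(rdev, mddev);
	return ok;
}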
*/ + rcu_read_unlock(); continue; } + if (mreplace && test_bit(Faulty, &mreplace->flags)) + mreplace = NULL; /* Unless we are doing a full sync, or a replacement * we only need to recover the block if it is set in * the bitmap @@ -2967,14 +2989,19 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (sync_blocks < max_sync) max_sync = sync_blocks; if (!must_sync && - mirror->replacement == NULL && + mreplace == NULL && !conf->fullsync) { /* yep, skip the sync_blocks here, but don't assume * that there will never be anything to do here */ chunks_skipped = -1; + rcu_read_unlock(); continue; } + atomic_inc(&mrdev->nr_pending); + if (mreplace) + atomic_inc(&mreplace->nr_pending); + rcu_read_unlock(); r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); r10_bio->state = 0; @@ -2993,12 +3020,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, /* Need to check if the array will still be * degraded */ - for (j = 0; j < conf->geo.raid_disks; j++) - if (conf->mirrors[j].rdev == NULL || - test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { + rcu_read_lock(); + for (j = 0; j < conf->geo.raid_disks; j++) { + struct md_rdev *rdev = rcu_dereference( + conf->mirrors[j].rdev); + if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { still_degraded = 1; break; } + } must_sync = bitmap_start_sync(mddev->bitmap, sect, &sync_blocks, still_degraded); @@ -3008,15 +3038,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, int k; int d = r10_bio->devs[j].devnum; sector_t from_addr, to_addr; - struct md_rdev *rdev; + struct md_rdev *rdev = + rcu_dereference(conf->mirrors[d].rdev); sector_t sector, first_bad; int bad_sectors; - if (!conf->mirrors[d].rdev || - !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) + if (!rdev || + !test_bit(In_sync, &rdev->flags)) continue; /* This is where we read from */ any_working = 1; - rdev = conf->mirrors[d].rdev; sector = r10_bio->devs[j].addr; if (is_badblock(rdev, sector, max_sync, @@ -3055,8 +3085,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->devs[1].devnum = i; r10_bio->devs[1].addr = to_addr; - rdev = mirror->rdev; - if (!test_bit(In_sync, &rdev->flags)) { + if (!test_bit(In_sync, &mrdev->flags)) { bio = r10_bio->devs[1].bio; bio_reset(bio); bio->bi_next = biolist; @@ -3065,8 +3094,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_end_io = end_sync_write; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr - + rdev->data_offset; - bio->bi_bdev = rdev->bdev; + + mrdev->data_offset; + bio->bi_bdev = mrdev->bdev; atomic_inc(&r10_bio->remaining); } else r10_bio->devs[1].bio->bi_end_io = NULL; @@ -3075,8 +3104,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r10_bio->devs[1].repl_bio; if (bio) bio->bi_end_io = NULL; - rdev = mirror->replacement; - /* Note: if rdev != NULL, then bio + /* Note: if mreplace != NULL, then bio * cannot be NULL as r10buf_pool_alloc will * have allocated it. * So the second test here is pointless. @@ -3084,8 +3112,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, * this comment keeps human reviewers * happy. 
*/ - if (rdev == NULL || bio == NULL || - test_bit(Faulty, &rdev->flags)) + if (mreplace == NULL || bio == NULL || + test_bit(Faulty, &mreplace->flags)) break; bio_reset(bio); bio->bi_next = biolist; @@ -3094,11 +3122,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_end_io = end_sync_write; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_iter.bi_sector = to_addr + - rdev->data_offset; - bio->bi_bdev = rdev->bdev; + mreplace->data_offset; + bio->bi_bdev = mreplace->bdev; atomic_inc(&r10_bio->remaining); break; } + rcu_read_unlock(); if (j == conf->copies) { /* Cannot recover, so abort the recovery or * record a bad block */ @@ -3111,15 +3140,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (r10_bio->devs[k].devnum == i) break; if (!test_bit(In_sync, - &mirror->rdev->flags) + &mrdev->flags) && !rdev_set_badblocks( - mirror->rdev, + mrdev, r10_bio->devs[k].addr, max_sync, 0)) any_working = 0; - if (mirror->replacement && + if (mreplace && !rdev_set_badblocks( - mirror->replacement, + mreplace, r10_bio->devs[k].addr, max_sync, 0)) any_working = 0; @@ -3137,8 +3166,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (rb2) atomic_dec(&rb2->remaining); r10_bio = rb2; + rdev_dec_pending(mrdev, mddev); + if (mreplace) + rdev_dec_pending(mreplace, mddev); break; } + rdev_dec_pending(mrdev, mddev); + if (mreplace) + rdev_dec_pending(mreplace, mddev); } if (biolist == NULL) { while (r10_bio) { @@ -3183,6 +3218,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, int d = r10_bio->devs[i].devnum; sector_t first_bad, sector; int bad_sectors; + struct md_rdev *rdev; if (r10_bio->devs[i].repl_bio) r10_bio->devs[i].repl_bio->bi_end_io = NULL; @@ -3190,12 +3226,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r10_bio->devs[i].bio; bio_reset(bio); bio->bi_error = -EIO; - if (conf->mirrors[d].rdev == NULL || - test_bit(Faulty, &conf->mirrors[d].rdev->flags)) + rcu_read_lock(); + rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { + rcu_read_unlock(); continue; + } sector = r10_bio->devs[i].addr; - if (is_badblock(conf->mirrors[d].rdev, - sector, max_sync, + if (is_badblock(rdev, sector, max_sync, &first_bad, &bad_sectors)) { if (first_bad > sector) max_sync = first_bad - sector; @@ -3203,25 +3241,28 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bad_sectors -= (sector - first_bad); if (max_sync > bad_sectors) max_sync = bad_sectors; + rcu_read_unlock(); continue; } } - atomic_inc(&conf->mirrors[d].rdev->nr_pending); + atomic_inc(&rdev->nr_pending); atomic_inc(&r10_bio->remaining); bio->bi_next = biolist; biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio_set_op_attrs(bio, REQ_OP_READ, 0); - bio->bi_iter.bi_sector = sector + - conf->mirrors[d].rdev->data_offset; - bio->bi_bdev = conf->mirrors[d].rdev->bdev; + bio->bi_iter.bi_sector = sector + rdev->data_offset; + bio->bi_bdev = rdev->bdev; count++; - if (conf->mirrors[d].replacement == NULL || - test_bit(Faulty, - &conf->mirrors[d].replacement->flags)) + rdev = rcu_dereference(conf->mirrors[d].replacement); + if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { + rcu_read_unlock(); continue; + } + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); /* Need to set up for writing to the replacement */ bio = r10_bio->devs[i].repl_bio; @@ -3229,15 +3270,13 @@ static sector_t 
raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_error = -EIO; sector = r10_bio->devs[i].addr; - atomic_inc(&conf->mirrors[d].rdev->nr_pending); bio->bi_next = biolist; biolist = bio; bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - bio->bi_iter.bi_sector = sector + - conf->mirrors[d].replacement->data_offset; - bio->bi_bdev = conf->mirrors[d].replacement->bdev; + bio->bi_iter.bi_sector = sector + rdev->data_offset; + bio->bi_bdev = rdev->bdev; count++; } @@ -3504,6 +3543,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) spin_lock_init(&conf->resync_lock); init_waitqueue_head(&conf->wait_barrier); + atomic_set(&conf->nr_pending, 0); conf->thread = md_register_thread(raid10d, mddev, "raid10"); if (!conf->thread) @@ -4333,15 +4373,16 @@ read_more: blist = read_bio; read_bio->bi_next = NULL; + rcu_read_lock(); for (s = 0; s < conf->copies*2; s++) { struct bio *b; int d = r10_bio->devs[s/2].devnum; struct md_rdev *rdev2; if (s&1) { - rdev2 = conf->mirrors[d].replacement; + rdev2 = rcu_dereference(conf->mirrors[d].replacement); b = r10_bio->devs[s/2].repl_bio; } else { - rdev2 = conf->mirrors[d].rdev; + rdev2 = rcu_dereference(conf->mirrors[d].rdev); b = r10_bio->devs[s/2].bio; } if (!rdev2 || test_bit(Faulty, &rdev2->flags)) @@ -4386,6 +4427,7 @@ read_more: nr_sectors += len >> 9; } bio_full: + rcu_read_unlock(); r10_bio->sectors = nr_sectors; /* Now submit the read */ @@ -4437,16 +4479,20 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) struct bio *b; int d = r10_bio->devs[s/2].devnum; struct md_rdev *rdev; + rcu_read_lock(); if (s&1) { - rdev = conf->mirrors[d].replacement; + rdev = rcu_dereference(conf->mirrors[d].replacement); b = r10_bio->devs[s/2].repl_bio; } else { - rdev = conf->mirrors[d].rdev; + rdev = rcu_dereference(conf->mirrors[d].rdev); b = r10_bio->devs[s/2].bio; } - if (!rdev || test_bit(Faulty, &rdev->flags)) + if (!rdev || test_bit(Faulty, &rdev->flags)) { + rcu_read_unlock(); continue; + } atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); md_sync_acct(b->bi_bdev, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; @@ -4507,9 +4553,10 @@ static int handle_reshape_read_error(struct mddev *mddev, if (s > (PAGE_SIZE >> 9)) s = PAGE_SIZE >> 9; + rcu_read_lock(); while (!success) { int d = r10b->devs[slot].devnum; - struct md_rdev *rdev = conf->mirrors[d].rdev; + struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); sector_t addr; if (rdev == NULL || test_bit(Faulty, &rdev->flags) || @@ -4517,11 +4564,15 @@ static int handle_reshape_read_error(struct mddev *mddev, goto failed; addr = r10b->devs[slot].addr + idx * PAGE_SIZE; + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); success = sync_page_io(rdev, addr, s << 9, bvec[idx].bv_page, REQ_OP_READ, 0, false); + rdev_dec_pending(rdev, mddev); + rcu_read_lock(); if (success) break; failed: @@ -4531,6 +4582,7 @@ static int handle_reshape_read_error(struct mddev *mddev, if (slot == first_slot) break; } + rcu_read_unlock(); if (!success) { /* couldn't read this block, must give up */ set_bit(MD_RECOVERY_INTR, @@ -4600,16 +4652,18 @@ static void raid10_finish_reshape(struct mddev *mddev) } } else { int d; + rcu_read_lock(); for (d = conf->geo.raid_disks ; d < conf->geo.raid_disks - mddev->delta_disks; d++) { - struct md_rdev *rdev = conf->mirrors[d].rdev; + struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev) clear_bit(In_sync, &rdev->flags); - rdev = 
conf->mirrors[d].replacement; + rdev = rcu_dereference(conf->mirrors[d].replacement); if (rdev) clear_bit(In_sync, &rdev->flags); } + rcu_read_unlock(); } mddev->layout = mddev->new_layout; mddev->chunk_sectors = 1 << conf->geo.chunk_shift; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 6fc2c75759bf..18ec1f7a98bf 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -64,10 +64,11 @@ struct r10conf { int pending_count; spinlock_t resync_lock; - int nr_pending; + atomic_t nr_pending; int nr_waiting; int nr_queued; int barrier; + int array_freeze_pending; sector_t next_resync; int fullsync; /* set to 1 if a full sync is needed, * (fresh device added). diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6953d78297b0..d189e894b921 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3080,7 +3080,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct md_rdev *rdev; rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && test_bit(In_sync, &rdev->flags)) + if (rdev && test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags)) atomic_inc(&rdev->nr_pending); else rdev = NULL; @@ -3210,15 +3211,16 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, /* During recovery devices cannot be removed, so * locking and refcounting of rdevs is not needed */ + rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { - struct md_rdev *rdev = conf->disks[i].rdev; + struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && !rdev_set_badblocks(rdev, sh->sector, STRIPE_SECTORS, 0)) abort = 1; - rdev = conf->disks[i].replacement; + rdev = rcu_dereference(conf->disks[i].replacement); if (rdev && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) @@ -3226,6 +3228,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, STRIPE_SECTORS, 0)) abort = 1; } + rcu_read_unlock(); if (abort) conf->recovery_disabled = conf->mddev->recovery_disabled; @@ -3237,15 +3240,16 @@ static int want_replace(struct stripe_head *sh, int disk_idx) { struct md_rdev *rdev; int rv = 0; - /* Doing recovery so rcu locking not required */ - rdev = sh->raid_conf->disks[disk_idx].replacement; + + rcu_read_lock(); + rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); if (rdev && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && (rdev->recovery_offset <= sh->sector || rdev->mddev->recovery_cp <= sh->sector)) rv = 1; - + rcu_read_unlock(); return rv; } @@ -3600,7 +3604,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, pr_debug("for sector %llu, rmw=%d rcw=%d\n", (unsigned long long)sh->sector, rmw, rcw); set_bit(STRIPE_HANDLE, &sh->state); - if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) { + if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { /* prefer read-modify-write, but need to get some data */ if (conf->mddev->queue) blk_add_trace_msg(conf->mddev->queue, @@ -3627,7 +3631,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, } } } - if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) { + if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { /* want reconstruct write, but need to get some data */ int qread =0; rcw = 0; @@ -7066,10 +7070,12 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, " level %d, %dk 
chunk, algorithm %d", mddev->level, conf->chunk_sectors / 2, mddev->layout); seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); - for (i = 0; i < conf->raid_disks; i++) - seq_printf (seq, "%s", - conf->disks[i].rdev && - test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); + rcu_read_lock(); + for (i = 0; i < conf->raid_disks; i++) { + struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); + seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); + } + rcu_read_unlock(); seq_printf (seq, "]"); } @@ -7191,12 +7197,15 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) goto abort; } *rdevp = NULL; - synchronize_rcu(); - if (atomic_read(&rdev->nr_pending)) { - /* lost the race, try later */ - err = -EBUSY; - *rdevp = rdev; - } else if (p->replacement) { + if (!test_bit(RemoveSynchronized, &rdev->flags)) { + synchronize_rcu(); + if (atomic_read(&rdev->nr_pending)) { + /* lost the race, try later */ + err = -EBUSY; + *rdevp = rdev; + } + } + if (p->replacement) { /* We must have just cleared 'rdev' */ p->rdev = p->replacement; clear_bit(Replacement, &p->replacement->flags); diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 4afc999c0780..6b01e126fe73 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -45,7 +45,7 @@ #include <media/v4l2-ioctl.h> #include <video/omapvrfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 49de1475e473..80c79fabdf95 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -12,7 +12,7 @@ #define OMAP_VOUTDEF_H #include <media/v4l2-ctrls.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #define YUYV_BPP 2 diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c index 80b0d88f125c..58a25fdf0cce 100644 --- a/drivers/media/platform/omap/omap_voutlib.c +++ b/drivers/media/platform/omap/omap_voutlib.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index bd4d68500085..370e16e07867 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -336,7 +336,7 @@ config IR_TTUSBIR config IR_RX51 tristate "Nokia N900 IR transmitter diode" - depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM + depends on OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS && LIRC ---help--- Say Y or M here if you want to enable support for the IR transmitter diode built in the Nokia N900 (RX51) device. diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c index 4e1711a40466..82fb6f2ca011 100644 --- a/drivers/media/rc/ir-rx51.c +++ b/drivers/media/rc/ir-rx51.c @@ -12,22 +12,17 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ - +#include <linux/clk.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/wait.h> - -#include <plat/dmtimer.h> -#include <plat/clock.h> +#include <linux/pwm.h> +#include <linux/of.h> +#include <linux/hrtimer.h> #include <media/lirc.h> #include <media/lirc_dev.h> @@ -41,100 +36,51 @@ #define WBUF_LEN 256 -#define TIMER_MAX_VALUE 0xffffffff - struct lirc_rx51 { - struct omap_dm_timer *pwm_timer; - struct omap_dm_timer *pulse_timer; + struct pwm_device *pwm; + struct hrtimer timer; struct device *dev; struct lirc_rx51_platform_data *pdata; wait_queue_head_t wqueue; - unsigned long fclk_khz; unsigned int freq; /* carrier frequency */ unsigned int duty_cycle; /* carrier duty cycle */ - unsigned int irq_num; - unsigned int match; int wbuf[WBUF_LEN]; int wbuf_index; unsigned long device_is_open; - int pwm_timer_num; }; -static void lirc_rx51_on(struct lirc_rx51 *lirc_rx51) +static inline void lirc_rx51_on(struct lirc_rx51 *lirc_rx51) { - omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1, - OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE); + pwm_enable(lirc_rx51->pwm); } -static void lirc_rx51_off(struct lirc_rx51 *lirc_rx51) +static inline void lirc_rx51_off(struct lirc_rx51 *lirc_rx51) { - omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1, - OMAP_TIMER_TRIGGER_NONE); + pwm_disable(lirc_rx51->pwm); } static int init_timing_params(struct lirc_rx51 *lirc_rx51) { - u32 load, match; + struct pwm_device *pwm = lirc_rx51->pwm; + int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, lirc_rx51->freq); - load = -(lirc_rx51->fclk_khz * 1000 / lirc_rx51->freq); - match = -(lirc_rx51->duty_cycle * -load / 100); - omap_dm_timer_set_load(lirc_rx51->pwm_timer, 1, load); - omap_dm_timer_set_match(lirc_rx51->pwm_timer, 1, match); - omap_dm_timer_write_counter(lirc_rx51->pwm_timer, TIMER_MAX_VALUE - 2); - omap_dm_timer_start(lirc_rx51->pwm_timer); - omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); - omap_dm_timer_start(lirc_rx51->pulse_timer); + duty = DIV_ROUND_CLOSEST(lirc_rx51->duty_cycle * period, 100); - lirc_rx51->match = 0; + pwm_config(pwm, duty, period); return 0; } -#define tics_after(a, b) ((long)(b) - (long)(a) < 0) - -static int pulse_timer_set_timeout(struct lirc_rx51 *lirc_rx51, int usec) +static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer) { - int counter; - - BUG_ON(usec < 0); - - if (lirc_rx51->match == 0) - counter = omap_dm_timer_read_counter(lirc_rx51->pulse_timer); - else - counter = lirc_rx51->match; - - counter += (u32)(lirc_rx51->fclk_khz * usec / (1000)); - omap_dm_timer_set_match(lirc_rx51->pulse_timer, 1, counter); - omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, - OMAP_TIMER_INT_MATCH); - if (tics_after(omap_dm_timer_read_counter(lirc_rx51->pulse_timer), - counter)) { - return 1; - } - return 0; -} - -static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr) -{ - unsigned int retval; - struct lirc_rx51 *lirc_rx51 = ptr; - - retval = omap_dm_timer_read_status(lirc_rx51->pulse_timer); - if (!retval) - return IRQ_NONE; + struct lirc_rx51 *lirc_rx51 = + container_of(timer, struct lirc_rx51, timer); + ktime_t now; - if (retval & ~OMAP_TIMER_INT_MATCH) - dev_err_ratelimited(lirc_rx51->dev, - ": Unexpected interrupt source: %x\n", retval); 
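init_timing_params() above collapses the old timer load/match register arithmetic into two divisions over nanoseconds. A worked instance of that math, assuming a 38 kHz carrier at 50% duty cycle (common IR values, not taken from this patch):

/* Example only; pwm comes from pwm_get() as in the converted driver. */
static int demo_config_carrier(struct pwm_device *pwm)
{
	unsigned int freq = 38000;	/* 38 kHz carrier */
	unsigned int duty_cycle = 50;	/* percent */
	int period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq);	/* 26316 ns */
	int duty = DIV_ROUND_CLOSEST(duty_cycle * period, 100);	/* 13158 ns */

	return pwm_config(pwm, duty, period);
}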
- - omap_dm_timer_write_status(lirc_rx51->pulse_timer, - OMAP_TIMER_INT_MATCH | - OMAP_TIMER_INT_OVERFLOW | - OMAP_TIMER_INT_CAPTURE); if (lirc_rx51->wbuf_index < 0) { dev_err_ratelimited(lirc_rx51->dev, - ": BUG wbuf_index has value of %i\n", + "BUG wbuf_index has value of %i\n", lirc_rx51->wbuf_index); goto end; } @@ -144,6 +90,8 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr) * pulses until we catch up. */ do { + u64 ns; + if (lirc_rx51->wbuf_index >= WBUF_LEN) goto end; if (lirc_rx51->wbuf[lirc_rx51->wbuf_index] == -1) @@ -154,84 +102,24 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr) else lirc_rx51_on(lirc_rx51); - retval = pulse_timer_set_timeout(lirc_rx51, - lirc_rx51->wbuf[lirc_rx51->wbuf_index]); + ns = 1000 * lirc_rx51->wbuf[lirc_rx51->wbuf_index]; + hrtimer_add_expires_ns(timer, ns); + lirc_rx51->wbuf_index++; - } while (retval); + now = timer->base->get_time(); + + } while (hrtimer_get_expires_tv64(timer) < now.tv64); - return IRQ_HANDLED; + return HRTIMER_RESTART; end: /* Stop TX here */ lirc_rx51_off(lirc_rx51); lirc_rx51->wbuf_index = -1; - omap_dm_timer_stop(lirc_rx51->pwm_timer); - omap_dm_timer_stop(lirc_rx51->pulse_timer); - omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); - wake_up_interruptible(&lirc_rx51->wqueue); - - return IRQ_HANDLED; -} - -static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51) -{ - struct clk *clk_fclk; - int retval, pwm_timer = lirc_rx51->pwm_timer_num; - - lirc_rx51->pwm_timer = omap_dm_timer_request_specific(pwm_timer); - if (lirc_rx51->pwm_timer == NULL) { - dev_err(lirc_rx51->dev, ": Error requesting GPT%d timer\n", - pwm_timer); - return -EBUSY; - } - - lirc_rx51->pulse_timer = omap_dm_timer_request(); - if (lirc_rx51->pulse_timer == NULL) { - dev_err(lirc_rx51->dev, ": Error requesting pulse timer\n"); - retval = -EBUSY; - goto err1; - } - - omap_dm_timer_set_source(lirc_rx51->pwm_timer, OMAP_TIMER_SRC_SYS_CLK); - omap_dm_timer_set_source(lirc_rx51->pulse_timer, - OMAP_TIMER_SRC_SYS_CLK); - - omap_dm_timer_enable(lirc_rx51->pwm_timer); - omap_dm_timer_enable(lirc_rx51->pulse_timer); - - lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer); - retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler, - IRQF_SHARED, "lirc_pulse_timer", lirc_rx51); - if (retval) { - dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n"); - goto err2; - } - - clk_fclk = omap_dm_timer_get_fclk(lirc_rx51->pwm_timer); - lirc_rx51->fclk_khz = clk_fclk->rate / 1000; - - return 0; -err2: - omap_dm_timer_free(lirc_rx51->pulse_timer); -err1: - omap_dm_timer_free(lirc_rx51->pwm_timer); - - return retval; -} - -static int lirc_rx51_free_port(struct lirc_rx51 *lirc_rx51) -{ - omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); - free_irq(lirc_rx51->irq_num, lirc_rx51); - lirc_rx51_off(lirc_rx51); - omap_dm_timer_disable(lirc_rx51->pwm_timer); - omap_dm_timer_disable(lirc_rx51->pulse_timer); - omap_dm_timer_free(lirc_rx51->pwm_timer); - omap_dm_timer_free(lirc_rx51->pulse_timer); - lirc_rx51->wbuf_index = -1; + wake_up_interruptible(&lirc_rx51->wqueue); - return 0; + return HRTIMER_NORESTART; } static ssize_t lirc_rx51_write(struct file *file, const char *buf, @@ -270,8 +158,9 @@ static ssize_t lirc_rx51_write(struct file *file, const char *buf, lirc_rx51_on(lirc_rx51); lirc_rx51->wbuf_index = 1; - pulse_timer_set_timeout(lirc_rx51, lirc_rx51->wbuf[0]); - + hrtimer_start(&lirc_rx51->timer, + ns_to_ktime(1000 * lirc_rx51->wbuf[0]), + HRTIMER_MODE_REL); /* * Don't return back to 
the userspace until the transfer has * finished @@ -371,14 +260,24 @@ static int lirc_rx51_open(struct inode *inode, struct file *file) if (test_and_set_bit(1, &lirc_rx51->device_is_open)) return -EBUSY; - return lirc_rx51_init_port(lirc_rx51); + lirc_rx51->pwm = pwm_get(lirc_rx51->dev, NULL); + if (IS_ERR(lirc_rx51->pwm)) { + int res = PTR_ERR(lirc_rx51->pwm); + + dev_err(lirc_rx51->dev, "pwm_get failed: %d\n", res); + return res; + } + + return 0; } static int lirc_rx51_release(struct inode *inode, struct file *file) { struct lirc_rx51 *lirc_rx51 = file->private_data; - lirc_rx51_free_port(lirc_rx51); + hrtimer_cancel(&lirc_rx51->timer); + lirc_rx51_off(lirc_rx51); + pwm_put(lirc_rx51->pwm); clear_bit(1, &lirc_rx51->device_is_open); @@ -386,7 +285,6 @@ static int lirc_rx51_release(struct inode *inode, struct file *file) } static struct lirc_rx51 lirc_rx51 = { - .freq = 38000, .duty_cycle = 50, .wbuf_index = -1, }; @@ -444,9 +342,32 @@ static int lirc_rx51_resume(struct platform_device *dev) static int lirc_rx51_probe(struct platform_device *dev) { + struct pwm_device *pwm; + lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES; lirc_rx51.pdata = dev->dev.platform_data; - lirc_rx51.pwm_timer_num = lirc_rx51.pdata->pwm_timer; + + if (!lirc_rx51.pdata) { + dev_err(&dev->dev, "Platform Data is missing\n"); + return -ENXIO; + } + + pwm = pwm_get(&dev->dev, NULL); + if (IS_ERR(pwm)) { + int err = PTR_ERR(pwm); + + if (err != -EPROBE_DEFER) + dev_err(&dev->dev, "pwm_get failed: %d\n", err); + return err; + } + + /* Use default, in case userspace does not set the carrier */ + lirc_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC); + pwm_put(pwm); + + hrtimer_init(&lirc_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + lirc_rx51.timer.function = lirc_rx51_timer_cb; + lirc_rx51.dev = &dev->dev; lirc_rx51_driver.dev = &dev->dev; lirc_rx51_driver.minor = lirc_register_driver(&lirc_rx51_driver); @@ -457,8 +378,6 @@ static int lirc_rx51_probe(struct platform_device *dev) lirc_rx51_driver.minor); return lirc_rx51_driver.minor; } - dev_info(lirc_rx51.dev, "registration ok, minor: %d, pwm: %d\n", - lirc_rx51_driver.minor, lirc_rx51.pwm_timer_num); return 0; } @@ -468,6 +387,14 @@ static int lirc_rx51_remove(struct platform_device *dev) return lirc_unregister_driver(lirc_rx51_driver.minor); } +static const struct of_device_id lirc_rx51_match[] = { + { + .compatible = "nokia,n900-ir", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, lirc_rx51_match); + struct platform_driver lirc_rx51_platform_driver = { .probe = lirc_rx51_probe, .remove = lirc_rx51_remove, @@ -475,7 +402,7 @@ struct platform_driver lirc_rx51_platform_driver = { .resume = lirc_rx51_resume, .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, + .of_match_table = of_match_ptr(lirc_rx51_match), }, }; module_platform_driver(lirc_rx51_platform_driver); diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 81ddb17575a9..4b4c0c3c3d2f 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -25,6 +25,17 @@ config ATMEL_SDRAMC Starting with the at91sam9g45, this controller supports SDR, DDR and LP-DDR memories. +config ATMEL_EBI + bool "Atmel EBI driver" + default y + depends on ARCH_AT91 && OF + select MFD_SYSCON + help + Driver for Atmel EBI controller. + Used to configure the EBI (external bus interface) when the device- + tree is used. This bus supports NANDs, external ethernet controller, + SRAMs, ATA devices, etc. 
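
Stepping back to the lirc-rx51 hrtimer rework above: the timer callback walks wbuf[] with even indices driving the carrier on (pulse) and odd indices driving it off (space), and it accumulates expiry times with hrtimer_add_expires_ns() so that latency on one entry is absorbed by the next rather than stretching the whole frame. A userspace walk of that schedule; the buffer contents are made up (NEC-style timings), and a real buffer is terminated by -1 just as here:

    #include <stdio.h>

    #define WBUF_LEN 256

    int main(void)
    {
            /* made-up pulse/space durations in microseconds, -1 terminates */
            int wbuf[WBUF_LEN] = { 9000, 4500, 560, 560, 560, 1690, -1 };
            long long expires_ns = 0;       /* absolute expiry, as the hrtimer tracks it */
            int i;

            for (i = 0; i < WBUF_LEN && wbuf[i] != -1; i++) {
                    expires_ns += 1000LL * wbuf[i]; /* hrtimer_add_expires_ns() analogue */
                    printf("%-5s %4d us, timer fires at +%8lld ns\n",
                           (i % 2) ? "space" : "pulse", wbuf[i], expires_ns);
            }
            return 0;
    }
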
+ config TI_AEMIF tristate "Texas Instruments AEMIF driver" depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF @@ -104,7 +115,7 @@ config FSL_CORENET_CF config FSL_IFC bool - depends on FSL_SOC + depends on FSL_SOC || ARCH_LAYERSCAPE config JZ4780_NEMC bool "Ingenic JZ4780 SoC NEMC driver" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index cb0b7a1df11a..b20ae38b5bfb 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_OF) += of_memory.o endif obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o +obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o obj-$(CONFIG_TI_AEMIF) += ti-aemif.o obj-$(CONFIG_TI_EMIF) += emif.o obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c new file mode 100644 index 000000000000..f87ad6f5d2dc --- /dev/null +++ b/drivers/memory/atmel-ebi.c @@ -0,0 +1,766 @@ +/* + * EBI driver for Atmel chips + * inspired by the fsl weim bus driver + * + * Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/atmel-matrix.h> +#include <linux/mfd/syscon/atmel-smc.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/regmap.h> + +struct at91sam9_smc_timings { + u32 ncs_rd_setup_ns; + u32 nrd_setup_ns; + u32 ncs_wr_setup_ns; + u32 nwe_setup_ns; + u32 ncs_rd_pulse_ns; + u32 nrd_pulse_ns; + u32 ncs_wr_pulse_ns; + u32 nwe_pulse_ns; + u32 nrd_cycle_ns; + u32 nwe_cycle_ns; + u32 tdf_ns; +}; + +struct at91sam9_smc_generic_fields { + struct regmap_field *setup; + struct regmap_field *pulse; + struct regmap_field *cycle; + struct regmap_field *mode; +}; + +struct at91sam9_ebi_dev_config { + struct at91sam9_smc_timings timings; + u32 mode; +}; + +struct at91_ebi_dev_config { + int cs; + union { + struct at91sam9_ebi_dev_config sam9; + }; +}; + +struct at91_ebi; + +struct at91_ebi_dev { + struct list_head node; + struct at91_ebi *ebi; + u32 mode; + int numcs; + struct at91_ebi_dev_config configs[]; +}; + +struct at91_ebi_caps { + unsigned int available_cs; + const struct reg_field *ebi_csa; + void (*get_config)(struct at91_ebi_dev *ebid, + struct at91_ebi_dev_config *conf); + int (*xlate_config)(struct at91_ebi_dev *ebid, + struct device_node *configs_np, + struct at91_ebi_dev_config *conf); + int (*apply_config)(struct at91_ebi_dev *ebid, + struct at91_ebi_dev_config *conf); + int (*init)(struct at91_ebi *ebi); +}; + +struct at91_ebi { + struct clk *clk; + struct regmap *smc; + struct regmap *matrix; + + struct regmap_field *ebi_csa; + + struct device *dev; + const struct at91_ebi_caps *caps; + struct list_head devs; + union { + struct at91sam9_smc_generic_fields sam9; + }; +}; + +static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid, + struct at91_ebi_dev_config *conf) +{ + struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9; + unsigned int clk_rate = clk_get_rate(ebid->ebi->clk); + struct at91sam9_ebi_dev_config *config = &conf->sam9; + struct at91sam9_smc_timings *timings = &config->timings; + unsigned int val; + + regmap_fields_read(fields->mode, conf->cs, &val); + config->mode = val & ~AT91_SMC_TDF; + + val = (val & AT91_SMC_TDF) >> 16; + timings->tdf_ns = clk_rate * val; + + 
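
The register unpacking that continues below follows one pattern throughout at91sam9_ebi_get_config(): each timing is an N-bit base field plus an extension field worth a power-of-two number of extra clock cycles (5 bits plus 128 for SETUP, 6 bits plus 256 for PULSE, 7 bits plus a 2-bit extension in units of 256 for CYCLE). A sketch of the SETUP case; smc_setup_field() is a hypothetical helper for illustration, not something the driver defines:

    #include <stdio.h>
    #include <stdint.h>

    /* hypothetical helper: 5-bit base field plus one extension bit worth 128 */
    static unsigned int smc_setup_field(uint32_t reg, int shift)
    {
            unsigned int cycles = (reg >> shift) & 0x1f;

            cycles += ((reg >> (shift + 5)) & 0x1) * 128;
            return cycles;
    }

    int main(void)
    {
            uint32_t setup = 0x21051012;    /* arbitrary register value */

            printf("ncs_rd_setup = %u cycles\n", smc_setup_field(setup, 24));
            printf("nrd_setup    = %u cycles\n", smc_setup_field(setup, 16));
            printf("ncs_wr_setup = %u cycles\n", smc_setup_field(setup, 8));
            printf("nwe_setup    = %u cycles\n", smc_setup_field(setup, 0));
            return 0;
    }
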
regmap_fields_read(fields->setup, conf->cs, &val); + timings->ncs_rd_setup_ns = (val >> 24) & 0x1f; + timings->ncs_rd_setup_ns += ((val >> 29) & 0x1) * 128; + timings->ncs_rd_setup_ns *= clk_rate; + timings->nrd_setup_ns = (val >> 16) & 0x1f; + timings->nrd_setup_ns += ((val >> 21) & 0x1) * 128; + timings->nrd_setup_ns *= clk_rate; + timings->ncs_wr_setup_ns = (val >> 8) & 0x1f; + timings->ncs_wr_setup_ns += ((val >> 13) & 0x1) * 128; + timings->ncs_wr_setup_ns *= clk_rate; + timings->nwe_setup_ns = val & 0x1f; + timings->nwe_setup_ns += ((val >> 5) & 0x1) * 128; + timings->nwe_setup_ns *= clk_rate; + + regmap_fields_read(fields->pulse, conf->cs, &val); + timings->ncs_rd_pulse_ns = (val >> 24) & 0x3f; + timings->ncs_rd_pulse_ns += ((val >> 30) & 0x1) * 256; + timings->ncs_rd_pulse_ns *= clk_rate; + timings->nrd_pulse_ns = (val >> 16) & 0x3f; + timings->nrd_pulse_ns += ((val >> 22) & 0x1) * 256; + timings->nrd_pulse_ns *= clk_rate; + timings->ncs_wr_pulse_ns = (val >> 8) & 0x3f; + timings->ncs_wr_pulse_ns += ((val >> 14) & 0x1) * 256; + timings->ncs_wr_pulse_ns *= clk_rate; + timings->nwe_pulse_ns = val & 0x3f; + timings->nwe_pulse_ns += ((val >> 6) & 0x1) * 256; + timings->nwe_pulse_ns *= clk_rate; + + regmap_fields_read(fields->cycle, conf->cs, &val); + timings->nrd_cycle_ns = (val >> 16) & 0x7f; + timings->nrd_cycle_ns += ((val >> 23) & 0x3) * 256; + timings->nrd_cycle_ns *= clk_rate; + timings->nwe_cycle_ns = val & 0x7f; + timings->nwe_cycle_ns += ((val >> 7) & 0x3) * 256; + timings->nwe_cycle_ns *= clk_rate; +} + +static int at91_xlate_timing(struct device_node *np, const char *prop, + u32 *val, bool *required) +{ + if (!of_property_read_u32(np, prop, val)) { + *required = true; + return 0; + } + + if (*required) + return -EINVAL; + + return 0; +} + +static int at91sam9_smc_xslate_timings(struct at91_ebi_dev *ebid, + struct device_node *np, + struct at91sam9_smc_timings *timings, + bool *required) +{ + int ret; + + ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-setup-ns", + &timings->ncs_rd_setup_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nrd-setup-ns", + &timings->nrd_setup_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-setup-ns", + &timings->ncs_wr_setup_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nwe-setup-ns", + &timings->nwe_setup_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-pulse-ns", + &timings->ncs_rd_pulse_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nrd-pulse-ns", + &timings->nrd_pulse_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-pulse-ns", + &timings->ncs_wr_pulse_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nwe-pulse-ns", + &timings->nwe_pulse_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nwe-cycle-ns", + &timings->nwe_cycle_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-nrd-cycle-ns", + &timings->nrd_cycle_ns, required); + if (ret) + goto out; + + ret = at91_xlate_timing(np, "atmel,smc-tdf-ns", + &timings->tdf_ns, required); + +out: + if (ret) + dev_err(ebid->ebi->dev, + "missing or invalid timings definition in %s", + np->full_name); + + return ret; +} + +static int at91sam9_ebi_xslate_config(struct at91_ebi_dev *ebid, + struct device_node *np, + struct at91_ebi_dev_config *conf) +{ + struct at91sam9_ebi_dev_config *config = 
&conf->sam9; + bool required = false; + const char *tmp_str; + u32 tmp; + int ret; + + ret = of_property_read_u32(np, "atmel,smc-bus-width", &tmp); + if (!ret) { + switch (tmp) { + case 8: + config->mode |= AT91_SMC_DBW_8; + break; + + case 16: + config->mode |= AT91_SMC_DBW_16; + break; + + case 32: + config->mode |= AT91_SMC_DBW_32; + break; + + default: + return -EINVAL; + } + + required = true; + } + + if (of_property_read_bool(np, "atmel,smc-tdf-optimized")) { + config->mode |= AT91_SMC_TDFMODE_OPTIMIZED; + required = true; + } + + tmp_str = NULL; + of_property_read_string(np, "atmel,smc-byte-access-type", &tmp_str); + if (tmp_str && !strcmp(tmp_str, "write")) { + config->mode |= AT91_SMC_BAT_WRITE; + required = true; + } + + tmp_str = NULL; + of_property_read_string(np, "atmel,smc-read-mode", &tmp_str); + if (tmp_str && !strcmp(tmp_str, "nrd")) { + config->mode |= AT91_SMC_READMODE_NRD; + required = true; + } + + tmp_str = NULL; + of_property_read_string(np, "atmel,smc-write-mode", &tmp_str); + if (tmp_str && !strcmp(tmp_str, "nwe")) { + config->mode |= AT91_SMC_WRITEMODE_NWE; + required = true; + } + + tmp_str = NULL; + of_property_read_string(np, "atmel,smc-exnw-mode", &tmp_str); + if (tmp_str) { + if (!strcmp(tmp_str, "frozen")) + config->mode |= AT91_SMC_EXNWMODE_FROZEN; + else if (!strcmp(tmp_str, "ready")) + config->mode |= AT91_SMC_EXNWMODE_READY; + else if (strcmp(tmp_str, "disabled")) + return -EINVAL; + + required = true; + } + + ret = of_property_read_u32(np, "atmel,smc-page-mode", &tmp); + if (!ret) { + switch (tmp) { + case 4: + config->mode |= AT91_SMC_PS_4; + break; + + case 8: + config->mode |= AT91_SMC_PS_8; + break; + + case 16: + config->mode |= AT91_SMC_PS_16; + break; + + case 32: + config->mode |= AT91_SMC_PS_32; + break; + + default: + return -EINVAL; + } + + config->mode |= AT91_SMC_PMEN; + required = true; + } + + ret = at91sam9_smc_xslate_timings(ebid, np, &config->timings, + &required); + if (ret) + return ret; + + return required; +} + +static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid, + struct at91_ebi_dev_config *conf) +{ + unsigned int clk_rate = clk_get_rate(ebid->ebi->clk); + struct at91sam9_ebi_dev_config *config = &conf->sam9; + struct at91sam9_smc_timings *timings = &config->timings; + struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9; + u32 coded_val; + u32 val; + + coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate, + timings->ncs_rd_setup_ns); + val = AT91SAM9_SMC_NCS_NRDSETUP(coded_val); + coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate, + timings->nrd_setup_ns); + val |= AT91SAM9_SMC_NRDSETUP(coded_val); + coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate, + timings->ncs_wr_setup_ns); + val |= AT91SAM9_SMC_NCS_WRSETUP(coded_val); + coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate, + timings->nwe_setup_ns); + val |= AT91SAM9_SMC_NWESETUP(coded_val); + regmap_fields_write(fields->setup, conf->cs, val); + + coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate, + timings->ncs_rd_pulse_ns); + val = AT91SAM9_SMC_NCS_NRDPULSE(coded_val); + coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate, + timings->nrd_pulse_ns); + val |= AT91SAM9_SMC_NRDPULSE(coded_val); + coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate, + timings->ncs_wr_pulse_ns); + val |= AT91SAM9_SMC_NCS_WRPULSE(coded_val); + coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate, + timings->nwe_pulse_ns); + val |= AT91SAM9_SMC_NWEPULSE(coded_val); + regmap_fields_write(fields->pulse, conf->cs, val); + + coded_val = 
at91sam9_smc_cycle_ns_to_cycles(clk_rate, + timings->nrd_cycle_ns); + val = AT91SAM9_SMC_NRDCYCLE(coded_val); + coded_val = at91sam9_smc_cycle_ns_to_cycles(clk_rate, + timings->nwe_cycle_ns); + val |= AT91SAM9_SMC_NWECYCLE(coded_val); + regmap_fields_write(fields->cycle, conf->cs, val); + + val = DIV_ROUND_UP(timings->tdf_ns, clk_rate); + if (val > AT91_SMC_TDF_MAX) + val = AT91_SMC_TDF_MAX; + regmap_fields_write(fields->mode, conf->cs, + config->mode | AT91_SMC_TDF_(val)); + + return 0; +} + +static int at91sam9_ebi_init(struct at91_ebi *ebi) +{ + struct at91sam9_smc_generic_fields *fields = &ebi->sam9; + struct reg_field field = REG_FIELD(0, 0, 31); + + field.id_size = fls(ebi->caps->available_cs); + field.id_offset = AT91SAM9_SMC_GENERIC_BLK_SZ; + + field.reg = AT91SAM9_SMC_SETUP(AT91SAM9_SMC_GENERIC); + fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->setup)) + return PTR_ERR(fields->setup); + + field.reg = AT91SAM9_SMC_PULSE(AT91SAM9_SMC_GENERIC); + fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->pulse)) + return PTR_ERR(fields->pulse); + + field.reg = AT91SAM9_SMC_CYCLE(AT91SAM9_SMC_GENERIC); + fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->cycle)) + return PTR_ERR(fields->cycle); + + field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC); + fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->mode)) + return PTR_ERR(fields->mode); + + return 0; +} + +static int sama5d3_ebi_init(struct at91_ebi *ebi) +{ + struct at91sam9_smc_generic_fields *fields = &ebi->sam9; + struct reg_field field = REG_FIELD(0, 0, 31); + + field.id_size = fls(ebi->caps->available_cs); + field.id_offset = SAMA5_SMC_GENERIC_BLK_SZ; + + field.reg = AT91SAM9_SMC_SETUP(SAMA5_SMC_GENERIC); + fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->setup)) + return PTR_ERR(fields->setup); + + field.reg = AT91SAM9_SMC_PULSE(SAMA5_SMC_GENERIC); + fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->pulse)) + return PTR_ERR(fields->pulse); + + field.reg = AT91SAM9_SMC_CYCLE(SAMA5_SMC_GENERIC); + fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->cycle)) + return PTR_ERR(fields->cycle); + + field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC); + fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); + if (IS_ERR(fields->mode)) + return PTR_ERR(fields->mode); + + return 0; +} + +static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np, + int reg_cells) +{ + const struct at91_ebi_caps *caps = ebi->caps; + struct at91_ebi_dev_config conf = { }; + struct device *dev = ebi->dev; + struct at91_ebi_dev *ebid; + int ret, numcs = 0, i; + bool apply = false; + + numcs = of_property_count_elems_of_size(np, "reg", + reg_cells * sizeof(u32)); + if (numcs <= 0) { + dev_err(dev, "invalid reg property in %s\n", np->full_name); + return -EINVAL; + } + + ebid = devm_kzalloc(ebi->dev, + sizeof(*ebid) + (numcs * sizeof(*ebid->configs)), + GFP_KERNEL); + if (!ebid) + return -ENOMEM; + + ebid->ebi = ebi; + + ret = caps->xlate_config(ebid, np, &conf); + if (ret < 0) + return ret; + else if (ret) + apply = true; + + for (i = 0; i < numcs; i++) { + u32 cs; + + ret = of_property_read_u32_index(np, "reg", i * reg_cells, + &cs); + if (ret) + return ret; + + if (cs > AT91_MATRIX_EBI_NUM_CS || + !(ebi->caps->available_cs & BIT(cs))) { + dev_err(dev, "invalid reg property 
in %s\n", + np->full_name); + return -EINVAL; + } + + ebid->configs[i].cs = cs; + + if (apply) { + conf.cs = cs; + ret = caps->apply_config(ebid, &conf); + if (ret) + return ret; + } + + caps->get_config(ebid, &ebid->configs[i]); + + /* + * Attach the EBI device to the generic SMC logic if at least + * one "atmel,smc-" property is present. + */ + if (ebi->ebi_csa && ret) + regmap_field_update_bits(ebi->ebi_csa, + BIT(cs), 0); + } + + list_add_tail(&ebid->node, &ebi->devs); + + return 0; +} + +static const struct reg_field at91sam9260_ebi_csa = + REG_FIELD(AT91SAM9260_MATRIX_EBICSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9260_ebi_caps = { + .available_cs = 0xff, + .ebi_csa = &at91sam9260_ebi_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct reg_field at91sam9261_ebi_csa = + REG_FIELD(AT91SAM9261_MATRIX_EBICSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9261_ebi_caps = { + .available_cs = 0xff, + .ebi_csa = &at91sam9261_ebi_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct reg_field at91sam9263_ebi0_csa = + REG_FIELD(AT91SAM9263_MATRIX_EBI0CSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9263_ebi0_caps = { + .available_cs = 0x3f, + .ebi_csa = &at91sam9263_ebi0_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct reg_field at91sam9263_ebi1_csa = + REG_FIELD(AT91SAM9263_MATRIX_EBI1CSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9263_ebi1_caps = { + .available_cs = 0x7, + .ebi_csa = &at91sam9263_ebi1_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct reg_field at91sam9rl_ebi_csa = + REG_FIELD(AT91SAM9RL_MATRIX_EBICSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9rl_ebi_caps = { + .available_cs = 0x3f, + .ebi_csa = &at91sam9rl_ebi_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct reg_field at91sam9g45_ebi_csa = + REG_FIELD(AT91SAM9G45_MATRIX_EBICSA, 0, + AT91_MATRIX_EBI_NUM_CS - 1); + +static const struct at91_ebi_caps at91sam9g45_ebi_caps = { + .available_cs = 0x3f, + .ebi_csa = &at91sam9g45_ebi_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct at91_ebi_caps at91sam9x5_ebi_caps = { + .available_cs = 0x3f, + .ebi_csa = &at91sam9263_ebi0_csa, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = at91sam9_ebi_init, +}; + +static const struct at91_ebi_caps sama5d3_ebi_caps = { + .available_cs = 0xf, + .get_config = at91sam9_ebi_get_config, + .xlate_config = at91sam9_ebi_xslate_config, + .apply_config = at91sam9_ebi_apply_config, + .init = sama5d3_ebi_init, +}; + +static const struct of_device_id 
at91_ebi_id_table[] = { + { + .compatible = "atmel,at91sam9260-ebi", + .data = &at91sam9260_ebi_caps, + }, + { + .compatible = "atmel,at91sam9261-ebi", + .data = &at91sam9261_ebi_caps, + }, + { + .compatible = "atmel,at91sam9263-ebi0", + .data = &at91sam9263_ebi0_caps, + }, + { + .compatible = "atmel,at91sam9263-ebi1", + .data = &at91sam9263_ebi1_caps, + }, + { + .compatible = "atmel,at91sam9rl-ebi", + .data = &at91sam9rl_ebi_caps, + }, + { + .compatible = "atmel,at91sam9g45-ebi", + .data = &at91sam9g45_ebi_caps, + }, + { + .compatible = "atmel,at91sam9x5-ebi", + .data = &at91sam9x5_ebi_caps, + }, + { + .compatible = "atmel,sama5d3-ebi", + .data = &sama5d3_ebi_caps, + }, + { /* sentinel */ } +}; + +static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np) +{ + struct device *dev = ebi->dev; + struct property *newprop; + + newprop = devm_kzalloc(dev, sizeof(*newprop), GFP_KERNEL); + if (!newprop) + return -ENOMEM; + + newprop->name = devm_kstrdup(dev, "status", GFP_KERNEL); + if (!newprop->name) + return -ENOMEM; + + newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL); + if (!newprop->name) + return -ENOMEM; + + newprop->length = sizeof("disabled"); + + return of_update_property(np, newprop); +} + +static int at91_ebi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *child, *np = dev->of_node; + const struct of_device_id *match; + struct at91_ebi *ebi; + int ret, reg_cells; + struct clk *clk; + u32 val; + + match = of_match_device(at91_ebi_id_table, dev); + if (!match || !match->data) + return -EINVAL; + + ebi = devm_kzalloc(dev, sizeof(*ebi), GFP_KERNEL); + if (!ebi) + return -ENOMEM; + + INIT_LIST_HEAD(&ebi->devs); + ebi->caps = match->data; + ebi->dev = dev; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ebi->clk = clk; + + ebi->smc = syscon_regmap_lookup_by_phandle(np, "atmel,smc"); + if (IS_ERR(ebi->smc)) + return PTR_ERR(ebi->smc); + + /* + * The sama5d3 does not provide an EBICSA register and thus does need + * to access the matrix registers. 
+ */ + if (ebi->caps->ebi_csa) { + ebi->matrix = + syscon_regmap_lookup_by_phandle(np, "atmel,matrix"); + if (IS_ERR(ebi->matrix)) + return PTR_ERR(ebi->matrix); + + ebi->ebi_csa = regmap_field_alloc(ebi->matrix, + *ebi->caps->ebi_csa); + if (IS_ERR(ebi->ebi_csa)) + return PTR_ERR(ebi->ebi_csa); + } + + ret = ebi->caps->init(ebi); + if (ret) + return ret; + + ret = of_property_read_u32(np, "#address-cells", &val); + if (ret) { + dev_err(dev, "missing #address-cells property\n"); + return ret; + } + + reg_cells = val; + + ret = of_property_read_u32(np, "#size-cells", &val); + if (ret) { + dev_err(dev, "missing #address-cells property\n"); + return ret; + } + + reg_cells += val; + + for_each_available_child_of_node(np, child) { + if (!of_find_property(child, "reg", NULL)) + continue; + + ret = at91_ebi_dev_setup(ebi, child, reg_cells); + if (ret) { + dev_err(dev, "failed to configure EBI bus for %s, disabling the device", + child->full_name); + + ret = at91_ebi_dev_disable(ebi, child); + if (ret) + return ret; + } + } + + return of_platform_populate(np, NULL, NULL, dev); +} + +static struct platform_driver at91_ebi_driver = { + .driver = { + .name = "atmel-ebi", + .of_match_table = at91_ebi_id_table, + }, +}; +builtin_platform_driver_probe(at91_ebi_driver, at91_ebi_probe); diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c index a3ebc8a87479..53a341f3b305 100644 --- a/drivers/memory/atmel-sdramc.c +++ b/drivers/memory/atmel-sdramc.c @@ -1,6 +1,8 @@ /* * Atmel (Multi-port DDR-)SDRAM Controller driver * + * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com> + * * Copyright (C) 2014 Atmel * * This program is free software: you can redistribute it and/or modify @@ -20,7 +22,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -48,7 +50,6 @@ static const struct of_device_id atmel_ramc_of_match[] = { { .compatible = "atmel,sama5d3-ddramc", .data = &sama5d3_caps, }, {}, }; -MODULE_DEVICE_TABLE(of, atmel_ramc_of_match); static int atmel_ramc_probe(struct platform_device *pdev) { @@ -90,8 +91,4 @@ static int __init atmel_ramc_init(void) { return platform_driver_register(&atmel_ramc_driver); } -module_init(atmel_ramc_init); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>"); -MODULE_DESCRIPTION("Atmel (Multi-port DDR-)SDRAM Controller"); +device_initcall(atmel_ramc_init); diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 904b4af5f142..1b182b117f9c 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -31,7 +31,9 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/fsl_ifc.h> -#include <asm/prom.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; EXPORT_SYMBOL(fsl_ifc_ctrl_dev); diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index f6b57579185a..4afbc412f959 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -21,19 +21,50 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <soc/mediatek/smi.h> +#include <dt-bindings/memory/mt2701-larb-port.h> #define SMI_LARB_MMU_EN 0xf00 +#define REG_SMI_SECUR_CON_BASE 0x5c0 + +/* every register control 8 port, register offset 0x4 */ +#define REG_SMI_SECUR_CON_OFFSET(id) (((id) >> 3) << 2) +#define REG_SMI_SECUR_CON_ADDR(id) \ + 
(REG_SMI_SECUR_CON_BASE + REG_SMI_SECUR_CON_OFFSET(id)) + +/* + * every port have 4 bit to control, bit[port + 3] control virtual or physical, + * bit[port + 2 : port + 1] control the domain, bit[port] control the security + * or non-security. + */ +#define SMI_SECUR_CON_VAL_MSK(id) (~(0xf << (((id) & 0x7) << 2))) +#define SMI_SECUR_CON_VAL_VIRT(id) BIT((((id) & 0x7) << 2) + 3) +/* mt2701 domain should be set to 3 */ +#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1)) + +struct mtk_smi_larb_gen { + int port_in_larb[MTK_LARB_NR_MAX + 1]; + void (*config_port)(struct device *); +}; struct mtk_smi { - struct device *dev; - struct clk *clk_apb, *clk_smi; + struct device *dev; + struct clk *clk_apb, *clk_smi; + struct clk *clk_async; /*only needed by mt2701*/ + void __iomem *smi_ao_base; }; struct mtk_smi_larb { /* larb: local arbiter */ - struct mtk_smi smi; - void __iomem *base; - struct device *smi_common_dev; - u32 *mmu; + struct mtk_smi smi; + void __iomem *base; + struct device *smi_common_dev; + const struct mtk_smi_larb_gen *larb_gen; + int larbid; + u32 *mmu; +}; + +enum mtk_smi_gen { + MTK_SMI_GEN1, + MTK_SMI_GEN2 }; static int mtk_smi_enable(const struct mtk_smi *smi) @@ -71,6 +102,7 @@ static void mtk_smi_disable(const struct mtk_smi *smi) int mtk_smi_larb_get(struct device *larbdev) { struct mtk_smi_larb *larb = dev_get_drvdata(larbdev); + const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen; struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev); int ret; @@ -87,7 +119,7 @@ int mtk_smi_larb_get(struct device *larbdev) } /* Configure the iommu info for this larb */ - writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN); + larb_gen->config_port(larbdev); return 0; } @@ -126,6 +158,45 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } +static void mtk_smi_larb_config_port(struct device *dev) +{ + struct mtk_smi_larb *larb = dev_get_drvdata(dev); + + writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN); +} + + +static void mtk_smi_larb_config_port_gen1(struct device *dev) +{ + struct mtk_smi_larb *larb = dev_get_drvdata(dev); + const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen; + struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev); + int i, m4u_port_id, larb_port_num; + u32 sec_con_val, reg_val; + + m4u_port_id = larb_gen->port_in_larb[larb->larbid]; + larb_port_num = larb_gen->port_in_larb[larb->larbid + 1] + - larb_gen->port_in_larb[larb->larbid]; + + for (i = 0; i < larb_port_num; i++, m4u_port_id++) { + if (*larb->mmu & BIT(i)) { + /* bit[port + 3] controls the virtual or physical */ + sec_con_val = SMI_SECUR_CON_VAL_VIRT(m4u_port_id); + } else { + /* do not need to enable m4u for this port */ + continue; + } + reg_val = readl(common->smi_ao_base + + REG_SMI_SECUR_CON_ADDR(m4u_port_id)); + reg_val &= SMI_SECUR_CON_VAL_MSK(m4u_port_id); + reg_val |= sec_con_val; + reg_val |= SMI_SECUR_CON_VAL_DOMAIN(m4u_port_id); + writel(reg_val, + common->smi_ao_base + + REG_SMI_SECUR_CON_ADDR(m4u_port_id)); + } +} + static void mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data) { @@ -137,6 +208,31 @@ static const struct component_ops mtk_smi_larb_component_ops = { .unbind = mtk_smi_larb_unbind, }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = { + /* mt8173 do not need the port in larb */ + .config_port = mtk_smi_larb_config_port, +}; + +static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { + .port_in_larb = { + LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, + 
LARB2_PORT_OFFSET, LARB3_PORT_OFFSET + }, + .config_port = mtk_smi_larb_config_port_gen1, +}; + +static const struct of_device_id mtk_smi_larb_of_ids[] = { + { + .compatible = "mediatek,mt8173-smi-larb", + .data = &mtk_smi_larb_mt8173 + }, + { + .compatible = "mediatek,mt2701-smi-larb", + .data = &mtk_smi_larb_mt2701 + }, + {} +}; + static int mtk_smi_larb_probe(struct platform_device *pdev) { struct mtk_smi_larb *larb; @@ -144,14 +240,20 @@ static int mtk_smi_larb_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *smi_node; struct platform_device *smi_pdev; + const struct of_device_id *of_id; if (!dev->pm_domain) return -EPROBE_DEFER; + of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node); + if (!of_id) + return -EINVAL; + larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL); if (!larb) return -ENOMEM; + larb->larb_gen = of_id->data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); larb->base = devm_ioremap_resource(dev, res); if (IS_ERR(larb->base)) @@ -191,24 +293,34 @@ static int mtk_smi_larb_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mtk_smi_larb_of_ids[] = { - { .compatible = "mediatek,mt8173-smi-larb",}, - {} -}; - static struct platform_driver mtk_smi_larb_driver = { .probe = mtk_smi_larb_probe, - .remove = mtk_smi_larb_remove, + .remove = mtk_smi_larb_remove, .driver = { .name = "mtk-smi-larb", .of_match_table = mtk_smi_larb_of_ids, } }; +static const struct of_device_id mtk_smi_common_of_ids[] = { + { + .compatible = "mediatek,mt8173-smi-common", + .data = (void *)MTK_SMI_GEN2 + }, + { + .compatible = "mediatek,mt2701-smi-common", + .data = (void *)MTK_SMI_GEN1 + }, + {} +}; + static int mtk_smi_common_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_smi *common; + struct resource *res; + const struct of_device_id *of_id; + enum mtk_smi_gen smi_gen; if (!dev->pm_domain) return -EPROBE_DEFER; @@ -226,6 +338,29 @@ static int mtk_smi_common_probe(struct platform_device *pdev) if (IS_ERR(common->clk_smi)) return PTR_ERR(common->clk_smi); + of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node); + if (!of_id) + return -EINVAL; + + /* + * for mtk smi gen 1, we need to get the ao(always on) base to config + * m4u port, and we need to enable the aync clock for transform the smi + * clock into emi clock domain, but for mtk smi gen2, there's no smi ao + * base. 
+ */ + smi_gen = (enum mtk_smi_gen)of_id->data; + if (smi_gen == MTK_SMI_GEN1) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + common->smi_ao_base = devm_ioremap_resource(dev, res); + if (IS_ERR(common->smi_ao_base)) + return PTR_ERR(common->smi_ao_base); + + common->clk_async = devm_clk_get(dev, "async"); + if (IS_ERR(common->clk_async)) + return PTR_ERR(common->clk_async); + + clk_prepare_enable(common->clk_async); + } pm_runtime_enable(dev); platform_set_drvdata(pdev, common); return 0; @@ -237,11 +372,6 @@ static int mtk_smi_common_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mtk_smi_common_of_ids[] = { - { .compatible = "mediatek,mt8173-smi-common", }, - {} -}; - static struct platform_driver mtk_smi_common_driver = { .probe = mtk_smi_common_probe, .remove = mtk_smi_common_remove, @@ -272,4 +402,5 @@ err_unreg_smi: platform_driver_unregister(&mtk_smi_common_driver); return ret; } + subsys_initcall(mtk_smi_init); diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 15508df24e5d..869c83fb3c5d 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -20,7 +20,6 @@ #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> @@ -1807,7 +1806,6 @@ static const struct of_device_id gpmc_dt_ids[] = { { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ { } }; -MODULE_DEVICE_TABLE(of, gpmc_dt_ids); /** * gpmc_read_settings_dt - read gpmc settings from device-tree @@ -2134,8 +2132,7 @@ no_timings: /* is child a common bus? */ if (of_match_node(of_default_bus_match_table, child)) /* create children and other common bus children */ - if (of_platform_populate(child, of_default_bus_match_table, - NULL, &pdev->dev)) + if (of_platform_default_populate(child, NULL, &pdev->dev)) goto err_child_fail; return 0; @@ -2155,68 +2152,6 @@ err: return ret; } -static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) -{ - return 1; /* we're input only */ -} - -static int gpmc_gpio_direction_input(struct gpio_chip *chip, - unsigned int offset) -{ - return 0; /* we're input only */ -} - -static int gpmc_gpio_direction_output(struct gpio_chip *chip, - unsigned int offset, int value) -{ - return -EINVAL; /* we're input only */ -} - -static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) -{ -} - -static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - u32 reg; - - offset += 8; - - reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset); - - return !!reg; -} - -static int gpmc_gpio_init(struct gpmc_device *gpmc) -{ - int ret; - - gpmc->gpio_chip.parent = gpmc->dev; - gpmc->gpio_chip.owner = THIS_MODULE; - gpmc->gpio_chip.label = DEVICE_NAME; - gpmc->gpio_chip.ngpio = gpmc_nr_waitpins; - gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction; - gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input; - gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output; - gpmc->gpio_chip.set = gpmc_gpio_set; - gpmc->gpio_chip.get = gpmc_gpio_get; - gpmc->gpio_chip.base = -1; - - ret = gpiochip_add(&gpmc->gpio_chip); - if (ret < 0) { - dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret); - return ret; - } - - return 0; -} - -static void gpmc_gpio_exit(struct gpmc_device *gpmc) -{ - gpiochip_remove(&gpmc->gpio_chip); -} - static int gpmc_probe_dt(struct platform_device *pdev) { int ret; @@ -2281,7 +2216,69 @@ static int 
gpmc_probe_dt_children(struct platform_device *pdev) { return 0; } -#endif +#endif /* CONFIG_OF */ + +static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + return 1; /* we're input only */ +} + +static int gpmc_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + return 0; /* we're input only */ +} + +static int gpmc_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + return -EINVAL; /* we're input only */ +} + +static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ +} + +static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + u32 reg; + + offset += 8; + + reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset); + + return !!reg; +} + +static int gpmc_gpio_init(struct gpmc_device *gpmc) +{ + int ret; + + gpmc->gpio_chip.parent = gpmc->dev; + gpmc->gpio_chip.owner = THIS_MODULE; + gpmc->gpio_chip.label = DEVICE_NAME; + gpmc->gpio_chip.ngpio = gpmc_nr_waitpins; + gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction; + gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input; + gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output; + gpmc->gpio_chip.set = gpmc_gpio_set; + gpmc->gpio_chip.get = gpmc_gpio_get; + gpmc->gpio_chip.base = -1; + + ret = gpiochip_add(&gpmc->gpio_chip); + if (ret < 0) { + dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret); + return ret; + } + + return 0; +} + +static void gpmc_gpio_exit(struct gpmc_device *gpmc) +{ + gpiochip_remove(&gpmc->gpio_chip); +} static int gpmc_probe(struct platform_device *pdev) { @@ -2437,15 +2434,7 @@ static __init int gpmc_init(void) { return platform_driver_register(&gpmc_driver); } - -static __exit void gpmc_exit(void) -{ - platform_driver_unregister(&gpmc_driver); - -} - postcore_initcall(gpmc_init); -module_exit(gpmc_exit); static struct omap3_gpmc_regs gpmc_context; diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c index 96756fb4d6bd..bf827a666694 100644 --- a/drivers/memory/samsung/exynos-srom.c +++ b/drivers/memory/samsung/exynos-srom.c @@ -11,7 +11,7 @@ */ #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> @@ -91,17 +91,17 @@ static int exynos_srom_configure_bank(struct exynos_srom *srom, if (width == 2) cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT; - bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW); + bw = readl_relaxed(srom->reg_base + EXYNOS_SROM_BW); bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank); - __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW); + writel_relaxed(bw, srom->reg_base + EXYNOS_SROM_BW); - __raw_writel(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) | - (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) | - (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) | - (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) | - (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) | - (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT), - srom->reg_base + EXYNOS_SROM_BC0 + bank); + writel_relaxed(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) | + (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) | + (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) | + (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) | + (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) | + (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT), + srom->reg_base + EXYNOS_SROM_BC0 + bank); return 0; } @@ -134,7 +134,7 @@ static int exynos_srom_probe(struct platform_device *pdev) platform_set_drvdata(pdev, srom); 
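
One fix worth pausing on in the exynos-srom hunk just below is the switch from sizeof() to ARRAY_SIZE(): exynos_srom_alloc_reg_dump() takes an element count, and sizeof() on the offsets array yields a byte count several times larger. A minimal illustration of the difference:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            /* stand-in for the driver's table of register offsets */
            unsigned int offsets[] = { 0x0, 0x4, 0x8, 0xc, 0x10, 0x14 };

            printf("sizeof()     = %zu bytes\n", sizeof(offsets));      /* 24 */
            printf("ARRAY_SIZE() = %zu elements\n", ARRAY_SIZE(offsets)); /* 6 */
            return 0;
    }
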
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets, - sizeof(exynos_srom_offsets)); + ARRAY_SIZE(exynos_srom_offsets)); if (!srom->reg_offset) { iounmap(srom->reg_base); return -ENOMEM; @@ -159,16 +159,6 @@ static int exynos_srom_probe(struct platform_device *pdev) return of_platform_populate(np, NULL, NULL, dev); } -static int exynos_srom_remove(struct platform_device *pdev) -{ - struct exynos_srom *srom = platform_get_drvdata(pdev); - - kfree(srom->reg_offset); - iounmap(srom->reg_base); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static void exynos_srom_save(void __iomem *base, struct exynos_srom_reg_dump *rd, @@ -211,21 +201,16 @@ static const struct of_device_id of_exynos_srom_ids[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, of_exynos_srom_ids); static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume); static struct platform_driver exynos_srom_driver = { .probe = exynos_srom_probe, - .remove = exynos_srom_remove, .driver = { .name = "exynos-srom", .of_match_table = of_exynos_srom_ids, .pm = &exynos_srom_pm_ops, + .suppress_bind_attrs = true, }, }; -module_platform_driver(exynos_srom_driver); - -MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>"); -MODULE_DESCRIPTION("Exynos SROM Controller Driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver(exynos_srom_driver); diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index a1ae0cc2b86d..a4803ac192bb 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -186,8 +186,10 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node) timing = &mc->timings[i++]; err = load_one_timing(mc, timing, child); - if (err) + if (err) { + of_node_put(child); return err; + } } return 0; @@ -206,15 +208,13 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc) for_each_child_of_node(mc->dev->of_node, node) { err = of_property_read_u32(node, "nvidia,ram-code", &node_ram_code); - if (err || (node_ram_code != ram_code)) { - of_node_put(node); + if (err || (node_ram_code != ram_code)) continue; - } err = load_timings(mc, node); + of_node_put(node); if (err) return err; - of_node_put(node); break; } diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c index 3dac7be39654..06cc781ebac1 100644 --- a/drivers/memory/tegra/tegra124-emc.c +++ b/drivers/memory/tegra/tegra124-emc.c @@ -970,8 +970,10 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, timing = &emc->timings[i++]; err = load_one_timing_from_dt(emc, timing, child); - if (err) + if (err) { + of_node_put(child); return err; + } } sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings, @@ -995,10 +997,8 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code) u32 value; err = of_property_read_u32(np, "nvidia,ram-code", &value); - if (err || (value != ram_code)) { - of_node_put(np); + if (err || (value != ram_code)) continue; - } return np; } diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 40bb8ae5853c..aacf584f2a42 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -2338,23 +2338,11 @@ static struct memstick_driver msb_driver = { .resume = msb_resume }; -static int major; - static int __init msb_init(void) { - int rc = register_blkdev(0, DRIVER_NAME); - - if (rc < 0) { - pr_err("failed to register major (error %d)\n", rc); - return rc; - } - - major = rc; - rc = memstick_register_driver(&msb_driver); - if (rc) { - unregister_blkdev(major, DRIVER_NAME); + int rc 
= memstick_register_driver(&msb_driver); + if (rc) pr_err("failed to register memstick driver (error %d)\n", rc); - } return rc; } @@ -2362,7 +2350,6 @@ static int __init msb_init(void) static void __exit msb_exit(void) { memstick_unregister_driver(&msb_driver); - unregister_blkdev(major, DRIVER_NAME); idr_destroy(&msb_disk_idr); } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index ff031a7735a5..2d1fb6420592 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -18,6 +18,17 @@ config MFD_CS5535 This is the core driver for CS5535/CS5536 MFD functions. This is necessary for using the board's GPIO and MFGPT functionality. +config MFD_ALTERA_A10SR + bool "Altera Arria10 DevKit System Resource chip" + depends on ARCH_SOCFPGA && SPI_MASTER=y && OF + select REGMAP_SPI + select MFD_CORE + help + Support for the Altera Arria10 DevKit MAX5 System Resource chip + using the SPI interface. This driver provides common support for + accessing the external gpio extender (LEDs & buttons) and + power supply alarms (hwmon). + config MFD_ACT8945A tristate "Active-semi ACT8945A" select MFD_CORE @@ -480,6 +491,8 @@ config MFD_KEMPLD * COMe-cDC2 (microETXexpress-DC) * COMe-cHL6 * COMe-cPC2 (microETXexpress-PC) + * COMe-cSL6 + * COMe-mAL10 * COMe-mBT10 * COMe-mCT10 * COMe-mTT10 (nanoETXexpress-TT) @@ -524,8 +537,8 @@ config MFD_88PM860X battery-charger under the corresponding menus. config MFD_MAX14577 - bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support" - depends on I2C=y + tristate "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support" + depends on I2C select MFD_CORE select REGMAP_I2C select REGMAP_IRQ diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 42a66e19e191..2ba3ba35f745 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -205,3 +205,5 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o obj-$(CONFIG_MFD_MT6397) += mt6397-core.o + +obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index f3d689176fc2..589eebfc13df 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -1087,7 +1087,6 @@ static int ab8500_probe(struct platform_device *pdev) "Vbus Detect (USB)", "USB ID Detect", "UART Factory Mode Detect"}; - struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev); const struct platform_device_id *platid = platform_get_device_id(pdev); enum ab8500_version version = AB8500_VERSION_UNDEFINED; struct device_node *np = pdev->dev.of_node; @@ -1219,9 +1218,6 @@ static int ab8500_probe(struct platform_device *pdev) pr_cont("None\n"); } - if (plat && plat->init) - plat->init(ab8500); - if (is_ab9540(ab8500)) { ret = get_register_interruptible(ab8500, AB8500_CHARGER, AB8500_CH_USBCH_STAT1_REG, &value); diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c index b9f0010309f9..207cc497958a 100644 --- a/drivers/mfd/ab8500-sysctrl.c +++ b/drivers/mfd/ab8500-sysctrl.c @@ -127,45 +127,11 @@ EXPORT_SYMBOL(ab8500_sysctrl_write); static int ab8500_sysctrl_probe(struct platform_device *pdev) { - struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); - struct ab8500_platform_data *plat; - struct ab8500_sysctrl_platform_data *pdata; - - plat = dev_get_platdata(pdev->dev.parent); - - if (!plat) - return -EINVAL; - sysctrl_dev = &pdev->dev; if (!pm_power_off) pm_power_off = ab8500_power_off; - pdata = plat->sysctrl; - if 
(pdata) { - int last, ret, i, j; - - if (is_ab8505(ab8500)) - last = AB8500_SYSCLKREQ4RFCLKBUF; - else - last = AB8500_SYSCLKREQ8RFCLKBUF; - - for (i = AB8500_SYSCLKREQ1RFCLKBUF; i <= last; i++) { - j = i - AB8500_SYSCLKREQ1RFCLKBUF; - ret = ab8500_sysctrl_write(i, 0xff, - pdata->initial_req_buf_config[j]); - dev_dbg(&pdev->dev, - "Setting SysClkReq%dRfClkBuf 0x%X\n", - j + 1, - pdata->initial_req_buf_config[j]); - if (ret < 0) { - dev_err(&pdev->dev, - "Can't set sysClkReq%dRfClkBuf: %d\n", - j + 1, ret); - } - } - } - return 0; } diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c new file mode 100644 index 000000000000..c05aa4ff57fd --- /dev/null +++ b/drivers/mfd/altera-a10sr.c @@ -0,0 +1,169 @@ +/* + * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * SPI access for Altera Arria10 MAX5 System Resource Chip + * + * Adapted from DA9052 + */ + +#include <linux/mfd/altera-a10sr.h> +#include <linux/mfd/core.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +static const struct mfd_cell altr_a10sr_subdev_info[] = { + { + .name = "altr_a10sr_gpio", + .of_compatible = "altr,a10sr-gpio", + }, +}; + +static bool altr_a10sr_reg_readable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ALTR_A10SR_VERSION_READ: + case ALTR_A10SR_LED_REG: + case ALTR_A10SR_PBDSW_REG: + case ALTR_A10SR_PBDSW_IRQ_REG: + case ALTR_A10SR_PWR_GOOD1_REG: + case ALTR_A10SR_PWR_GOOD2_REG: + case ALTR_A10SR_PWR_GOOD3_REG: + case ALTR_A10SR_FMCAB_REG: + case ALTR_A10SR_HPS_RST_REG: + case ALTR_A10SR_USB_QSPI_REG: + case ALTR_A10SR_SFPA_REG: + case ALTR_A10SR_SFPB_REG: + case ALTR_A10SR_I2C_M_REG: + case ALTR_A10SR_WARM_RST_REG: + case ALTR_A10SR_WR_KEY_REG: + case ALTR_A10SR_PMBUS_REG: + return true; + default: + return false; + } +} + +static bool altr_a10sr_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ALTR_A10SR_LED_REG: + case ALTR_A10SR_PBDSW_IRQ_REG: + case ALTR_A10SR_FMCAB_REG: + case ALTR_A10SR_HPS_RST_REG: + case ALTR_A10SR_USB_QSPI_REG: + case ALTR_A10SR_SFPA_REG: + case ALTR_A10SR_SFPB_REG: + case ALTR_A10SR_WARM_RST_REG: + case ALTR_A10SR_WR_KEY_REG: + case ALTR_A10SR_PMBUS_REG: + return true; + default: + return false; + } +} + +static bool altr_a10sr_reg_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ALTR_A10SR_PBDSW_REG: + case ALTR_A10SR_PBDSW_IRQ_REG: + case ALTR_A10SR_PWR_GOOD1_REG: + case ALTR_A10SR_PWR_GOOD2_REG: + case ALTR_A10SR_PWR_GOOD3_REG: + case ALTR_A10SR_HPS_RST_REG: + case ALTR_A10SR_I2C_M_REG: + case ALTR_A10SR_WARM_RST_REG: + case ALTR_A10SR_WR_KEY_REG: + case ALTR_A10SR_PMBUS_REG: + return true; + default: + return false; + } +} + +const struct regmap_config altr_a10sr_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .cache_type = REGCACHE_NONE, + + .use_single_rw = true, + .read_flag_mask = 1, + .write_flag_mask = 0, + + .max_register = 
ALTR_A10SR_WR_KEY_REG, + .readable_reg = altr_a10sr_reg_readable, + .writeable_reg = altr_a10sr_reg_writeable, + .volatile_reg = altr_a10sr_reg_volatile, + +}; + +static int altr_a10sr_spi_probe(struct spi_device *spi) +{ + int ret; + struct altr_a10sr *a10sr; + + a10sr = devm_kzalloc(&spi->dev, sizeof(*a10sr), + GFP_KERNEL); + if (!a10sr) + return -ENOMEM; + + spi->mode = SPI_MODE_3; + spi->bits_per_word = 8; + spi_setup(spi); + + a10sr->dev = &spi->dev; + + spi_set_drvdata(spi, a10sr); + + a10sr->regmap = devm_regmap_init_spi(spi, &altr_a10sr_regmap_config); + if (IS_ERR(a10sr->regmap)) { + ret = PTR_ERR(a10sr->regmap); + dev_err(&spi->dev, "Failed to allocate register map: %d\n", + ret); + return ret; + } + + ret = devm_mfd_add_devices(a10sr->dev, PLATFORM_DEVID_AUTO, + altr_a10sr_subdev_info, + ARRAY_SIZE(altr_a10sr_subdev_info), + NULL, 0, NULL); + if (ret) + dev_err(a10sr->dev, "Failed to register sub-devices: %d\n", + ret); + + return ret; +} + +static const struct of_device_id altr_a10sr_spi_of_match[] = { + { .compatible = "altr,a10sr" }, + { }, +}; +MODULE_DEVICE_TABLE(of, altr_a10sr_spi_of_match); + +static struct spi_driver altr_a10sr_spi_driver = { + .probe = altr_a10sr_spi_probe, + .driver = { + .name = "altr_a10sr", + .of_match_table = of_match_ptr(altr_a10sr_spi_of_match), + }, +}; + +module_spi_driver(altr_a10sr_spi_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>"); +MODULE_DESCRIPTION("Altera Arria10 DevKit System Resource MFD Driver"); diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bf2717967597..e4f97b3c824b 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1462,7 +1462,7 @@ int arizona_dev_init(struct arizona *arizona) /* Set up for interrupts */ ret = arizona_irq_init(arizona); if (ret != 0) - goto err_reset; + goto err_pm; pm_runtime_set_autosuspend_delay(arizona->dev, 100); pm_runtime_use_autosuspend(arizona->dev); @@ -1486,6 +1486,8 @@ int arizona_dev_init(struct arizona *arizona) err_irq: arizona_irq_exit(arizona); +err_pm: + pm_runtime_disable(arizona->dev); err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index edeb4951366a..5e18d3c77582 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -109,8 +109,20 @@ static irqreturn_t arizona_irq_thread(int irq, void *data) do { poll = false; - if (arizona->aod_irq_chip) - handle_nested_irq(irq_find_mapping(arizona->virq, 0)); + if (arizona->aod_irq_chip) { + /* + * Check the AOD status register to determine whether + * the nested IRQ handler should be called. 
+ */ + ret = regmap_read(arizona->regmap, + ARIZONA_AOD_IRQ1, &val); + if (ret) + dev_warn(arizona->dev, + "Failed to read AOD IRQ1 %d\n", ret); + else if (val) + handle_nested_irq( + irq_find_mapping(arizona->virq, 0)); + } /* * Check if one of the main interrupts is asserted and only diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index e4e32978c377..fd80b0981f0f 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -17,6 +17,7 @@ */ #include <linux/err.h> +#include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> @@ -93,7 +94,10 @@ static const struct regmap_range axp22x_writeable_ranges[] = { }; static const struct regmap_range axp22x_volatile_ranges[] = { + regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE), regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE), + regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE), + regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES), }; static const struct regmap_access_table axp22x_writeable_table = { @@ -157,6 +161,11 @@ static struct resource axp20x_usb_power_supply_resources[] = { DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"), }; +static struct resource axp22x_usb_power_supply_resources[] = { + DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"), + DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"), +}; + static struct resource axp22x_pek_resources[] = { { .name = "PEK_DBR", @@ -524,6 +533,11 @@ static struct mfd_cell axp22x_cells[] = { .resources = axp22x_pek_resources, }, { .name = "axp20x-regulator", + }, { + .name = "axp20x-usb-power-supply", + .of_compatible = "x-powers,axp221-usb-power-supply", + .num_resources = ARRAY_SIZE(axp22x_usb_power_supply_resources), + .resources = axp22x_usb_power_supply_resources, }, }; @@ -664,6 +678,9 @@ static void axp20x_power_off(void) regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL, AXP20X_OFF); + + /* Give capacitors etc. time to drain to avoid kernel panic msg. 
*/ + msleep(500); } int axp20x_match_device(struct axp20x_dev *axp20x) diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index c0a86aeb1733..388e268b9bcf 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -3094,8 +3094,7 @@ static void db8500_prcmu_update_cpufreq(void) } } -static int db8500_prcmu_register_ab8500(struct device *parent, - struct ab8500_platform_data *pdata) +static int db8500_prcmu_register_ab8500(struct device *parent) { struct device_node *np; struct resource ab8500_resource; @@ -3103,8 +3102,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent, .name = "ab8500-core", .of_compatible = "stericsson,ab8500", .id = AB8500_VERSION_AB8500, - .platform_data = pdata, - .pdata_size = sizeof(struct ab8500_platform_data), .resources = &ab8500_resource, .num_resources = 1, }; @@ -3133,7 +3130,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent, static int db8500_prcmu_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev); int irq = 0, err = 0; struct resource *res; @@ -3149,7 +3145,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev) return -ENOMEM; } init_prcm_registers(); - dbx500_fw_version_init(pdev, pdata->version_offset); + dbx500_fw_version_init(pdev, DB8500_PRCMU_FW_VERSION_OFFSET); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm"); if (!res) { dev_err(&pdev->dev, "no prcmu tcdm region provided\n"); @@ -3204,7 +3200,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev) } } - err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata); + err = db8500_prcmu_register_ab8500(&pdev->dev); if (err) { mfd_remove_devices(&pdev->dev); pr_err("prcmu: Failed to add ab8500 subdevice\n"); diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c index 14661ec5ef7f..270e19c0bba1 100644 --- a/drivers/mfd/dm355evm_msp.c +++ b/drivers/mfd/dm355evm_msp.c @@ -199,11 +199,8 @@ static struct device *add_child(struct i2c_client *client, const char *name, int status; pdev = platform_device_alloc(name, -1); - if (!pdev) { - dev_dbg(&client->dev, "can't alloc dev\n"); - status = -ENOMEM; - goto err; - } + if (!pdev) + return ERR_PTR(-ENOMEM); device_init_wakeup(&pdev->dev, can_wakeup); pdev->dev.parent = &client->dev; diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c index 05ddc7882362..0fc62995695b 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -24,19 +24,15 @@ #include <linux/platform_device.h> #include <linux/regmap.h> -static const struct mfd_cell hi655x_pmic_devs[] = { - { .name = "hi655x-regulator", }, -}; - static const struct regmap_irq hi655x_irqs[] = { - { .reg_offset = 0, .mask = OTMP_D1R_INT }, - { .reg_offset = 0, .mask = VSYS_2P5_R_INT }, - { .reg_offset = 0, .mask = VSYS_UV_D3R_INT }, - { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT }, - { .reg_offset = 0, .mask = PWRON_D4SR_INT }, - { .reg_offset = 0, .mask = PWRON_D20F_INT }, - { .reg_offset = 0, .mask = PWRON_D20R_INT }, - { .reg_offset = 0, .mask = RESERVE_INT }, + { .reg_offset = 0, .mask = OTMP_D1R_INT_MASK }, + { .reg_offset = 0, .mask = VSYS_2P5_R_INT_MASK }, + { .reg_offset = 0, .mask = VSYS_UV_D3R_INT_MASK }, + { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT_MASK }, + { .reg_offset = 0, .mask = PWRON_D4SR_INT_MASK }, + { .reg_offset = 0, .mask = PWRON_D20F_INT_MASK }, + { .reg_offset = 0, .mask = PWRON_D20R_INT_MASK }, + { .reg_offset = 0, .mask = RESERVE_INT_MASK 
}, }; static const struct regmap_irq_chip hi655x_irq_chip = { @@ -45,6 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = { .num_regs = 1, .num_irqs = ARRAY_SIZE(hi655x_irqs), .status_base = HI655X_IRQ_STAT_BASE, + .ack_base = HI655X_IRQ_STAT_BASE, .mask_base = HI655X_IRQ_MASK_BASE, }; @@ -55,6 +52,34 @@ static struct regmap_config hi655x_regmap_config = { .max_register = HI655X_BUS_ADDR(0xFFF), }; +static struct resource pwrkey_resources[] = { + { + .name = "down", + .start = PWRON_D20R_INT, + .end = PWRON_D20R_INT, + .flags = IORESOURCE_IRQ, + }, { + .name = "up", + .start = PWRON_D20F_INT, + .end = PWRON_D20F_INT, + .flags = IORESOURCE_IRQ, + }, { + .name = "hold 4s", + .start = PWRON_D4SR_INT, + .end = PWRON_D4SR_INT, + .flags = IORESOURCE_IRQ, + }, +}; + +static const struct mfd_cell hi655x_pmic_devs[] = { + { + .name = "hi65xx-powerkey", + .num_resources = ARRAY_SIZE(pwrkey_resources), + .resources = &pwrkey_resources[0], + }, + { .name = "hi655x-regulator", }, +}; + static void hi655x_local_irq_clear(struct regmap *map) { int i; @@ -80,12 +105,9 @@ static int hi655x_pmic_probe(struct platform_device *pdev) pmic->dev = dev; pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!pmic->res) - return -ENOENT; - base = devm_ioremap_resource(dev, pmic->res); - if (!base) - return -ENOMEM; + if (IS_ERR(base)) + return PTR_ERR(base); pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base, &hi655x_regmap_config); @@ -123,7 +145,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pmic); ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, hi655x_pmic_devs, - ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, NULL); + ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, + regmap_irq_get_domain(pmic->irq_data)); if (ret) { dev_err(dev, "Failed to register device %d\n", ret); regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data); diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 05b924542ee2..da5722d7c540 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -624,6 +624,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = { .driver_data = (void *)&kempld_platform_data_generic, .callback = kempld_create_platform_device, }, { + .ident = "CSL6", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"), + DMI_MATCH(DMI_BOARD_NAME, "COMe-cSL6"), + }, + .driver_data = (void *)&kempld_platform_data_generic, + .callback = kempld_create_platform_device, + }, { .ident = "CVV6", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"), @@ -647,6 +655,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = { .driver_data = (void *)&kempld_platform_data_generic, .callback = kempld_create_platform_device, }, { + .ident = "MAL1", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"), + DMI_MATCH(DMI_BOARD_NAME, "COMe-mAL10"), + }, + .driver_data = (void *)&kempld_platform_data_generic, + .callback = kempld_create_platform_device, + }, { .ident = "MBR1", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"), diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 2280b3fdcf68..6c245128ab2e 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -561,7 +561,7 @@ static int __init max14577_i2c_init(void) return i2c_add_driver(&max14577_i2c_driver); } -subsys_initcall(max14577_i2c_init); +module_init(max14577_i2c_init); static void __exit max14577_i2c_exit(void) { diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index f32fbb8e8129..258757e216c4 100644 --- a/drivers/mfd/max77620.c 
+++ b/drivers/mfd/max77620.c @@ -31,25 +31,25 @@ #include <linux/interrupt.h> #include <linux/mfd/core.h> #include <linux/mfd/max77620.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/regmap.h> #include <linux/slab.h> -static struct resource gpio_resources[] = { +static const struct resource gpio_resources[] = { DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO), }; -static struct resource power_resources[] = { +static const struct resource power_resources[] = { DEFINE_RES_IRQ(MAX77620_IRQ_LBT_MBATLOW), }; -static struct resource rtc_resources[] = { +static const struct resource rtc_resources[] = { DEFINE_RES_IRQ(MAX77620_IRQ_TOP_RTC), }; -static struct resource thermal_resources[] = { +static const struct resource thermal_resources[] = { DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM1), DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM2), }; @@ -111,15 +111,6 @@ static const struct mfd_cell max20024_children[] = { }, }; -static struct regmap_irq_chip max77620_top_irq_chip = { - .name = "max77620-top", - .irqs = max77620_top_irqs, - .num_irqs = ARRAY_SIZE(max77620_top_irqs), - .num_regs = 2, - .status_base = MAX77620_REG_IRQTOP, - .mask_base = MAX77620_REG_IRQTOPM, -}; - static const struct regmap_range max77620_readable_ranges[] = { regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4), }; @@ -180,6 +171,51 @@ static const struct regmap_config max20024_regmap_config = { .volatile_table = &max77620_volatile_table, }; +/* + * MAX77620 and MAX20024 have the following steps of interrupt handling + * for TOP interrupts: + * 1. When an interrupt occurs from the PMIC, mask the PMIC interrupt by setting GLBLM. + * 2. Read IRQTOP and service the interrupt. + * 3. Once all interrupts have been checked and serviced, the interrupt service + * routine unmasks the hardware interrupt line by clearing GLBLM. + */ +static int max77620_irq_global_mask(void *irq_drv_data) +{ + struct max77620_chip *chip = irq_drv_data; + int ret; + + ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT, + MAX77620_GLBLM_MASK, MAX77620_GLBLM_MASK); + if (ret < 0) + dev_err(chip->dev, "Failed to set GLBLM: %d\n", ret); + + return ret; +} + +static int max77620_irq_global_unmask(void *irq_drv_data) +{ + struct max77620_chip *chip = irq_drv_data; + int ret; + + ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT, + MAX77620_GLBLM_MASK, 0); + if (ret < 0) + dev_err(chip->dev, "Failed to reset GLBLM: %d\n", ret); + + return ret; +} + +static struct regmap_irq_chip max77620_top_irq_chip = { + .name = "max77620-top", + .irqs = max77620_top_irqs, + .num_irqs = ARRAY_SIZE(max77620_top_irqs), + .num_regs = 2, + .status_base = MAX77620_REG_IRQTOP, + .mask_base = MAX77620_REG_IRQTOPM, + .handle_pre_irq = max77620_irq_global_mask, + .handle_post_irq = max77620_irq_global_unmask, +}; + /* max77620_get_fps_period_reg_value: Get FPS bit field value from * requested periods.
* MAX77620 supports the FPS period of 40, 80, 160, 320, 640, 1280, 2560 @@ -433,6 +469,7 @@ static int max77620_probe(struct i2c_client *client, if (ret < 0) return ret; + max77620_top_irq_chip.irq_drv_data = chip; ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq, IRQF_ONESHOT | IRQF_SHARED, chip->irq_base, &max77620_top_irq_chip, @@ -568,7 +605,6 @@ static const struct i2c_device_id max77620_id[] = { {"max20024", MAX20024}, {}, }; -MODULE_DEVICE_TABLE(i2c, max77620_id); static const struct dev_pm_ops max77620_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(max77620_i2c_suspend, max77620_i2c_resume) @@ -582,11 +618,4 @@ static struct i2c_driver max77620_driver = { .driver = { .name = "max77620", }, .probe = max77620_probe, .id_table = max77620_id, }; - -module_i2c_driver(max77620_driver); - -MODULE_DESCRIPTION("MAX77620/MAX20024 Multi Function Device Core Driver"); - -MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); -MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>"); -MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>"); -MODULE_LICENSE("GPL v2"); +builtin_i2c_driver(max77620_driver); diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c index 7cfc95b49c5d..dc5caeaaa6a1 100644 --- a/drivers/mfd/max77843.c +++ b/drivers/mfd/max77843.c @@ -15,7 +15,7 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/mfd/core.h> #include <linux/mfd/max77693-common.h> #include <linux/mfd/max77843-private.h> @@ -171,19 +171,6 @@ err_pmic_id: return ret; } -static int max77843_remove(struct i2c_client *i2c) -{ - struct max77693_dev *max77843 = i2c_get_clientdata(i2c); - - mfd_remove_devices(max77843->dev); - - regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys); - - i2c_unregister_device(max77843->i2c_chg); - - return 0; -} - static const struct of_device_id max77843_dt_match[] = { { .compatible = "maxim,max77843", }, { }, }; @@ -193,7 +180,6 @@ static const struct i2c_device_id max77843_id[] = { { "max77843", TYPE_MAX77843, }, { }, }; -MODULE_DEVICE_TABLE(i2c, max77843_id); static int __maybe_unused max77843_suspend(struct device *dev) { @@ -226,9 +212,9 @@ static struct i2c_driver max77843_i2c_driver = { .name = "max77843", .pm = &max77843_pm, .of_match_table = max77843_dt_match, + .suppress_bind_attrs = true, }, .probe = max77843_probe, - .remove = max77843_remove, .id_table = max77843_id, }; @@ -237,9 +223,3 @@ static int __init max77843_i2c_init(void) return i2c_add_driver(&max77843_i2c_driver); } subsys_initcall(max77843_i2c_init); - -static void __exit max77843_i2c_exit(void) -{ - i2c_del_driver(&max77843_i2c_driver); -} -module_exit(max77843_i2c_exit); diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index 70443b161a5b..5c80aea3211f 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -9,7 +9,7 @@ * published by the Free Software Foundation.
*/ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/mfd/max8925.h> @@ -133,7 +133,6 @@ static const struct i2c_device_id max8925_id_table[] = { { "max8925", 0 }, { }, }; -MODULE_DEVICE_TABLE(i2c, max8925_id_table); static int max8925_dt_init(struct device_node *np, struct device *dev, struct max8925_platform_data *pdata) @@ -240,7 +239,6 @@ static const struct of_device_id max8925_dt_ids[] = { { .compatible = "maxim,max8925", }, {}, }; -MODULE_DEVICE_TABLE(of, max8925_dt_ids); static struct i2c_driver max8925_driver = { .driver = { @@ -264,13 +262,3 @@ static int __init max8925_i2c_init(void) return ret; } subsys_initcall(max8925_i2c_init); - -static void __exit max8925_i2c_exit(void) -{ - i2c_del_driver(&max8925_driver); -} -module_exit(max8925_i2c_exit); - -MODULE_DESCRIPTION("I2C Driver for Maxim 8925"); -MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index f316348e3d98..2d6e2c392786 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -2,7 +2,7 @@ * max8997.c - mfd core driver for the Maxim 8966 and 8997 * * Copyright (C) 2011 Samsung Electronics - * MyungJoo Ham <myungjoo.ham@smasung.com> + * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,7 +28,7 @@ #include <linux/of_irq.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/mutex.h> #include <linux/mfd/core.h> #include <linux/mfd/max8997.h> @@ -55,7 +55,6 @@ static const struct of_device_id max8997_pmic_dt_match[] = { { .compatible = "maxim,max8997-pmic", .data = (void *)TYPE_MAX8997 }, {}, }; -MODULE_DEVICE_TABLE(of, max8997_pmic_dt_match); #endif int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest) @@ -263,24 +262,11 @@ err_i2c_haptic: return ret; } -static int max8997_i2c_remove(struct i2c_client *i2c) -{ - struct max8997_dev *max8997 = i2c_get_clientdata(i2c); - - mfd_remove_devices(max8997->dev); - i2c_unregister_device(max8997->muic); - i2c_unregister_device(max8997->haptic); - i2c_unregister_device(max8997->rtc); - - return 0; -} - static const struct i2c_device_id max8997_i2c_id[] = { { "max8997", TYPE_MAX8997 }, { "max8966", TYPE_MAX8966 }, { } }; -MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); static u8 max8997_dumpaddr_pmic[] = { MAX8997_REG_INT1MSK, @@ -510,10 +496,10 @@ static struct i2c_driver max8997_i2c_driver = { .driver = { .name = "max8997", .pm = &max8997_pm, + .suppress_bind_attrs = true, .of_match_table = of_match_ptr(max8997_pmic_dt_match), }, .probe = max8997_i2c_probe, - .remove = max8997_i2c_remove, .id_table = max8997_i2c_id, }; @@ -523,13 +509,3 @@ static int __init max8997_i2c_init(void) } /* init early so consumer devices can complete system boot */ subsys_initcall(max8997_i2c_init); - -static void __exit max8997_i2c_exit(void) -{ - i2c_del_driver(&max8997_i2c_driver); -} -module_exit(max8997_i2c_exit); - -MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver"); -MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index ab28b29400f6..4c33b8063bc3 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c @@ -21,8 +21,6 @@ */ #include <linux/err.h> -#include <linux/module.h> 
-#include <linux/moduleparam.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> @@ -138,7 +136,6 @@ static const struct of_device_id max8998_dt_match[] = { { .compatible = "ti,lp3974", .data = (void *)TYPE_LP3974 }, {}, }; -MODULE_DEVICE_TABLE(of, max8998_dt_match); #endif /* @@ -254,23 +251,11 @@ err: return ret; } -static int max8998_i2c_remove(struct i2c_client *i2c) -{ - struct max8998_dev *max8998 = i2c_get_clientdata(i2c); - - mfd_remove_devices(max8998->dev); - max8998_irq_exit(max8998); - i2c_unregister_device(max8998->rtc); - - return 0; -} - static const struct i2c_device_id max8998_i2c_id[] = { { "max8998", TYPE_MAX8998 }, { "lp3974", TYPE_LP3974}, { } }; -MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); static int max8998_suspend(struct device *dev) { @@ -378,10 +363,10 @@ static struct i2c_driver max8998_i2c_driver = { .driver = { .name = "max8998", .pm = &max8998_pm, + .suppress_bind_attrs = true, .of_match_table = of_match_ptr(max8998_dt_match), }, .probe = max8998_i2c_probe, - .remove = max8998_i2c_remove, .id_table = max8998_i2c_id, }; @@ -391,13 +376,3 @@ static int __init max8998_i2c_init(void) } /* init early so consumer devices can complete system boot */ subsys_initcall(max8998_i2c_init); - -static void __exit max8998_i2c_exit(void) -{ - i2c_del_driver(&max8998_i2c_driver); -} -module_exit(max8998_i2c_exit); - -MODULE_DESCRIPTION("MAXIM 8998 multi-function core driver"); -MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index c30290f33430..1aa74c4c3ced 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -30,6 +30,8 @@ #include <linux/platform_data/usb-omap.h> #include <linux/of.h> +#include "omap-usb.h" + #define USBTLL_DRIVER_NAME "usbhs_tll" /* TLL Register Set */ diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c index 1be47ad6441b..2e44323455dd 100644 --- a/drivers/mfd/qcom_rpm.c +++ b/drivers/mfd/qcom_rpm.c @@ -34,7 +34,13 @@ struct qcom_rpm_resource { struct qcom_rpm_data { u32 version; const struct qcom_rpm_resource *resource_table; - unsigned n_resources; + unsigned int n_resources; + unsigned int req_ctx_off; + unsigned int req_sel_off; + unsigned int ack_ctx_off; + unsigned int ack_sel_off; + unsigned int req_sel_size; + unsigned int ack_sel_size; }; struct qcom_rpm { @@ -61,17 +67,11 @@ struct qcom_rpm { #define RPM_REQUEST_TIMEOUT (5 * HZ) -#define RPM_REQUEST_CONTEXT 3 -#define RPM_REQ_SELECT 11 -#define RPM_ACK_CONTEXT 15 -#define RPM_ACK_SELECTOR 23 -#define RPM_SELECT_SIZE 7 +#define RPM_MAX_SEL_SIZE 7 #define RPM_NOTIFICATION BIT(30) #define RPM_REJECTED BIT(31) -#define RPM_SIGNAL BIT(2) - static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = { [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 }, [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 }, @@ -157,6 +157,12 @@ static const struct qcom_rpm_data apq8064_template = { .version = 3, .resource_table = apq8064_rpm_resource_table, .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table), + .req_ctx_off = 3, + .req_sel_off = 11, + .ack_ctx_off = 15, + .ack_sel_off = 23, + .req_sel_size = 4, + .ack_sel_size = 7, }; static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = { @@ -240,6 +246,12 @@ static const struct qcom_rpm_data msm8660_template = { .version = 2, .resource_table = msm8660_rpm_resource_table, .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table), + .req_ctx_off = 3, + .req_sel_off = 11, + .ack_ctx_off = 19, + .ack_sel_off = 27, + 
.req_sel_size = 7, + .ack_sel_size = 7, }; static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = { @@ -322,6 +334,12 @@ static const struct qcom_rpm_data msm8960_template = { .version = 3, .resource_table = msm8960_rpm_resource_table, .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table), + .req_ctx_off = 3, + .req_sel_off = 11, + .ack_ctx_off = 15, + .ack_sel_off = 23, + .req_sel_size = 4, + .ack_sel_size = 7, }; static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = { @@ -362,6 +380,12 @@ static const struct qcom_rpm_data ipq806x_template = { .version = 3, .resource_table = ipq806x_rpm_resource_table, .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table), + .req_ctx_off = 3, + .req_sel_off = 11, + .ack_ctx_off = 15, + .ack_sel_off = 23, + .req_sel_size = 4, + .ack_sel_size = 7, }; static const struct of_device_id qcom_rpm_of_match[] = { @@ -380,7 +404,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm, { const struct qcom_rpm_resource *res; const struct qcom_rpm_data *data = rpm->data; - u32 sel_mask[RPM_SELECT_SIZE] = { 0 }; + u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 }; int left; int ret = 0; int i; @@ -398,12 +422,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm, writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i)); bitmap_set((unsigned long *)sel_mask, res->select_id, 1); - for (i = 0; i < ARRAY_SIZE(sel_mask); i++) { + for (i = 0; i < rpm->data->req_sel_size; i++) { writel_relaxed(sel_mask[i], - RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i)); + RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i)); } - writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT)); + writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off)); reinit_completion(&rpm->ack); regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit)); @@ -426,10 +450,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev) u32 ack; int i; - ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); - for (i = 0; i < RPM_SELECT_SIZE; i++) - writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i)); - writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); + ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); + for (i = 0; i < rpm->data->ack_sel_size; i++) + writel_relaxed(0, + RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i)); + writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); if (ack & RPM_NOTIFICATION) { dev_warn(rpm->dev, "ignoring notification!\n"); diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c index fb4ce6d04c30..c180b7533bba 100644 --- a/drivers/mfd/si476x-i2c.c +++ b/drivers/mfd/si476x-i2c.c @@ -600,7 +600,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core, unknown_revision: dev_err(&core->client->dev, "Unsupported version of the firmware: %d.%d.%d, " - "reverting to A10 comptible functions\n", + "reverting to A10 compatible functions\n", major, minor1, minor2); return SI476X_REVISION_A10; diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c index 7f89e89b8a5e..cd18c09827ef 100644 --- a/drivers/mfd/smsc-ece1099.c +++ b/drivers/mfd/smsc-ece1099.c @@ -36,7 +36,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c, { struct smsc *smsc; int devid, rev, venid_l, venid_h; - int ret = 0; + int ret; smsc = devm_kzalloc(&i2c->dev, sizeof(struct smsc), GFP_KERNEL); @@ -46,10 +46,8 @@ static int smsc_i2c_probe(struct i2c_client *i2c, } smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config); - if (IS_ERR(smsc->regmap)) { - ret = PTR_ERR(smsc->regmap); - goto err; - } + if (IS_ERR(smsc->regmap)) + return 
PTR_ERR(smsc->regmap); i2c_set_clientdata(i2c, smsc); smsc->dev = &i2c->dev; @@ -68,7 +66,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c, ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk); if (ret) - goto err; + return ret; #ifdef CONFIG_OF if (i2c->dev.of_node) @@ -76,7 +74,6 @@ static int smsc_i2c_probe(struct i2c_client *i2c, NULL, NULL, &i2c->dev); #endif -err: return ret; } diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index fb8f9e8b75df..94c7cc02fdab 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -23,6 +23,27 @@ #include <linux/regulator/consumer.h> #include "stmpe.h" +/** + * struct stmpe_platform_data - STMPE platform data + * @id: device id to distinguish between multiple STMPEs on the same board + * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) + * @irq_trigger: IRQ trigger to use for the interrupt to the host + * @autosleep: bool to enable/disable stmpe autosleep + * @autosleep_timeout: inactivity timeout in milliseconds for autosleep + * @irq_over_gpio: true if gpio is used to get irq + * @irq_gpio: gpio number over which irq will be requested (significant only if + * irq_over_gpio is true) + */ +struct stmpe_platform_data { + int id; + unsigned int blocks; + unsigned int irq_trigger; + bool autosleep; + bool irq_over_gpio; + int irq_gpio; + int autosleep_timeout; +}; + static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks) { return stmpe->variant->enable(stmpe, blocks, true); @@ -1187,24 +1208,19 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata, /* Called from client specific probe routines */ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) { - struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev); + struct stmpe_platform_data *pdata; struct device_node *np = ci->dev->of_node; struct stmpe *stmpe; int ret; - if (!pdata) { - if (!np) - return -EINVAL; - - pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; + pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; - stmpe_of_probe(pdata, np); + stmpe_of_probe(pdata, np); - if (of_find_property(np, "interrupts", NULL) == NULL) - ci->irq = -1; - } + if (of_find_property(np, "interrupts", NULL) == NULL) + ci->irq = -1; stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL); if (!stmpe) diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index e4e4b22eebc9..c8f027b4ea4c 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -27,20 +27,6 @@ #include <linux/mfd/ti_am335x_tscadc.h> -static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg) -{ - unsigned int val; - - regmap_read(tsadc->regmap_tscadc, reg, &val); - return val; -} - -static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg, - unsigned int val) -{ - regmap_write(tsadc->regmap_tscadc, reg, val); -} - static const struct regmap_config tscadc_regmap_config = { .name = "ti_tscadc", .reg_bits = 32, @@ -48,89 +34,89 @@ static const struct regmap_config tscadc_regmap_config = { .val_bits = 32, }; -void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val) +void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tscadc, u32 val) { unsigned long flags; - spin_lock_irqsave(&tsadc->reg_lock, flags); - tsadc->reg_se_cache |= val; - if (tsadc->adc_waiting) - wake_up(&tsadc->reg_se_wait); - else if (!tsadc->adc_in_use) - tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache); + 
spin_lock_irqsave(&tscadc->reg_lock, flags); + tscadc->reg_se_cache |= val; + if (tscadc->adc_waiting) + wake_up(&tscadc->reg_se_wait); + else if (!tscadc->adc_in_use) + regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache); - spin_unlock_irqrestore(&tsadc->reg_lock, flags); + spin_unlock_irqrestore(&tscadc->reg_lock, flags); } EXPORT_SYMBOL_GPL(am335x_tsc_se_set_cache); -static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc) +static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tscadc) { DEFINE_WAIT(wait); u32 reg; - reg = tscadc_readl(tsadc, REG_ADCFSM); + regmap_read(tscadc->regmap, REG_ADCFSM, ®); if (reg & SEQ_STATUS) { - tsadc->adc_waiting = true; - prepare_to_wait(&tsadc->reg_se_wait, &wait, + tscadc->adc_waiting = true; + prepare_to_wait(&tscadc->reg_se_wait, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&tsadc->reg_lock); + spin_unlock_irq(&tscadc->reg_lock); schedule(); - spin_lock_irq(&tsadc->reg_lock); - finish_wait(&tsadc->reg_se_wait, &wait); + spin_lock_irq(&tscadc->reg_lock); + finish_wait(&tscadc->reg_se_wait, &wait); /* * Sequencer should either be idle or * busy applying the charge step. */ - reg = tscadc_readl(tsadc, REG_ADCFSM); + regmap_read(tscadc->regmap, REG_ADCFSM, ®); WARN_ON((reg & SEQ_STATUS) && !(reg & CHARGE_STEP)); - tsadc->adc_waiting = false; + tscadc->adc_waiting = false; } - tsadc->adc_in_use = true; + tscadc->adc_in_use = true; } -void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val) +void am335x_tsc_se_set_once(struct ti_tscadc_dev *tscadc, u32 val) { - spin_lock_irq(&tsadc->reg_lock); - am335x_tscadc_need_adc(tsadc); + spin_lock_irq(&tscadc->reg_lock); + am335x_tscadc_need_adc(tscadc); - tscadc_writel(tsadc, REG_SE, val); - spin_unlock_irq(&tsadc->reg_lock); + regmap_write(tscadc->regmap, REG_SE, val); + spin_unlock_irq(&tscadc->reg_lock); } EXPORT_SYMBOL_GPL(am335x_tsc_se_set_once); -void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc) +void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tscadc) { unsigned long flags; - spin_lock_irqsave(&tsadc->reg_lock, flags); - tsadc->adc_in_use = false; - tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache); - spin_unlock_irqrestore(&tsadc->reg_lock, flags); + spin_lock_irqsave(&tscadc->reg_lock, flags); + tscadc->adc_in_use = false; + regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache); + spin_unlock_irqrestore(&tscadc->reg_lock, flags); } EXPORT_SYMBOL_GPL(am335x_tsc_se_adc_done); -void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val) +void am335x_tsc_se_clr(struct ti_tscadc_dev *tscadc, u32 val) { unsigned long flags; - spin_lock_irqsave(&tsadc->reg_lock, flags); - tsadc->reg_se_cache &= ~val; - tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache); - spin_unlock_irqrestore(&tsadc->reg_lock, flags); + spin_lock_irqsave(&tscadc->reg_lock, flags); + tscadc->reg_se_cache &= ~val; + regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache); + spin_unlock_irqrestore(&tscadc->reg_lock, flags); } EXPORT_SYMBOL_GPL(am335x_tsc_se_clr); -static void tscadc_idle_config(struct ti_tscadc_dev *config) +static void tscadc_idle_config(struct ti_tscadc_dev *tscadc) { unsigned int idleconfig; idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM | STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN; - tscadc_writel(config, REG_IDLECONFIG, idleconfig); + regmap_write(tscadc->regmap, REG_IDLECONFIG, idleconfig); } static int ti_tscadc_probe(struct platform_device *pdev) @@ -182,8 +168,7 @@ static int ti_tscadc_probe(struct platform_device *pdev) } /* Allocate memory for device */ - tscadc = 
devm_kzalloc(&pdev->dev, - sizeof(struct ti_tscadc_dev), GFP_KERNEL); + tscadc = devm_kzalloc(&pdev->dev, sizeof(*tscadc), GFP_KERNEL); if (!tscadc) { dev_err(&pdev->dev, "failed to allocate memory.\n"); return -ENOMEM; @@ -202,11 +187,11 @@ static int ti_tscadc_probe(struct platform_device *pdev) if (IS_ERR(tscadc->tscadc_base)) return PTR_ERR(tscadc->tscadc_base); - tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev, + tscadc->regmap = devm_regmap_init_mmio(&pdev->dev, tscadc->tscadc_base, &tscadc_regmap_config); - if (IS_ERR(tscadc->regmap_tscadc)) { + if (IS_ERR(tscadc->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); - err = PTR_ERR(tscadc->regmap_tscadc); + err = PTR_ERR(tscadc->regmap); goto ret; } @@ -236,11 +221,11 @@ static int ti_tscadc_probe(struct platform_device *pdev) /* TSCADC_CLKDIV needs to be configured to the value minus 1 */ tscadc->clk_div--; - tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div); + regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div); /* Set the control register bits */ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID; - tscadc_writel(tscadc, REG_CTRL, ctrl); + regmap_write(tscadc->regmap, REG_CTRL, ctrl); /* Set register bits for Idle Config Mode */ if (tsc_wires > 0) { @@ -254,7 +239,7 @@ static int ti_tscadc_probe(struct platform_device *pdev) /* Enable the TSC module enable bit */ ctrl |= CNTRLREG_TSCSSENB; - tscadc_writel(tscadc, REG_CTRL, ctrl); + regmap_write(tscadc->regmap, REG_CTRL, ctrl); tscadc->used_cells = 0; tscadc->tsc_cell = -1; @@ -300,7 +285,7 @@ static int ti_tscadc_remove(struct platform_device *pdev) { struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev); - tscadc_writel(tscadc, REG_SE, 0x00); + regmap_write(tscadc->regmap, REG_SE, 0x00); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -310,51 +295,43 @@ static int ti_tscadc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tscadc_suspend(struct device *dev) +static int __maybe_unused tscadc_suspend(struct device *dev) { - struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev); + struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev); - tscadc_writel(tscadc_dev, REG_SE, 0x00); + regmap_write(tscadc->regmap, REG_SE, 0x00); pm_runtime_put_sync(dev); return 0; } -static int tscadc_resume(struct device *dev) +static int __maybe_unused tscadc_resume(struct device *dev) { - struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev); + struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev); u32 ctrl; pm_runtime_get_sync(dev); /* context restore */ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID; - tscadc_writel(tscadc_dev, REG_CTRL, ctrl); + regmap_write(tscadc->regmap, REG_CTRL, ctrl); - if (tscadc_dev->tsc_cell != -1) { - if (tscadc_dev->tsc_wires == 5) + if (tscadc->tsc_cell != -1) { + if (tscadc->tsc_wires == 5) ctrl |= CNTRLREG_5WIRE | CNTRLREG_TSCENB; else ctrl |= CNTRLREG_4WIRE | CNTRLREG_TSCENB; - tscadc_idle_config(tscadc_dev); + tscadc_idle_config(tscadc); } ctrl |= CNTRLREG_TSCSSENB; - tscadc_writel(tscadc_dev, REG_CTRL, ctrl); + regmap_write(tscadc->regmap, REG_CTRL, ctrl); - tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div); + regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div); return 0; } -static const struct dev_pm_ops tscadc_pm_ops = { - .suspend = tscadc_suspend, - .resume = tscadc_resume, -}; -#define TSCADC_PM_OPS (&tscadc_pm_ops) -#else -#define TSCADC_PM_OPS NULL -#endif +static SIMPLE_DEV_PM_OPS(tscadc_pm_ops, tscadc_suspend, tscadc_resume); static const struct of_device_id 
ti_tscadc_dt_ids[] = { { .compatible = "ti,am3359-tscadc", }, @@ -365,7 +342,7 @@ MODULE_DEVICE_TABLE(of, ti_tscadc_dt_ids); static struct platform_driver ti_tscadc_driver = { .driver = { .name = "ti_am3359-tscadc", - .pm = TSCADC_PM_OPS, + .pm = &tscadc_pm_ops, .of_match_table = ti_tscadc_dt_ids, }, .probe = ti_tscadc_probe, diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c index 40beb2f4350c..1f308c4e3694 100644 --- a/drivers/mfd/tps6507x.c +++ b/drivers/mfd/tps6507x.c @@ -105,8 +105,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c, } static const struct i2c_device_id tps6507x_i2c_id[] = { - { "tps6507x", 0 }, - { } + { "tps6507x", 0 }, + { } }; MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id); diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 831696ee2472..a49d3db6d936 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -622,11 +622,8 @@ add_numbered_child(unsigned mod_no, const char *name, int num, twl = &twl_priv->twl_modules[sid]; pdev = platform_device_alloc(name, num); - if (!pdev) { - dev_dbg(&twl->client->dev, "can't alloc dev\n"); - status = -ENOMEM; - goto err; - } + if (!pdev) + return ERR_PTR(-ENOMEM); pdev->dev.parent = &twl->client->dev; @@ -634,7 +631,7 @@ add_numbered_child(unsigned mod_no, const char *name, int num, status = platform_device_add_data(pdev, pdata, pdata_len); if (status < 0) { dev_dbg(&pdev->dev, "can't add platform_data\n"); - goto err; + goto put_device; } } @@ -647,21 +644,22 @@ add_numbered_child(unsigned mod_no, const char *name, int num, status = platform_device_add_resources(pdev, r, irq1 ? 2 : 1); if (status < 0) { dev_dbg(&pdev->dev, "can't add irqs\n"); - goto err; + goto put_device; } } status = platform_device_add(pdev); - if (status == 0) - device_init_wakeup(&pdev->dev, can_wakeup); + if (status) + goto put_device; + + device_init_wakeup(&pdev->dev, can_wakeup); -err: - if (status < 0) { - platform_device_put(pdev); - dev_err(&twl->client->dev, "can't add %s dev\n", name); - return ERR_PTR(status); - } return &pdev->dev; + +put_device: + platform_device_put(pdev); + dev_err(&twl->client->dev, "failed to add device %s\n", name); + return ERR_PTR(status); } static inline struct device *add_child(unsigned mod_no, const char *name, diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index 852d5874aabb..ab328ec49353 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -323,8 +323,7 @@ int twl6040_power(struct twl6040 *twl6040, int on) /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; - twl6040->sysclk = 19200000; - twl6040->mclk = 32768; + twl6040->sysclk_rate = 19200000; } else { /* already powered-down */ if (!twl6040->power_count) { @@ -352,8 +351,12 @@ int twl6040_power(struct twl6040 *twl6040, int on) regcache_cache_only(twl6040->regmap, true); regcache_mark_dirty(twl6040->regmap); - twl6040->sysclk = 0; - twl6040->mclk = 0; + twl6040->sysclk_rate = 0; + + if (twl6040->pll == TWL6040_SYSCLK_SEL_HPPLL) { + clk_disable_unprepare(twl6040->mclk); + twl6040->mclk_rate = 0; + } clk_disable_unprepare(twl6040->clk32k); } @@ -377,15 +380,15 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, /* Force full reconfiguration when switching between PLL */ if (pll_id != twl6040->pll) { - twl6040->sysclk = 0; - twl6040->mclk = 0; + twl6040->sysclk_rate = 0; + twl6040->mclk_rate = 0; } switch (pll_id) { case TWL6040_SYSCLK_SEL_LPPLL: /* low-power PLL divider */ /* Change the sysclk configuration only if it has been changed */ - if (twl6040->sysclk != 
freq_out) { + if (twl6040->sysclk_rate != freq_out) { switch (freq_out) { case 17640000: lppllctl |= TWL6040_LPLLFIN; @@ -427,6 +430,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, ret = -EINVAL; goto pll_out; } + + clk_disable_unprepare(twl6040->mclk); break; case TWL6040_SYSCLK_SEL_HPPLL: /* high-performance PLL can provide only 19.2 MHz */ @@ -437,7 +442,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, goto pll_out; } - if (twl6040->mclk != freq_in) { + if (twl6040->mclk_rate != freq_in) { hppllctl &= ~TWL6040_MCLK_MSK; switch (freq_in) { @@ -468,6 +473,9 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, goto pll_out; } + /* When switching to HPPLL, enable the mclk first */ + if (pll_id != twl6040->pll) + clk_prepare_enable(twl6040->mclk); /* * enable clock slicer to ensure input waveform is * square @@ -483,6 +491,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, lppllctl &= ~TWL6040_LPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); + + twl6040->mclk_rate = freq_in; } break; default: @@ -491,8 +501,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, goto pll_out; } - twl6040->sysclk = freq_out; - twl6040->mclk = freq_in; + twl6040->sysclk_rate = freq_out; twl6040->pll = pll_id; pll_out: @@ -512,7 +521,7 @@ EXPORT_SYMBOL(twl6040_get_pll); unsigned int twl6040_get_sysclk(struct twl6040 *twl6040) { - return twl6040->sysclk; + return twl6040->sysclk_rate; } EXPORT_SYMBOL(twl6040_get_sysclk); @@ -655,10 +664,18 @@ static int twl6040_probe(struct i2c_client *client, if (IS_ERR(twl6040->clk32k)) { if (PTR_ERR(twl6040->clk32k) == -EPROBE_DEFER) return -EPROBE_DEFER; - dev_info(&client->dev, "clk32k is not handled\n"); + dev_dbg(&client->dev, "clk32k is not handled\n"); twl6040->clk32k = NULL; } + twl6040->mclk = devm_clk_get(&client->dev, "mclk"); + if (IS_ERR(twl6040->mclk)) { + if (PTR_ERR(twl6040->mclk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(&client->dev, "mclk is not handled\n"); + twl6040->mclk = NULL; + } + twl6040->supplies[0].supply = "vio"; twl6040->supplies[1].supply = "v2v1"; ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES, diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 8756d06e2bb8..b75cf830d08a 100644 --- a/drivers/misc/cxl/Kconfig +++ b/drivers/misc/cxl/Kconfig @@ -7,11 +7,7 @@ config CXL_BASE default n select PPC_COPRO_BASE -config CXL_KERNEL_API - bool - default n - -config CXL_EEH +config CXL_AFU_DRIVER_OPS bool default n @@ -19,8 +15,7 @@ config CXL tristate "Support for IBM Coherent Accelerators (CXL)" depends on PPC_POWERNV && PCI_MSI && EEH select CXL_BASE - select CXL_KERNEL_API - select CXL_EEH + select CXL_AFU_DRIVER_OPS default m help Select this option to enable driver support for IBM Coherent @@ -33,3 +28,11 @@ config CXL CAPI adapters are found in POWER8 based systems. If unsure, say N. + +config CXL_BIMODAL + bool "Support for bi-modal CAPI cards" + depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m + default y + help + Select this option to enable support for bi-modal CAPI cards, such as + the Mellanox CX-4. 
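The CXL_AFU_DRIVER_OPS symbol introduced above gates a new in-kernel API that lets an AFU driver feed its own events into a context's read()/poll() stream; the api.c and file.c hunks later in this series add cxl_set_priv(), cxl_get_priv(), cxl_set_driver_ops(), cxl_context_events_pending() and the fetch_event/event_delivered callbacks it relies on. The following is a minimal sketch of how an AFU driver might wire this up; struct my_afu, its one-slot pending "queue" and the my_* names are illustrative only and are not part of this series:

/*
 * Illustrative sketch (not from this patch set): an AFU driver hooking
 * into the CXL_AFU_DRIVER_OPS event interface. Only the cxl_* calls and
 * the two callbacks come from the API added by this series.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <misc/cxl.h>

struct my_afu {					/* hypothetical driver state */
	struct cxl_context *ctx;
	struct cxl_event_afu_driver_reserved *pending;	/* one-slot queue */
};

/* Called from afu_read() to obtain the payload for the next event */
static struct cxl_event_afu_driver_reserved *
my_fetch_event(struct cxl_context *ctx)
{
	struct my_afu *afu = cxl_get_priv(ctx);	/* stored via cxl_set_priv() */

	return afu->pending;
}

/* Called once the event has been copied to userspace, or delivery failed */
static void my_event_delivered(struct cxl_context *ctx,
			       struct cxl_event_afu_driver_reserved *event,
			       int rc)
{
	if (rc)		/* 0 on success, -EINVAL/-EFAULT on failure */
		pr_warn("AFU event delivery failed: %d\n", rc);
	kfree(event);
}

static struct cxl_afu_driver_ops my_afu_driver_ops = {
	.fetch_event	 = my_fetch_event,
	.event_delivered = my_event_delivered,
};

static void my_afu_init_events(struct my_afu *afu)
{
	cxl_set_priv(afu->ctx, afu);
	cxl_set_driver_ops(afu->ctx, &my_afu_driver_ops);
}

static void my_afu_post_event(struct my_afu *afu,
			      struct cxl_event_afu_driver_reserved *ev)
{
	afu->pending = ev;
	/* Bumps afu_driver_events and wakes poll()/read() waiters */
	cxl_context_events_pending(afu->ctx, 1);
}

With this in place, afu_read() tags the event CXL_EVENT_AFU_DRIVER, appends ev->data_size bytes from ev->data after the event header, and reports the outcome back through event_delivered(), as the file.c hunk at the end of this series shows.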
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index 8a55c1aa11aa..56e9a4732ef0 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile @@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror cxl-y += main.o file.o irq.o fault.o native.o cxl-y += context.o sysfs.o debugfs.o pci.o trace.o -cxl-y += vphb.o api.o +cxl-y += vphb.o phb.o api.o cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o obj-$(CONFIG_CXL) += cxl.o obj-$(CONFIG_CXL_BASE) += base.o diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 6d228ccd884d..f3d34b941f85 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -13,6 +13,8 @@ #include <linux/file.h> #include <misc/cxl.h> #include <linux/fs.h> +#include <asm/pnv-pci.h> +#include <linux/msi.h> #include "cxl.h" @@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) int rc; afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return ERR_CAST(afu); ctx = cxl_context_alloc(); if (IS_ERR(ctx)) { @@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) return 0; } +int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) +{ + if (*ctx == NULL || *afu_irq == 0) { + *afu_irq = 1; + *ctx = cxl_get_context(pdev); + } else { + (*afu_irq)++; + if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) { + *ctx = list_next_entry(*ctx, extra_irq_contexts); + *afu_irq = 1; + } + } + return cxl_find_afu_irq(*ctx, *afu_irq); +} +/* Exported via cxl_base */ + +int cxl_set_priv(struct cxl_context *ctx, void *priv) +{ + if (!ctx) + return -EINVAL; + + ctx->priv = priv; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_set_priv); + +void *cxl_get_priv(struct cxl_context *ctx) +{ + if (!ctx) + return ERR_PTR(-EINVAL); + + return ctx->priv; +} +EXPORT_SYMBOL_GPL(cxl_get_priv); + int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) { int res; @@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) if (num == 0) num = ctx->afu->pp_irqs; res = afu_allocate_irqs(ctx, num); - if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) { + if (res) + return res; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) { /* In a guest, the PSL interrupt is not multiplexed. 
It was * allocated above, and we need to set its handler */ @@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) if (hwirq) cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); } + + if (ctx->status == STARTED) { + if (cxl_ops->update_ivtes) + cxl_ops->update_ivtes(ctx); + else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n"); + } + return res; } EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); @@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file) } EXPORT_SYMBOL_GPL(cxl_fops_get_context); +void cxl_set_driver_ops(struct cxl_context *ctx, + struct cxl_afu_driver_ops *ops) +{ + WARN_ON(!ops->fetch_event || !ops->event_delivered); + atomic_set(&ctx->afu_driver_events, 0); + ctx->afu_driver_ops = ops; +} +EXPORT_SYMBOL_GPL(cxl_set_driver_ops); + +void cxl_context_events_pending(struct cxl_context *ctx, + unsigned int new_events) +{ + atomic_add(new_events, &ctx->afu_driver_events); + wake_up_all(&ctx->wq); +} +EXPORT_SYMBOL_GPL(cxl_context_events_pending); + int cxl_start_work(struct cxl_context *ctx, struct cxl_ioctl_start_work *work) { @@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) { struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); } EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); + +int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; + + if (irqs > afu->adapter->user_irqs) + return -EINVAL; + + /* Limit user_irqs to prevent the user increasing this via sysfs */ + afu->adapter->user_irqs = irqs; + afu->irqs_max = irqs; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process); + +int cxl_get_max_irqs_per_process(struct pci_dev *dev) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; + + return afu->irqs_max; +} +EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process); + +/* + * This is a special interrupt allocation routine called from the PHB's MSI + * setup function. When capi interrupts are allocated in this manner they must + * still be associated with a running context, but since the MSI APIs have no + * way to specify this we use the default context associated with the device. + * + * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU + * interrupt number, so in order to overcome this their driver informs us of + * the restriction by setting the maximum interrupts per context, and we + * allocate additional contexts as necessary so that we can keep the AFU + * interrupt number within the supported range. 
+ */ +int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + struct cxl_context *ctx, *new_ctx, *default_ctx; + int remaining; + int rc; + + ctx = default_ctx = cxl_get_context(pdev); + if (WARN_ON(!default_ctx)) + return -ENODEV; + + remaining = nvec; + while (remaining > 0) { + rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max)); + if (rc) { + pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev)); + return rc; + } + remaining -= ctx->afu->irqs_max; + + if (ctx != default_ctx && default_ctx->status == STARTED) { + WARN_ON(cxl_start_context(ctx, + be64_to_cpu(default_ctx->elem->common.wed), + NULL)); + } + + if (remaining > 0) { + new_ctx = cxl_dev_context_init(pdev); + if (!new_ctx) { + pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev)); + return -ENOSPC; + } + list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts); + ctx = new_ctx; + } + } + + return 0; +} +/* Exported via cxl_base */ + +void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct cxl_context *ctx, *pos, *tmp; + + ctx = cxl_get_context(pdev); + if (WARN_ON(!ctx)) + return; + + cxl_free_afu_irqs(ctx); + list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) { + cxl_stop_context(pos); + cxl_free_afu_irqs(pos); + list_del(&pos->extra_irq_contexts); + cxl_release_context(pos); + } +} +/* Exported via cxl_base */ diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c index 9b90ec6c07cd..cd54ce6f6230 100644 --- a/drivers/misc/cxl/base.c +++ b/drivers/misc/cxl/base.c @@ -54,6 +54,19 @@ static inline void cxl_calls_put(struct cxl_calls *calls) { } #endif /* CONFIG_CXL_MODULE */ +/* AFU refcount management */ +struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) +{ + return (get_device(&afu->dev) == NULL) ? NULL : afu; +} +EXPORT_SYMBOL_GPL(cxl_afu_get); + +void cxl_afu_put(struct cxl_afu *afu) +{ + put_device(&afu->dev); +} +EXPORT_SYMBOL_GPL(cxl_afu_put); + void cxl_slbia(struct mm_struct *mm) { struct cxl_calls *calls; @@ -93,9 +106,92 @@ int cxl_update_properties(struct device_node *dn, } EXPORT_SYMBOL_GPL(cxl_update_properties); +/* + * API calls into the driver that may be called from the PHB code and must be + * built in. 
+ */ +bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) +{ + bool ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return false; + + ret = calls->cxl_pci_associate_default_context(dev, afu); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context); + +void cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return; + + calls->cxl_pci_disable_device(dev); + + cxl_calls_put(calls); +} +EXPORT_SYMBOL_GPL(cxl_pci_disable_device); + +int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) +{ + int ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return -EBUSY; + + ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq); + +int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return false; + + ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs); + +void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return; + + calls->cxl_cx4_teardown_msi_irqs(pdev); + + cxl_calls_put(calls); +} +EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs); + static int __init cxl_base_init(void) { - struct device_node *np = NULL; + struct device_node *np; struct platform_device *dev; int count = 0; @@ -105,8 +201,7 @@ static int __init cxl_base_init(void) if (cpu_has_feature(CPU_FTR_HVMODE)) return 0; - while ((np = of_find_compatible_node(np, NULL, - "ibm,coherent-platform-facility"))) { + for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") { dev = of_platform_device_create(np, NULL, NULL); if (dev) count++; @@ -114,5 +209,4 @@ static int __init cxl_base_init(void) pr_devel("Found %d cxl device(s)\n", count); return 0; } - -module_init(cxl_base_init); +device_initcall(cxl_base_init); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 26d206b1d08c..bdee9a01ef35 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -67,6 +67,9 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, ctx->pending_fault = false; ctx->pending_afu_err = false; + INIT_LIST_HEAD(&ctx->irq_names); + INIT_LIST_HEAD(&ctx->extra_irq_contexts); + /* * When we have to destroy all contexts in cxl_context_detach_all() we * end up with afu_release_irqs() called from inside a @@ -87,7 +90,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, */ mutex_lock(&afu->contexts_lock); idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, + i = idr_alloc(&ctx->afu->contexts_idr, ctx, + ctx->afu->adapter->native->sl_ops->min_pe, ctx->afu->num_procs, GFP_NOWAIT); idr_preload_end(); mutex_unlock(&afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 4fe50788ff45..de090533f18c 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -24,6 +24,7 @@ #include <asm/reg.h> #include <misc/cxl-base.h> +#include <misc/cxl.h> #include <uapi/misc/cxl.h> extern uint cxl_verbose; @@ -34,7 +35,7 @@ extern uint cxl_verbose; * Bump version each time a user API change is made, whether it is * backwards compatible or not.
*/ -#define CXL_API_VERSION 2 +#define CXL_API_VERSION 3 #define CXL_API_VERSION_COMPATIBLE 1 /* @@ -81,6 +82,7 @@ static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8}; static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0}; /* 0x00C0:7EFF Implementation dependent area */ +/* PSL registers */ static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100}; static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108}; static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110}; @@ -91,6 +93,11 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148}; static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150}; static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158}; static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170}; +/* XSL registers (Mellanox CX4) */ +static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100}; +static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108}; +static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158}; +static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168}; /* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ /* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ @@ -182,6 +189,18 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_ID_An_F (1ull << (63-31)) #define CXL_PSL_ID_An_L (1ull << (63-30)) +/****** CXL_PSL_SERR_An ****************************************************/ +#define CXL_PSL_SERR_An_afuto (1ull << (63-0)) +#define CXL_PSL_SERR_An_afudis (1ull << (63-1)) +#define CXL_PSL_SERR_An_afuov (1ull << (63-2)) +#define CXL_PSL_SERR_An_badsrc (1ull << (63-3)) +#define CXL_PSL_SERR_An_badctx (1ull << (63-4)) +#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5)) +#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6)) +#define CXL_PSL_SERR_An_afupar (1ull << (63-7)) +#define CXL_PSL_SERR_An_afudup (1ull << (63-8)) +#define CXL_PSL_SERR_An_AE (1ull << (63-30)) + /****** CXL_PSL_SCNTL_An ****************************************************/ #define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15)) /* Programming Modes: */ @@ -421,18 +440,6 @@ struct cxl_afu { bool enabled; }; -/* AFU refcount management */ -static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) -{ - - return (get_device(&afu->dev) == NULL) ? NULL : afu; -} - -static inline void cxl_afu_put(struct cxl_afu *afu) -{ - put_device(&afu->dev); -} - struct cxl_irq_name { struct list_head list; @@ -477,6 +484,9 @@ struct cxl_context { /* Only used in PR mode */ u64 process_token; + /* driver private data */ + void *priv; + unsigned long *irq_bitmap; /* Accessed from IRQ context */ struct cxl_irq_ranges irqs; struct list_head irq_names; @@ -522,7 +532,36 @@ struct cxl_context { bool pending_fault; bool pending_afu_err; + /* Used by AFU drivers for driver specific event delivery */ + struct cxl_afu_driver_ops *afu_driver_ops; + atomic_t afu_driver_events; + struct rcu_head rcu; + + /* + * Only used when more interrupts are allocated via + * pci_enable_msix_range than are supported in the default context, to + * use additional contexts to overcome the limitation. i.e. 
Mellanox + * CX4 only: + */ + struct list_head extra_irq_contexts; +}; + +struct cxl_service_layer_ops { + int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev); + int (*afu_regs_init)(struct cxl_afu *afu); + int (*register_serr_irq)(struct cxl_afu *afu); + void (*release_serr_irq)(struct cxl_afu *afu); + void (*debugfs_add_adapter_sl_regs)(struct cxl *adapter, struct dentry *dir); + void (*debugfs_add_afu_sl_regs)(struct cxl_afu *afu, struct dentry *dir); + void (*psl_irq_dump_registers)(struct cxl_context *ctx); + void (*err_irq_dump_registers)(struct cxl *adapter); + void (*debugfs_stop_trace)(struct cxl *adapter); + void (*write_timebase_ctrl)(struct cxl *adapter); + u64 (*timebase_read)(struct cxl *adapter); + int capi_mode; + bool needs_reset_before_disable; + int min_pe; }; struct cxl_native { @@ -533,6 +572,7 @@ struct cxl_native { irq_hw_number_t err_hwirq; unsigned int err_virq; u64 ps_off; + const struct cxl_service_layer_ops *sl_ops; }; struct cxl_guest { @@ -688,9 +728,21 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, loff_t off, size_t count); +/* Internal functions wrapped in cxl_base to allow PHB to call them */ +bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); +void _cxl_pci_disable_device(struct pci_dev *dev); +int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); +int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); +void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev); struct cxl_calls { void (*cxl_slbia)(struct mm_struct *mm); + bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu); + void (*cxl_pci_disable_device)(struct pci_dev *dev); + int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); + int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type); + void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev); + struct module *owner; }; int register_cxl_calls(struct cxl_calls *calls); @@ -805,6 +857,11 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter); int cxl_afu_disable(struct cxl_afu *afu); int cxl_psl_purge(struct cxl_afu *afu); +void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir); +void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx); +void cxl_native_err_irq_dump_regs(struct cxl *adapter); void cxl_stop_trace(struct cxl *cxl); int cxl_pci_vphb_add(struct cxl_afu *afu); void cxl_pci_vphb_remove(struct cxl_afu *afu); @@ -855,6 +912,7 @@ struct cxl_backend_ops { int (*attach_process)(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr); int (*detach_process)(struct cxl_context *ctx); + void (*update_ivtes)(struct cxl_context *ctx); bool (*support_attributes)(const char *attr_name, enum cxl_attrs type); bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu); void (*release_afu)(struct device *dev); @@ -879,4 +937,7 @@ extern const struct cxl_backend_ops *cxl_ops; /* check if the given pci_dev is on the the cxl vphb bus */ bool cxl_pci_is_vphb_device(struct pci_dev *dev); + +/* decode AFU error bits in the PSL register PSL_SERR_An */ +void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); #endif diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index 5751899e0c17..ec7b8a017439 100644 
--- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c @@ -51,6 +51,19 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64); } +void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); + debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); + debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); +} + +void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC)); +} + int cxl_debugfs_adapter_add(struct cxl *adapter) { struct dentry *dir; @@ -65,13 +78,10 @@ int cxl_debugfs_adapter_add(struct cxl *adapter) return PTR_ERR(dir); adapter->debugfs = dir; - debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); - debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); - debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); - + if (adapter->native->sl_ops->debugfs_add_adapter_sl_regs) + adapter->native->sl_ops->debugfs_add_adapter_sl_regs(adapter, dir); return 0; } @@ -80,6 +90,14 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter) debugfs_remove_recursive(adapter->debugfs); } +void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir) +{ + debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); + debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); + debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); +} + int cxl_debugfs_afu_add(struct cxl_afu *afu) { struct dentry *dir; @@ -94,18 +112,15 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu) return PTR_ERR(dir); afu->debugfs = dir; - debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); - debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); - debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An)); - debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An)); debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An)); debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); + if (afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs) + afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs(afu, dir); return 0; } diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index eec468f1612f..5fb9894b157f 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -293,6 +293,17 @@ 
int afu_mmap(struct file *file, struct vm_area_struct *vm) return cxl_context_iomap(ctx, vm); } +static inline bool ctx_event_pending(struct cxl_context *ctx) +{ + if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err) + return true; + + if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) + return true; + + return false; +} + unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) { struct cxl_context *ctx = file->private_data; @@ -305,8 +316,7 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) pr_devel("afu_poll wait done pe: %i\n", ctx->pe); spin_lock_irqsave(&ctx->lock, flags); - if (ctx->pending_irq || ctx->pending_fault || - ctx->pending_afu_err) + if (ctx_event_pending(ctx)) mask |= POLLIN | POLLRDNORM; else if (ctx->status == CLOSED) /* Only error on closed when there are no further events pending @@ -319,16 +329,46 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) return mask; } -static inline int ctx_event_pending(struct cxl_context *ctx) +static ssize_t afu_driver_event_copy(struct cxl_context *ctx, + char __user *buf, + struct cxl_event *event, + struct cxl_event_afu_driver_reserved *pl) { - return (ctx->pending_irq || ctx->pending_fault || - ctx->pending_afu_err || (ctx->status == CLOSED)); + /* Check event */ + if (!pl) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Check event size */ + event->header.size += pl->data_size; + if (event->header.size > CXL_READ_MIN_SIZE) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Copy event header */ + if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + /* Copy event data */ + buf += sizeof(struct cxl_event_header); + if (copy_to_user(buf, &pl->data, pl->data_size)) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */ + return event->header.size; } ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct cxl_context *ctx = file->private_data; + struct cxl_event_afu_driver_reserved *pl = NULL; struct cxl_event event; unsigned long flags; int rc; @@ -344,7 +384,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, for (;;) { prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); - if (ctx_event_pending(ctx)) + if (ctx_event_pending(ctx) || (ctx->status == CLOSED)) break; if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { @@ -374,7 +414,12 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, memset(&event, 0, sizeof(event)); event.header.process_element = ctx->pe; event.header.size = sizeof(struct cxl_event_header); - if (ctx->pending_irq) { + if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) { + pr_devel("afu_read delivering AFU driver specific event\n"); + pl = ctx->afu_driver_ops->fetch_event(ctx); + atomic_dec(&ctx->afu_driver_events); + event.header.type = CXL_EVENT_AFU_DRIVER; + } else if (ctx->pending_irq) { pr_devel("afu_read delivering AFU interrupt\n"); event.header.size += sizeof(struct cxl_event_afu_interrupt); event.header.type = CXL_EVENT_AFU_INTERRUPT; @@ -404,6 +449,9 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, spin_unlock_irqrestore(&ctx->lock, flags); + if (event.header.type == CXL_EVENT_AFU_DRIVER) + return afu_driver_event_copy(ctx, buf, &event, 
pl); + if (copy_to_user(buf, &event, event.header.size)) return -EFAULT; return event.header.size; @@ -558,7 +606,7 @@ int __init cxl_file_init(void) * If these change we really need to update API. Either change some * flags or update API version number CXL_API_VERSION. */ - BUILD_BUG_ON(CXL_API_VERSION != 2); + BUILD_BUG_ON(CXL_API_VERSION != 3); BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64); BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8); BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8); diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c index 68dd0b7da471..c63d61e17d56 100644 --- a/drivers/misc/cxl/flash.c +++ b/drivers/misc/cxl/flash.c @@ -24,8 +24,8 @@ struct ai_header { }; static struct semaphore sem; -unsigned long *buffer[CXL_AI_MAX_ENTRIES]; -struct sg_list *le; +static unsigned long *buffer[CXL_AI_MAX_ENTRIES]; +static struct sg_list *le; static u64 continue_token; static unsigned int transfer; diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index bc8d0b9870eb..9aa58a77a24d 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -196,15 +196,18 @@ static irqreturn_t guest_slice_irq_err(int irq, void *data) { struct cxl_afu *afu = data; int rc; - u64 serr; + u64 serr, afu_error, dsisr; - WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); if (rc) { dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); return IRQ_HANDLED; } - dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); if (rc) @@ -1052,16 +1055,18 @@ static void free_adapter(struct cxl *adapter) struct irq_avail *cur; int i; - if (adapter->guest->irq_avail) { - for (i = 0; i < adapter->guest->irq_nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - kfree(cur->bitmap); + if (adapter->guest) { + if (adapter->guest->irq_avail) { + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); } - kfree(adapter->guest->irq_avail); + kfree(adapter->guest->status); + kfree(adapter->guest); } - kfree(adapter->guest->status); cxl_remove_adapter_nr(adapter); - kfree(adapter->guest); kfree(adapter); } @@ -1182,6 +1187,7 @@ const struct cxl_backend_ops cxl_guest_ops = { .ack_irq = guest_ack_irq, .attach_process = guest_attach_process, .detach_process = guest_detach_process, + .update_ivtes = NULL, .support_attributes = guest_support_attributes, .link_ok = guest_link_ok, .release_afu = guest_release_afu, diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 8def4553acba..dec60f58a767 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -260,9 +260,6 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) else alloc_count = count + 1; - /* Initialize the list head to hold irq names */ - INIT_LIST_HEAD(&ctx->irq_names); - if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, alloc_count))) return rc; @@ -374,3 +371,32 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) ctx->irq_count = 0; } + +void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr) +{ + dev_crit(&afu->dev, + "PSL Slice error received. 
Check AFU for root cause.\n"); + dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + if (serr & CXL_PSL_SERR_An_afuto) + dev_crit(&afu->dev, "AFU MMIO Timeout\n"); + if (serr & CXL_PSL_SERR_An_afudis) + dev_crit(&afu->dev, + "MMIO targeted Accelerator that was not enabled\n"); + if (serr & CXL_PSL_SERR_An_afuov) + dev_crit(&afu->dev, "AFU CTAG Overflow\n"); + if (serr & CXL_PSL_SERR_An_badsrc) + dev_crit(&afu->dev, "Bad Interrupt Source\n"); + if (serr & CXL_PSL_SERR_An_badctx) + dev_crit(&afu->dev, "Bad Context Handle\n"); + if (serr & CXL_PSL_SERR_An_llcmdis) + dev_crit(&afu->dev, "LLCMD to Disabled AFU\n"); + if (serr & CXL_PSL_SERR_An_llcmdto) + dev_crit(&afu->dev, "LLCMD Timeout to AFU\n"); + if (serr & CXL_PSL_SERR_An_afupar) + dev_crit(&afu->dev, "AFU MMIO Parity Error\n"); + if (serr & CXL_PSL_SERR_An_afudup) + dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n"); + if (serr & CXL_PSL_SERR_An_AE) + dev_crit(&afu->dev, + "AFU asserted JDONE with JERROR in AFU Directed Mode\n"); +} diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index ae68c3201156..d9be23b24aa3 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -110,6 +110,11 @@ static inline void cxl_slbia_core(struct mm_struct *mm) static struct cxl_calls cxl_calls = { .cxl_slbia = cxl_slbia_core, + .cxl_pci_associate_default_context = _cxl_pci_associate_default_context, + .cxl_pci_disable_device = _cxl_pci_disable_device, + .cxl_next_msi_hwirq = _cxl_next_msi_hwirq, + .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs, + .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs, .owner = THIS_MODULE, }; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 55d8a1459f28..3bcdaee11ba1 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -21,10 +21,10 @@ #include "cxl.h" #include "trace.h" -static int afu_control(struct cxl_afu *afu, u64 command, +static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, u64 result, u64 mask, bool enabled) { - u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + u64 AFU_Cntl; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); int rc = 0; @@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command, trace_cxl_afu_ctrl(afu, command); - cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); while ((AFU_Cntl & mask) != result) { @@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command, cpu_relax(); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); }; + + if (AFU_Cntl & CXL_AFU_Cntl_An_RA) { + /* + * Workaround for a bug in the XSL used in the Mellanox CX4 + * that fails to clear the RA bit after an AFU reset, + * preventing subsequent AFU resets from working. 
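The reworked afu_control() now takes a separate clear mask, so cxl_afu_disable() can strip the enable bit in the same read-modify-write that sets the command bits. A standalone sketch of that update step; the function name and bit positions here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the register update in afu_control(): clear the
 * requested bits, then set the command bits, in one read-modify-write. */
static uint64_t rmw_control(uint64_t reg, uint64_t command, uint64_t clear)
{
	return (reg & ~clear) | command;
}

int main(void)
{
	uint64_t enable = 1ull << 60, reset = 1ull << 59; /* example positions */
	uint64_t reg = enable;

	/* Disable: command = 0, clear = enable bit, as cxl_afu_disable() does. */
	reg = rmw_control(reg, 0, enable);
	printf("after disable: %#llx\n", (unsigned long long)reg);

	/* Reset: command = reset bit, nothing cleared. */
	reg = rmw_control(reg, reset, 0);
	printf("after reset:   %#llx\n", (unsigned long long)reg);
	return 0;
}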
+ */ + cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA); + } + pr_devel("AFU command complete: %llx\n", command); afu->enabled = enabled; out: @@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu) { pr_devel("AFU enable request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_E, + return afu_control(afu, CXL_AFU_Cntl_An_E, 0, CXL_AFU_Cntl_An_ES_Enabled, CXL_AFU_Cntl_An_ES_MASK, true); } @@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu) { pr_devel("AFU disable request\n"); - return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, + return afu_control(afu, 0, CXL_AFU_Cntl_An_E, + CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_ES_MASK, false); } @@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu) { pr_devel("AFU reset request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_RA, + return afu_control(afu, CXL_AFU_Cntl_An_RA, 0, CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, false); @@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu) unsigned spa_size; /* Work out how many pages to allocate */ - afu->native->spa_order = 0; + afu->native->spa_order = -1; do { afu->native->spa_order++; spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; @@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx) return rc; } - void cxl_assign_psn_space(struct cxl_context *ctx) { if (!ctx->afu->pp_size || ctx->master) { @@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx) return sr; } +static void update_ivtes_directed(struct cxl_context *ctx) +{ + bool need_update = (ctx->status == STARTED); + int r; + + if (need_update) { + WARN_ON(terminate_process_element(ctx)); + WARN_ON(remove_process_element(ctx)); + } + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); + ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); + } + + /* + * Theoretically we could use the update llcmd, instead of a + * terminate/remove/add (or if an atomic update was required we could + * do a suspend/update/resume), however it seems there might be issues + * with the update llcmd on some cards (including those using an XSL on + * an ASIC) so for now it's safest to go with the commands that are + * known to work. In the future if we come across a situation where the + * card may be performing transactions using the same PE while we are + * doing this update we might need to revisit this. + */ + if (need_update) + WARN_ON(add_process_element(ctx)); +} + static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) { u32 pid; - int r, result; + int result; cxl_assign_psn_space(ctx); @@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ctx->irqs.range[0] = 1; } - for (r = 0; r < CXL_IRQ_RANGES; r++) { - ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); - ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); - } + update_ivtes_directed(ctx); ctx->elem->common.amr = cpu_to_be64(amr); ctx->elem->common.wed = cpu_to_be64(wed); @@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu) cxl_sysfs_afu_m_remove(afu); cxl_chardev_afu_remove(afu); - cxl_ops->afu_reset(afu); + /* + * The CAIA section 2.2.1 indicates that the procedure for starting and + * stopping an AFU in AFU directed mode is AFU specific, which is not + * ideal since this code is generic and with one exception has no + * knowledge of the AFU. 
This is in contrast to the procedure for + * disabling a dedicated process AFU, which is documented to just + * require a reset. The architecture does indicate that both an AFU + * reset and an AFU disable should result in the AFU being disabled and + * we do both followed by a PSL purge for safety. + * + * Notably we used to have some issues with the disable sequence on PSL + * cards, which is why we ended up using this heavy weight procedure in + * the first place, however a bug was discovered that had rendered the + * disable operation ineffective, so it is conceivable that was the + * sole explanation for those difficulties. Careful regression testing + * is recommended if anyone attempts to remove or reorder these + * operations. + * + * The XSL on the Mellanox CX4 behaves a little differently from the + * PSL based cards and will time out an AFU reset if the AFU is still + * enabled. That card is special in that we do have a means to identify + * it from this code, so in that case we skip the reset and just use a + * disable/purge to avoid the timeout and corresponding noise in the + * kernel log. + */ + if (afu->adapter->native->sl_ops->needs_reset_before_disable) + cxl_ops->afu_reset(afu); cxl_afu_disable(afu); cxl_psl_purge(afu); @@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu) return cxl_chardev_d_afu_add(afu); } +static void update_ivtes_dedicated(struct cxl_context *ctx) +{ + struct cxl_afu *afu = ctx->afu; + + cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, + (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | + (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | + (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | + ((u64)ctx->irqs.offset[3] & 0xffff)); + cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) + (((u64)ctx->irqs.range[0] & 0xffff) << 48) | + (((u64)ctx->irqs.range[1] & 0xffff) << 32) | + (((u64)ctx->irqs.range[2] & 0xffff) << 16) | + ((u64)ctx->irqs.range[3] & 0xffff)); +} + static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) { struct cxl_afu *afu = ctx->afu; @@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) cxl_prefault(ctx, wed); - cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, - (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | - (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | - (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | - ((u64)ctx->irqs.offset[3] & 0xffff)); - cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) - (((u64)ctx->irqs.range[0] & 0xffff) << 48) | - (((u64)ctx->irqs.range[1] & 0xffff) << 32) | - (((u64)ctx->irqs.range[2] & 0xffff) << 16) | - ((u64)ctx->irqs.range[3] & 0xffff)); + update_ivtes_dedicated(ctx); cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); @@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel, static inline int detach_process_native_dedicated(struct cxl_context *ctx) { + /* + * The CAIA section 2.1.1 indicates that we need to do an AFU reset to + * stop the AFU in dedicated mode (we therefore do not make that + * optional like we do in the afu directed path). It does not indicate + * that we need to do an explicit disable (which should occur + * implicitly as part of the reset) or purge, but we do these as well + * to be on the safe side. 
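update_ivtes_dedicated() above packs the four 16-bit IVTE offsets (and likewise the four ranges) into single 64-bit register images. A self-contained sketch of just that packing, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Pack four 16-bit fields MSB-first, mirroring the shifts used for
 * CXL_PSL_IVTE_Offset_An / CXL_PSL_IVTE_Limit_An above. */
static uint64_t pack_ivte(const uint16_t f[4])
{
	return ((uint64_t)f[0] << 48) | ((uint64_t)f[1] << 32) |
	       ((uint64_t)f[2] << 16) |  (uint64_t)f[3];
}

int main(void)
{
	uint16_t offsets[4] = { 0x0010, 0x0020, 0x0030, 0x0040 };

	/* Prints 0x0010002000300040 */
	printf("IVTE_Offset image: 0x%016llx\n",
	       (unsigned long long)pack_ivte(offsets));
	return 0;
}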
+ * + * Notably we used to have some issues with the disable sequence + * (before the sequence was spelled out in the architecture) which is + * why we were so heavy weight in the first place, however a bug was + * discovered that had rendered the disable operation ineffective, so + * it is conceivable that was the sole explanation for those + * difficulties. Point is, we should be careful and do some regression + * testing if we ever attempt to remove any part of this procedure. + */ cxl_ops->afu_reset(ctx->afu); cxl_afu_disable(ctx->afu); cxl_psl_purge(ctx->afu); return 0; } +static void native_update_ivtes(struct cxl_context *ctx) +{ + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return update_ivtes_directed(ctx); + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) + return update_ivtes_dedicated(ctx); + WARN(1, "native_update_ivtes: Bad mode\n"); +} + static inline int detach_process_native_afu_directed(struct cxl_context *ctx) { if (!ctx->pe_inserted) @@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) return 0; } -static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, - u64 dsisr, u64 errstat) +void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx) { u64 fir1, fir2, fir_slice, serr, afu_debug; fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); - serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); - dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); - dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { + serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); + cxl_afu_decode_psl_serr(ctx->afu, serr); + } dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); +} + +static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, + u64 dsisr, u64 errstat) +{ + + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); - dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(ctx->afu->adapter); + if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers) + ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx); + + if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); + ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter); + } return cxl_ops->ack_irq(ctx, 0, errstat); } @@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx) static irqreturn_t native_slice_irq_err(int irq, void *data) { struct cxl_afu *afu = data; - u64 fir_slice, errstat, serr, afu_debug; - - WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); + u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr; + /* + * slice err interrupt is only used with full PSL (no XSL) + */ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); - dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); dev_crit(&afu->dev, 
"PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); return IRQ_HANDLED; } +void cxl_native_err_irq_dump_regs(struct cxl *adapter) +{ + u64 fir1, fir2; + + fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + + dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); +} + static irqreturn_t native_irq_err(int irq, void *data) { struct cxl *adapter = data; - u64 fir1, fir2, err_ivte; + u64 err_ivte; WARN(1, "CXL ERROR interrupt %i\n", irq); err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); - dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(adapter); - - fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + if (adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); + adapter->native->sl_ops->debugfs_stop_trace(adapter); + } - dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); + if (adapter->native->sl_ops->err_irq_dump_registers) + adapter->native->sl_ops->err_irq_dump_registers(adapter); return IRQ_HANDLED; } @@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = { .irq_wait = native_irq_wait, .attach_process = native_attach_process, .detach_process = native_detach_process, + .update_ivtes = native_update_ivtes, .support_attributes = native_support_attributes, .link_ok = cxl_adapter_link_ok, .release_afu = cxl_pci_release_afu, diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index a08fcc888a71..d152e2de8c93 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -55,6 +55,8 @@ pci_read_config_byte(dev, vsec + 0xa, dest) #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ pci_write_config_byte(dev, vsec + 0xa, val) +#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \ + pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val) #define CXL_VSEC_PROTOCOL_MASK 0xe0 #define CXL_VSEC_PROTOCOL_1024TB 0x80 #define CXL_VSEC_PROTOCOL_512TB 0x40 @@ -352,13 +354,10 @@ static u64 get_capp_unit_id(struct device_node *np) return 0; } -static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev) +static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id) { struct device_node *np; const __be32 *prop; - u64 psl_dsnctl; - u64 chipid; - u64 capp_unit_id; if (!(np = pnv_pci_get_phb_node(dev))) return -ENODEV; @@ -367,14 +366,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev np = of_get_next_parent(np); if (!np) return -ENODEV; - chipid = be32_to_cpup(prop); - capp_unit_id = get_capp_unit_id(np); + *chipid = be32_to_cpup(prop); + *capp_unit_id = get_capp_unit_id(np); of_node_put(np); - if (!capp_unit_id) { + if (!*capp_unit_id) { pr_err("cxl: invalid capp unit id\n"); return -ENODEV; } + return 0; +} + +static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 psl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */ psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang 
pulse: 256 us */ /* Tell PSL where to route data to */ @@ -393,8 +406,61 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev return 0; } +static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 xsl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + + /* Tell XSL where to route data to */ + xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5)); + xsl_dsnctl |= (capp_unit_id << (63-13)); + cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl); + + return 0; +} + +/* PSL & XSL */ +#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3)) #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) -#define _2048_250MHZ_CYCLES 1 +/* For the PSL this is a multiple for 0 < n <= 7: */ +#define PSL_2048_250MHZ_CYCLES 1 + +static void write_timebase_ctrl_psl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, + TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); +} + +/* XSL */ +#define TBSYNC_ENA (1ULL << 63) +/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */ +#define XSL_2000_CLOCKS 1 +#define XSL_4000_CLOCKS 2 +#define XSL_8000_CLOCKS 3 + +static void write_timebase_ctrl_xsl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT, + TBSYNC_ENA | + TBSYNC_CAL(3) | + TBSYNC_CNT(XSL_4000_CLOCKS)); +} + +static u64 timebase_read_psl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_PSL_Timebase); +} + +static u64 timebase_read_xsl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_XSL_Timebase); +} static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) { @@ -421,8 +487,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) * Setup PSL Timebase Control and Status register * with the recommended Timebase Sync Count value */ - cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, - TBSYNC_CNT(2 * _2048_250MHZ_CYCLES)); + adapter->native->sl_ops->write_timebase_ctrl(adapter); /* Enable PSL Timebase */ cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); @@ -435,7 +500,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) dev_info(&dev->dev, "PSL timebase can't synchronize\n"); return; } - psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase); + psl_tb = adapter->native->sl_ops->timebase_read(adapter); delta = mftb() - psl_tb; if (delta < 0) delta = -delta; @@ -445,7 +510,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) return; } -static int init_implementation_afu_regs(struct cxl_afu *afu) +static int init_implementation_afu_psl_regs(struct cxl_afu *afu) { /* read/write masks for this slice */ cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); @@ -551,36 +616,234 @@ static int setup_cxl_bars(struct pci_dev *dev) return 0; } -/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ -static int switch_card_to_cxl(struct pci_dev *dev) -{ +#ifdef CONFIG_CXL_BIMODAL + +struct cxl_switch_work { + struct pci_dev *dev; + struct work_struct work; int vsec; + int mode; +}; + +static void switch_card_to_cxl(struct work_struct *work) +{ + struct cxl_switch_work *switch_work = + container_of(work, struct cxl_switch_work, work); + struct pci_dev *dev = switch_work->dev; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_host(bus); + struct pci_dev *bridge; + struct pnv_php_slot *php_slot; + unsigned int devfn; u8 val; int rc; - dev_info(&dev->dev, "switch card to CXL\n"); + 
dev_info(&bus->dev, "cxl: Preparing for mode switch...\n"); + bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev, + bus_list); + if (!bridge) { + dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n"); + goto err_dev_put; + } - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge)); + if (!php_slot) { + dev_err(&bus->dev, "cxl: Failed to find slot hotplug " + "information. You may need to upgrade " + "skiboot. Aborting.\n"); + goto err_dev_put; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc); + goto err_dev_put; + } + devfn = dev->devfn; + + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); + + dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n"); + pci_lock_rescan_remove(); + pci_hp_remove_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + /* Switch the CXL protocol on the card */ + if (switch_work->mode == CXL_BIMODE_CXL) { + dev_info(&bus->dev, "cxl: Switching card to CXL mode\n"); + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; + rc = pnv_cxl_enable_phb_kernel_api(hose, true); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to enable kernel API" + " on real PHB, aborting\n"); + goto err_free_work; + } + } else { + dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n"); + goto err_free_work; + } + + rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc); + goto err_free_work; + } + + /* + * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states + * we must wait 100ms after this mode switch before touching PCIe config + * space. + */ + msleep(100); + + /* + * Hot reset to cause the card to come back in cxl mode. An + * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support + * in skiboot, so we use a hot reset instead. + * + * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is + * guaranteed to sit directly under the root port, and setting the reset + * state on a device directly under the root port is equivalent to doing + * it on the root port itself. 
+ */ + dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n"); + pci_set_pcie_reset_state(bridge, pcie_hot_reset); + pci_set_pcie_reset_state(bridge, pcie_deassert_reset); + + dev_dbg(&bus->dev, "cxl: Offlining slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc); + goto err_free_work; + } + + dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc); + goto err_free_work; + } + + pci_lock_rescan_remove(); + pci_hp_add_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + dev_info(&bus->dev, "cxl: CAPI mode switch completed\n"); + kfree(switch_work); + return; + +err_dev_put: + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); +err_free_work: + kfree(switch_work); +} + +int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec) +{ + struct cxl_switch_work *work; + u8 val; + int rc; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) return -ENODEV; + + if (!vsec) { + vsec = find_cxl_vsec(dev); + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } } - if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { - dev_err(&dev->dev, "failed to read current mode control: %i", rc); + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i", rc); return rc; } - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; - if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { - dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); - return rc; + + if (mode == CXL_BIMODE_PCI) { + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_info(&dev->dev, "Card is already in PCI mode\n"); + return 0; + } + /* + * TODO: Before it's safe to switch the card back to PCI mode + * we need to disable the CAPP and make sure any cachelines the + * card holds have been flushed out. Needs skiboot support. + */ + dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n"); + return -EIO; } + + if (val & CXL_VSEC_PROTOCOL_ENABLE) { + dev_info(&dev->dev, "Card is already in CXL mode\n"); + return 0; + } + + dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread " + "to switch to CXL mode\n"); + + work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + pci_dev_get(dev); + work->dev = dev; + work->vsec = vsec; + work->mode = mode; + INIT_WORK(&work->work, switch_card_to_cxl); + + schedule_work(&work->work); + /* - * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states - * we must wait 100ms after this mode switch before touching - * PCIe config space. + * We return a failure now to abort the driver init. Once the + * link has been cycled and the card is in cxl mode we will + * come back (possibly using the generic cxl driver), but + * return success as the card should then be in cxl mode. + * + * TODO: What if the card comes back in PCI mode even after + * the switch? Don't want to spin endlessly. 
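Stripped of the hotplug choreography, the protocol switch itself is a masked field update on the VSEC mode-control byte. A user-space sketch using the CXL_VSEC_PROTOCOL_MASK value shown earlier in this file's diff; the 256TB and ENABLE values below are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define CXL_VSEC_PROTOCOL_MASK    0xe0 /* from the defines above */
#define CXL_VSEC_PROTOCOL_256TB   0x20 /* assumed value, for illustration */
#define CXL_VSEC_PROTOCOL_ENABLE  0x10 /* assumed value, for illustration */

int main(void)
{
	uint8_t val = 0x80; /* e.g. card advertising 1024TB, CXL disabled */

	/* Clear the protocol field, then select 256TB mode and enable it. */
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	printf("mode control byte: %#x\n", val); /* 0x30 */
	return 0;
}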
*/ - msleep(100); + return -EBUSY; +} +EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode); + +#endif /* CONFIG_CXL_BIMODAL */ + +static int setup_cxl_protocol_area(struct pci_dev *dev) +{ + u8 val; + int rc; + int vsec = find_cxl_vsec(dev); + + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc); + return rc; + } + + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_err(&dev->dev, "Card not in CAPI mode!\n"); + return -EIO; + } + + if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) { + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB; + rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val); + if (rc) { + dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc); + return rc; + } + } return 0; } @@ -712,6 +975,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) } } + if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { + /* + * We could also check this for the dedicated process model + * since the architecture indicates it should be set to 1, but + * in that case we ignore the value and I'd rather not risk + * breaking any existing dedicated process AFUs that left it as + * 0 (not that I'm aware of any). It is clearly an error for an + * AFU directed AFU to set this to 0, and would have previously + * triggered a bug resulting in the maximum not being enforced + * at all since idr_alloc treats 0 as no maximum. + */ + dev_err(&afu->dev, "AFU does not support any processes\n"); + return -EINVAL; + } + return 0; } @@ -753,11 +1031,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu) else cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); } - reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (reg) { - if (reg & ~0xffff) - dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); - cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + if (afu->adapter->native->sl_ops->register_serr_irq) { + reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); + if (reg) { + if (reg & ~0xffff) + dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); + cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + } } reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); if (reg) { @@ -835,11 +1115,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = cxl_afu_descriptor_looks_ok(afu))) goto err1; - if ((rc = init_implementation_afu_regs(afu))) - goto err1; + if (adapter->native->sl_ops->afu_regs_init) + if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) + goto err1; - if ((rc = cxl_native_register_serr_irq(afu))) - goto err1; + if (adapter->native->sl_ops->register_serr_irq) + if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) + goto err1; if ((rc = cxl_native_register_psl_irq(afu))) goto err2; @@ -847,7 +1129,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc return 0; err2: - cxl_native_release_serr_irq(afu); + if (adapter->native->sl_ops->release_serr_irq) + adapter->native->sl_ops->release_serr_irq(afu); err1: pci_unmap_slice_regs(afu); return rc; @@ -856,7 +1139,8 @@ err1: static void pci_deconfigure_afu(struct cxl_afu *afu) { cxl_native_release_psl_irq(afu); - cxl_native_release_serr_irq(afu); + if (afu->adapter->native->sl_ops->release_serr_irq) + afu->adapter->native->sl_ops->release_serr_irq(afu); pci_unmap_slice_regs(afu); } @@ -1165,7 +1449,7 @@ static int cxl_configure_adapter(struct cxl *adapter, 
struct pci_dev *dev) if ((rc = setup_cxl_bars(dev))) return rc; - if ((rc = switch_card_to_cxl(dev))) + if ((rc = setup_cxl_protocol_area(dev))) return rc; if ((rc = cxl_update_image_control(adapter))) @@ -1177,10 +1461,13 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = sanitise_adapter_regs(adapter))) goto err; - if ((rc = init_implementation_adapter_regs(adapter, dev))) + if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) goto err; - if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))) + /* Required for devices using CAPP DMA mode, harmless for others */ + pci_set_master(dev); + + if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) goto err; /* If recovery happened, the last step is to turn on snooping. @@ -1212,6 +1499,43 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) pci_disable_device(pdev); } +static const struct cxl_service_layer_ops psl_ops = { + .adapter_regs_init = init_implementation_adapter_psl_regs, + .afu_regs_init = init_implementation_afu_psl_regs, + .register_serr_irq = cxl_native_register_serr_irq, + .release_serr_irq = cxl_native_release_serr_irq, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs, + .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs, + .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs, + .err_irq_dump_registers = cxl_native_err_irq_dump_regs, + .debugfs_stop_trace = cxl_stop_trace, + .write_timebase_ctrl = write_timebase_ctrl_psl, + .timebase_read = timebase_read_psl, + .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, + .needs_reset_before_disable = true, +}; + +static const struct cxl_service_layer_ops xsl_ops = { + .adapter_regs_init = init_implementation_adapter_xsl_regs, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs, + .write_timebase_ctrl = write_timebase_ctrl_xsl, + .timebase_read = timebase_read_xsl, + .capi_mode = OPAL_PHB_CAPI_MODE_DMA, + .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ +}; + +static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) +{ + if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { + dev_info(&adapter->dev, "Device uses an XSL\n"); + adapter->native->sl_ops = &xsl_ops; + } else { + dev_info(&adapter->dev, "Device uses a PSL\n"); + adapter->native->sl_ops = &psl_ops; + } +} + + static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) { struct cxl *adapter; @@ -1227,6 +1551,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) goto err_release; } + set_sl_ops(adapter, dev); + /* Set defaults for parameters which need to persist over * configure/reconfigure */ @@ -1280,6 +1606,67 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) device_unregister(&adapter->dev); } +#define CXL_MAX_PCIEX_PARENT 2 + +static int cxl_slot_is_switched(struct pci_dev *dev) +{ + struct device_node *np; + int depth = 0; + const __be32 *prop; + + if (!(np = pci_device_to_OF_node(dev))) { + pr_err("cxl: np = NULL\n"); + return -ENODEV; + } + of_node_get(np); + while (np) { + np = of_get_next_parent(np); + prop = of_get_property(np, "device_type", NULL); + if (!prop || strcmp((char *)prop, "pciex")) + break; + depth++; + } + of_node_put(np); + return (depth > CXL_MAX_PCIEX_PARENT); +} + +bool cxl_slot_is_supported(struct pci_dev *dev, int flags) +{ + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return false; + + if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) { + /* + * CAPP DMA mode is technically supported on regular P8, but + * will EEH if the 
card attempts to access memory < 4GB, which + * we cannot realistically avoid. We might be able to work + * around the issue, but until then return unsupported: + */ + return false; + } + + if (cxl_slot_is_switched(dev)) + return false; + + /* + * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since + * the CAPP can be connected to PHB 0, 1 or 2 on a first come first + * served basis, which is racy to check from here. If we need to + * support this in future we might need to consider having this + * function effectively reserve it ahead of time. + * + * Currently, the only user of this API is the Mellanox CX4, which is + * only supported on P8NVL due to the above mentioned limitation of + * CAPP DMA mode and therefore does not need to worry about this. If the + * issue with CAPP DMA mode is later worked around on P8 we might need + * to revisit this. + */ + + return true; +} +EXPORT_SYMBOL_GPL(cxl_slot_is_supported); + + static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cxl *adapter; @@ -1291,6 +1678,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } + if (cxl_slot_is_switched(dev)) { + dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); + return -ENODEV; + } + if (cxl_verbose) dump_cxl_config_space(dev); @@ -1311,6 +1703,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); } + if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1) + pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]); + return 0; } @@ -1381,6 +1776,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, */ for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + /* Only participate in EEH if we are on a virtual PHB */ + if (afu->phb == NULL) + return PCI_ERS_RESULT_NONE; cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c new file mode 100644 index 000000000000..0935d44c1770 --- /dev/null +++ b/drivers/misc/cxl/phb.c @@ -0,0 +1,44 @@ +/* + * Copyright 2014-2016 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/pci.h> +#include "cxl.h" + +bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) +{ + struct cxl_context *ctx; + + /* + * Allocate a context to do cxl things to. This is used for interrupts + * in the peer model using a real phb, and if we eventually do DMA ops + * in the virtual phb, we'll need a default context to attach them to. 
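The psl_ops and xsl_ops tables above (one with every hook filled in, the other with only a subset) rely on call sites NULL-checking each optional hook before invoking it. A self-contained sketch of that dispatch pattern, with hypothetical types and names:

#include <stdio.h>

struct sl_ops {
	void (*regs_init)(void);  /* mandatory in this sketch */
	void (*stop_trace)(void); /* optional: may be NULL */
};

static void psl_regs_init(void)  { puts("psl regs init"); }
static void psl_stop_trace(void) { puts("psl trace stopped"); }
static void xsl_regs_init(void)  { puts("xsl regs init"); }

static const struct sl_ops psl = { psl_regs_init, psl_stop_trace };
static const struct sl_ops xsl = { xsl_regs_init, NULL }; /* no trace hook */

static void handle_error(const struct sl_ops *ops)
{
	ops->regs_init();
	if (ops->stop_trace) /* skip hooks the service layer doesn't provide */
		ops->stop_trace();
}

int main(void)
{
	handle_error(&psl);
	handle_error(&xsl);
	return 0;
}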
+ */ + ctx = cxl_dev_context_init(dev); + if (!ctx) + return false; + dev->dev.archdata.cxl_ctx = ctx; + + return (cxl_ops->afu_check_and_enable(afu) == 0); +} +/* exported via cxl_base */ + +void _cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_context *ctx = cxl_get_context(dev); + + if (ctx) { + if (ctx->status == STARTED) { + dev_err(&dev->dev, "Default context started\n"); + return; + } + dev->dev.archdata.cxl_ctx = NULL; + cxl_release_context(ctx); + } +} +/* exported via cxl_base */ diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index cdc7723b845d..dee8def1c193 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <misc/cxl.h> +#include <asm/pnv-pci.h> #include "cxl.h" static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) @@ -44,7 +45,6 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) { struct pci_controller *phb; struct cxl_afu *afu; - struct cxl_context *ctx; phb = pci_bus_to_host(dev->bus); afu = (struct cxl_afu *)phb->private_data; @@ -57,30 +57,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) set_dma_ops(&dev->dev, &dma_direct_ops); set_dma_offset(&dev->dev, PAGE_OFFSET); - /* - * Allocate a context to do cxl things too. If we eventually do real - * DMA ops, we'll need a default context to attach them to - */ - ctx = cxl_dev_context_init(dev); - if (!ctx) - return false; - dev->dev.archdata.cxl_ctx = ctx; - - return (cxl_ops->afu_check_and_enable(afu) == 0); -} - -static void cxl_pci_disable_device(struct pci_dev *dev) -{ - struct cxl_context *ctx = cxl_get_context(dev); - - if (ctx) { - if (ctx->status == STARTED) { - dev_err(&dev->dev, "Default context started\n"); - return; - } - dev->dev.archdata.cxl_ctx = NULL; - cxl_release_context(ctx); - } + return _cxl_pci_associate_default_context(dev, afu); } static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus, @@ -197,8 +174,8 @@ static struct pci_controller_ops cxl_pci_controller_ops = { .probe_mode = cxl_pci_probe_mode, .enable_device_hook = cxl_pci_enable_device_hook, - .disable_device = cxl_pci_disable_device, - .release_device = cxl_pci_disable_device, + .disable_device = _cxl_pci_disable_device, + .release_device = _cxl_pci_disable_device, .window_alignment = cxl_pci_window_alignment, .reset_secondary_bus = cxl_pci_reset_secondary_bus, .setup_msi_irqs = cxl_setup_msi_irqs, @@ -208,20 +185,30 @@ static struct pci_controller_ops cxl_pci_controller_ops = int cxl_pci_vphb_add(struct cxl_afu *afu) { - struct pci_dev *phys_dev; - struct pci_controller *phb, *phys_phb; + struct pci_controller *phb; struct device_node *vphb_dn; struct device *parent; - if (cpu_has_feature(CPU_FTR_HVMODE)) { - phys_dev = to_pci_dev(afu->adapter->dev.parent); - phys_phb = pci_bus_to_host(phys_dev->bus); - vphb_dn = phys_phb->dn; - parent = &phys_dev->dev; - } else { - vphb_dn = afu->adapter->dev.parent->of_node; - parent = afu->adapter->dev.parent; - } + /* + * If there are no AFU configuration records we won't have anything to + * expose under the vPHB, so skip creating one, returning success since + * this is still a valid case. This will also opt us out of EEH + * handling since we won't have anything special to do if there are no + * kernel drivers attached to the vPHB, and EEH handling is not yet + * supported in the peer model. + */ + if (!afu->crs_num) + return 0; + + /* The parent device is the adapter. Reuse the device node of + * the adapter. 
+ * We don't seem to care what device node is used for the vPHB, + * but tools such as lsvpd walk up the device parents looking + * for a valid location code, so we might as well show devices + * attached to the adapter as being located on that adapter. + */ + parent = afu->adapter->dev.parent; + vphb_dn = parent->of_node; /* Alloc and setup PHB data structure */ phb = pcibios_alloc_controller(vphb_dn); @@ -272,13 +259,18 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu) pcibios_free_controller(phb); } +static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) +{ + return (phb->ops == &cxl_pcie_pci_ops); +} + bool cxl_pci_is_vphb_device(struct pci_dev *dev) { struct pci_controller *phb; phb = pci_bus_to_host(dev->bus); - return (phb->ops == &cxl_pcie_pci_ops); + return _cxl_pci_is_vphb_device(phb); } struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) @@ -287,7 +279,13 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) phb = pci_bus_to_host(dev->bus); - return (struct cxl_afu *)phb->private_data; + if (_cxl_pci_is_vphb_device(phb)) + return (struct cxl_afu *)phb->private_data; + + if (pnv_pci_on_cxl_phb(dev)) + return pnv_cxl_phb_to_afu(phb); + + return ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(cxl_pci_to_afu); diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index 4cf8f82cfca2..a70b853fa2c9 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -182,7 +182,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd) */ static int genwqe_bus_reset(struct genwqe_dev *cd) { - int bars, rc = 0; + int rc = 0; struct pci_dev *pci_dev = cd->pci_dev; void __iomem *mmio; @@ -193,8 +193,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd) cd->mmio = NULL; pci_iounmap(pci_dev, mmio); - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); /* * Firmware/BIOS might change memory mapping during bus reset. 
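The genwqe hunks that follow replace the pci_select_bars()/pci_request_selected_regions() pair with a single pci_request_mem_regions() call. As a sketch, based purely on the substitution made in these hunks, the new helper can be thought of as the old two-call sequence folded into one:

#include <linux/pci.h>

/* What the genwqe code spelled out before this patch; the new helper
 * bundles the memory-BAR selection and the region request together. */
static int genwqe_request_mem_regions_legacy(struct pci_dev *pdev,
					     const char *name)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, name);
}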
@@ -218,7 +217,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd) GENWQE_INJECT_GFIR_FATAL | GENWQE_INJECT_GFIR_INFO); - rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + rc = pci_request_mem_regions(pci_dev, genwqe_driver_name); if (rc) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, rc); @@ -1068,10 +1067,9 @@ static int genwqe_health_check_stop(struct genwqe_dev *cd) */ static int genwqe_pci_setup(struct genwqe_dev *cd) { - int err, bars; + int err; struct pci_dev *pci_dev = cd->pci_dev; - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); err = pci_enable_device_mem(pci_dev); if (err) { dev_err(&pci_dev->dev, @@ -1080,7 +1078,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) } /* Reserve PCI I/O and memory resources */ - err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + err = pci_request_mem_regions(pci_dev, genwqe_driver_name); if (err) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, err); @@ -1142,7 +1140,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) out_iounmap: pci_iounmap(pci_dev, cd->mmio); out_release_resources: - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); err_disable_device: pci_disable_device(pci_dev); err_out: @@ -1154,14 +1152,12 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) */ static void genwqe_pci_remove(struct genwqe_dev *cd) { - int bars; struct pci_dev *pci_dev = cd->pci_dev; if (cd->mmio) pci_iounmap(pci_dev, cd->mmio); - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); pci_disable_device(pci_dev); } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 10b553765ee7..48a5dd740f3b 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1801,8 +1801,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && - ((brq->data.blocks * brq->data.blksz) >= - card->ext_csd.data_tag_unit_size); + blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size; /* Argument of CMD23 */ packed_cmd_hdr[(i * 2)] = cpu_to_le32( (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | @@ -1977,8 +1976,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * When 4KB native sector is enabled, only 8 blocks * multiple read or write is allowed */ - if ((brq->data.blocks & 0x07) && - (card->ext_csd.data_sector_size == 4096)) { + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(rqc), 8)) { pr_err("%s: Transfer size is not 4KB sector size aligned\n", req->rq_disk->disk_name); mq_rq = mq->mqrq_cur; @@ -2501,12 +2500,6 @@ force_ro_fail: return ret; } -#define CID_MANFID_SANDISK 0x2 -#define CID_MANFID_TOSHIBA 0x11 -#define CID_MANFID_MICRON 0x13 -#define CID_MANFID_SAMSUNG 0x15 -#define CID_MANFID_KINGSTON 0x70 - static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 4bc48f10452f..c64266f5a399 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -332,12 +332,13 @@ int mmc_add_card(struct mmc_card *card) mmc_card_ddr52(card) ? "DDR " : "", type); } else { - pr_info("%s: new %s%s%s%s%s card at address %04x\n", + pr_info("%s: new %s%s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : (mmc_card_hs(card) ? 
"high speed " : ""), mmc_card_hs400(card) ? "HS400 " : (mmc_card_hs200(card) ? "HS200 " : ""), + mmc_card_hs400es(card) ? "Enhanced strobe " : "", mmc_card_ddr52(card) ? "DDR " : "", uhs_bus_speed_mode, type, card->rca); } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8b4dfd45433b..e55cde6d436d 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1127,6 +1127,15 @@ void mmc_set_initial_state(struct mmc_host *host) host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; host->ios.drv_type = 0; + host->ios.enhanced_strobe = false; + + /* + * Make sure we are in non-enhanced strobe mode before we + * actually enable it in ext_csd. + */ + if ((host->caps2 & MMC_CAP2_HS400_ES) && + host->ops->hs400_enhanced_strobe) + host->ops->hs400_enhanced_strobe(host, &host->ios); mmc_set_ios(host); } @@ -1925,17 +1934,15 @@ void mmc_init_erase(struct mmc_card *card) * to that size and alignment. * * For SD cards that define Allocation Unit size, limit erases to one - * Allocation Unit at a time. For MMC cards that define High Capacity - * Erase Size, whether it is switched on or not, limit to that size. - * Otherwise just have a stab at a good value. For modern cards it - * will end up being 4MiB. Note that if the value is too small, it - * can end up taking longer to erase. + * Allocation Unit at a time. + * For MMC, have a stab at ai good value and for modern cards it will + * end up being 4MiB. Note that if the value is too small, it can end + * up taking longer to erase. Also note, erase_size is already set to + * High Capacity Erase Size if available when this function is called. */ if (mmc_card_sd(card) && card->ssr.au) { card->pref_erase = card->ssr.au; card->erase_shift = ffs(card->ssr.au) - 1; - } else if (card->ext_csd.hc_erase_size) { - card->pref_erase = card->ext_csd.hc_erase_size; } else if (card->erase_size) { sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; if (sz < 128) @@ -2060,7 +2067,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, unsigned int to, unsigned int arg) { struct mmc_command cmd = {0}; - unsigned int qty = 0; + unsigned int qty = 0, busy_timeout = 0; + bool use_r1b_resp = false; unsigned long timeout; int err; @@ -2128,8 +2136,22 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_ERASE; cmd.arg = arg; - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - cmd.busy_timeout = mmc_erase_timeout(card, arg, qty); + busy_timeout = mmc_erase_timeout(card, arg, qty); + /* + * If the host controller supports busy signalling and the timeout for + * the erase operation does not exceed the max_busy_timeout, we should + * use R1B response. Or we need to prevent the host from doing hw busy + * detection, which is done by converting to a R1 response instead. 
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 9382a57a5aa4..c8451ce557ae 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -148,7 +148,8 @@ static int mmc_ios_show(struct seq_file *s, void *data) str = "mmc HS200"; break; case MMC_TIMING_MMC_HS400: - str = "mmc HS400"; + str = mmc_card_hs400es(host->card) ?
+ "mmc HS400 enhanced strobe" : "mmc HS400"; break; default: str = "invalid"; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 1be42fab1a30..98f25ffb4258 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -313,6 +313,14 @@ int mmc_of_parse(struct mmc_host *host) host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; if (of_property_read_bool(np, "mmc-hs400-1_2v")) host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; + if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe")) + host->caps2 |= MMC_CAP2_HS400_ES; + if (of_property_read_bool(np, "no-sdio")) + host->caps2 |= MMC_CAP2_NO_SDIO; + if (of_property_read_bool(np, "no-sd")) + host->caps2 |= MMC_CAP2_NO_SD; + if (of_property_read_bool(np, "no-mmc")) + host->caps2 |= MMC_CAP2_NO_MMC; host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr); if (host->dsr_req && (host->dsr & ~0xffff)) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 5d438ad3ee32..f2d185cf8a8b 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -45,6 +45,17 @@ static const unsigned int tacc_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; +static const struct mmc_fixup mmc_ext_csd_fixups[] = { + /* + * Certain Hynix eMMC 4.41 cards might get broken when HPI feature + * is used so disable the HPI feature for such buggy cards. + */ + MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, + 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), + + END_FIXUP +}; + #define UNSTUFF_BITS(resp,start,size) \ ({ \ const int __size = size; \ @@ -235,6 +246,11 @@ static void mmc_select_card_type(struct mmc_card *card) avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V; } + if ((caps2 & MMC_CAP2_HS400_ES) && + card->ext_csd.strobe_support && + (avail_type & EXT_CSD_CARD_TYPE_HS400)) + avail_type |= EXT_CSD_CARD_TYPE_HS400ES; + card->ext_csd.hs_max_dtr = hs_max_dtr; card->ext_csd.hs200_max_dtr = hs200_max_dtr; card->mmc_avail_type = avail_type; @@ -370,6 +386,9 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) */ card->ext_csd.rev = ext_csd[EXT_CSD_REV]; + /* fixup device after ext_csd revision field is updated */ + mmc_fixup_device(card, mmc_ext_csd_fixups); + card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; @@ -386,6 +405,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) mmc_card_set_blockaddr(card); } + card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT]; card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; mmc_select_card_type(card); @@ -500,7 +520,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->cid.year += 16; /* check whether the eMMC card supports BKOPS */ - if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { + if (!mmc_card_broken_hpi(card) && + ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { card->ext_csd.bkops = 1; card->ext_csd.man_bkops_en = (ext_csd[EXT_CSD_BKOPS_EN] & @@ -513,7 +534,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) } /* check whether the eMMC card supports HPI */ - if (!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) { + if (!mmc_card_broken_hpi(card) && + !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) { card->ext_csd.hpi = 1; if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; @@ -727,6 +749,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", MMC_DEV_ATTR(enhanced_area_size, "%u\n", 
card->ext_csd.enhanced_area_size); MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); +MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); static ssize_t mmc_fwrev_show(struct device *dev, struct device_attribute *attr, @@ -744,6 +767,22 @@ static ssize_t mmc_fwrev_show(struct device *dev, static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL); +static ssize_t mmc_dsr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mmc_card *card = mmc_dev_to_card(dev); + struct mmc_host *host = card->host; + + if (card->csd.dsr_imp && host->dsr_req) + return sprintf(buf, "0x%x\n", host->dsr); + else + /* return default DSR value */ + return sprintf(buf, "0x%x\n", 0x404); +} + +static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL); + static struct attribute *mmc_std_attrs[] = { &dev_attr_cid.attr, &dev_attr_csd.attr, @@ -762,6 +801,8 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_enhanced_area_size.attr, &dev_attr_raw_rpmb_size_mult.attr, &dev_attr_rel_sectors.attr, + &dev_attr_ocr.attr, + &dev_attr_dsr.attr, NULL, }; ATTRIBUTE_GROUPS(mmc_std); @@ -959,6 +1000,19 @@ static int mmc_select_bus_width(struct mmc_card *card) return err; } +/* Caller must hold re-tuning */ +static int mmc_switch_status(struct mmc_card *card) +{ + u32 status; + int err; + + err = mmc_send_status(card, &status); + if (err) + return err; + + return mmc_switch_status_error(card->host, status); +} + /* * Switch to the high-speed mode */ @@ -969,9 +1023,11 @@ static int mmc_select_hs(struct mmc_card *card) err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, card->ext_csd.generic_cmd6_time, - true, true, true); - if (!err) + true, false, true); + if (!err) { mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + err = mmc_switch_status(card); + } return err; } @@ -1047,23 +1103,9 @@ static int mmc_select_hs_ddr(struct mmc_card *card) return err; } -/* Caller must hold re-tuning */ -static int mmc_switch_status(struct mmc_card *card) -{ - u32 status; - int err; - - err = mmc_send_status(card, &status); - if (err) - return err; - - return mmc_switch_status_error(card->host, status); -} - static int mmc_select_hs400(struct mmc_card *card) { struct mmc_host *host = card->host; - bool send_status = true; unsigned int max_dtr; int err = 0; u8 val; @@ -1075,19 +1117,12 @@ static int mmc_select_hs400(struct mmc_card *card) host->ios.bus_width == MMC_BUS_WIDTH_8)) return 0; - if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) - send_status = false; - - /* Reduce frequency to HS frequency */ - max_dtr = card->ext_csd.hs_max_dtr; - mmc_set_clock(host, max_dtr); - /* Switch card to HS mode */ val = EXT_CSD_TIMING_HS; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, send_status, true); + true, false, true); if (err) { pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", mmc_hostname(host), err); @@ -1097,11 +1132,13 @@ static int mmc_select_hs400(struct mmc_card *card) /* Set host controller to HS timing */ mmc_set_timing(card->host, MMC_TIMING_MMC_HS); - if (!send_status) { - err = mmc_switch_status(card); - if (err) - goto out_err; - } + /* Reduce frequency to HS frequency */ + max_dtr = card->ext_csd.hs_max_dtr; + mmc_set_clock(host, max_dtr); + + err = mmc_switch_status(card); + if (err) + goto out_err; /* Switch card to DDR */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -1120,7 +1157,7 @@ static int mmc_select_hs400(struct 
mmc_card *card) err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, send_status, true); + true, false, true); if (err) { pr_err("%s: switch to hs400 failed, err:%d\n", mmc_hostname(host), err); @@ -1131,11 +1168,9 @@ static int mmc_select_hs400(struct mmc_card *card) mmc_set_timing(host, MMC_TIMING_MMC_HS400); mmc_set_bus_speed(card); - if (!send_status) { - err = mmc_switch_status(card); - if (err) - goto out_err; - } + err = mmc_switch_status(card); + if (err) + goto out_err; return 0; @@ -1153,14 +1188,10 @@ int mmc_hs200_to_hs400(struct mmc_card *card) int mmc_hs400_to_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; - bool send_status = true; unsigned int max_dtr; int err; u8 val; - if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) - send_status = false; - /* Reduce frequency to HS */ max_dtr = card->ext_csd.hs_max_dtr; mmc_set_clock(host, max_dtr); @@ -1169,49 +1200,43 @@ int mmc_hs400_to_hs200(struct mmc_card *card) val = EXT_CSD_TIMING_HS; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, send_status, true); + true, false, true); if (err) goto out_err; mmc_set_timing(host, MMC_TIMING_MMC_DDR52); - if (!send_status) { - err = mmc_switch_status(card); - if (err) - goto out_err; - } + err = mmc_switch_status(card); + if (err) + goto out_err; /* Switch HS DDR to HS */ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time, - true, send_status, true); + true, false, true); if (err) goto out_err; mmc_set_timing(host, MMC_TIMING_MMC_HS); - if (!send_status) { - err = mmc_switch_status(card); - if (err) - goto out_err; - } + err = mmc_switch_status(card); + if (err) + goto out_err; /* Switch HS to HS200 */ val = EXT_CSD_TIMING_HS200 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, - val, card->ext_csd.generic_cmd6_time, true, - send_status, true); + val, card->ext_csd.generic_cmd6_time, + true, false, true); if (err) goto out_err; mmc_set_timing(host, MMC_TIMING_MMC_HS200); - if (!send_status) { - err = mmc_switch_status(card); - if (err) - goto out_err; - } + err = mmc_switch_status(card); + if (err) + goto out_err; mmc_set_bus_speed(card); @@ -1223,6 +1248,78 @@ out_err: return err; } +static int mmc_select_hs400es(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + int err = 0; + u8 val; + + if (!(host->caps & MMC_CAP_8_BIT_DATA)) { + err = -ENOTSUPP; + goto out_err; + } + + err = mmc_select_bus_width(card); + if (err < 0) + goto out_err; + + /* Switch card to HS mode */ + err = mmc_select_hs(card); + if (err) { + pr_err("%s: switch to high-speed failed, err:%d\n", + mmc_hostname(host), err); + goto out_err; + } + + err = mmc_switch_status(card); + if (err) + goto out_err; + + /* Switch card to DDR with strobe bit */ + val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE; + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BUS_WIDTH, + val, + card->ext_csd.generic_cmd6_time); + if (err) { + pr_err("%s: switch to bus width for hs400es failed, err:%d\n", + mmc_hostname(host), err); + goto out_err; + } + + /* Switch card to HS400 */ + val = EXT_CSD_TIMING_HS400 | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, val, + card->ext_csd.generic_cmd6_time, + true, false, true); + if (err) { + pr_err("%s: switch to hs400es failed, 
err:%d\n", + mmc_hostname(host), err); + goto out_err; + } + + /* Set host controller to HS400 timing and frequency */ + mmc_set_timing(host, MMC_TIMING_MMC_HS400); + + /* Controller enable enhanced strobe function */ + host->ios.enhanced_strobe = true; + if (host->ops->hs400_enhanced_strobe) + host->ops->hs400_enhanced_strobe(host, &host->ios); + + err = mmc_switch_status(card); + if (err) + goto out_err; + + return 0; + +out_err: + pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), + __func__, err); + return err; +} + static void mmc_select_driver_type(struct mmc_card *card) { int card_drv_type, drive_strength, drv_type; @@ -1250,7 +1347,6 @@ static void mmc_select_driver_type(struct mmc_card *card) static int mmc_select_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; - bool send_status = true; unsigned int old_timing, old_signal_voltage; int err = -EINVAL; u8 val; @@ -1268,34 +1364,30 @@ static int mmc_select_hs200(struct mmc_card *card) mmc_select_driver_type(card); - if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) - send_status = false; - /* * Set the bus width(4 or 8) with host's support and * switch to HS200 mode if bus width is set successfully. */ err = mmc_select_bus_width(card); - if (err >= 0) { + if (err > 0) { val = EXT_CSD_TIMING_HS200 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, send_status, true); + true, false, true); if (err) goto err; old_timing = host->ios.timing; mmc_set_timing(host, MMC_TIMING_MMC_HS200); - if (!send_status) { - err = mmc_switch_status(card); - /* - * mmc_select_timing() assumes timing has not changed if - * it is a switch error. - */ - if (err == -EBADMSG) - mmc_set_timing(host, old_timing); - } + + err = mmc_switch_status(card); + /* + * mmc_select_timing() assumes timing has not changed if + * it is a switch error. + */ + if (err == -EBADMSG) + mmc_set_timing(host, old_timing); } err: if (err) { @@ -1310,7 +1402,7 @@ err: } /* - * Activate High Speed or HS200 mode if supported. + * Activate High Speed, HS200 or HS400ES mode if supported. */ static int mmc_select_timing(struct mmc_card *card) { @@ -1319,7 +1411,9 @@ static int mmc_select_timing(struct mmc_card *card) if (!mmc_can_ext_csd(card)) goto bus_speed; - if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) + err = mmc_select_hs400es(card); + else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) err = mmc_select_hs200(card); else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) err = mmc_select_hs(card); @@ -1583,7 +1677,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } else if (mmc_card_hs(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); - if (err >= 0) { + if (err > 0) { err = mmc_select_hs_ddr(card); if (err) goto free_card; @@ -1616,7 +1710,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, * If cache size is higher than 0, this indicates * the existence of cache and it can be turned on. 
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 62355bda608f..ad6e9798e949 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -480,6 +480,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, u32 status = 0; bool use_r1b_resp = use_busy_signal; bool expired = false; + bool busy = false; mmc_retune_hold(host); @@ -533,21 +534,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, timeout_ms = MMC_OPS_TIMEOUT_MS; /* Must check status to be sure of no errors. */ - timeout = jiffies + msecs_to_jiffies(timeout_ms); + timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { + /* + * Due to the possibility of being preempted after + * sending the status command, check the expiration + * time first. + */ + expired = time_after(jiffies, timeout); if (send_status) { - /* - * Due to the possibility of being preempted after - * sending the status command, check the expiration - * time first. - */ - expired = time_after(jiffies, timeout); err = __mmc_send_status(card, &status, ignore_crc); if (err) goto out; } if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) break; + if (host->ops->card_busy) { + if (!host->ops->card_busy(host)) + break; + busy = true; + } if (mmc_host_is_spi(host)) break; @@ -556,19 +562,20 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, * doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only * rely on waiting for the stated timeout to be sufficient. */ - if (!send_status) { + if (!send_status && !host->ops->card_busy) { mmc_delay(timeout_ms); goto out; } /* Timeout if the device never leaves the program state. */ - if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) { + if (expired && + (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) { pr_err("%s: Card stuck in programming state!
%s\n", mmc_hostname(host), __func__); err = -ETIMEDOUT; goto out; } - } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); + } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy); err = mmc_switch_status_error(host, status); out: diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index fad660b95809..ca9cade317c7 100644 --- a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c @@ -72,6 +72,8 @@ void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table) f->cis_vendor == (u16) SDIO_ANY_ID) && (f->cis_device == card->cis.device || f->cis_device == (u16) SDIO_ANY_ID) && + (f->ext_csd_rev == EXT_CSD_REV_ANY || + f->ext_csd_rev == card->ext_csd.rev) && rev >= f->rev_start && rev <= f->rev_end) { dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup); f->vendor_fixup(card, f->data); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index b95bd24d92f4..0123936241b0 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -675,8 +675,25 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); +MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +static ssize_t mmc_dsr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mmc_card *card = mmc_dev_to_card(dev); + struct mmc_host *host = card->host; + + if (card->csd.dsr_imp && host->dsr_req) + return sprintf(buf, "0x%x\n", host->dsr); + else + /* return default DSR value */ + return sprintf(buf, "0x%x\n", 0x404); +} + +static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL); + static struct attribute *sd_std_attrs[] = { &dev_attr_cid.attr, &dev_attr_csd.attr, @@ -690,6 +707,8 @@ static struct attribute *sd_std_attrs[] = { &dev_attr_name.attr, &dev_attr_oemid.attr, &dev_attr_serial.attr, + &dev_attr_ocr.attr, + &dev_attr_dsr.attr, NULL, }; ATTRIBUTE_GROUPS(sd_std); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 0aa484c10c0a..5274f503a39a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -122,6 +122,7 @@ config MMC_SDHCI_OF_ARASAN tristate "SDHCI OF support for the Arasan SDHCI controllers" depends on MMC_SDHCI_PLTFM depends on OF + depends on COMMON_CLK help This selects the Arasan Secure Digital Host Controller Interface (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. @@ -296,17 +297,6 @@ config MMC_SDHCI_BCM_KONA If you have a controller with this interface, say Y or M here. -config MMC_SDHCI_BCM2835 - tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" - depends on ARCH_BCM2835 - depends on MMC_SDHCI_PLTFM - select MMC_SDHCI_IO_ACCESSORS - help - This selects the BCM2835 SD/MMC controller. If you have a BCM2835 - platform with SD or MMC devices, say Y or M here. - - If unsure, say N. - config MMC_SDHCI_F_SDH30 tristate "SDHCI support for Fujitsu Semiconductor F_SDH30" depends on MMC_SDHCI_PLTFM @@ -798,3 +788,13 @@ config MMC_SDHCI_MICROCHIP_PIC32 If you have a controller with this interface, say Y or M here. If unsure, say N. +config MMC_SDHCI_BRCMSTB + tristate "Broadcom SDIO/SD/MMC support" + depends on ARCH_BRCMSTB || BMIPS_GENERIC + depends on MMC_SDHCI_PLTFM + default y + help + This selects support for the SDIO/SD/MMC Host Controller on + Broadcom STB SoCs. + + If unsure, say Y. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index af918d261ff9..e2bdaaf43184 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -71,11 +71,11 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o -obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o +obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 7e3a3247b852..da0ef1765735 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -157,7 +157,7 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) * HOLD register should be bypassed in case there is no phase shift * applied on CMD/DATA that is sent to the card. */ - if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel)) + if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->cur_slot) set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags); } diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 63c2e2ed1288..8e9d886bfcda 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -32,6 +32,12 @@ struct k3_priv { struct regmap *reg; }; +static unsigned long dw_mci_hi6220_caps[] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + 0 +}; + static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) { int ret; @@ -126,6 +132,7 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios) } static const struct dw_mci_drv_data hi6220_data = { + .caps = dw_mci_hi6220_caps, .switch_voltage = dw_mci_hi6220_switch_voltage, .set_ios = dw_mci_hi6220_set_ios, .parse_dt = dw_mci_hi6220_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index 358b0dc853b0..25eae359a5ea 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -285,9 +285,6 @@ static int dw_mci_rockchip_init(struct dw_mci *host) /* It is slot 8 on Rockchip SoCs */ host->sdio_id0 = 8; - /* It needs this quirk on all Rockchip SoCs */ - host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO; - if (of_device_is_compatible(host->dev->of_node, "rockchip,rk3288-dw-mshc")) host->bus_hz /= RK3288_CLKGEN_DIV; @@ -297,10 +294,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host) /* Common capabilities of RK3288 SoC */ static unsigned long dw_mci_rk3288_dwmmc_caps[4] = { - MMC_CAP_ERASE | MMC_CAP_CMD23, - MMC_CAP_ERASE | MMC_CAP_CMD23, - MMC_CAP_ERASE | MMC_CAP_CMD23, - MMC_CAP_ERASE | MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, }; static const struct dw_mci_drv_data rk2928_drv_data = { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 2cc6123b1df9..32380d5d4f6b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -44,11 +44,11 @@ /* Common flag combinations */ #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ SDMMC_INT_HTO | SDMMC_INT_SBE | \ - SDMMC_INT_EBE) + SDMMC_INT_EBE | SDMMC_INT_HLE) #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ - SDMMC_INT_RESP_ERR) + SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) #define DW_MCI_ERROR_FLAGS 
(DW_MCI_DATA_ERROR_FLAGS | \ - DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) + DW_MCI_CMD_ERROR_FLAGS) #define DW_MCI_SEND_STATUS 1 #define DW_MCI_RECV_STATUS 2 #define DW_MCI_DMA_THRESHOLD 16 @@ -92,7 +92,7 @@ struct idmac_desc { __le32 des1; /* Buffer sizes */ #define IDMAC_SET_BUFFER1_SIZE(d, s) \ - ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) + ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) __le32 des2; /* buffer 1 physical address */ @@ -105,6 +105,7 @@ struct idmac_desc { static bool dw_mci_reset(struct dw_mci *host); static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); static int dw_mci_card_busy(struct mmc_host *mmc); +static int dw_mci_get_cd(struct mmc_host *mmc); #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) @@ -898,23 +899,35 @@ done: mci_writel(host, FIFOTH, fifoth_val); } -static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) +static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) { unsigned int blksz = data->blksz; u32 blksz_depth, fifo_depth; u16 thld_size; - - WARN_ON(!(data->flags & MMC_DATA_READ)); + u8 enable; /* * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is * in the FIFO region, so we really shouldn't access it). */ - if (host->verid < DW_MMC_240A) + if (host->verid < DW_MMC_240A || + (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) + return; + + /* + * Card write Threshold is introduced since 2.80a + * It's used when HS400 mode is enabled. + */ + if (data->flags & MMC_DATA_WRITE && + !(host->timing != MMC_TIMING_MMC_HS400)) return; + if (data->flags & MMC_DATA_WRITE) + enable = SDMMC_CARD_WR_THR_EN; + else + enable = SDMMC_CARD_RD_THR_EN; + if (host->timing != MMC_TIMING_MMC_HS200 && - host->timing != MMC_TIMING_MMC_HS400 && host->timing != MMC_TIMING_UHS_SDR104) goto disable; @@ -930,11 +943,11 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) * Currently just choose blksz. */ thld_size = blksz; - mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); + mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); return; disable: - mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); + mci_writel(host, CDTHRCTL, 0); } static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) @@ -1005,12 +1018,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) host->sg = NULL; host->data = data; - if (data->flags & MMC_DATA_READ) { + if (data->flags & MMC_DATA_READ) host->dir_status = DW_MCI_RECV_STATUS; - dw_mci_ctrl_rd_thld(host, data); - } else { + else host->dir_status = DW_MCI_SEND_STATUS; - } + + dw_mci_ctrl_thld(host, data); if (dw_mci_submit_data_dma(host, data)) { if (host->data->flags & MMC_DATA_READ) @@ -1099,12 +1112,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; - if ((clock << div) != slot->__clk_old || force_clkinit) - dev_info(&slot->mmc->class_dev, - "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", - slot->id, host->bus_hz, clock, - div ? ((host->bus_hz / div) >> 1) : - host->bus_hz, div); + dev_info(&slot->mmc->class_dev, + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", + slot->id, host->bus_hz, clock, + div ? 
((host->bus_hz / div) >> 1) : + host->bus_hz, div); /* disable clock */ mci_writel(host, CLKENA, 0); @@ -1127,9 +1139,6 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) /* inform CIU */ mci_send_cmd(slot, sdmmc_cmd_bits, 0); - - /* keep the clock with reflecting clock dividor */ - slot->__clk_old = clock << div; } host->current_speed = clock; @@ -1253,15 +1262,15 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) * atomic, otherwise the card could be removed in between and the * request wouldn't fail until another card was inserted. */ - spin_lock_bh(&host->lock); - if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { - spin_unlock_bh(&host->lock); + if (!dw_mci_get_cd(mmc)) { mrq->cmd->error = -ENOMEDIUM; mmc_request_done(mmc, mrq); return; } + spin_lock_bh(&host->lock); + dw_mci_queue_request(host, slot, mrq); spin_unlock_bh(&host->lock); @@ -1451,8 +1460,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc) int gpio_cd = mmc_gpio_get_cd(mmc); /* Use platform get_cd function, else try onboard card detect */ - if ((mmc->caps & MMC_CAP_NEEDS_POLL) || - (mmc->caps & MMC_CAP_NONREMOVABLE)) + if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc)) present = 1; else if (gpio_cd >= 0) present = gpio_cd; @@ -1761,6 +1769,33 @@ static void dw_mci_tasklet_func(unsigned long priv) } if (cmd->data && err) { + /* + * During UHS tuning sequence, sending the stop + * command after the response CRC error would + * throw the system into a confused state + * causing all future tuning phases to report + * failure. + * + * In such case controller will move into a data + * transfer state after a response error or + * response CRC error. Let's let that finish + * before trying to send a stop, so we'll go to + * STATE_SENDING_DATA. + * + * Although letting the data transfer take place + * will waste a bit of time (we already know + * the command was bad), it can't cause any + * errors since it's possible it would have + * taken place anyway if this tasklet got + * delayed. Allowing the transfer to take place + * avoids races and keeps things simple. + */ + if ((err != -ETIMEDOUT) && + (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { + state = STATE_SENDING_DATA; + continue; + } + dw_mci_stop_dma(host); send_stop_abort(host, data); state = STATE_SENDING_STOP; @@ -1801,8 +1836,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * If all data-related interrupts don't come * within the given time in reading data state. */ - if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && - (host->dir_status == DW_MCI_RECV_STATUS)) + if (host->dir_status == DW_MCI_RECV_STATUS) dw_mci_set_drto(host); break; } @@ -1844,8 +1878,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * interrupt doesn't come within the given time. * in reading data state. 
*/ - if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && - (host->dir_status == DW_MCI_RECV_STATUS)) + if (host->dir_status == DW_MCI_RECV_STATUS) dw_mci_set_drto(host); break; } @@ -2411,8 +2444,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & SDMMC_INT_DATA_OVER) { - if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) - del_timer(&host->dto_timer); + del_timer(&host->dto_timer); mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) @@ -2474,7 +2506,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); - host->dma_ops->complete((void *)host); + if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) + host->dma_ops->complete((void *)host); } } else { pending = mci_readl(host, IDSTS); @@ -2482,7 +2515,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); - host->dma_ops->complete((void *)host); + if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) + host->dma_ops->complete((void *)host); } } @@ -2570,6 +2604,12 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) if (host->pdata->caps) mmc->caps = host->pdata->caps; + /* + * Support MMC_CAP_ERASE by default. + * It is needed to use trim/discard/erase commands. + */ + mmc->caps |= MMC_CAP_ERASE; + if (host->pdata->pm_caps) mmc->pm_caps = host->pdata->pm_caps; @@ -2616,10 +2656,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) mmc->max_seg_size = mmc->max_req_size; } - if (dw_mci_get_cd(mmc)) - set_bit(DW_MMC_CARD_PRESENT, &slot->flags); - else - clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); + dw_mci_get_cd(mmc); ret = mmc_add_host(mmc); if (ret) @@ -3006,11 +3043,8 @@ int dw_mci_probe(struct dw_mci *host) setup_timer(&host->cmd11_timer, dw_mci_cmd11_timer, (unsigned long)host); - host->quirks = host->pdata->quirks; - - if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) - setup_timer(&host->dto_timer, - dw_mci_dto_timer, (unsigned long)host); + setup_timer(&host->dto_timer, + dw_mci_dto_timer, (unsigned long)host); spin_lock_init(&host->lock); spin_lock_init(&host->irq_lock); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 1e8d8380f9cf..9e740bc232a8 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -15,6 +15,7 @@ #define _DW_MMC_H_ #define DW_MMC_240A 0x240a +#define DW_MMC_280A 0x280a #define SDMMC_CTRL 0x000 #define SDMMC_PWREN 0x004 @@ -175,7 +176,10 @@ /* Version ID register define */ #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) /* Card read threshold */ -#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define SDMMC_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define SDMMC_CARD_WR_THR_EN BIT(2) +#define SDMMC_CARD_RD_THR_EN BIT(0) +/* UHS-1 register defines */ #define SDMMC_UHS_18V BIT(0) /* All ctrl reset bits */ #define SDMMC_CTRL_ALL_RESET_FLAGS \ @@ -245,9 +249,6 @@ extern int dw_mci_resume(struct dw_mci *host); * @queue_node: List node for placing this node in the @queue list of * &struct dw_mci. * @clock: Clock rate configured by set_ios(). Protected by host->lock. - * @__clk_old: The last updated clock with reflecting clock divider. - * Keeping track of this helps us to avoid spamming the console - * with CONFIG_MMC_CLKGATE. * @flags: Random state bits associated with the slot. * @id: Number of this slot. * @sdio_id: Number of this slot in the SDIO interrupt registers. @@ -262,7 +263,6 @@ struct dw_mci_slot { struct list_head queue_node; unsigned int clock; - unsigned int __clk_old; unsigned long flags; #define DW_MMC_CARD_PRESENT 0
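Two of the dw_mmc changes above are connected: dw_mci_request() now asks dw_mci_get_cd() for the live card status instead of testing a cached DW_MMC_CARD_PRESENT bit, and dw_mci_init_slot() simply calls dw_mci_get_cd() without caching its result. The detection precedence reduces to the sketch below; cd_present() is a hypothetical condensation, and the final CDETECT fallback is assumed from the pre-existing driver rather than shown in these hunks:

/* Sketch of dw_mci_get_cd()'s precedence after this patch (illustrative). */
static int cd_present(struct mmc_host *mmc, struct dw_mci *host, int slot_id)
{
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Polled and non-removable hosts always report "present". */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
		return 1;
	/* Next preference: a card-detect GPIO, when one is wired up. */
	if (gpio_cd >= 0)
		return !!gpio_cd;
	/* Assumed fallback: the IP's CDETECT register, where 0 = present. */
	return !(mci_readl(host, CDETECT) & (1 << slot_id));
}

Dropping the cached bit closes the window the request-path comment describes: a card yanked between detection work and a request now fails immediately with -ENOMEDIUM instead of waiting for the next insertion.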
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 5642f71f8bf0..84e9afcb5c09 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -287,6 +287,11 @@ struct msdc_save_para { u32 emmc50_cfg0; }; +struct msdc_tune_para { + u32 iocon; + u32 pad_tune; +}; + struct msdc_delay_phase { u8 maxlen; u8 start; @@ -326,7 +331,10 @@ struct msdc_host { unsigned char timing; bool vqmmc_enabled; u32 hs400_ds_delay; + bool hs400_mode; /* current eMMC will run in hs400 mode */ struct msdc_save_para save_para; /* used when gate HCLK */ + struct msdc_tune_para def_tune_para; /* default tune setting */ + struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */ }; static void sdr_set_bits(void __iomem *reg, u32 bs) @@ -582,6 +590,18 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); sdr_set_bits(host->base + MSDC_INTEN, flags); + /* + * mmc_select_hs400() will drop to 50MHz and High speed mode; + * the tune result of hs200/200MHz is not suitable for 50MHz + */ + if (host->sclk <= 52000000) { + writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); + writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + } else { + writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); + writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + } + dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing); } @@ -781,7 +801,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, } if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { - msdc_reset_hw(host); + if (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) + /* + * should not clear fifo/interrupt as the tune data + * may have already come.
+ */ + msdc_reset_hw(host); if (events & MSDC_INT_RSPCRCERR) { cmd->error = -EILSEQ; host->error |= REQ_CMD_EIO; @@ -865,7 +891,11 @@ static void msdc_start_command(struct msdc_host *host, static void msdc_cmd_next(struct msdc_host *host, struct mmc_request *mrq, struct mmc_command *cmd) { - if (cmd->error || (mrq->sbc && mrq->sbc->error)) + if ((cmd->error && + !(cmd->error == -EILSEQ && + (cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) || + (mrq->sbc && mrq->sbc->error)) msdc_request_done(host, mrq); else if (cmd == mrq->sbc) msdc_start_command(host, mrq, mrq->cmd); @@ -1158,6 +1188,8 @@ static void msdc_init_hw(struct msdc_host *host) /* Configure to default data timeout */ sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); + host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); dev_dbg(host->dev, "init hardware done!"); } @@ -1296,7 +1328,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); u32 rise_delay = 0, fall_delay = 0; - struct msdc_delay_phase final_rise_delay, final_fall_delay; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int cmd_err; int i; @@ -1309,6 +1341,11 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) if (!cmd_err) rise_delay |= (1 << i); } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 10 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { @@ -1318,10 +1355,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) if (!cmd_err) fall_delay |= (1 << i); } - - final_rise_delay = get_best_delay(host, rise_delay); final_fall_delay = get_best_delay(host, fall_delay); +skip_fall: final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); @@ -1342,7 +1378,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); u32 rise_delay = 0, fall_delay = 0; - struct msdc_delay_phase final_rise_delay, final_fall_delay; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int i, ret; @@ -1355,6 +1391,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) if (!ret) rise_delay |= (1 << i); } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 10 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); @@ -1365,14 +1406,10 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) if (!ret) fall_delay |= (1 << i); } - - final_rise_delay = get_best_delay(host, rise_delay); final_fall_delay = get_best_delay(host, fall_delay); +skip_fall: final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); - /* Rising edge is more stable, prefer to use it */ - if (final_rise_delay.maxlen >= 10) - final_maxlen = final_rise_delay.maxlen; if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, 
MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); @@ -1402,16 +1439,21 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) dev_err(host->dev, "Tune response fail!\n"); return ret; } - ret = msdc_tune_data(mmc, opcode); - if (ret == -EIO) - dev_err(host->dev, "Tune data fail!\n"); + if (host->hs400_mode == false) { + ret = msdc_tune_data(mmc, opcode); + if (ret == -EIO) + dev_err(host->dev, "Tune data fail!\n"); + } + host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); return ret; } static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) { struct msdc_host *host = mmc_priv(mmc); + host->hs400_mode = true; writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); return 0; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 3d1ea5e0e549..fb3ca8296273 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1065,7 +1065,7 @@ static int mxcmci_probe(struct platform_device *pdev) if (pdata) dat3_card_detect = pdata->dat3_card_detect; - else if (!(mmc->caps & MMC_CAP_NONREMOVABLE) + else if (mmc_card_is_removable(mmc) && !of_property_read_bool(pdev->dev.of_node, "cd-gpios")) dat3_card_detect = true; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 93137483ecde..396c9b7e4121 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -38,7 +38,6 @@ struct realtek_pci_sdmmc { struct rtsx_pcr *pcr; struct mmc_host *mmc; struct mmc_request *mrq; - struct workqueue_struct *workq; #define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq" struct work_struct work; @@ -244,7 +243,7 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, stat_idx = sd_status_index(rsp_type); if (rsp_type == SD_RSP_TYPE_R1b) - timeout = 3000; + timeout = cmd->busy_timeout ? 
cmd->busy_timeout : 3000; if (cmd->opcode == SD_SWITCH_VOLTAGE) { err = rtsx_pci_write_register(pcr, SD_BUS_STAT, @@ -885,7 +884,7 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data)) host->using_cookie = sd_pre_dma_transfer(host, data, false); - queue_work(host->workq, &host->work); + schedule_work(&host->work); } static int sd_set_bus_width(struct realtek_pci_sdmmc *host, @@ -1360,7 +1359,7 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | - MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_ERASE; mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE; mmc->max_current_330 = 400; mmc->max_current_180 = 800; @@ -1404,11 +1403,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) return -ENOMEM; host = mmc_priv(mmc); - host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME); - if (!host->workq) { - mmc_free_host(mmc); - return -ENOMEM; - } host->pcr = pcr; host->mmc = mmc; host->pdev = pdev; @@ -1462,9 +1456,7 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) mmc_remove_host(mmc); host->eject = true; - flush_workqueue(host->workq); - destroy_workqueue(host->workq); - host->workq = NULL; + flush_work(&host->work); mmc_free_host(mmc); diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 39814f3dc96f..c531deef3258 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1365,7 +1365,7 @@ static struct s3c24xx_mci_pdata s3cmci_def_pdata = { .no_detect = 1, }; -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ static int s3cmci_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h index cc2e46cb5c64..30c2c0dd1bc8 100644 --- a/drivers/mmc/host/s3cmci.h +++ b/drivers/mmc/host/s3cmci.h @@ -74,7 +74,7 @@ struct s3cmci_host { struct dentry *debug_regs; #endif -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ struct notifier_block freq_transition; #endif }; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 008709c5cb09..8fe0756c8e1e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -532,11 +532,6 @@ static int sdhci_acpi_resume(struct device *dev) return sdhci_resume_host(c->host); } -#else - -#define sdhci_acpi_suspend NULL -#define sdhci_acpi_resume NULL - #endif #ifdef CONFIG_PM @@ -560,8 +555,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops sdhci_acpi_pm_ops = { - .suspend = sdhci_acpi_suspend, - .resume = sdhci_acpi_resume, + SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume) SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend, sdhci_acpi_runtime_resume, NULL) }; diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index 00a8a40a3729..e5c634bdfdd9 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -264,12 +264,12 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev) } dev_dbg(dev, "non-removable=%c\n", - (host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N'); + mmc_card_is_removable(host->mmc) ? 'N' : 'Y'); dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n", (mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 
'Y' : 'N', (mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N'); - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if (!mmc_card_is_removable(host->mmc)) host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; dev_dbg(dev, "is_8bit=%c\n", @@ -288,7 +288,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev) } /* if device is eMMC, emulate card insert right here */ - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) { + if (!mmc_card_is_removable(host->mmc)) { ret = sdhci_bcm_kona_sd_card_emulate(host, 1); if (ret) { dev_err(dev, @@ -326,7 +326,7 @@ err_pltfm_free: static struct platform_driver sdhci_bcm_kona_driver = { .driver = { .name = "sdhci-kona", - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, .of_match_table = sdhci_bcm_kona_of_match, }, .probe = sdhci_bcm_kona_probe, diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c deleted file mode 100644 index 4a6a1d1386cb..000000000000 --- a/drivers/mmc/host/sdhci-bcm2835.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * BCM2835 SDHCI - * Copyright (C) 2012 Stephen Warren - * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me - * Portions of the code there were obviously based on the Linux kernel at: - * git://github.com/raspberrypi/linux.git rpi-3.6.y - * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/mmc/host.h> -#include "sdhci-pltfm.h" - -/* - * 400KHz is max freq for card ID etc. Use that as min card clock. We need to - * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY. - */ -#define MIN_FREQ 400000 - -/* - * The Arasan has a bugette whereby it may lose the content of successive - * writes to registers that are within two SD-card clock cycles of each other - * (a clock domain crossing problem). It seems, however, that the data - * register does not have this problem, which is just as well - otherwise we'd - * have to nobble the DMA engine too. - * - * This should probably be dynamically calculated based on the actual card - * frequency. However, this is the longest we'll have to wait, and doesn't - * seem to slow access down too much, so the added complexity doesn't seem - * worth it for now. - * - * 1/MIN_FREQ is (max) time per tick of eMMC clock. - * 2/MIN_FREQ is time for two ticks. - * Multiply by 1000000 to get uS per two ticks. - * *1000000 for uSecs. - * +1 for hack rounding. 
- */ -#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1) - -struct bcm2835_sdhci { - u32 shadow; -}; - -static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg) -{ - writel(val, host->ioaddr + reg); - - udelay(BCM2835_SDHCI_WRITE_DELAY); -} - -static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg) -{ - u32 val = readl(host->ioaddr + reg); - - if (reg == SDHCI_CAPABILITIES) - val |= SDHCI_CAN_VDD_330; - - return val; -} - -static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct bcm2835_sdhci *bcm2835_host = sdhci_pltfm_priv(pltfm_host); - u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow : - bcm2835_sdhci_readl(host, reg & ~3); - u32 word_num = (reg >> 1) & 1; - u32 word_shift = word_num * 16; - u32 mask = 0xffff << word_shift; - u32 newval = (oldval & ~mask) | (val << word_shift); - - if (reg == SDHCI_TRANSFER_MODE) - bcm2835_host->shadow = newval; - else - bcm2835_sdhci_writel(host, newval, reg & ~3); -} - -static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg) -{ - u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); - u32 word_num = (reg >> 1) & 1; - u32 word_shift = word_num * 16; - u32 word = (val >> word_shift) & 0xffff; - - return word; -} - -static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) -{ - u32 oldval = bcm2835_sdhci_readl(host, reg & ~3); - u32 byte_num = reg & 3; - u32 byte_shift = byte_num * 8; - u32 mask = 0xff << byte_shift; - u32 newval = (oldval & ~mask) | (val << byte_shift); - - bcm2835_sdhci_writel(host, newval, reg & ~3); -} - -static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg) -{ - u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); - u32 byte_num = reg & 3; - u32 byte_shift = byte_num * 8; - u32 byte = (val >> byte_shift) & 0xff; - - return byte; -} - -static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host) -{ - return MIN_FREQ; -} - -static const struct sdhci_ops bcm2835_sdhci_ops = { - .write_l = bcm2835_sdhci_writel, - .write_w = bcm2835_sdhci_writew, - .write_b = bcm2835_sdhci_writeb, - .read_l = bcm2835_sdhci_readl, - .read_w = bcm2835_sdhci_readw, - .read_b = bcm2835_sdhci_readb, - .set_clock = sdhci_set_clock, - .get_max_clock = sdhci_pltfm_clk_get_max_clock, - .get_min_clock = bcm2835_sdhci_get_min_clock, - .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, -}; - -static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = { - .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, - .ops = &bcm2835_sdhci_ops, -}; - -static int bcm2835_sdhci_probe(struct platform_device *pdev) -{ - struct sdhci_host *host; - struct bcm2835_sdhci *bcm2835_host; - struct sdhci_pltfm_host *pltfm_host; - int ret; - - host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, - sizeof(*bcm2835_host)); - if (IS_ERR(host)) - return PTR_ERR(host); - - pltfm_host = sdhci_priv(host); - - pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pltfm_host->clk)) { - ret = PTR_ERR(pltfm_host->clk); - goto err; - } - ret = clk_prepare_enable(pltfm_host->clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable host clk\n"); - goto err; - } - - ret = sdhci_add_host(host); - if (ret) - goto err_clk; - - return 0; -err_clk: - clk_disable_unprepare(pltfm_host->clk); -err: - sdhci_pltfm_free(pdev); - return ret; -} - -static const struct of_device_id bcm2835_sdhci_of_match[] = { - { .compatible = 
"brcm,bcm2835-sdhci" }, - { } -}; -MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match); - -static struct platform_driver bcm2835_sdhci_driver = { - .driver = { - .name = "sdhci-bcm2835", - .of_match_table = bcm2835_sdhci_of_match, - .pm = SDHCI_PLTFM_PMOPS, - }, - .probe = bcm2835_sdhci_probe, - .remove = sdhci_pltfm_unregister, -}; -module_platform_driver(bcm2835_sdhci_driver); - -MODULE_DESCRIPTION("BCM2835 SDHCI driver"); -MODULE_AUTHOR("Stephen Warren"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c new file mode 100644 index 000000000000..cce10fe3e19e --- /dev/null +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -0,0 +1,143 @@ +/* + * sdhci-brcmstb.c Support for SDHCI on Broadcom BRCMSTB SoC's + * + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/io.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> + +#include "sdhci-pltfm.h" + +#ifdef CONFIG_PM_SLEEP + +static int sdhci_brcmstb_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int res; + + res = sdhci_suspend_host(host); + if (res) + return res; + clk_disable_unprepare(pltfm_host->clk); + return res; +} + +static int sdhci_brcmstb_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int err; + + err = clk_prepare_enable(pltfm_host->clk); + if (err) + return err; + return sdhci_resume_host(host); +} + +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend, + sdhci_brcmstb_resume); + +static const struct sdhci_ops sdhci_brcmstb_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_brcmstb_pdata = { + .ops = &sdhci_brcmstb_ops, +}; + +static int sdhci_brcmstb_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct clk *clk; + int res; + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Clock not found in Device Tree\n"); + clk = NULL; + } + res = clk_prepare_enable(clk); + if (res) + return res; + + host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0); + if (IS_ERR(host)) { + res = PTR_ERR(host); + goto err_clk; + } + + /* Enable MMC_CAP2_HC_ERASE_SZ for better max discard calculations */ + host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; + + sdhci_get_of_property(pdev); + mmc_of_parse(host->mmc); + + /* + * Supply the existing CAPS, but clear the UHS modes. This + * will allow these modes to be specified by device tree + * properties through mmc_of_parse(). 
+ */ + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + + res = sdhci_add_host(host); + if (res) + goto err; + + pltfm_host = sdhci_priv(host); + pltfm_host->clk = clk; + return res; + +err: + sdhci_pltfm_free(pdev); +err_clk: + clk_disable_unprepare(clk); + return res; +} + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); + +static struct platform_driver sdhci_brcmstb_driver = { + .driver = { + .name = "sdhci-brcmstb", + .owner = THIS_MODULE, + .pm = &sdhci_brcmstb_pmops, + .of_match_table = of_match_ptr(sdhci_brcm_of_match), + }, + .probe = sdhci_brcmstb_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_brcmstb_driver); + +MODULE_DESCRIPTION("SDHCI driver for Broadcom BRCMSTB SoCs"); +MODULE_AUTHOR("Broadcom"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c index 59f2923f8054..bd286db7f9af 100644 --- a/drivers/mmc/host/sdhci-cns3xxx.c +++ b/drivers/mmc/host/sdhci-cns3xxx.c @@ -101,7 +101,7 @@ static int sdhci_cns3xxx_probe(struct platform_device *pdev) static struct platform_driver sdhci_cns3xxx_driver = { .driver = { .name = "sdhci-cns3xxx", - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_cns3xxx_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c index 407c21f152b2..de9f9603dbdc 100644 --- a/drivers/mmc/host/sdhci-dove.c +++ b/drivers/mmc/host/sdhci-dove.c @@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); static struct platform_driver sdhci_dove_driver = { .driver = { .name = "sdhci-dove", - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, .of_match_table = sdhci_dove_of_match_table, }, .probe = sdhci_dove_probe, diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 9d3ae1f4bd3c..99e0b334f9df 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -39,11 +39,13 @@ #define ESDHC_VENDOR_SPEC_VSELECT (1 << 1) #define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8) #define ESDHC_WTMK_LVL 0x44 +#define ESDHC_WTMK_DEFAULT_VAL 0x10401040 #define ESDHC_MIX_CTRL 0x48 #define ESDHC_MIX_CTRL_DDREN (1 << 3) #define ESDHC_MIX_CTRL_AC23EN (1 << 7) #define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) #define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) +#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24) #define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) #define ESDHC_MIX_CTRL_HS400_EN (1 << 26) /* Bits 3 and 6 are not SDHCI standard definitions */ @@ -75,7 +77,8 @@ #define ESDHC_TUNING_CTRL 0xcc #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ -#define ESDHC_TUNING_START_TAP 0x1 +#define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +#define ESDHC_TUNING_START_TAP_MASK 0xff #define ESDHC_TUNING_STEP_MASK 0x00070000 #define ESDHC_TUNING_STEP_SHIFT 16 @@ -299,7 +302,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) /* imx6q/dl does not have cap_1 register, fake one */ val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 - | SDHCI_USE_SDR50_TUNING; + | SDHCI_USE_SDR50_TUNING + | (SDHCI_TUNING_MODE_3 << SDHCI_RETUNING_MODE_SHIFT); if (imx_data->socdata->flags & 
ESDHC_FLAG_HS400) val |= SDHCI_SUPPORT_HS400; @@ -469,32 +473,29 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); - if (val & SDHCI_CTRL_TUNED_CLK) + if (val & SDHCI_CTRL_TUNED_CLK) { new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; - else + new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; + } else { new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN; + } writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); - u32 tuning_ctrl; if (val & SDHCI_CTRL_TUNED_CLK) { v |= ESDHC_MIX_CTRL_SMPCLK_SEL; } else { v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; m &= ~ESDHC_MIX_CTRL_FBCLK_SEL; + m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN; } if (val & SDHCI_CTRL_EXEC_TUNING) { v |= ESDHC_MIX_CTRL_EXE_TUNE; m |= ESDHC_MIX_CTRL_FBCLK_SEL; - tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL); - tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP; - if (imx_data->boarddata.tuning_step) { - tuning_ctrl &= ~ESDHC_TUNING_STEP_MASK; - tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT; - } - writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL); + m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; } else { v &= ~ESDHC_MIX_CTRL_EXE_TUNE; } @@ -751,6 +752,7 @@ static void esdhc_post_tuning(struct sdhci_host *host) reg = readl(host->ioaddr + ESDHC_MIX_CTRL); reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; + reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; writel(reg, host->ioaddr + ESDHC_MIX_CTRL); } @@ -838,6 +840,11 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host) u32 v; if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) { + /* disable clock before enabling strobe dll */ + writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & + ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + /* force a reset on strobe dll */ writel(ESDHC_STROBE_DLL_CTRL_RESET, host->ioaddr + ESDHC_STROBE_DLL_CTRL); @@ -899,6 +906,8 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN; writel(m, host->ioaddr + ESDHC_MIX_CTRL); imx_data->is_ddr = 1; + /* update clock after enabling DDR for strobe DLL lock */ + host->ops->set_clock(host, host->clock); esdhc_set_strobe_dll(host); break; } @@ -957,6 +966,62 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .ops = &sdhci_esdhc_ops, }; +static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + int tmp; + + if (esdhc_is_usdhc(imx_data)) { + /* + * The imx6q ROM code will change the default watermark + * level setting to something insane. Change it back here. + */ + writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL); + + /* + * ROM code will change the bit burst_length_enable setting + * to zero if this usdhc is chosen to boot the system. Change + * it back here, otherwise it will impact the performance a + * lot. This bit is used to enable/disable the burst length + * for the external AHB2AXI bridge, it's especially useful + * for INCR transfer because without burst length indicator, + * the AHB2AXI bridge does not know the burst length in + * advance.
And without burst length indicator, AHB INCR + * transfer can only be converted to singles on the AXI side. + */ + writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) + | ESDHC_BURST_LEN_EN_INCR, + host->ioaddr + SDHCI_HOST_CONTROL); + /* + * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) | BIT(7), + host->ioaddr + 0x6c); + + /* disable DLL_CTRL delay line settings */ + writel(0x0, host->ioaddr + ESDHC_DLL_CTRL); + + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); + tmp |= ESDHC_STD_TUNING_EN | + ESDHC_TUNING_START_TAP_DEFAULT; + if (imx_data->boarddata.tuning_start_tap) { + tmp &= ~ESDHC_TUNING_START_TAP_MASK; + tmp |= imx_data->boarddata.tuning_start_tap; + } + + if (imx_data->boarddata.tuning_step) { + tmp &= ~ESDHC_TUNING_STEP_MASK; + tmp |= imx_data->boarddata.tuning_step + << ESDHC_TUNING_STEP_SHIFT; + } + writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); + } + } +} + #ifdef CONFIG_OF static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, @@ -975,6 +1040,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, boarddata->wp_type = ESDHC_WP_GPIO; of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step); + of_property_read_u32(np, "fsl,tuning-start-tap", + &boarddata->tuning_start_tap); if (of_find_property(np, "no-1-8-v", NULL)) boarddata->support_vsel = false; @@ -1147,58 +1214,27 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (IS_ERR(imx_data->pins_default)) dev_warn(mmc_dev(host->mmc), "could not get default state\n"); - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207) /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK | SDHCI_QUIRK_BROKEN_ADMA; - /* - * The imx6q ROM code will change the default watermark level setting - * to something insane. Change it back here. - */ if (esdhc_is_usdhc(imx_data)) { - writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL); - host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR; - - /* - * ROM code will change the bit burst_length_enable setting - * to zero if this usdhc is choosed to boot system. Change - * it back here, otherwise it will impact the performance a - * lot. This bit is used to enable/disable the burst length - * for the external AHB2AXI bridge, it's usefully especially - * for INCR transfer because without burst length indicator, - * the AHB2AXI bridge does not know the burst length in - * advance. And without burst length indicator, AHB INCR - * transfer can only be converted to singles on the AXI side. 
- */ - writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) - | ESDHC_BURST_LEN_EN_INCR, - host->ioaddr + SDHCI_HOST_CONTROL); - if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200)) host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; - /* - * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL - * TO1.1, it's harmless for MX6SL - */ - writel(readl(host->ioaddr + 0x6c) | BIT(7), - host->ioaddr + 0x6c); + /* clear tuning bits in case ROM has set it already */ + writel(0x0, host->ioaddr + ESDHC_MIX_CTRL); + writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR); + writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); } if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; - if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) - writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) | - ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP, - host->ioaddr + ESDHC_TUNING_CTRL); - if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; @@ -1212,6 +1248,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (err) goto disable_clk; + sdhci_esdhc_imx_hwinit(host); + err = sdhci_add_host(host); if (err) goto disable_clk; @@ -1255,6 +1293,25 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int sdhci_esdhc_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + return sdhci_suspend_host(host); +} + +static int sdhci_esdhc_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + /* re-initialize hw state in case it's lost in low power mode */ + sdhci_esdhc_imx_hwinit(host); + + return sdhci_resume_host(host); +} +#endif + #ifdef CONFIG_PM static int sdhci_esdhc_runtime_suspend(struct device *dev) { @@ -1291,7 +1348,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops sdhci_esdhc_pmops = { - SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume) + SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume) SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, sdhci_esdhc_runtime_resume, NULL) }; diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 1110f73b08aa..726246665850 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -164,8 +164,17 @@ static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { static const struct sdhci_iproc_data iproc_data = { .pdata = &sdhci_iproc_pltfm_data, - .caps = 0x05E90000, - .caps1 = 0x00000064, + .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT) + & SDHCI_MAX_BLOCK_MASK) | + SDHCI_CAN_VDD_330 | + SDHCI_CAN_VDD_180 | + SDHCI_CAN_DO_SUSPEND | + SDHCI_CAN_DO_HISPD | + SDHCI_CAN_DO_ADMA2 | + SDHCI_CAN_DO_SDMA, + .caps1 = SDHCI_DRIVER_TYPE_C | + SDHCI_DRIVER_TYPE_D | + SDHCI_SUPPORT_DDR50, .mmc_caps = MMC_CAP_1_8V_DDR, }; @@ -251,7 +260,7 @@ static struct platform_driver sdhci_iproc_driver = { .driver = { .name = "sdhci-iproc", .of_match_table = sdhci_iproc_of_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_iproc_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 0653fe730150..8ef44a2a2fd9 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -32,6 +32,21 @@ #define CORE_POWER 0x0 #define CORE_SW_RST BIT(7) +#define CORE_PWRCTL_STATUS 0xdc +#define CORE_PWRCTL_MASK 0xe0 +#define CORE_PWRCTL_CLEAR 0xe4 +#define CORE_PWRCTL_CTL 0xe8 +#define 
CORE_PWRCTL_BUS_OFF BIT(0) +#define CORE_PWRCTL_BUS_ON BIT(1) +#define CORE_PWRCTL_IO_LOW BIT(2) +#define CORE_PWRCTL_IO_HIGH BIT(3) +#define CORE_PWRCTL_BUS_SUCCESS BIT(0) +#define CORE_PWRCTL_IO_SUCCESS BIT(2) +#define REQ_BUS_OFF BIT(0) +#define REQ_BUS_ON BIT(1) +#define REQ_IO_LOW BIT(2) +#define REQ_IO_HIGH BIT(3) +#define INT_MASK 0xf #define MAX_PHASES 16 #define CORE_DLL_LOCK BIT(7) #define CORE_DLL_EN BIT(16) @@ -56,6 +71,7 @@ struct sdhci_msm_host { struct platform_device *pdev; void __iomem *core_mem; /* MSM SDCC mapped address */ + int pwr_irq; /* power irq */ struct clk *clk; /* main SD/MMC bus clock */ struct clk *pclk; /* SDHC peripheral bus clock */ struct clk *bus_clk; /* SDHC bus voter clock */ @@ -410,6 +426,85 @@ retry: return rc; } +static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host, + unsigned int uhs) +{ + struct mmc_host *mmc = host->mmc; + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + break; + } + + /* + * When clock frequency is less than 100MHz, the feedback clock must be + * provided and DLL must not be used so that tuning can be skipped. To + * provide feedback clock, the mode selection can be any value less + * than 3'b011 in bits [2:0] of HOST CONTROL2 register. + */ + if (host->clock <= 100000000 && + (uhs == MMC_TIMING_MMC_HS400 || + uhs == MMC_TIMING_MMC_HS200 || + uhs == MMC_TIMING_UHS_SDR104)) + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + + dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n", + mmc_hostname(host->mmc), host->clock, uhs, ctrl_2); + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_msm_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 irq_status, irq_ack = 0; + + irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS); + irq_status &= INT_MASK; + + writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR); + + if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF)) + irq_ack |= CORE_PWRCTL_BUS_SUCCESS; + if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH)) + irq_ack |= CORE_PWRCTL_IO_SUCCESS; + + /* + * The driver has to acknowledge the interrupt, switch voltages and + * report back if it succeeded or not to this register. The voltage + * switches are handled by the sdhci core, so just report success.
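If the voltage switch were not delegated to the sdhci core, the handler would act on the request bits before acknowledging them. A hedged sketch of that alternative (vqmmc is a hypothetical regulator handle, not part of this patch; the microvolt ranges mirror the ones the sdhci core uses):

	if (irq_status & CORE_PWRCTL_IO_LOW)	/* request: drop the IO rail to 1.8 V */
		regulator_set_voltage(vqmmc, 1700000, 1950000);
	if (irq_status & CORE_PWRCTL_IO_HIGH)	/* request: raise the IO rail to 3.3 V */
		regulator_set_voltage(vqmmc, 2700000, 3600000);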
+ */ + writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL); +} + +static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) +{ + struct sdhci_host *host = (struct sdhci_host *)data; + + sdhci_msm_voltage_switch(host); + + return IRQ_HANDLED; +} + static const struct of_device_id sdhci_msm_dt_match[] = { { .compatible = "qcom,sdhci-msm-v4" }, {}, @@ -422,11 +517,13 @@ static const struct sdhci_ops sdhci_msm_ops = { .reset = sdhci_reset, .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, - .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_uhs_signaling = sdhci_msm_set_uhs_signaling, + .voltage_switch = sdhci_msm_voltage_switch, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_NO_CARD_NO_RESET | SDHCI_QUIRK_SINGLE_POWER_WRITE, .ops = &sdhci_msm_ops, }; @@ -473,7 +570,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(msm_host->pclk)) { ret = PTR_ERR(msm_host->pclk); - dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret); + dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); goto bus_clk_disable; } @@ -545,6 +642,22 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* Setup IRQ for handling power/voltage tasks with PMIC */ + msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); + if (msm_host->pwr_irq < 0) { + dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", + msm_host->pwr_irq); + goto clk_disable; + } + + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, + sdhci_msm_pwr_irq, IRQF_ONESHOT, + dev_name(&pdev->dev), host); + if (ret) { + dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); + goto clk_disable; + } + ret = sdhci_add_host(host); if (ret) goto clk_disable; diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index b6f4c1d41636..e0f193f7e3e5 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -19,27 +19,136 @@ * your option) any later version. */ +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/phy/phy.h> +#include <linux/regmap.h> #include "sdhci-pltfm.h" #define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c +#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 +#define VENDOR_ENHANCED_STROBE BIT(0) #define CLK_CTRL_TIMEOUT_SHIFT 16 #define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) #define CLK_CTRL_TIMEOUT_MIN_EXP 13 +/* + * On some SoCs the syscon area has a feature where the upper 16 bits of + * each 32-bit register act as a write mask for the lower 16 bits. This allows + * atomic updates of the register without locking. This macro is used on SoCs + * that have that feature. + */ +#define HIWORD_UPDATE(val, mask, shift) \ + ((val) << (shift) | (mask) << ((shift) + 16)) + +/** + * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map + * + * @reg: Offset within the syscon of the register containing this field + * @width: Number of bits for this field + * @shift: Bit offset within @reg of this field (or -1 if not avail) + */ +struct sdhci_arasan_soc_ctl_field { + u32 reg; + u16 width; + s16 shift; +}; + +/** + * struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers + * + * It's up to the licensee of the Arasan IP block to make these available + * somewhere if needed.
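A worked expansion of HIWORD_UPDATE() makes the write-mask trick concrete (the numbers here are illustrative, not taken from this patch). Setting an 8-bit field at bit offset 8 to the value 0x80:

	HIWORD_UPDATE(0x80, 0xff, 8)
		== (0x80 << 8) | (0xff << (8 + 16))
		== 0x00008000 | 0xff000000
		== 0xff008000	/* high halfword = write-enable for bits [15:8] */

so a single 32-bit write updates only bits [15:8], with no read-modify-write cycle and no lock.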
Presumably these will be scattered somewhere that's + * accessible via the syscon API. + * + * @baseclkfreq: Where to find corecfg_baseclkfreq + * @hiword_update: If true, use HIWORD_UPDATE to access the syscon + */ +struct sdhci_arasan_soc_ctl_map { + struct sdhci_arasan_soc_ctl_field baseclkfreq; + bool hiword_update; +}; + /** * struct sdhci_arasan_data - * @clk_ahb: Pointer to the AHB clock - * @phy: Pointer to the generic phy + * @host: Pointer to the main SDHCI host structure. + * @clk_ahb: Pointer to the AHB clock + * @phy: Pointer to the generic phy + * @sdcardclk_hw: Struct for the clock we might provide to a PHY. + * @sdcardclk: Pointer to normal 'struct clk' for sdcardclk_hw. + * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers. + * @soc_ctl_map: Map to get offsets into soc_ctl registers. */ struct sdhci_arasan_data { + struct sdhci_host *host; struct clk *clk_ahb; struct phy *phy; + + struct clk_hw sdcardclk_hw; + struct clk *sdcardclk; + + struct regmap *soc_ctl_base; + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; +}; + +static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { + .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 }, + .hiword_update = true, }; +/** + * sdhci_arasan_syscon_write - Write to a field in soc_ctl registers + * + * This function allows writing to fields in sdhci_arasan_soc_ctl_map. + * Note that if a field is specified as not available (shift < 0), this + * function silently returns an error code; it is noisy and prints an + * error for any other (unexpected) failure. + * + * @host: The sdhci_host + * @fld: The field to write to + * @val: The value to write + */ +static int sdhci_arasan_syscon_write(struct sdhci_host *host, + const struct sdhci_arasan_soc_ctl_field *fld, + u32 val) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + struct regmap *soc_ctl_base = sdhci_arasan->soc_ctl_base; + u32 reg = fld->reg; + u16 width = fld->width; + s16 shift = fld->shift; + int ret; + + /* + * Silently return an error for shift < 0 so the caller doesn't + * have to check for optional fields. For required fields the + * caller needs to do something special + * anyway.
+ */ + if (shift < 0) + return -EINVAL; + + if (sdhci_arasan->soc_ctl_map->hiword_update) + ret = regmap_write(soc_ctl_base, reg, + HIWORD_UPDATE(val, GENMASK(width, 0), + shift)); + else + ret = regmap_update_bits(soc_ctl_base, reg, + GENMASK(shift + width, shift), + val << shift); + + /* Yell about (unexpected) regmap errors */ + if (ret) + pr_warn("%s: Regmap write fail: %d\n", + mmc_hostname(host->mmc), ret); + + return ret; +} + static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) { u32 div; @@ -79,6 +188,21 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) } } +static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + u32 vendor; + struct sdhci_host *host = mmc_priv(mmc); + + vendor = readl(host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); + if (ios->enhanced_strobe) + vendor |= VENDOR_ENHANCED_STROBE; + else + vendor &= ~VENDOR_ENHANCED_STROBE; + + writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); +} + static struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, @@ -172,9 +296,168 @@ static int sdhci_arasan_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend, sdhci_arasan_resume); +static const struct of_device_id sdhci_arasan_of_match[] = { + /* SoC-specific compatible strings w/ soc_ctl_map */ + { + .compatible = "rockchip,rk3399-sdhci-5.1", + .data = &rk3399_soc_ctl_map, + }, + + /* Generic compatible below here */ + { .compatible = "arasan,sdhci-8.9a" }, + { .compatible = "arasan,sdhci-5.1" }, + { .compatible = "arasan,sdhci-4.9a" }, + + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); + +/** + * sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate + * + * Return the current actual rate of the SD card clock. This can be used + * to communicate with our PHY. + * + * @hw: Pointer to the hardware clock structure. + * @parent_rate: The parent rate (should be rate of clk_xin). + * Returns the card clock rate. + */ +static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) + +{ + struct sdhci_arasan_data *sdhci_arasan = + container_of(hw, struct sdhci_arasan_data, sdcardclk_hw); + struct sdhci_host *host = sdhci_arasan->host; + + return host->mmc->actual_clock; +} + +static const struct clk_ops arasan_sdcardclk_ops = { + .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate, +}; + +/** + * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq + * + * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This + * function can be used to make that happen. + * + * NOTES: + * - Many existing devices don't seem to do this and work fine. To keep + * compatibility for old hardware where the device tree doesn't provide a + * register map, this function is a no-op if a soc_ctl_map hasn't been provided + * for this platform. + * - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider + * to achieve lower clock rates. That means that this function is called once + * at probe time and never called again.
+ * + * @host: The sdhci_host + */ +static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map = + sdhci_arasan->soc_ctl_map; + u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000); + + /* Having a map is optional */ + if (!soc_ctl_map) + return; + + /* If we have a map, we expect to have a syscon */ + if (!sdhci_arasan->soc_ctl_base) { + pr_warn("%s: Have regmap, but no soc-ctl-syscon\n", + mmc_hostname(host->mmc)); + return; + } + + sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz); +} + +/** + * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use + * + * Some PHY devices need to know what the actual card clock is. In order for + * them to find out, we'll provide a clock through the common clock framework + * for them to query. + * + * Note: without seriously re-architecting SDHCI's clock code and testing on + * all platforms, there's no way to create a totally beautiful clock here + * with all clock ops implemented. Instead, we'll just create a clock that can + * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock + * framework that we're doing things behind its back. This should be sufficient + * to create nice clean device tree bindings and later (if needed) we can try + * re-architecting SDHCI if we see some benefit to it. + * + * @sdhci_arasan: Our private data structure. + * @clk_xin: Pointer to the functional clock + * @dev: Pointer to our struct device. + * Returns 0 on success and error value on error + */ +static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan, + struct clk *clk_xin, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct clk_init_data sdcardclk_init; + const char *parent_clk_name; + int ret; + + /* Providing a clock to the PHY is optional; no error if missing */ + if (!of_find_property(np, "#clock-cells", NULL)) + return 0; + + ret = of_property_read_string_index(np, "clock-output-names", 0, + &sdcardclk_init.name); + if (ret) { + dev_err(dev, "DT has #clock-cells but no clock-output-names\n"); + return ret; + } + + parent_clk_name = __clk_get_name(clk_xin); + sdcardclk_init.parent_names = &parent_clk_name; + sdcardclk_init.num_parents = 1; + sdcardclk_init.flags = CLK_GET_RATE_NOCACHE; + sdcardclk_init.ops = &arasan_sdcardclk_ops; + + sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init; + sdhci_arasan->sdcardclk = + devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw); + sdhci_arasan->sdcardclk_hw.init = NULL; + + ret = of_clk_add_provider(np, of_clk_src_simple_get, + sdhci_arasan->sdcardclk); + if (ret) + dev_err(dev, "Failed to add clock provider\n"); + + return ret; +} + +/** + * sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk() + * + * Should be called any time we're exiting and sdhci_arasan_register_sdclk() + * returned success. + * + * @dev: Pointer to our struct device. 
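On the consumer side, a PHY driver sees the clock registered by sdhci_arasan_register_sdclk() like any other clock; because of CLK_GET_RATE_NOCACHE, every clk_get_rate() call is routed through sdhci_arasan_sdcardclk_recalc_rate() and reflects the live mmc->actual_clock. A sketch, with a hypothetical PHY platform device as the consumer:

	struct clk *cardclk = devm_clk_get(&phy_pdev->dev, NULL);

	if (!IS_ERR(cardclk))
		dev_dbg(&phy_pdev->dev, "card clock: %lu Hz\n",
			clk_get_rate(cardclk));	/* recomputed on every call */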
+ */ +static void sdhci_arasan_unregister_sdclk(struct device *dev) +{ + struct device_node *np = dev->of_node; + + if (!of_find_property(np, "#clock-cells", NULL)) + return; + + of_clk_del_provider(dev->of_node); +} + static int sdhci_arasan_probe(struct platform_device *pdev) { int ret; + const struct of_device_id *match; + struct device_node *node; struct clk *clk_xin; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; @@ -187,6 +470,24 @@ static int sdhci_arasan_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + sdhci_arasan->host = host; + + match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node); + sdhci_arasan->soc_ctl_map = match->data; + + node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0); + if (node) { + sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node); + of_node_put(node); + + if (IS_ERR(sdhci_arasan->soc_ctl_base)) { + ret = PTR_ERR(sdhci_arasan->soc_ctl_base); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Can't get syscon: %d\n", + ret); + goto err_pltfm_free; + } + } sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb"); if (IS_ERR(sdhci_arasan->clk_ahb)) { @@ -217,10 +518,16 @@ static int sdhci_arasan_probe(struct platform_device *pdev) sdhci_get_of_property(pdev); pltfm_host->clk = clk_xin; + sdhci_arasan_update_baseclkfreq(host); + + ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev); + if (ret) + goto clk_disable_all; + ret = mmc_of_parse(host->mmc); if (ret) { dev_err(&pdev->dev, "parsing dt failed (%u)\n", ret); - goto clk_disable_all; + goto unreg_clk; } sdhci_arasan->phy = ERR_PTR(-ENODEV); @@ -231,13 +538,13 @@ static int sdhci_arasan_probe(struct platform_device *pdev) if (IS_ERR(sdhci_arasan->phy)) { ret = PTR_ERR(sdhci_arasan->phy); dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n"); - goto clk_disable_all; + goto unreg_clk; } ret = phy_init(sdhci_arasan->phy); if (ret < 0) { dev_err(&pdev->dev, "phy_init err.\n"); - goto clk_disable_all; + goto unreg_clk; } ret = phy_power_on(sdhci_arasan->phy); @@ -245,6 +552,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev) dev_err(&pdev->dev, "phy_power_on err.\n"); goto err_phy_power; } + + host->mmc_host_ops.hs400_enhanced_strobe = + sdhci_arasan_hs400_enhanced_strobe; } ret = sdhci_add_host(host); @@ -259,6 +569,8 @@ err_add_host: err_phy_power: if (!IS_ERR(sdhci_arasan->phy)) phy_exit(sdhci_arasan->phy); +unreg_clk: + sdhci_arasan_unregister_sdclk(&pdev->dev); clk_disable_all: clk_disable_unprepare(clk_xin); clk_dis_ahb: @@ -281,6 +593,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev) phy_exit(sdhci_arasan->phy); } + sdhci_arasan_unregister_sdclk(&pdev->dev); + ret = sdhci_pltfm_unregister(pdev); clk_disable_unprepare(clk_ahb); @@ -288,14 +602,6 @@ static int sdhci_arasan_remove(struct platform_device *pdev) return ret; } -static const struct of_device_id sdhci_arasan_of_match[] = { - { .compatible = "arasan,sdhci-8.9a" }, - { .compatible = "arasan,sdhci-5.1" }, - { .compatible = "arasan,sdhci-4.9a" }, - { } -}; -MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); - static struct platform_driver sdhci_arasan_driver = { .driver = { .name = "sdhci-arasan", diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d4cef713d246..a9b7fc06c434 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -288,7 +288,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) * Disable 
SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries * to enable polling via device tree with broken-cd property. */ - if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + if (mmc_card_is_removable(host->mmc) && mmc_gpio_get_cd(host->mmc) < 0) { host->mmc->caps |= MMC_CAP_NEEDS_POLL; host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 3f34d354f1fc..239be2fde242 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -481,7 +481,7 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static u32 esdhc_proctl; static int esdhc_of_suspend(struct device *dev) { @@ -504,16 +504,12 @@ static int esdhc_of_resume(struct device *dev) } return ret; } - -static const struct dev_pm_ops esdhc_pmops = { - .suspend = esdhc_of_suspend, - .resume = esdhc_of_resume, -}; -#define ESDHC_PMOPS (&esdhc_pmops) -#else -#define ESDHC_PMOPS NULL #endif +static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops, + esdhc_of_suspend, + esdhc_of_resume); + static const struct sdhci_ops sdhci_esdhc_be_ops = { .read_l = esdhc_be_readl, .read_w = esdhc_be_readw, @@ -657,7 +653,7 @@ static struct platform_driver sdhci_esdhc_driver = { .driver = { .name = "sdhci-esdhc", .of_match_table = sdhci_esdhc_of_match, - .pm = ESDHC_PMOPS, + .pm = &esdhc_of_dev_pm_ops, }, .probe = sdhci_esdhc_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 4079a96ad37e..ac00c5efb2a3 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -85,7 +85,7 @@ static struct platform_driver sdhci_hlwd_driver = { .driver = { .name = "sdhci-hlwd", .of_match_table = sdhci_hlwd_of_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_hlwd_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index a4dbf7421edc..897cfd24ca2e 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -419,13 +419,13 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { }; /* Define Host controllers for Intel Merrifield platform */ -#define INTEL_MRFL_EMMC_0 0 -#define INTEL_MRFL_EMMC_1 1 +#define INTEL_MRFLD_EMMC_0 0 +#define INTEL_MRFLD_EMMC_1 1 -static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) +static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) { - if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && - (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) + if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_0) && + (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_1)) /* SD support is not ready yet */ return -ENODEV; @@ -435,12 +435,12 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) return 0; } -static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { +static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .allow_runtime_pm = true, - .probe_slot = intel_mrfl_mmc_probe_slot, + .probe_slot = intel_mrfld_mmc_probe_slot, }; /* O2Micro extra registers */ @@ -1104,10 +1104,10 @@ static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_INTEL, - .device = PCI_DEVICE_ID_INTEL_MRFL_MMC, + .device = 
PCI_DEVICE_ID_INTEL_MRFLD_MMC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrfld_mmc, }, { @@ -1413,8 +1413,7 @@ static const struct sdhci_ops sdhci_pci_ops = { * * \*****************************************************************************/ -#ifdef CONFIG_PM - +#ifdef CONFIG_PM_SLEEP static int sdhci_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -1496,7 +1495,9 @@ static int sdhci_pci_resume(struct device *dev) return 0; } +#endif +#ifdef CONFIG_PM static int sdhci_pci_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -1562,17 +1563,10 @@ static int sdhci_pci_runtime_resume(struct device *dev) return 0; } - -#else /* CONFIG_PM */ - -#define sdhci_pci_suspend NULL -#define sdhci_pci_resume NULL - -#endif /* CONFIG_PM */ +#endif static const struct dev_pm_ops sdhci_pci_pm_ops = { - .suspend = sdhci_pci_suspend, - .resume = sdhci_pci_resume, + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume) SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend, sdhci_pci_runtime_resume, NULL) }; @@ -1760,11 +1754,12 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) static void sdhci_pci_runtime_pm_allow(struct device *dev) { - pm_runtime_put_noidle(dev); - pm_runtime_allow(dev); + pm_suspend_ignore_children(dev, 1); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); - pm_suspend_ignore_children(dev, 1); + pm_runtime_allow(dev); + /* Stay active until mmc core scans for a card */ + pm_runtime_put_noidle(dev); } static void sdhci_pci_runtime_pm_forbid(struct device *dev) @@ -1810,15 +1805,13 @@ static int sdhci_pci_probe(struct pci_dev *pdev, return -ENODEV; } - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; - chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); - if (!chip) { - ret = -ENOMEM; - goto err; - } + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; chip->pdev = pdev; chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; @@ -1834,7 +1827,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev, if (chip->fixes && chip->fixes->probe) { ret = chip->fixes->probe(chip); if (ret) - goto free; + return ret; } slots = chip->num_slots; /* Quirk may have changed this */ @@ -1844,8 +1837,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev, if (IS_ERR(slot)) { for (i--; i >= 0; i--) sdhci_pci_remove_slot(chip->slots[i]); - ret = PTR_ERR(slot); - goto free; + return PTR_ERR(slot); } chip->slots[i] = slot; @@ -1855,35 +1847,18 @@ static int sdhci_pci_probe(struct pci_dev *pdev, sdhci_pci_runtime_pm_allow(&pdev->dev); return 0; - -free: - pci_set_drvdata(pdev, NULL); - kfree(chip); - -err: - pci_disable_device(pdev); - return ret; } static void sdhci_pci_remove(struct pci_dev *pdev) { int i; - struct sdhci_pci_chip *chip; + struct sdhci_pci_chip *chip = pci_get_drvdata(pdev); - chip = pci_get_drvdata(pdev); - - if (chip) { - if (chip->allow_runtime_pm) - sdhci_pci_runtime_pm_forbid(&pdev->dev); - - for (i = 0; i < chip->num_slots; i++) - sdhci_pci_remove_slot(chip->slots[i]); - - pci_set_drvdata(pdev, NULL); - kfree(chip); - } + if (chip->allow_runtime_pm) + sdhci_pci_runtime_pm_forbid(&pdev->dev); - pci_disable_device(pdev); + for (i = 0; i < chip->num_slots; i++) + sdhci_pci_remove_slot(chip->slots[i]); } static struct pci_driver sdhci_driver = { diff --git a/drivers/mmc/host/sdhci-pci.h 
b/drivers/mmc/host/sdhci-pci.h index 89e7151684a1..7e0788712e1a 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -14,7 +14,7 @@ #define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294 #define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295 #define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296 -#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190 #define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 #define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa #define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 64f287a03cd3..1d17dcfc3ffb 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -215,29 +215,26 @@ int sdhci_pltfm_unregister(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); -#ifdef CONFIG_PM -int sdhci_pltfm_suspend(struct device *dev) +#ifdef CONFIG_PM_SLEEP +static int sdhci_pltfm_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); return sdhci_suspend_host(host); } -EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); -int sdhci_pltfm_resume(struct device *dev) +static int sdhci_pltfm_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); return sdhci_resume_host(host); } -EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); +#endif const struct dev_pm_ops sdhci_pltfm_pmops = { - .suspend = sdhci_pltfm_suspend, - .resume = sdhci_pltfm_resume, + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume) }; EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops); -#endif /* CONFIG_PM */ static int __init sdhci_pltfm_drv_init(void) { diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index d38053bf9e4d..3280f2077959 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -109,13 +109,6 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) return (void *)host->private; } -#ifdef CONFIG_PM -extern int sdhci_pltfm_suspend(struct device *dev); -extern int sdhci_pltfm_resume(struct device *dev); extern const struct dev_pm_ops sdhci_pltfm_pmops; -#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) -#else -#define SDHCI_PLTFM_PMOPS NULL -#endif #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index 1d8dd3540636..347eae2d7b6a 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -252,7 +252,7 @@ static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", .of_match_table = of_match_ptr(sdhci_pxav2_of_match), - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_pxav2_probe, .remove = sdhci_pxav2_remove, diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 30132500aa1c..dd1938d341f7 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -583,24 +583,17 @@ static int sdhci_pxav3_runtime_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static const struct dev_pm_ops sdhci_pxav3_pmops = { SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume) SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, sdhci_pxav3_runtime_resume, NULL) }; -#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops) - -#else -#define SDHCI_PXAV3_PMOPS NULL -#endif - static struct platform_driver sdhci_pxav3_driver = { .driver = { .name = "sdhci-pxav3", .of_match_table = of_match_ptr(sdhci_pxav3_of_match), - .pm = SDHCI_PXAV3_PMOPS, + .pm = &sdhci_pxav3_pmops, }, .probe = sdhci_pxav3_probe, .remove = 
sdhci_pxav3_remove, diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 70c724bc6fc7..784c5a848fb4 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -714,19 +714,12 @@ static int sdhci_s3c_runtime_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static const struct dev_pm_ops sdhci_s3c_pmops = { SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume) SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, NULL) }; -#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops) - -#else -#define SDHCI_S3C_PMOPS NULL -#endif - #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { .no_divider = true, @@ -765,7 +758,7 @@ static struct platform_driver sdhci_s3c_driver = { .driver = { .name = "s3c-sdhci", .of_match_table = of_match_ptr(sdhci_s3c_dt_match), - .pm = SDHCI_S3C_PMOPS, + .pm = &sdhci_s3c_pmops, }, }; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index 34866f668dd7..5d068639dd3f 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -260,9 +260,9 @@ static int sdhci_sirf_resume(struct device *dev) return sdhci_resume_host(host); } +#endif static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume); -#endif static const struct of_device_id sdhci_sirf_of_match[] = { { .compatible = "sirf,prima2-sdhc" }, @@ -274,9 +274,7 @@ static struct platform_driver sdhci_sirf_driver = { .driver = { .name = "sdhci-sirf", .of_match_table = sdhci_sirf_of_match, -#ifdef CONFIG_PM_SLEEP .pm = &sdhci_sirf_pm_ops, -#endif }, .probe = sdhci_sirf_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c index 320e1c2f8853..c95ba83366a0 100644 --- a/drivers/mmc/host/sdhci-st.c +++ b/drivers/mmc/host/sdhci-st.c @@ -183,7 +183,7 @@ static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host) writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2); - if (mhost->caps & MMC_CAP_NONREMOVABLE) + if (!mmc_card_is_removable(mhost)) cconf3 |= ST_MMC_CCONFIG_EMMC_SLOT_TYPE; else /* CARD_DET_CTRL */ diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index bcc0de47fe7e..1e93dc4e303e 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -148,28 +148,37 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) return; misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); - /* Erratum: Enable SDHCI spec v3.00 support */ - if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) - misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; - /* Advertise UHS modes as supported by host */ - if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) - misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50; - else - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50; - if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) - misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; - else - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50; - if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) - misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; - else - misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104; - sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); - clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 | + SDHCI_MISC_CTRL_ENABLE_SDR50 | + SDHCI_MISC_CTRL_ENABLE_DDR50 | + SDHCI_MISC_CTRL_ENABLE_SDR104); + clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE; - if
(soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) - clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; + + /* + * If the board does not define a regulator for the SDHCI + * IO voltage, then don't advertise support for UHS modes + * even if the device supports it because the IO voltage + * cannot be configured. + */ + if (!IS_ERR(host->mmc->supply.vqmmc)) { + /* Erratum: Enable SDHCI spec v3.00 support */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; + /* Advertise UHS modes as supported by host */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; + if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; + } + + sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) @@ -474,7 +483,7 @@ static struct platform_driver sdhci_tegra_driver = { .driver = { .name = "sdhci-tegra", .of_match_table = sdhci_tegra_dt_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_tegra_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0e3d7c056cb1..cd65d474afa2 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -45,65 +45,62 @@ static unsigned int debug_quirks2; static void sdhci_finish_data(struct sdhci_host *); -static void sdhci_finish_command(struct sdhci_host *); -static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); -static int sdhci_get_cd(struct mmc_host *mmc); static void sdhci_dumpregs(struct sdhci_host *host) { - pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", - mmc_hostname(host->mmc)); - - pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", - sdhci_readl(host, SDHCI_DMA_ADDRESS), - sdhci_readw(host, SDHCI_HOST_VERSION)); - pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", - sdhci_readw(host, SDHCI_BLOCK_SIZE), - sdhci_readw(host, SDHCI_BLOCK_COUNT)); - pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", - sdhci_readl(host, SDHCI_ARGUMENT), - sdhci_readw(host, SDHCI_TRANSFER_MODE)); - pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", - sdhci_readl(host, SDHCI_PRESENT_STATE), - sdhci_readb(host, SDHCI_HOST_CONTROL)); - pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", - sdhci_readb(host, SDHCI_POWER_CONTROL), - sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); - pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", - sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), - sdhci_readw(host, SDHCI_CLOCK_CONTROL)); - pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", - sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), - sdhci_readl(host, SDHCI_INT_STATUS)); - pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", - sdhci_readl(host, SDHCI_INT_ENABLE), - sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); - pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", - sdhci_readw(host, SDHCI_ACMD12_ERR), - sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); - pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", - sdhci_readl(host, SDHCI_CAPABILITIES), - sdhci_readl(host, 
SDHCI_CAPABILITIES_1)); - pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", - sdhci_readw(host, SDHCI_COMMAND), - sdhci_readl(host, SDHCI_MAX_CURRENT)); - pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", - sdhci_readw(host, SDHCI_HOST_CONTROL2)); + pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", + mmc_hostname(host->mmc)); + + pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", + sdhci_readl(host, SDHCI_DMA_ADDRESS), + sdhci_readw(host, SDHCI_HOST_VERSION)); + pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", + sdhci_readw(host, SDHCI_BLOCK_SIZE), + sdhci_readw(host, SDHCI_BLOCK_COUNT)); + pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", + sdhci_readl(host, SDHCI_ARGUMENT), + sdhci_readw(host, SDHCI_TRANSFER_MODE)); + pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", + sdhci_readl(host, SDHCI_PRESENT_STATE), + sdhci_readb(host, SDHCI_HOST_CONTROL)); + pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", + sdhci_readb(host, SDHCI_POWER_CONTROL), + sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); + pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", + sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), + sdhci_readw(host, SDHCI_CLOCK_CONTROL)); + pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", + sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), + sdhci_readl(host, SDHCI_INT_STATUS)); + pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", + sdhci_readl(host, SDHCI_INT_ENABLE), + sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); + pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", + sdhci_readw(host, SDHCI_ACMD12_ERR), + sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); + pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", + sdhci_readl(host, SDHCI_CAPABILITIES), + sdhci_readl(host, SDHCI_CAPABILITIES_1)); + pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", + sdhci_readw(host, SDHCI_COMMAND), + sdhci_readl(host, SDHCI_MAX_CURRENT)); + pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); if (host->flags & SDHCI_USE_ADMA) { if (host->flags & SDHCI_USE_64_BIT_DMA) - pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", - readl(host->ioaddr + SDHCI_ADMA_ERROR), - readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), - readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); + pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", + readl(host->ioaddr + SDHCI_ADMA_ERROR), + readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), + readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); else - pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", - readl(host->ioaddr + SDHCI_ADMA_ERROR), - readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); + pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", + readl(host->ioaddr + SDHCI_ADMA_ERROR), + readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); } - pr_debug(DRIVER_NAME ": ===========================================\n"); + pr_err(DRIVER_NAME ": ===========================================\n"); } /*****************************************************************************\ @@ -112,12 +109,17 @@ static void sdhci_dumpregs(struct sdhci_host *host) * * \*****************************************************************************/ +static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) +{ + return cmd->data || cmd->flags & MMC_RSP_BUSY; +} + static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) { u32 present; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || - (host->mmc->caps & MMC_CAP_NONREMOVABLE)) + 
!mmc_card_is_removable(host->mmc)) return; if (enable) { @@ -193,7 +195,9 @@ EXPORT_SYMBOL_GPL(sdhci_reset); static void sdhci_do_reset(struct sdhci_host *host, u8 mask) { if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { - if (!sdhci_get_cd(host->mmc)) + struct mmc_host *mmc = host->mmc; + + if (!mmc->ops->get_cd(mmc)) return; } @@ -210,10 +214,10 @@ static void sdhci_do_reset(struct sdhci_host *host, u8 mask) } } -static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); - static void sdhci_init(struct sdhci_host *host, int soft) { + struct mmc_host *mmc = host->mmc; + if (soft) sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); else @@ -225,13 +229,17 @@ static void sdhci_init(struct sdhci_host *host, int soft) SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; + if (host->tuning_mode == SDHCI_TUNING_MODE_2 || + host->tuning_mode == SDHCI_TUNING_MODE_3) + host->ier |= SDHCI_INT_RETUNE; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); if (soft) { /* force clock reconfiguration */ host->clock = 0; - sdhci_set_ios(host->mmc, &host->mmc->ios); + mmc->ops->set_ios(mmc, &mmc->ios); } } @@ -429,8 +437,6 @@ static void sdhci_transfer_pio(struct sdhci_host *host) { u32 mask; - BUG_ON(!host->data); - if (host->blocks == 0) return; @@ -747,14 +753,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) u8 ctrl; struct mmc_data *data = cmd->data; - WARN_ON(host->data); - - if (data || (cmd->flags & MMC_RSP_BUSY)) + if (sdhci_data_line_cmd(cmd)) sdhci_set_timeout(host, cmd); if (!data) return; + WARN_ON(host->data); + /* Sanity checks */ BUG_ON(data->blksz * data->blocks > 524288); BUG_ON(data->blksz > host->mmc->max_blk_size); @@ -879,6 +885,12 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); } +static inline bool sdhci_auto_cmd12(struct sdhci_host *host, + struct mmc_request *mrq) +{ + return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12); +} + static void sdhci_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd) { @@ -909,12 +921,12 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, * If we are sending CMD23, CMD12 never gets sent * on successful completion (so no Auto-CMD12). 
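The three-way outcome implied by the comment above, condensed into a sketch (logically equivalent to the transfer-mode code that follows, not its literal control flow):

	if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
		mode |= SDHCI_TRNS_AUTO_CMD23;	/* controller sends CMD23 itself */
	else if (sdhci_auto_cmd12(host, mrq) && cmd->opcode != SD_IO_RW_EXTENDED)
		mode |= SDHCI_TRNS_AUTO_CMD12;	/* controller appends CMD12 */
	/* otherwise the driver sends data->stop itself from sdhci_finish_data() */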
*/ - if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && + if (sdhci_auto_cmd12(host, cmd->mrq) && (cmd->opcode != SD_IO_RW_EXTENDED)) mode |= SDHCI_TRNS_AUTO_CMD12; - else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { mode |= SDHCI_TRNS_AUTO_CMD23; - sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); + sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); } } @@ -926,14 +938,63 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); } -static void sdhci_finish_data(struct sdhci_host *host) +static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) +{ + return (!(host->flags & SDHCI_DEVICE_DEAD) && + ((mrq->cmd && mrq->cmd->error) || + (mrq->sbc && mrq->sbc->error) || + (mrq->data && ((mrq->data->error && !mrq->data->stop) || + (mrq->data->stop && mrq->data->stop->error))) || + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); +} + +static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + int i; + + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + if (host->mrqs_done[i] == mrq) { + WARN_ON(1); + return; + } + } + + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + if (!host->mrqs_done[i]) { + host->mrqs_done[i] = mrq; + break; + } + } + + WARN_ON(i >= SDHCI_MAX_MRQS); + + tasklet_schedule(&host->finish_tasklet); +} + +static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) { - struct mmc_data *data; + if (host->cmd && host->cmd->mrq == mrq) + host->cmd = NULL; + + if (host->data_cmd && host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if (host->data && host->data->mrq == mrq) + host->data = NULL; - BUG_ON(!host->data); + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + __sdhci_finish_mrq(host, mrq); +} + +static void sdhci_finish_data(struct sdhci_host *host) +{ + struct mmc_command *data_cmd = host->data_cmd; + struct mmc_data *data = host->data; - data = host->data; host->data = NULL; + host->data_cmd = NULL; if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) @@ -958,20 +1019,41 @@ static void sdhci_finish_data(struct sdhci_host *host) */ if (data->stop && (data->error || - !host->mrq->sbc)) { + !data->mrq->sbc)) { /* * The controller needs a reset of internal state machines * upon error conditions. 
if (data->error) { - sdhci_do_reset(host, SDHCI_RESET_CMD); + if (!host->cmd || host->cmd == data_cmd) + sdhci_do_reset(host, SDHCI_RESET_CMD); sdhci_do_reset(host, SDHCI_RESET_DATA); } + /* Avoid triggering warning in sdhci_send_command() */ + host->cmd = NULL; sdhci_send_command(host, data->stop); - } else - tasklet_schedule(&host->finish_tasklet); + } else { + sdhci_finish_mrq(host, data->mrq); + } +} + +static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, + unsigned long timeout) +{ + if (sdhci_data_line_cmd(mrq->cmd)) + mod_timer(&host->data_timer, timeout); + else + mod_timer(&host->timer, timeout); +} + +static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (sdhci_data_line_cmd(mrq->cmd)) + del_timer(&host->data_timer); + else + del_timer(&host->timer); } void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) @@ -989,12 +1071,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout = 10; mask = SDHCI_CMD_INHIBIT; - if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY)) + if (sdhci_data_line_cmd(cmd)) mask |= SDHCI_DATA_INHIBIT; /* We shouldn't wait for data inhibit for stop commands, even though they might use busy signaling */ - if (host->mrq->data && (cmd == host->mrq->data->stop)) + if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) mask &= ~SDHCI_DATA_INHIBIT; while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { @@ -1003,7 +1085,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) mmc_hostname(host->mmc)); sdhci_dumpregs(host); cmd->error = -EIO; - tasklet_schedule(&host->finish_tasklet); + sdhci_finish_mrq(host, cmd->mrq); return; } timeout--; @@ -1015,10 +1097,13 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; else timeout += 10 * HZ; - mod_timer(&host->timer, timeout); + sdhci_mod_timer(host, cmd->mrq, timeout); host->cmd = cmd; - host->busy_handle = 0; + if (sdhci_data_line_cmd(cmd)) { + WARN_ON(host->data_cmd); + host->data_cmd = cmd; + } sdhci_prepare_data(host, cmd); @@ -1030,7 +1115,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) pr_err("%s: Unsupported response type!\n", mmc_hostname(host->mmc)); cmd->error = -EINVAL; - tasklet_schedule(&host->finish_tasklet); + sdhci_finish_mrq(host, cmd->mrq); return; } @@ -1059,40 +1144,58 @@ EXPORT_SYMBOL_GPL(sdhci_send_command); static void sdhci_finish_command(struct sdhci_host *host) { + struct mmc_command *cmd = host->cmd; int i; - BUG_ON(host->cmd == NULL); + host->cmd = NULL; - if (host->cmd->flags & MMC_RSP_PRESENT) { - if (host->cmd->flags & MMC_RSP_136) { + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { /* CRC is stripped so we need to do some shifting. */ for (i = 0;i < 4;i++) { - host->cmd->resp[i] = sdhci_readl(host, + cmd->resp[i] = sdhci_readl(host, SDHCI_RESPONSE + (3-i)*4) << 8; if (i != 3) - host->cmd->resp[i] |= + cmd->resp[i] |= sdhci_readb(host, SDHCI_RESPONSE + (3-i)*4-1); } } else { - host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); + cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); + } + } + + /* + * The host can send an interrupt when the busy state has + * ended, allowing us to wait without wasting CPU cycles. + * The busy signal uses DAT0 so this is similar to waiting + * for data to complete.
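For contrast with the interrupt-driven wait described above: polling the same condition means spinning on DAT[0] in the present-state register, which is exactly the CPU time the busy interrupt saves. A sketch only, not driver code:

	while (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		 SDHCI_DATA_0_LVL_MASK))
		udelay(10);	/* card still drives DAT0 low while busy */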
+ * + * Note: The 1.0 specification is a bit ambiguous about this + * feature so there might be some problems with older + * controllers. + */ + if (cmd->flags & MMC_RSP_BUSY) { + if (cmd->data) { + DBG("Cannot wait for busy signal when also doing a data transfer"); + } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && + cmd == host->data_cmd) { + /* Command complete before busy is ended */ + return; } } /* Finished CMD23, now send actual command. */ - if (host->cmd == host->mrq->sbc) { - host->cmd = NULL; - sdhci_send_command(host, host->mrq->cmd); + if (cmd == cmd->mrq->sbc) { + sdhci_send_command(host, cmd->mrq->cmd); } else { /* Processed actual command. */ if (host->data && host->data_early) sdhci_finish_data(host); - if (!host->cmd->data) - tasklet_schedule(&host->finish_tasklet); - - host->cmd = NULL; + if (!cmd->data) + sdhci_finish_mrq(host, cmd->mrq); } } @@ -1373,26 +1476,22 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->mrq != NULL); - sdhci_led_activate(host); /* * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED * requests if Auto-CMD12 is enabled. */ - if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { + if (sdhci_auto_cmd12(host, mrq)) { if (mrq->stop) { mrq->data->stop = NULL; mrq->stop = NULL; } } - host->mrq = mrq; - if (!present || host->flags & SDHCI_DEVICE_DEAD) { - host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + mrq->cmd->error = -ENOMEDIUM; + sdhci_finish_mrq(host, mrq); } else { if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_send_command(host, mrq->sbc); @@ -1617,7 +1716,7 @@ static int sdhci_get_cd(struct mmc_host *mmc) return 0; /* If nonremovable, assume that the card is always present. 
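Editor's note: earlier in this hunk, sdhci_finish_command() still performs the classic 136-bit (R2) response fix-up: the controller strips the CRC byte, so each response word is shifted up by one byte and the gap is filled from the register below it. A self-contained model over a plain byte buffer, assuming the registers are laid out little-endian as on the hosts this driver targets:

#include <stdint.h>
#include <string.h>

/* regs[] models the 16 bytes at SDHCI_RESPONSE as four little-endian words. */
static void unpack_r2_response(const uint8_t regs[16], uint32_t resp[4])
{
        uint32_t word;
        int i;

        for (i = 0; i < 4; i++) {
                memcpy(&word, &regs[(3 - i) * 4], sizeof(word));
                resp[i] = word << 8;                  /* make room for the stripped CRC byte */
                if (i != 3)                           /* lowest word has nothing below it */
                        resp[i] |= regs[(3 - i) * 4 - 1];
        }
}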
*/ - if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + if (!mmc_card_is_removable(host->mmc)) return 1; /* @@ -1733,13 +1832,14 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: + if (!(host->flags & SDHCI_SIGNALING_330)) + return -EINVAL; /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ ctrl &= ~SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); if (!IS_ERR(mmc->supply.vqmmc)) { - ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000, - 3600000); + ret = mmc_regulator_set_vqmmc(mmc, ios); if (ret) { pr_warn("%s: Switching to 3.3V signalling voltage failed\n", mmc_hostname(mmc)); @@ -1759,9 +1859,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, return -EAGAIN; case MMC_SIGNAL_VOLTAGE_180: + if (!(host->flags & SDHCI_SIGNALING_180)) + return -EINVAL; if (!IS_ERR(mmc->supply.vqmmc)) { - ret = regulator_set_voltage(mmc->supply.vqmmc, - 1700000, 1950000); + ret = mmc_regulator_set_vqmmc(mmc, ios); if (ret) { pr_warn("%s: Switching to 1.8V signalling voltage failed\n", mmc_hostname(mmc)); @@ -1790,9 +1891,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, return -EAGAIN; case MMC_SIGNAL_VOLTAGE_120: + if (!(host->flags & SDHCI_SIGNALING_120)) + return -EINVAL; if (!IS_ERR(mmc->supply.vqmmc)) { - ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000, - 1300000); + ret = mmc_regulator_set_vqmmc(mmc, ios); if (ret) { pr_warn("%s: Switching to 1.2V signalling voltage failed\n", mmc_hostname(mmc)); @@ -1811,10 +1913,10 @@ static int sdhci_card_busy(struct mmc_host *mmc) struct sdhci_host *host = mmc_priv(mmc); u32 present_state; - /* Check whether DAT[3:0] is 0000 */ + /* Check whether DAT[0] is 0 */ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); - return !(present_state & SDHCI_DATA_LVL_MASK); + return !(present_state & SDHCI_DATA_0_LVL_MASK); } static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) @@ -1909,7 +2011,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) /* * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number - * of loops reaches 40 times or a timeout of 150ms occurs. + * of loops reaches 40 times. 
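Editor's note: the sdhci_card_busy() fix above narrows the test from all four data lines to DAT0, the only line the card actually drives low while busy. With the SDHCI_PRESENT_STATE bit positions quoted here from memory (so treat the values as illustrative):

#include <stdbool.h>
#include <stdint.h>

#define DATA_0_LVL_MASK (1u << 20)   /* DAT0 level bit in SDHCI_PRESENT_STATE */
#define DATA_LVL_MASK   (0xfu << 20) /* old check: all of DAT[3:0] */

/* Busy is signalled on DAT0 alone; sampling DAT[3:0] misreads cards or
 * buses where an upper line happens to be low for unrelated reasons. */
static bool card_busy(uint32_t present_state)
{
        return !(present_state & DATA_0_LVL_MASK);
}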
*/ do { struct mmc_command cmd = {0}; @@ -1920,13 +2022,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.retries = 0; cmd.data = NULL; + cmd.mrq = &mrq; cmd.error = 0; if (tuning_loop_counter-- == 0) break; mrq.cmd = &cmd; - host->mrq = &mrq; /* * In response to CMD19, the card sends 64 bytes of tuning @@ -1956,7 +2058,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_send_command(host, &cmd); host->cmd = NULL; - host->mrq = NULL; + sdhci_del_timer(host, &mrq); spin_unlock_irqrestore(&host->lock, flags); /* Wait for Buffer Read Ready interrupt */ @@ -2086,6 +2188,24 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); } +static inline bool sdhci_has_requests(struct sdhci_host *host) +{ + return host->cmd || host->data_cmd; +} + +static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) +{ + if (host->data_cmd) { + host->data_cmd->error = err; + sdhci_finish_mrq(host, host->data_cmd->mrq); + } + + if (host->cmd) { + host->cmd->error = err; + sdhci_finish_mrq(host, host->cmd->mrq); + } +} + static void sdhci_card_event(struct mmc_host *mmc) { struct sdhci_host *host = mmc_priv(mmc); @@ -2096,12 +2216,12 @@ static void sdhci_card_event(struct mmc_host *mmc) if (host->ops->card_event) host->ops->card_event(host); - present = sdhci_get_cd(host->mmc); + present = mmc->ops->get_cd(mmc); spin_lock_irqsave(&host->lock, flags); - /* Check host->mrq first in case we are runtime suspended */ - if (host->mrq && !present) { + /* Check sdhci_has_requests() first in case we are runtime suspended */ + if (sdhci_has_requests(host) && !present) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); pr_err("%s: Resetting controller.\n", @@ -2110,8 +2230,7 @@ static void sdhci_card_event(struct mmc_host *mmc) sdhci_do_reset(host, SDHCI_RESET_CMD); sdhci_do_reset(host, SDHCI_RESET_DATA); - host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + sdhci_error_out_mrqs(host, -ENOMEDIUM); } spin_unlock_irqrestore(&host->lock, flags); @@ -2140,28 +2259,28 @@ static const struct mmc_host_ops sdhci_ops = { * * \*****************************************************************************/ -static void sdhci_tasklet_finish(unsigned long param) +static bool sdhci_request_done(struct sdhci_host *host) { - struct sdhci_host *host; unsigned long flags; struct mmc_request *mrq; - - host = (struct sdhci_host*)param; + int i; spin_lock_irqsave(&host->lock, flags); - /* - * If this tasklet gets rescheduled while running, it will - * be run again afterwards but without any active request. - */ - if (!host->mrq) { - spin_unlock_irqrestore(&host->lock, flags); - return; + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + mrq = host->mrqs_done[i]; + if (mrq) { + host->mrqs_done[i] = NULL; + break; + } } - del_timer(&host->timer); + if (!mrq) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } - mrq = host->mrq; + sdhci_del_timer(host, mrq); /* * Always unmap the data buffers if they were mapped by @@ -2183,13 +2302,7 @@ static void sdhci_tasklet_finish(unsigned long param) * The controller needs a reset of internal state machines * upon error conditions. 
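Editor's note: sdhci_error_out_mrqs(), introduced a little further up, fails whatever is still outstanding on either line. One subtlety worth spelling out: when the command line and the data line are owned by the same request, finishing it once clears both host pointers, so the request is not completed twice. A reduced model, reusing park_finished_mrq() from the first sketch (stubbed here so the block stands alone):

struct mrq;                              /* opaque request handle */

struct cmd {
        int error;
        struct mrq *mrq;
};

struct host {
        struct cmd *cmd;                 /* on the command line */
        struct cmd *data_cmd;            /* owning the data lines */
};

static void park_finished_mrq(struct mrq *mrq)
{
        (void)mrq;                       /* see the first sketch */
}

static void finish_mrq(struct host *host, struct mrq *mrq)
{
        if (host->cmd && host->cmd->mrq == mrq)
                host->cmd = NULL;
        if (host->data_cmd && host->data_cmd->mrq == mrq)
                host->data_cmd = NULL;
        park_finished_mrq(mrq);
}

static void error_out_mrqs(struct host *host, int err)
{
        if (host->data_cmd) {
                host->data_cmd->error = err;
                finish_mrq(host, host->data_cmd->mrq);
        }
        if (host->cmd) {                 /* already NULL if it shared the mrq */
                host->cmd->error = err;
                finish_mrq(host, host->cmd->mrq);
        }
}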
*/ - if (!(host->flags & SDHCI_DEVICE_DEAD) && - ((mrq->cmd && mrq->cmd->error) || - (mrq->sbc && mrq->sbc->error) || - (mrq->data && ((mrq->data->error && !mrq->data->stop) || - (mrq->data->stop && mrq->data->stop->error))) || - (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { - + if (sdhci_needs_reset(host, mrq)) { /* Some controllers need this kick or reset won't work here */ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) /* This is to force an update */ @@ -2197,20 +2310,31 @@ static void sdhci_tasklet_finish(unsigned long param) /* Spec says we should do both at the same time, but Ricoh controllers do not like that. */ - sdhci_do_reset(host, SDHCI_RESET_CMD); - sdhci_do_reset(host, SDHCI_RESET_DATA); - } + if (!host->cmd) + sdhci_do_reset(host, SDHCI_RESET_CMD); + if (!host->data_cmd) + sdhci_do_reset(host, SDHCI_RESET_DATA); - host->mrq = NULL; - host->cmd = NULL; - host->data = NULL; + host->pending_reset = false; + } - sdhci_led_deactivate(host); + if (!sdhci_has_requests(host)) + sdhci_led_deactivate(host); mmiowb(); spin_unlock_irqrestore(&host->lock, flags); mmc_request_done(host->mmc, mrq); + + return false; +} + +static void sdhci_tasklet_finish(unsigned long param) +{ + struct sdhci_host *host = (struct sdhci_host *)param; + + while (!sdhci_request_done(host)) + ; } static void sdhci_timeout_timer(unsigned long data) @@ -2222,7 +2346,30 @@ static void sdhci_timeout_timer(unsigned long data) spin_lock_irqsave(&host->lock, flags); - if (host->mrq) { + if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { + pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + + host->cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->cmd->mrq); + } + + mmiowb(); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void sdhci_timeout_data_timer(unsigned long data) +{ + struct sdhci_host *host; + unsigned long flags; + + host = (struct sdhci_host *)data; + + spin_lock_irqsave(&host->lock, flags); + + if (host->data || host->data_cmd || + (host->cmd && sdhci_data_line_cmd(host->cmd))) { pr_err("%s: Timeout waiting for hardware interrupt.\n", mmc_hostname(host->mmc)); sdhci_dumpregs(host); @@ -2230,13 +2377,12 @@ static void sdhci_timeout_timer(unsigned long data) if (host->data) { host->data->error = -ETIMEDOUT; sdhci_finish_data(host); + } else if (host->data_cmd) { + host->data_cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->data_cmd->mrq); } else { - if (host->cmd) - host->cmd->error = -ETIMEDOUT; - else - host->mrq->cmd->error = -ETIMEDOUT; - - tasklet_schedule(&host->finish_tasklet); + host->cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->cmd->mrq); } } @@ -2252,9 +2398,14 @@ static void sdhci_timeout_timer(unsigned long data) static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) { - BUG_ON(intmask == 0); - if (!host->cmd) { + /* + * SDHCI recovers from errors by resetting the cmd and data + * circuits. Until that is done, there very well might be more + * interrupts, so ignore them in that case. 
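Editor's note: with the done-slots in place, sdhci_tasklet_finish() in the hunk above becomes a drain loop: each sdhci_request_done() pass completes one parked request and the tasklet spins until a pass finds the slots empty. The control flow, reduced to its skeleton:

#include <stdbool.h>
#include <stddef.h>

#define MAX_MRQS 2

static void *mrqs_done[MAX_MRQS];        /* same two slots as the first sketch */

static void mmc_request_done_upcall(void *mrq)
{
        (void)mrq;                       /* stands in for mmc_request_done() */
}

/* Complete one parked request; return true once nothing is pending. */
static bool request_done(void)
{
        void *mrq = NULL;
        int i;

        for (i = 0; i < MAX_MRQS; i++) {
                if (mrqs_done[i]) {
                        mrq = mrqs_done[i];
                        mrqs_done[i] = NULL;
                        break;
                }
        }
        if (!mrq)
                return true;

        mmc_request_done_upcall(mrq);
        return false;
}

static void tasklet_finish(void)
{
        while (!request_done())
                ;                        /* drain both slots, one request per pass */
}

In the driver each pass also drops the matching timer, unmaps DMA buffers and decides whether a controller reset is needed before the upcall, all under the host lock; the reduced model omits that.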
+ */ + if (host->pending_reset) + return; pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", mmc_hostname(host->mmc), (unsigned)intmask); sdhci_dumpregs(host); @@ -2285,37 +2436,14 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) return; } - tasklet_schedule(&host->finish_tasklet); + sdhci_finish_mrq(host, host->cmd->mrq); return; } - /* - * The host can send and interrupt when the busy state has - * ended, allowing us to wait without wasting CPU cycles. - * Unfortunately this is overloaded on the "data complete" - * interrupt, so we need to take some care when handling - * it. - * - * Note: The 1.0 specification is a bit ambiguous about this - * feature so there might be some problems with older - * controllers. - */ - if (host->cmd->flags & MMC_RSP_BUSY) { - if (host->cmd->data) - DBG("Cannot wait for busy signal when also doing a data transfer"); - else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) - && !host->busy_handle) { - /* Mark that command complete before busy is ended */ - host->busy_handle = 1; - return; - } - - /* The controller does not support the end-of-busy IRQ, - * fall through and take the SDHCI_INT_RESPONSE */ - } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && - host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) { + if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && + !(host->cmd->flags & MMC_RSP_BUSY) && !host->data && + host->cmd->opcode == MMC_STOP_TRANSMISSION) *mask &= ~SDHCI_INT_DATA_END; - } if (intmask & SDHCI_INT_RESPONSE) sdhci_finish_command(host); @@ -2357,7 +2485,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host) { } static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { u32 command; - BUG_ON(intmask == 0); /* CMD19 generates _only_ Buffer Read Ready interrupt */ if (intmask & SDHCI_INT_DATA_AVAIL) { @@ -2371,15 +2498,20 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) } if (!host->data) { + struct mmc_command *data_cmd = host->data_cmd; + + if (data_cmd) + host->data_cmd = NULL; + /* * The "data complete" interrupt is also used to * indicate that a busy state has ended. See comment * above in sdhci_cmd_irq(). */ - if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { + if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { if (intmask & SDHCI_INT_DATA_TIMEOUT) { - host->cmd->error = -ETIMEDOUT; - tasklet_schedule(&host->finish_tasklet); + data_cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, data_cmd->mrq); return; } if (intmask & SDHCI_INT_DATA_END) { @@ -2388,14 +2520,22 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * before the command completed, so make * sure we do things in the proper order. */ - if (host->busy_handle) - sdhci_finish_command(host); - else - host->busy_handle = 1; + if (host->cmd == data_cmd) + return; + + sdhci_finish_mrq(host, data_cmd->mrq); return; } } + /* + * SDHCI recovers from errors by resetting the cmd and data + * circuits. Until that is done, there very well might be more + * interrupts, so ignore them in that case. + */ + if (host->pending_reset) + return; + pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", mmc_hostname(host->mmc), (unsigned)intmask); sdhci_dumpregs(host); @@ -2453,7 +2593,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) } if (intmask & SDHCI_INT_DATA_END) { - if (host->cmd) { + if (host->cmd == host->data_cmd) { /* * Data managed to finish before the * command completed. 
Make sure we do @@ -2537,6 +2677,9 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) pr_err("%s: Card is consuming too much power!\n", mmc_hostname(host->mmc)); + if (intmask & SDHCI_INT_RETUNE) + mmc_retune_needed(host->mmc); + if (intmask & SDHCI_INT_CARD_INT) { sdhci_enable_sdio_irq_nolock(host, false); host->thread_isr |= SDHCI_INT_CARD_INT; @@ -2546,7 +2689,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | - SDHCI_INT_CARD_INT); + SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); if (intmask) { unexpected |= intmask; @@ -2582,8 +2725,10 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) spin_unlock_irqrestore(&host->lock, flags); if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { - sdhci_card_event(host->mmc); - mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + struct mmc_host *mmc = host->mmc; + + mmc->ops->card_event(mmc); + mmc_detect_change(mmc, msecs_to_jiffies(200)); } if (isr & SDHCI_INT_CARD_INT) { @@ -2605,18 +2750,31 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) \*****************************************************************************/ #ifdef CONFIG_PM +/* + * To enable wakeup events, the corresponding events have to be enabled in + * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal + * Table' in the SD Host Controller Standard Specification. + * It is useless to restore SDHCI_INT_ENABLE state in + * sdhci_disable_irq_wakeups() since it will be set by + * sdhci_enable_card_detection() or sdhci_init(). + */ void sdhci_enable_irq_wakeups(struct sdhci_host *host) { u8 val; u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | SDHCI_WAKE_ON_INT; + u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | + SDHCI_INT_CARD_INT; val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); val |= mask ; /* Avoid fake wake up */ - if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) + if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) { val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); + irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); + } sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); + sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); } EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); @@ -2636,7 +2794,8 @@ int sdhci_suspend_host(struct sdhci_host *host) sdhci_disable_card_detection(host); mmc_retune_timer_stop(host->mmc); - mmc_retune_needed(host->mmc); + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); if (!device_may_wakeup(mmc_dev(host->mmc))) { host->ier = 0; @@ -2654,6 +2813,7 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host); int sdhci_resume_host(struct sdhci_host *host) { + struct mmc_host *mmc = host->mmc; int ret = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { @@ -2667,7 +2827,7 @@ int sdhci_resume_host(struct sdhci_host *host) sdhci_init(host, 0); host->pwr = 0; host->clock = 0; - sdhci_set_ios(host->mmc, &host->mmc->ios); + mmc->ops->set_ios(mmc, &mmc->ios); } else { sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); mmiowb(); @@ -2696,7 +2856,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) unsigned long flags; mmc_retune_timer_stop(host->mmc); - mmc_retune_needed(host->mmc); + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); spin_lock_irqsave(&host->lock, flags); host->ier &= SDHCI_INT_CARD_INT; @@ -2716,6 +2877,7 @@ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); int 
sdhci_runtime_resume_host(struct sdhci_host *host) { + struct mmc_host *mmc = host->mmc; unsigned long flags; int host_flags = host->flags; @@ -2729,8 +2891,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) /* Force clock and power re-program */ host->pwr = 0; host->clock = 0; - sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios); - sdhci_set_ios(host->mmc, &host->mmc->ios); + mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); + mmc->ops->set_ios(mmc, &mmc->ios); if ((host_flags & SDHCI_PV_ENABLED) && !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { @@ -2781,6 +2943,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, host->mmc_host_ops = sdhci_ops; mmc->ops = &host->mmc_host_ops; + host->flags = SDHCI_SIGNALING_330; + return host; } @@ -2816,10 +2980,41 @@ static int sdhci_set_dma_mask(struct sdhci_host *host) return ret; } -int sdhci_add_host(struct sdhci_host *host) +void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) +{ + u16 v; + + if (host->read_caps) + return; + + host->read_caps = true; + + if (debug_quirks) + host->quirks = debug_quirks; + + if (debug_quirks2) + host->quirks2 = debug_quirks2; + + sdhci_do_reset(host, SDHCI_RESET_ALL); + + v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); + host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; + + if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) + return; + + host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES); + + if (host->version < SDHCI_SPEC_300) + return; + + host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1); +} +EXPORT_SYMBOL_GPL(__sdhci_read_caps); + +int sdhci_setup_host(struct sdhci_host *host) { struct mmc_host *mmc; - u32 caps[2] = {0, 0}; u32 max_current_caps; unsigned int ocr_avail; unsigned int override_timeout_clk; @@ -2832,34 +3027,28 @@ int sdhci_add_host(struct sdhci_host *host) mmc = host->mmc; - if (debug_quirks) - host->quirks = debug_quirks; - if (debug_quirks2) - host->quirks2 = debug_quirks2; + /* + * If there are external regulators, get them. Note this must be done + * early before resetting the host and reading the capabilities so that + * the host can take the appropriate action if regulators are not + * available. + */ + ret = mmc_regulator_get_supply(mmc); + if (ret == -EPROBE_DEFER) + return ret; - override_timeout_clk = host->timeout_clk; + sdhci_read_caps(host); - sdhci_do_reset(host, SDHCI_RESET_ALL); + override_timeout_clk = host->timeout_clk; - host->version = sdhci_readw(host, SDHCI_HOST_VERSION); - host->version = (host->version & SDHCI_SPEC_VER_MASK) - >> SDHCI_SPEC_VER_SHIFT; if (host->version > SDHCI_SPEC_300) { pr_err("%s: Unknown controller version (%d). You may experience problems.\n", mmc_hostname(mmc), host->version); } - caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : - sdhci_readl(host, SDHCI_CAPABILITIES); - - if (host->version >= SDHCI_SPEC_300) - caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
- host->caps1 : - sdhci_readl(host, SDHCI_CAPABILITIES_1); - if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; - else if (!(caps[0] & SDHCI_CAN_DO_SDMA)) + else if (!(host->caps & SDHCI_CAN_DO_SDMA)) DBG("Controller doesn't have SDMA capability\n"); else host->flags |= SDHCI_USE_SDMA; @@ -2871,7 +3060,7 @@ int sdhci_add_host(struct sdhci_host *host) } if ((host->version >= SDHCI_SPEC_200) && - (caps[0] & SDHCI_CAN_DO_ADMA2)) + (host->caps & SDHCI_CAN_DO_ADMA2)) host->flags |= SDHCI_USE_ADMA; if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && @@ -2887,7 +3076,7 @@ int sdhci_add_host(struct sdhci_host *host) * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to * implement. */ - if (caps[0] & SDHCI_CAN_64BIT) + if (host->caps & SDHCI_CAN_64BIT) host->flags |= SDHCI_USE_64_BIT_DMA; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { @@ -2963,10 +3152,10 @@ int sdhci_add_host(struct sdhci_host *host) } if (host->version >= SDHCI_SPEC_300) - host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) + host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; else - host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) + host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; host->max_clk *= 1000000; @@ -2985,7 +3174,7 @@ int sdhci_add_host(struct sdhci_host *host) * In case of Host Controller v3.00, find out whether clock * multiplier is supported. */ - host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> + host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; /* @@ -3017,7 +3206,7 @@ int sdhci_add_host(struct sdhci_host *host) mmc->f_max = max_clk; if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { - host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> + host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; if (host->timeout_clk == 0) { if (host->ops->get_timeout_clock) { @@ -3031,7 +3220,7 @@ int sdhci_add_host(struct sdhci_host *host) } } - if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) + if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; if (override_timeout_clk) @@ -3072,27 +3261,22 @@ int sdhci_add_host(struct sdhci_host *host) if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) mmc->caps &= ~MMC_CAP_CMD23; - if (caps[0] & SDHCI_CAN_DO_HISPD) + if (host->caps & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && - !(mmc->caps & MMC_CAP_NONREMOVABLE) && + mmc_card_is_removable(mmc) && mmc_gpio_get_cd(host->mmc) < 0) mmc->caps |= MMC_CAP_NEEDS_POLL; - /* If there are external regulators, get them */ - ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) - goto undma; - /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ if (!IS_ERR(mmc->supply.vqmmc)) { ret = regulator_enable(mmc->supply.vqmmc); if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 1950000)) - caps[1] &= ~(SDHCI_SUPPORT_SDR104 | - SDHCI_SUPPORT_SDR50 | - SDHCI_SUPPORT_DDR50); + host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50); if (ret) { pr_warn("%s: Failed to enable vqmmc regulator: %d\n", mmc_hostname(mmc), ret); @@ -3100,28 +3284,30 @@ int sdhci_add_host(struct sdhci_host *host) } } - if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) - caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | - SDHCI_SUPPORT_DDR50); + if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { + host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 
| + SDHCI_SUPPORT_DDR50); + } /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ - if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | - SDHCI_SUPPORT_DDR50)) + if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50)) mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; /* SDR104 supports also implies SDR50 support */ - if (caps[1] & SDHCI_SUPPORT_SDR104) { + if (host->caps1 & SDHCI_SUPPORT_SDR104) { mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; /* SD3.0: SDR104 is supported so (for eMMC) the caps2 * field can be promoted to support HS200. */ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) mmc->caps2 |= MMC_CAP2_HS200; - } else if (caps[1] & SDHCI_SUPPORT_SDR50) + } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { mmc->caps |= MMC_CAP_UHS_SDR50; + } if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && - (caps[1] & SDHCI_SUPPORT_HS400)) + (host->caps1 & SDHCI_SUPPORT_HS400)) mmc->caps2 |= MMC_CAP2_HS400; if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && @@ -3130,25 +3316,25 @@ int sdhci_add_host(struct sdhci_host *host) 1300000))) mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; - if ((caps[1] & SDHCI_SUPPORT_DDR50) && - !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) + if ((host->caps1 & SDHCI_SUPPORT_DDR50) && + !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) mmc->caps |= MMC_CAP_UHS_DDR50; /* Does the host need tuning for SDR50? */ - if (caps[1] & SDHCI_USE_SDR50_TUNING) + if (host->caps1 & SDHCI_USE_SDR50_TUNING) host->flags |= SDHCI_SDR50_NEEDS_TUNING; /* Driver Type(s) (A, C, D) supported by the host */ - if (caps[1] & SDHCI_DRIVER_TYPE_A) + if (host->caps1 & SDHCI_DRIVER_TYPE_A) mmc->caps |= MMC_CAP_DRIVER_TYPE_A; - if (caps[1] & SDHCI_DRIVER_TYPE_C) + if (host->caps1 & SDHCI_DRIVER_TYPE_C) mmc->caps |= MMC_CAP_DRIVER_TYPE_C; - if (caps[1] & SDHCI_DRIVER_TYPE_D) + if (host->caps1 & SDHCI_DRIVER_TYPE_D) mmc->caps |= MMC_CAP_DRIVER_TYPE_D; /* Initial value for re-tuning timer count */ - host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> - SDHCI_RETUNING_TIMER_COUNT_SHIFT; + host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >> + SDHCI_RETUNING_TIMER_COUNT_SHIFT; /* * In case Re-tuning Timer is not disabled, the actual value of @@ -3158,7 +3344,7 @@ int sdhci_add_host(struct sdhci_host *host) host->tuning_count = 1 << (host->tuning_count - 1); /* Re-tuning mode supported by the Host Controller */ - host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> + host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >> SDHCI_RETUNING_MODE_SHIFT; ocr_avail = 0; @@ -3187,7 +3373,7 @@ int sdhci_add_host(struct sdhci_host *host) } } - if (caps[0] & SDHCI_CAN_VDD_330) { + if (host->caps & SDHCI_CAN_VDD_330) { ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; mmc->max_current_330 = ((max_current_caps & @@ -3195,7 +3381,7 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_MAX_CURRENT_330_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; } - if (caps[0] & SDHCI_CAN_VDD_300) { + if (host->caps & SDHCI_CAN_VDD_300) { ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; mmc->max_current_300 = ((max_current_caps & @@ -3203,7 +3389,7 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_MAX_CURRENT_300_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; } - if (caps[0] & SDHCI_CAN_VDD_180) { + if (host->caps & SDHCI_CAN_VDD_180) { ocr_avail |= MMC_VDD_165_195; mmc->max_current_180 = ((max_current_caps & @@ -3240,6 +3426,15 @@ int sdhci_add_host(struct sdhci_host *host) goto unreg; } + if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR50 | 
MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || + (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) + host->flags |= SDHCI_SIGNALING_180; + + if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) + host->flags |= SDHCI_SIGNALING_120; + spin_lock_init(&host->lock); /* @@ -3281,7 +3476,7 @@ int sdhci_add_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { mmc->max_blk_size = 2; } else { - mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> + mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; if (mmc->max_blk_size >= 3) { pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", @@ -3297,6 +3492,28 @@ int sdhci_add_host(struct sdhci_host *host) */ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; + return 0; + +unreg: + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); +undma: + if (host->align_buffer) + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + + host->adma_table_sz, host->align_buffer, + host->align_addr); + host->adma_table = NULL; + host->align_buffer = NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_setup_host); + +int __sdhci_add_host(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + int ret; + /* * Init tasklets. */ @@ -3304,6 +3521,8 @@ int sdhci_add_host(struct sdhci_host *host) sdhci_tasklet_finish, (unsigned long)host); setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); + setup_timer(&host->data_timer, sdhci_timeout_data_timer, + (unsigned long)host); init_waitqueue_head(&host->buf_ready_int); @@ -3353,10 +3572,10 @@ unirq: free_irq(host->irq, host); untasklet: tasklet_kill(&host->finish_tasklet); -unreg: + if (!IS_ERR(mmc->supply.vqmmc)) regulator_disable(mmc->supply.vqmmc); -undma: + if (host->align_buffer) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, host->align_buffer, @@ -3366,7 +3585,18 @@ undma: return ret; } +EXPORT_SYMBOL_GPL(__sdhci_add_host); + +int sdhci_add_host(struct sdhci_host *host) +{ + int ret; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + return __sdhci_add_host(host); +} EXPORT_SYMBOL_GPL(sdhci_add_host); void sdhci_remove_host(struct sdhci_host *host, int dead) @@ -3379,12 +3609,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) host->flags |= SDHCI_DEVICE_DEAD; - if (host->mrq) { + if (sdhci_has_requests(host)) { pr_err("%s: Controller removed during " " transfer!\n", mmc_hostname(mmc)); - - host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + sdhci_error_out_mrqs(host, -ENOMEDIUM); } spin_unlock_irqrestore(&host->lock, flags); @@ -3404,6 +3632,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) free_irq(host->irq, host); del_timer_sync(&host->timer); + del_timer_sync(&host->data_timer); tasklet_kill(&host->finish_tasklet); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 609f87ca536b..0411c9f36461 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -128,6 +128,7 @@ #define SDHCI_INT_CARD_INSERT 0x00000040 #define SDHCI_INT_CARD_REMOVE 0x00000080 #define SDHCI_INT_CARD_INT 0x00000100 +#define SDHCI_INT_RETUNE 0x00001000 #define SDHCI_INT_ERROR 0x00008000 #define SDHCI_INT_TIMEOUT 0x00010000 #define SDHCI_INT_CRC 0x00020000 @@ -186,6 +187,7 @@ #define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 #define SDHCI_CAN_DO_SDMA 0x00400000 +#define SDHCI_CAN_DO_SUSPEND 0x00800000 #define SDHCI_CAN_VDD_330 0x01000000 #define 
SDHCI_CAN_VDD_300 0x02000000 #define SDHCI_CAN_VDD_180 0x04000000 @@ -314,6 +316,9 @@ struct sdhci_adma2_64_desc { */ #define SDHCI_MAX_SEGS 128 +/* Allow for a a command request and a data request at the same time */ +#define SDHCI_MAX_MRQS 2 + enum sdhci_cookie { COOKIE_UNMAPPED, COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */ @@ -447,6 +452,9 @@ struct sdhci_host { #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ +#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */ +#define SDHCI_SIGNALING_180 (1<<15) /* Host is capable of 1.8V signaling */ +#define SDHCI_SIGNALING_120 (1<<16) /* Host is capable of 1.2V signaling */ unsigned int version; /* SDHCI spec. version */ @@ -460,12 +468,13 @@ struct sdhci_host { bool runtime_suspended; /* Host is runtime suspended */ bool bus_on; /* Bus power prevents runtime suspend */ bool preset_enabled; /* Preset is enabled */ + bool pending_reset; /* Cmd/data reset is pending */ - struct mmc_request *mrq; /* Current request */ + struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ + struct mmc_command *data_cmd; /* Current data command */ struct mmc_data *data; /* Current data request */ unsigned int data_early:1; /* Data finished before cmd */ - unsigned int busy_handle:1; /* Handling the order of Busy-end */ struct sg_mapping_iter sg_miter; /* SG state for PIO */ unsigned int blocks; /* remaining PIO blocks */ @@ -486,9 +495,11 @@ struct sdhci_host { struct tasklet_struct finish_tasklet; /* Tasklet structures */ struct timer_list timer; /* Timer for timeouts */ + struct timer_list data_timer; /* Timer for data timeouts */ - u32 caps; /* Alternative CAPABILITY_0 */ - u32 caps1; /* Alternative CAPABILITY_1 */ + u32 caps; /* CAPABILITY_0 */ + u32 caps1; /* CAPABILITY_1 */ + bool read_caps; /* Capability flags have been read */ unsigned int ocr_avail_sdio; /* OCR bit masks */ unsigned int ocr_avail_sd; @@ -508,6 +519,8 @@ struct sdhci_host { unsigned int tuning_count; /* Timer count for re-tuning */ unsigned int tuning_mode; /* Re-tuning mode supported by host */ #define SDHCI_TUNING_MODE_1 0 +#define SDHCI_TUNING_MODE_2 1 +#define SDHCI_TUNING_MODE_3 2 unsigned long private[0] ____cacheline_aligned; }; @@ -645,11 +658,20 @@ static inline void *sdhci_priv(struct sdhci_host *host) } extern void sdhci_card_detect(struct sdhci_host *host); +extern void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, + u32 *caps1); +extern int sdhci_setup_host(struct sdhci_host *host); +extern int __sdhci_add_host(struct sdhci_host *host); extern int sdhci_add_host(struct sdhci_host *host); extern void sdhci_remove_host(struct sdhci_host *host, int dead); extern void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); +static inline void sdhci_read_caps(struct sdhci_host *host) +{ + __sdhci_read_caps(host, NULL, NULL, NULL); +} + static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) { return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 983b8b32ef96..111b66f5439b 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -222,7 +222,7 @@ static struct platform_driver sdhci_f_sdh30_driver = { .driver = { .name = "f_sdh30", .of_match_table = f_sdh30_dt_ids, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_pltfm_pmops, 
}, .probe = sdhci_f_sdh30_probe, .remove = sdhci_f_sdh30_remove, diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index dd64b8663984..900778421be6 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -574,7 +574,7 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); - for (timeout = 10000000; timeout; timeout--) { + for (timeout = 10000; timeout; timeout--) { if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) & STS1_CMDSEQ)) break; @@ -819,10 +819,12 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, tmp |= CMD_SET_RTYP_NO; break; case MMC_RSP_R1: - case MMC_RSP_R1B: case MMC_RSP_R3: tmp |= CMD_SET_RTYP_6B; break; + case MMC_RSP_R1B: + tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B; + break; case MMC_RSP_R2: tmp |= CMD_SET_RTYP_17B; break; @@ -830,17 +832,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, dev_err(dev, "Unsupported response type.\n"); break; } - switch (opc) { - /* RBSY */ - case MMC_SLEEP_AWAKE: - case MMC_SWITCH: - case MMC_STOP_TRANSMISSION: - case MMC_SET_WRITE_PROT: - case MMC_CLR_WRITE_PROT: - case MMC_ERASE: - tmp |= CMD_SET_RBSY; - break; - } + /* WDAT / DATW */ if (data) { tmp |= CMD_SET_WDAT; @@ -925,23 +917,13 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, { struct mmc_command *cmd = mrq->cmd; u32 opc = cmd->opcode; - u32 mask; + u32 mask = 0; unsigned long flags; - switch (opc) { - /* response busy check */ - case MMC_SLEEP_AWAKE: - case MMC_SWITCH: - case MMC_STOP_TRANSMISSION: - case MMC_SET_WRITE_PROT: - case MMC_CLR_WRITE_PROT: - case MMC_ERASE: + if (cmd->flags & MMC_RSP_BUSY) mask = MASK_START_CMD | MASK_MRBSYE; - break; - default: + else mask = MASK_START_CMD | MASK_MCRSPE; - break; - } if (host->ccs_enable) mask |= MASK_MCCSTO; @@ -1009,22 +991,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) host->state = STATE_REQUEST; spin_unlock_irqrestore(&host->lock, flags); - switch (mrq->cmd->opcode) { - /* MMCIF does not support SD/SDIO command */ - case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */ - case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ - if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) - break; - case MMC_APP_CMD: - case SD_IO_RW_DIRECT: - host->state = STATE_IDLE; - mrq->cmd->error = -ETIMEDOUT; - mmc_request_done(mmc, mrq); - return; - default: - break; - } - host->mrq = mrq; sh_mmcif_start_cmd(host, mrq); @@ -1488,6 +1454,9 @@ static int sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_init_ocr(host); mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; + mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; + mmc->max_busy_timeout = 10000; + if (pd && pd->caps) mmc->caps |= pd->caps; mmc->max_segs = 32; diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index f750f9494410..c3b651bf89cb 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -39,6 +39,12 @@ #define EXT_ACC 0xe4 +#define SDHI_VER_GEN2_SDR50 0x490c +/* very old datasheets said 0x490c for SDR104, too. They are wrong! 
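Editor's note: in the sh_mmcif hunk above, two per-opcode switch statements collapse into a single test of the response type: any R1b command needs the response-busy machinery, everything else the plain response-end interrupt. A sketch of the interrupt-mask selection, with illustrative bit values standing in for the real MASK_* constants in sh_mmcif.c:

#define RSP_BUSY        0x4u        /* stand-in for MMC_RSP_BUSY */
#define MASK_START_CMD  (1u << 0)   /* illustrative interrupt-mask bits */
#define MASK_MRBSYE     (1u << 1)   /* response-busy end */
#define MASK_MCRSPE     (1u << 2)   /* command-response end */

/* Deriving the mask from the response flags means new busy-signalling
 * opcodes need no driver change, unlike the old hard-coded
 * MMC_SLEEP_AWAKE/MMC_SWITCH/... list. */
static unsigned int irq_mask_for(unsigned int rsp_flags)
{
        return MASK_START_CMD |
               ((rsp_flags & RSP_BUSY) ? MASK_MRBSYE : MASK_MCRSPE);
}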
*/ +#define SDHI_VER_GEN2_SDR104 0xcb0d +#define SDHI_VER_GEN3_SD 0xcc10 +#define SDHI_VER_GEN3_SDMMC 0xcd10 + #define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data) struct sh_mobile_sdhi_of_data { @@ -109,14 +115,14 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) * sh_mobile_sdhi_of_data :: dma_buswidth */ switch (sd_ctrl_read16(host, CTL_VERSION)) { - case 0x490C: + case SDHI_VER_GEN2_SDR50: val = (width == 32) ? 0x0001 : 0x0000; break; - case 0xCB0D: + case SDHI_VER_GEN2_SDR104: val = (width == 32) ? 0x0000 : 0x0001; break; - case 0xCC10: /* Gen3, SD only */ - case 0xCD10: /* Gen3, SD + MMC */ + case SDHI_VER_GEN3_SD: + case SDHI_VER_GEN3_SDMMC: if (width == 64) val = 0x0000; else if (width == 32) diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 1aac2ad8edf2..7f63ec05bdf4 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -259,7 +259,7 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val) { - writew(val, host->ctl + (addr << host->bus_shift)); + writew(val & 0xffff, host->ctl + (addr << host->bus_shift)); writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index f44e2ab7aea2..92467efc4e2c 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1086,7 +1086,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || mmc->caps & MMC_CAP_NEEDS_POLL || - mmc->caps & MMC_CAP_NONREMOVABLE || + !mmc_card_is_removable(mmc) || mmc->slot.cd_irq >= 0); if (tmio_mmc_clk_enable(_host) < 0) { diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 9a1a6ffd16b8..94d3eb42c4d5 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -416,7 +416,7 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t return ret; } -static inline int do_write_buffer(struct map_info *map, struct flchip *chip, +static int do_write_buffer(struct map_info *map, struct flchip *chip, unsigned long adr, const u_char *buf, int len) { struct cfi_private *cfi = map->fldrv_priv; diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 64a248556d29..58329d2dacd1 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -113,12 +113,12 @@ config MTD_SST25L if you want to specify device partitioning. config MTD_BCM47XXSFLASH - tristate "R/O support for serial flash on BCMA bus" + tristate "Support for serial flash on BCMA bus" depends on BCMA_SFLASH && (MIPS || ARM) help BCMA bus can have various flash memories attached, they are registered by bcma as platform devices. This enables driver for - serial flash memories (only read-only mode is implemented). + serial flash memories. config MTD_SLRAM tristate "Uncached system RAM" @@ -171,18 +171,6 @@ config MTDRAM_ERASE_SIZE as a module, it is also possible to specify this as a parameter when loading the module. 
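Editor's note: the tmio_mmc.h change earlier in this hunk makes the truncation in the low half of a split 32-bit write explicit. writew() takes a 16-bit value, so the low half was being truncated implicitly before; the added `val & 0xffff` spells the intent out. The pattern in isolation:

#include <stdint.h>

static void write16(volatile uint16_t *reg, uint16_t val)
{
        *reg = val;
}

/* Emulate a 32-bit register write on a bus that only does 16-bit accesses:
 * low half first, then the high half at the next 16-bit address. */
static void write32_as_16_and_16(volatile uint16_t *base, uint32_t val)
{
        write16(base,     val & 0xffff);  /* explicit mask mirrors the patch */
        write16(base + 1, val >> 16);
}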
-#If not a module (I don't want to test it as a module) -config MTDRAM_ABS_POS - hex "SRAM Hexadecimal Absolute position or 0" - depends on MTD_MTDRAM=y - default "0" - help - If you have system RAM accessible by the CPU but not used by Linux - in normal operation, you can give the physical address at which the - available RAM starts, and the MTDRAM driver will use it instead of - allocating space from Linux's available memory. Otherwise, leave - this set to zero. Most people will want to leave this as zero. - config MTD_BLOCK2MTD tristate "MTD using block device" depends on BLOCK diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 9d6854467651..9cf7fcd28034 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -73,14 +73,15 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return spi_write(spi, flash->command, len + 1); } -static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, - size_t *retlen, const u_char *buf) +static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) { struct m25p *flash = nor->priv; struct spi_device *spi = flash->spi; struct spi_transfer t[2] = {}; struct spi_message m; int cmd_sz = m25p_cmdsz(nor); + ssize_t ret; spi_message_init(&m); @@ -98,9 +99,14 @@ static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, t[1].len = len; spi_message_add_tail(&t[1], &m); - spi_sync(spi, &m); + ret = spi_sync(spi, &m); + if (ret) + return ret; - *retlen += m.actual_length - cmd_sz; + ret = m.actual_length - cmd_sz; + if (ret < 0) + return -EIO; + return ret; } static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) @@ -119,21 +125,21 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) * Read an address range from the nor chip. The address range * may be any size provided it is within the physical boundaries. 
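Editor's note: the m25p80_write() conversion above moves the driver to the spi-nor convention of returning a byte count or a negative errno, rather than filling a *retlen out-parameter and swallowing spi_sync() failures. A sketch of that contract, with do_transfer() as a stand-in for the SPI message round-trip:

#include <sys/types.h>   /* ssize_t */
#include <stddef.h>
#include <errno.h>

/* spi_sync() stand-in: 0 on success, negative errno on failure;
 * *actual reports total bytes shifted out, opcode included. */
static int do_transfer(const void *buf, size_t len, size_t *actual)
{
        (void)buf;
        *actual = len;                   /* pretend the whole message went out */
        return 0;
}

static ssize_t nor_write(const void *buf, size_t len, size_t cmd_sz)
{
        size_t actual;
        int ret = do_transfer(buf, cmd_sz + len, &actual);

        if (ret)
                return ret;              /* propagate the bus error */
        if (actual < cmd_sz)
                return -EIO;             /* not even the opcode went out */
        return actual - cmd_sz;          /* payload bytes actually written */
}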
*/ -static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, - size_t *retlen, u_char *buf) +static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) { struct m25p *flash = nor->priv; struct spi_device *spi = flash->spi; struct spi_transfer t[2]; struct spi_message m; unsigned int dummy = nor->read_dummy; + ssize_t ret; /* convert the dummy cycles to the number of bytes */ dummy /= 8; if (spi_flash_read_supported(spi)) { struct spi_flash_read_message msg; - int ret; memset(&msg, 0, sizeof(msg)); @@ -149,8 +155,9 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, msg.data_nbits = m25p80_rx_nbits(nor); ret = spi_flash_read(spi, &msg); - *retlen = msg.retlen; - return ret; + if (ret < 0) + return ret; + return msg.retlen; } spi_message_init(&m); @@ -165,13 +172,17 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, t[1].rx_buf = buf; t[1].rx_nbits = m25p80_rx_nbits(nor); - t[1].len = len; + t[1].len = min(len, spi_max_transfer_size(spi)); spi_message_add_tail(&t[1], &m); - spi_sync(spi, &m); + ret = spi_sync(spi, &m); + if (ret) + return ret; - *retlen = m.actual_length - m25p_cmdsz(nor) - dummy; - return 0; + ret = m.actual_length - m25p_cmdsz(nor) - dummy; + if (ret < 0) + return -EIO; + return ret; } /* diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index d5b870b3fd4e..f5396f26ddb4 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -95,7 +95,7 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op, return -EIO; } - rc = be64_to_cpu(msg.params[1]); + rc = opal_get_async_rc(msg); if (rc == OPAL_SUCCESS) { rc = 0; if (retlen) diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 22f3858c0364..3fad35942895 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -186,7 +186,7 @@ static int of_flash_probe(struct platform_device *dev) * consists internally of 2 non-identical NOR chips on one die. 
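Editor's note: m25p80_read() above now clamps each transfer to spi_max_transfer_size() and reports how much it actually moved; together with the ssize_t return type this lets the caller loop over short reads instead of silently over-running a controller's transfer limit. The caller-side loop, roughly as a core layer would drive it (nor_read() stubbed to model a hypothetical 4 KiB cap):

#include <sys/types.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>

static ssize_t nor_read(long long from, size_t len, uint8_t *buf)
{
        (void)from;
        (void)buf;
        return len > 4096 ? 4096 : (ssize_t)len;   /* may return fewer bytes */
}

static ssize_t read_all(long long from, size_t len, uint8_t *buf)
{
        size_t done = 0;

        while (done < len) {
                ssize_t ret = nor_read(from + done, len - done, buf + done);

                if (ret < 0)
                        return ret;      /* bus error: give up */
                if (ret == 0)
                        return -EIO;     /* no forward progress */
                done += ret;
        }
        return done;
}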
*/ p = of_get_property(dp, "reg", &count); - if (count % reg_tuple_size != 0) { + if (!p || count % reg_tuple_size != 0) { dev_err(&dev->dev, "Malformed reg property on %s\n", dev->dev.of_node->full_name); err = -EINVAL; diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index 744ca5cacc9b..f9fa3fad728e 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c @@ -75,15 +75,15 @@ static int __init init_msp_flash(void) printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); - msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); + msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); if (!msp_flash) return -ENOMEM; - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); + msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); if (!msp_parts) goto free_msp_flash; - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); + msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); if (!msp_maps) goto free_msp_parts; diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 142fc3d79463..784c6e1a0391 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, info->mtd = mtd_concat_create(cdev, info->num_subdev, plat->name); - if (info->mtd == NULL) + if (info->mtd == NULL) { ret = -ENXIO; + goto err; + } } info->mtd->dev.parent = &pdev->dev; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index f05e0e9eb2f7..21ff58099f3b 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC config MTD_NAND_FSL_IFC tristate "NAND support for Freescale IFC controller" - depends on MTD_NAND && FSL_SOC + depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) select FSL_IFC select MEMORY help @@ -539,7 +539,6 @@ config MTD_NAND_FSMC config MTD_NAND_XWAY tristate "Support for NAND on Lantiq XWAY SoC" depends on LANTIQ && SOC_TYPE_XWAY - select MTD_NAND_PLATFORM help Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached to the External Bus Unit (EBU). @@ -563,4 +562,11 @@ config MTD_NAND_QCOM Enables support for NAND flash chips on SoCs containing the EBI2 NAND controller. This controller is found on IPQ806x SoC. +config MTD_NAND_MTK + tristate "Support for NAND controller on MTK SoCs" + depends on HAS_DMA + help + Enables support for NAND controller on MTK SoCs. + This controller is found on mt27xx, mt81xx, mt65xx SoCs. 
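Editor's note: two recurring hardening idioms in the MTD fixes above — physmap_of now refuses a missing "reg" property before doing arithmetic on its length, and pmcmsp-flash sizes each allocation from the destination pointer so the type can never drift. Both in miniature:

#include <stdlib.h>
#include <errno.h>

struct map_info;

static int count_reg_tuples(const void *p, int count, int tuple_size)
{
        /* of_get_property() may return NULL; only then is 'count' meaningless */
        if (!p || count % tuple_size != 0)
                return -EINVAL;
        return count / tuple_size;
}

static struct map_info **alloc_maps(int fcnt)
{
        struct map_info **maps;

        /* sizeof(*maps) tracks the declared type even if it changes later */
        maps = calloc(fcnt, sizeof(*maps));
        return maps;                     /* caller checks for NULL */
}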
+ endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index f55335373f7c..cafde6f3d957 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o +obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index b76ad7c0144f..8eb2c64df38c 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = { [BRCMNAND_FC_BASE] = 0x400, }; +/* BRCMNAND v7.2 */ +static const u16 brcmnand_regs_v72[] = { + [BRCMNAND_CMD_START] = 0x04, + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, + [BRCMNAND_CMD_ADDRESS] = 0x0c, + [BRCMNAND_INTFC_STATUS] = 0x14, + [BRCMNAND_CS_SELECT] = 0x18, + [BRCMNAND_CS_XOR] = 0x1c, + [BRCMNAND_LL_OP] = 0x20, + [BRCMNAND_CS0_BASE] = 0x50, + [BRCMNAND_CS1_BASE] = 0, + [BRCMNAND_CORR_THRESHOLD] = 0xdc, + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, + [BRCMNAND_UNCORR_COUNT] = 0xfc, + [BRCMNAND_CORR_COUNT] = 0x100, + [BRCMNAND_CORR_EXT_ADDR] = 0x10c, + [BRCMNAND_CORR_ADDR] = 0x110, + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, + [BRCMNAND_UNCORR_ADDR] = 0x118, + [BRCMNAND_SEMAPHORE] = 0x150, + [BRCMNAND_ID] = 0x194, + [BRCMNAND_ID_EXT] = 0x198, + [BRCMNAND_LL_RDATA] = 0x19c, + [BRCMNAND_OOB_READ_BASE] = 0x200, + [BRCMNAND_OOB_READ_10_BASE] = 0, + [BRCMNAND_OOB_WRITE_BASE] = 0x400, + [BRCMNAND_OOB_WRITE_10_BASE] = 0, + [BRCMNAND_FC_BASE] = 0x600, +}; + enum brcmnand_cs_reg { BRCMNAND_CS_CFG_EXT = 0, BRCMNAND_CS_CFG, @@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } /* Register offsets */ - if (ctrl->nand_version >= 0x0701) + if (ctrl->nand_version >= 0x0702) + ctrl->reg_offsets = brcmnand_regs_v72; + else if (ctrl->nand_version >= 0x0701) ctrl->reg_offsets = brcmnand_regs_v71; else if (ctrl->nand_version >= 0x0600) ctrl->reg_offsets = brcmnand_regs_v60; @@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } /* Maximum spare area sector size (per 512B) */ - if (ctrl->nand_version >= 0x0600) + if (ctrl->nand_version >= 0x0702) + ctrl->max_oob = 128; + else if (ctrl->nand_version >= 0x0600) ctrl->max_oob = 64; else if (ctrl->nand_version >= 0x0500) ctrl->max_oob = 32; @@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; int cs = host->cs; - if (ctrl->nand_version >= 0x0600) + if (ctrl->nand_version >= 0x0702) + bits = 7; + else if (ctrl->nand_version >= 0x0600) bits = 6; else if (ctrl->nand_version >= 0x0500) bits = 5; else bits = 4; - if (ctrl->nand_version >= 0x0600) { + if (ctrl->nand_version >= 0x0702) { + if (cs >= 4) + reg = BRCMNAND_CORR_THRESHOLD_EXT; + shift = (cs % 4) * bits; + } else if (ctrl->nand_version >= 0x0600) { if (cs >= 5) reg = BRCMNAND_CORR_THRESHOLD_EXT; shift = (cs % 5) * bits; @@ -631,19 +671,28 @@ enum { static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) { - if (ctrl->nand_version >= 0x0600) + if (ctrl->nand_version >= 0x0702) + return GENMASK(7, 0); + else if (ctrl->nand_version >= 0x0600) return GENMASK(6, 0); else return GENMASK(5, 0); } #define NAND_ACC_CONTROL_ECC_SHIFT 16 +#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 static inline u32 
brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) { u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; - return mask << NAND_ACC_CONTROL_ECC_SHIFT; + mask <<= NAND_ACC_CONTROL_ECC_SHIFT; + + /* v7.2 includes additional ECC levels */ + if (ctrl->nand_version >= 0x0702) + mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; + + return mask; } static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) @@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) { - if (ctrl->nand_version >= 0x0600) + if (ctrl->nand_version >= 0x0702) + return 9; + else if (ctrl->nand_version >= 0x0600) return 7; else if (ctrl->nand_version >= 0x0500) return 6; @@ -773,10 +824,16 @@ enum brcmnand_llop_type { * Internal support functions ***********************************************************************/ -static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg) +static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, + struct brcmnand_cfg *cfg) { - return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && - cfg->ecc_level == 15; + if (ctrl->nand_version <= 0x0701) + return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && + cfg->ecc_level == 15; + else + return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && + cfg->ecc_level == 15) || + (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); } /* @@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) if (p->sector_size_1k) ecc_level <<= 1; - if (is_hamming_ecc(p)) { + if (is_hamming_ecc(host->ctrl, p)) { ecc->bytes = 3 * sectors; mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); return 0; @@ -1108,7 +1165,7 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) ctrl->cmd_pending = cmd; intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); - BUG_ON(!(intfc & INTFC_CTLR_READY)); + WARN_ON(!(intfc & INTFC_CTLR_READY)); mb(); /* flush previous writes */ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, @@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, return ret; } +/* + * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC + * error + * + * Because the HW ECC signals an ECC error if an erased page has even a single + * bitflip, we must check each ECC error to see if it is actually an erased + * page with bitflips, not a truly corrupted page. + * + * On a real error, return a negative error code (-EBADMSG for ECC error), and + * buf will contain raw data. + * Otherwise, buf gets filled with 0xffs and the maximum number of + * bitflips-per-ECC-sector is returned to the caller.
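Editor's note: brcmstb_nand_verify_erased_page(), whose doc-comment ends just below, re-reads the page raw and asks nand_check_erased_ecc_chunk() whether each sector is merely "erased with a few bitflips". The core of that test is popcounting the 0-bits and comparing against the ECC strength — modelled here for the data region only (the real helper also folds in the OOB bytes and rewrites the buffer to 0xff on success), using the GCC/Clang popcount builtin:

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

static int count_zero_bits(const uint8_t *buf, size_t len)
{
        int flips = 0;
        size_t i;

        for (i = 0; i < len; i++)
                flips += __builtin_popcount(buf[i] ^ 0xff);  /* 0-bits = flips from 0xff */
        return flips;
}

/* Return the bitflip count if the sector is erased-with-flips, or -EBADMSG
 * if too many bits are clear for this to be an erased sector at all. */
static int check_erased_sector(const uint8_t *data, size_t len, int strength)
{
        int flips = count_zero_bits(data, len);

        return flips <= strength ? flips : -EBADMSG;
}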
+ * + */ +static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, + struct nand_chip *chip, void *buf, u64 addr) +{ + int i, sas; + void *oob = chip->oob_poi; + int bitflips = 0; + int page = addr >> chip->page_shift; + int ret; + + if (!buf) { + buf = chip->buffers->databuf; + /* Invalidate page cache */ + chip->pagebuf = -1; + } + + sas = mtd->oobsize / chip->ecc.steps; + + /* read without ecc for verification */ + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); + if (ret) + return ret; + + for (i = 0; i < chip->ecc.steps; i++, oob += sas) { + ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, + oob, sas, NULL, 0, + chip->ecc.strength); + if (ret < 0) + return ret; + + bitflips = max(bitflips, ret); + } + + return bitflips; +} + static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, u64 addr, unsigned int trans, u32 *buf, u8 *oob) { @@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, struct brcmnand_controller *ctrl = host->ctrl; u64 err_addr = 0; int err; + bool retry = true; dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); +try_dmaread: brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { @@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, } if (mtd_is_eccerr(err)) { + /* + * On controller versions 7.0 and 7.1, a DMA read issued after + * a prior PIO read that reported an uncorrectable error can + * latch that stale error; it is cleared only by a subsequent + * DMA read, so just retry once to clear a possible false error + * reported for the current DMA read + */ + if ((ctrl->nand_version == 0x0700) || + (ctrl->nand_version == 0x0701)) { + if (retry) { + retry = false; + goto try_dmaread; + } + } + + /* + * Controller version 7.2 detects erased-page bitflips in + * hardware; apply the software verification for older + * controllers only + */ + if (ctrl->nand_version < 0x0702) { + err = brcmstb_nand_verify_erased_page(mtd, chip, buf, + addr); + /* erased page bitflips corrected */ + if (err > 0) + return err; + } + dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", (unsigned long long)err_addr); mtd->ecc_stats.failed++; @@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, return 0; } -static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) +static void brcmnand_print_cfg(struct brcmnand_host *host, + char *buf, struct brcmnand_cfg *cfg) { buf += sprintf(buf, "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", @@ -1868,7 +2006,7 @@ cfg->spare_area_size, cfg->device_width); /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ - if (is_hamming_ecc(cfg)) + if (is_hamming_ecc(host->ctrl, cfg)) sprintf(buf, ", Hamming ECC"); else if (cfg->sector_size_1k) sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); @@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) brcmnand_set_ecc_enabled(host, 1); - brcmnand_print_cfg(msg, cfg); + brcmnand_print_cfg(host, msg, cfg); dev_info(ctrl->dev, "detected %s\n", msg); /* Configure ACC_CONTROL */ @@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) tmp = nand_readreg(ctrl, offs); tmp &= ~ACC_CONTROL_PARTIAL_PAGE; tmp &= ~ACC_CONTROL_RD_ERASED; + + /* We need to turn on reads from erased pages protected by ECC */ + if
(ctrl->nand_version >= 0x0702) + tmp |= ACC_CONTROL_RD_ERASED; tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; if (ctrl->features & BRCMNAND_HAS_PREFETCH) { /* @@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = { { .compatible = "brcm,brcmnand-v6.2" }, { .compatible = "brcm,brcmnand-v7.0" }, { .compatible = "brcm,brcmnand-v7.1" }, + { .compatible = "brcm,brcmnand-v7.2" }, {}, }; MODULE_DEVICE_TABLE(of, brcmnand_of_match); diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c index d74f4ba4a6f4..731c6051d91e 100644 --- a/drivers/mtd/nand/jz4780_bch.c +++ b/drivers/mtd/nand/jz4780_bch.c @@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = { module_platform_driver(jz4780_bch_driver); MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); -MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); +MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c index daf3c4217f4d..175f67da25af 100644 --- a/drivers/mtd/nand/jz4780_nand.c +++ b/drivers/mtd/nand/jz4780_nand.c @@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = { module_platform_driver(jz4780_nand_driver); MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); -MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); +MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c new file mode 100644 index 000000000000..25a4fbd4d24a --- /dev/null +++ b/drivers/mtd/nand/mtk_ecc.c @@ -0,0 +1,530 @@ +/* + * MTK ECC controller driver. + * Copyright (C) 2016 MediaTek Inc. + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/iopoll.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/mutex.h> + +#include "mtk_ecc.h" + +#define ECC_IDLE_MASK BIT(0) +#define ECC_IRQ_EN BIT(0) +#define ECC_OP_ENABLE (1) +#define ECC_OP_DISABLE (0) + +#define ECC_ENCCON (0x00) +#define ECC_ENCCNFG (0x04) +#define ECC_CNFG_4BIT (0) +#define ECC_CNFG_6BIT (1) +#define ECC_CNFG_8BIT (2) +#define ECC_CNFG_10BIT (3) +#define ECC_CNFG_12BIT (4) +#define ECC_CNFG_14BIT (5) +#define ECC_CNFG_16BIT (6) +#define ECC_CNFG_18BIT (7) +#define ECC_CNFG_20BIT (8) +#define ECC_CNFG_22BIT (9) +#define ECC_CNFG_24BIT (0xa) +#define ECC_CNFG_28BIT (0xb) +#define ECC_CNFG_32BIT (0xc) +#define ECC_CNFG_36BIT (0xd) +#define ECC_CNFG_40BIT (0xe) +#define ECC_CNFG_44BIT (0xf) +#define ECC_CNFG_48BIT (0x10) +#define ECC_CNFG_52BIT (0x11) +#define ECC_CNFG_56BIT (0x12) +#define ECC_CNFG_60BIT (0x13) +#define ECC_MODE_SHIFT (5) +#define ECC_MS_SHIFT (16) +#define ECC_ENCDIADDR (0x08) +#define ECC_ENCIDLE (0x0C) +#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32)) +#define ECC_ENCIRQ_EN (0x80) +#define ECC_ENCIRQ_STA (0x84) +#define ECC_DECCON (0x100) +#define ECC_DECCNFG (0x104) +#define DEC_EMPTY_EN BIT(31) +#define DEC_CNFG_CORRECT (0x3 << 12) +#define ECC_DECIDLE (0x10C) +#define ECC_DECENUM0 (0x114) +#define ERR_MASK (0x3f) +#define ECC_DECDONE (0x124) +#define ECC_DECIRQ_EN (0x200) +#define ECC_DECIRQ_STA (0x204) + +#define ECC_TIMEOUT (500000) + +#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) +#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) +#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \ + ECC_ENCIRQ_EN : ECC_DECIRQ_EN) + +struct mtk_ecc { + struct device *dev; + void __iomem *regs; + struct clk *clk; + + struct completion done; + struct mutex lock; + u32 sectors; +}; + +static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, + enum mtk_ecc_operation op) +{ + struct device *dev = ecc->dev; + u32 val; + int ret; + + ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, + val & ECC_IDLE_MASK, + 10, ECC_TIMEOUT); + if (ret) + dev_warn(dev, "%s NOT idle\n", + op == ECC_ENCODE ? 
"encoder" : "decoder"); +} + +static irqreturn_t mtk_ecc_irq(int irq, void *id) +{ + struct mtk_ecc *ecc = id; + enum mtk_ecc_operation op; + u32 dec, enc; + + dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN; + if (dec) { + op = ECC_DECODE; + dec = readw(ecc->regs + ECC_DECDONE); + if (dec & ecc->sectors) { + ecc->sectors = 0; + complete(&ecc->done); + } else { + return IRQ_HANDLED; + } + } else { + enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN; + if (enc) { + op = ECC_ENCODE; + complete(&ecc->done); + } else { + return IRQ_NONE; + } + } + + writel(0, ecc->regs + ECC_IRQ_REG(op)); + + return IRQ_HANDLED; +} + +static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) +{ + u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz; + u32 reg; + + switch (config->strength) { + case 4: + ecc_bit = ECC_CNFG_4BIT; + break; + case 6: + ecc_bit = ECC_CNFG_6BIT; + break; + case 8: + ecc_bit = ECC_CNFG_8BIT; + break; + case 10: + ecc_bit = ECC_CNFG_10BIT; + break; + case 12: + ecc_bit = ECC_CNFG_12BIT; + break; + case 14: + ecc_bit = ECC_CNFG_14BIT; + break; + case 16: + ecc_bit = ECC_CNFG_16BIT; + break; + case 18: + ecc_bit = ECC_CNFG_18BIT; + break; + case 20: + ecc_bit = ECC_CNFG_20BIT; + break; + case 22: + ecc_bit = ECC_CNFG_22BIT; + break; + case 24: + ecc_bit = ECC_CNFG_24BIT; + break; + case 28: + ecc_bit = ECC_CNFG_28BIT; + break; + case 32: + ecc_bit = ECC_CNFG_32BIT; + break; + case 36: + ecc_bit = ECC_CNFG_36BIT; + break; + case 40: + ecc_bit = ECC_CNFG_40BIT; + break; + case 44: + ecc_bit = ECC_CNFG_44BIT; + break; + case 48: + ecc_bit = ECC_CNFG_48BIT; + break; + case 52: + ecc_bit = ECC_CNFG_52BIT; + break; + case 56: + ecc_bit = ECC_CNFG_56BIT; + break; + case 60: + ecc_bit = ECC_CNFG_60BIT; + break; + default: + dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n", + config->strength); + } + + if (config->op == ECC_ENCODE) { + /* configure ECC encoder (in bits) */ + enc_sz = config->len << 3; + + reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); + reg |= (enc_sz << ECC_MS_SHIFT); + writel(reg, ecc->regs + ECC_ENCCNFG); + + if (config->mode != ECC_NFI_MODE) + writel(lower_32_bits(config->addr), + ecc->regs + ECC_ENCDIADDR); + + } else { + /* configure ECC decoder (in bits) */ + dec_sz = (config->len << 3) + + config->strength * ECC_PARITY_BITS; + + reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); + reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; + reg |= DEC_EMPTY_EN; + writel(reg, ecc->regs + ECC_DECCNFG); + + if (config->sectors) + ecc->sectors = 1 << (config->sectors - 1); + } +} + +void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, + int sectors) +{ + u32 offset, i, err; + u32 bitflips = 0; + + stats->corrected = 0; + stats->failed = 0; + + for (i = 0; i < sectors; i++) { + offset = (i >> 2) << 2; + err = readl(ecc->regs + ECC_DECENUM0 + offset); + err = err >> ((i % 4) * 8); + err &= ERR_MASK; + if (err == ERR_MASK) { + /* uncorrectable errors */ + stats->failed++; + continue; + } + + stats->corrected += err; + bitflips = max_t(u32, bitflips, err); + } + + stats->bitflips = bitflips; +} +EXPORT_SYMBOL(mtk_ecc_get_stats); + +void mtk_ecc_release(struct mtk_ecc *ecc) +{ + clk_disable_unprepare(ecc->clk); + put_device(ecc->dev); +} +EXPORT_SYMBOL(mtk_ecc_release); + +static void mtk_ecc_hw_init(struct mtk_ecc *ecc) +{ + mtk_ecc_wait_idle(ecc, ECC_ENCODE); + writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); + + mtk_ecc_wait_idle(ecc, ECC_DECODE); + writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); +} + +static struct mtk_ecc 
*mtk_ecc_get(struct device_node *np) +{ + struct platform_device *pdev; + struct mtk_ecc *ecc; + + pdev = of_find_device_by_node(np); + if (!pdev || !platform_get_drvdata(pdev)) + return ERR_PTR(-EPROBE_DEFER); + + get_device(&pdev->dev); + ecc = platform_get_drvdata(pdev); + clk_prepare_enable(ecc->clk); + mtk_ecc_hw_init(ecc); + + return ecc; +} + +struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) +{ + struct mtk_ecc *ecc = NULL; + struct device_node *np; + + np = of_parse_phandle(of_node, "ecc-engine", 0); + if (np) { + ecc = mtk_ecc_get(np); + of_node_put(np); + } + + return ecc; +} +EXPORT_SYMBOL(of_mtk_ecc_get); + +int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) +{ + enum mtk_ecc_operation op = config->op; + int ret; + + ret = mutex_lock_interruptible(&ecc->lock); + if (ret) { + dev_err(ecc->dev, "interrupted when attempting to lock\n"); + return ret; + } + + mtk_ecc_wait_idle(ecc, op); + mtk_ecc_config(ecc, config); + writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); + + init_completion(&ecc->done); + writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op)); + + return 0; +} +EXPORT_SYMBOL(mtk_ecc_enable); + +void mtk_ecc_disable(struct mtk_ecc *ecc) +{ + enum mtk_ecc_operation op = ECC_ENCODE; + + /* find out the running operation */ + if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) + op = ECC_DECODE; + + /* disable it */ + mtk_ecc_wait_idle(ecc, op); + writew(0, ecc->regs + ECC_IRQ_REG(op)); + writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); + + mutex_unlock(&ecc->lock); +} +EXPORT_SYMBOL(mtk_ecc_disable); + +int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) +{ + int ret; + + ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); + if (!ret) { + dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n", + (op == ECC_ENCODE) ?
"encoder" : "decoder"); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL(mtk_ecc_wait_done); + +int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, + u8 *data, u32 bytes) +{ + dma_addr_t addr; + u32 *p, len, i; + int ret = 0; + + addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); + ret = dma_mapping_error(ecc->dev, addr); + if (ret) { + dev_err(ecc->dev, "dma mapping error\n"); + return -EINVAL; + } + + config->op = ECC_ENCODE; + config->addr = addr; + ret = mtk_ecc_enable(ecc, config); + if (ret) { + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); + return ret; + } + + ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); + if (ret) + goto timeout; + + mtk_ecc_wait_idle(ecc, ECC_ENCODE); + + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ + len = (config->strength * ECC_PARITY_BITS + 7) >> 3; + p = (u32 *)(data + bytes); + + /* write the parity bytes generated by the ECC back to the OOB region */ + for (i = 0; i < len; i++) + p[i] = readl(ecc->regs + ECC_ENCPAR(i)); +timeout: + + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); + mtk_ecc_disable(ecc); + + return ret; +} +EXPORT_SYMBOL(mtk_ecc_encode); + +void mtk_ecc_adjust_strength(u32 *p) +{ + u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, + 40, 44, 48, 52, 56, 60}; + int i; + + for (i = 0; i < ARRAY_SIZE(ecc); i++) { + if (*p <= ecc[i]) { + if (!i) + *p = ecc[i]; + else if (*p != ecc[i]) + *p = ecc[i - 1]; + return; + } + } + + *p = ecc[ARRAY_SIZE(ecc) - 1]; +} +EXPORT_SYMBOL(mtk_ecc_adjust_strength); + +static int mtk_ecc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_ecc *ecc; + struct resource *res; + int irq, ret; + + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); + if (!ecc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ecc->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(ecc->regs)) { + dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs)); + return PTR_ERR(ecc->regs); + } + + ecc->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ecc->clk)) { + dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); + return PTR_ERR(ecc->clk); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "failed to get irq\n"); + return -EINVAL; + } + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "failed to set DMA mask\n"); + return ret; + } + + ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return -EINVAL; + } + + ecc->dev = dev; + mutex_init(&ecc->lock); + platform_set_drvdata(pdev, ecc); + dev_info(dev, "probed\n"); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_ecc_suspend(struct device *dev) +{ + struct mtk_ecc *ecc = dev_get_drvdata(dev); + + clk_disable_unprepare(ecc->clk); + + return 0; +} + +static int mtk_ecc_resume(struct device *dev) +{ + struct mtk_ecc *ecc = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(ecc->clk); + if (ret) { + dev_err(dev, "failed to enable clk\n"); + return ret; + } + + mtk_ecc_hw_init(ecc); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); +#endif + +static const struct of_device_id mtk_ecc_dt_match[] = { + { .compatible = "mediatek,mt2701-ecc" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); + +static struct platform_driver mtk_ecc_driver = { + .probe = mtk_ecc_probe, + .driver = { + .name = "mtk-ecc", + .of_match_table = 
of_match_ptr(mtk_ecc_dt_match), +#ifdef CONFIG_PM_SLEEP + .pm = &mtk_ecc_pm_ops, +#endif + }, +}; + +module_platform_driver(mtk_ecc_driver); + +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); +MODULE_DESCRIPTION("MTK Nand ECC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h new file mode 100644 index 000000000000..cbeba5cd1c13 --- /dev/null +++ b/drivers/mtd/nand/mtk_ecc.h @@ -0,0 +1,50 @@ +/* + * MTK SDG1 ECC controller + * + * Copyright (c) 2016 Mediatek + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ +#define __DRIVERS_MTD_NAND_MTK_ECC_H__ + +#include <linux/types.h> + +#define ECC_PARITY_BITS (14) + +enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; +enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; + +struct device_node; +struct mtk_ecc; + +struct mtk_ecc_stats { + u32 corrected; + u32 bitflips; + u32 failed; +}; + +struct mtk_ecc_config { + enum mtk_ecc_operation op; + enum mtk_ecc_mode mode; + dma_addr_t addr; + u32 strength; + u32 sectors; + u32 len; +}; + +int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); +void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); +int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); +int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); +void mtk_ecc_disable(struct mtk_ecc *); +void mtk_ecc_adjust_strength(u32 *); + +struct mtk_ecc *of_mtk_ecc_get(struct device_node *); +void mtk_ecc_release(struct mtk_ecc *); + +#endif diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c new file mode 100644 index 000000000000..ddaa2acb9dd7 --- /dev/null +++ b/drivers/mtd/nand/mtk_nand.c @@ -0,0 +1,1526 @@ +/* + * MTK NAND Flash controller driver. + * Copyright (C) 2016 MediaTek Inc. + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/mtd.h> +#include <linux/module.h> +#include <linux/iopoll.h> +#include <linux/of.h> +#include "mtk_ecc.h" + +/* NAND controller register definition */ +#define NFI_CNFG (0x00) +#define CNFG_AHB BIT(0) +#define CNFG_READ_EN BIT(1) +#define CNFG_DMA_BURST_EN BIT(2) +#define CNFG_BYTE_RW BIT(6) +#define CNFG_HW_ECC_EN BIT(8) +#define CNFG_AUTO_FMT_EN BIT(9) +#define CNFG_OP_CUST (6 << 12) +#define NFI_PAGEFMT (0x04) +#define PAGEFMT_FDM_ECC_SHIFT (12) +#define PAGEFMT_FDM_SHIFT (8) +#define PAGEFMT_SPARE_16 (0) +#define PAGEFMT_SPARE_26 (1) +#define PAGEFMT_SPARE_27 (2) +#define PAGEFMT_SPARE_28 (3) +#define PAGEFMT_SPARE_32 (4) +#define PAGEFMT_SPARE_36 (5) +#define PAGEFMT_SPARE_40 (6) +#define PAGEFMT_SPARE_44 (7) +#define PAGEFMT_SPARE_48 (8) +#define PAGEFMT_SPARE_49 (9) +#define PAGEFMT_SPARE_50 (0xa) +#define PAGEFMT_SPARE_51 (0xb) +#define PAGEFMT_SPARE_52 (0xc) +#define PAGEFMT_SPARE_62 (0xd) +#define PAGEFMT_SPARE_63 (0xe) +#define PAGEFMT_SPARE_64 (0xf) +#define PAGEFMT_SPARE_SHIFT (4) +#define PAGEFMT_SEC_SEL_512 BIT(2) +#define PAGEFMT_512_2K (0) +#define PAGEFMT_2K_4K (1) +#define PAGEFMT_4K_8K (2) +#define PAGEFMT_8K_16K (3) +/* NFI control */ +#define NFI_CON (0x08) +#define CON_FIFO_FLUSH BIT(0) +#define CON_NFI_RST BIT(1) +#define CON_BRD BIT(8) /* burst read */ +#define CON_BWR BIT(9) /* burst write */ +#define CON_SEC_SHIFT (12) +/* Timing control register */ +#define NFI_ACCCON (0x0C) +#define NFI_INTR_EN (0x10) +#define INTR_AHB_DONE_EN BIT(6) +#define NFI_INTR_STA (0x14) +#define NFI_CMD (0x20) +#define NFI_ADDRNOB (0x30) +#define NFI_COLADDR (0x34) +#define NFI_ROWADDR (0x38) +#define NFI_STRDATA (0x40) +#define STAR_EN (1) +#define STAR_DE (0) +#define NFI_CNRNB (0x44) +#define NFI_DATAW (0x50) +#define NFI_DATAR (0x54) +#define NFI_PIO_DIRDY (0x58) +#define PIO_DI_RDY (0x01) +#define NFI_STA (0x60) +#define STA_CMD BIT(0) +#define STA_ADDR BIT(1) +#define STA_BUSY BIT(8) +#define STA_EMP_PAGE BIT(12) +#define NFI_FSM_CUSTDATA (0xe << 16) +#define NFI_FSM_MASK (0xf << 16) +#define NFI_ADDRCNTR (0x70) +#define CNTR_MASK GENMASK(16, 12) +#define NFI_STRADDR (0x80) +#define NFI_BYTELEN (0x84) +#define NFI_CSEL (0x90) +#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2) +#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) +#define NFI_FDM_MAX_SIZE (8) +#define NFI_FDM_MIN_SIZE (1) +#define NFI_MASTER_STA (0x224) +#define MASTER_STA_MASK (0x0FFF) +#define NFI_EMPTY_THRESH (0x23C) + +#define MTK_NAME "mtk-nand" +#define KB(x) ((x) * 1024UL) +#define MB(x) (KB(x) * 1024UL) + +#define MTK_TIMEOUT (500000) +#define MTK_RESET_TIMEOUT (1000000) +#define MTK_MAX_SECTOR (16) +#define MTK_NAND_MAX_NSELS (2) + +struct mtk_nfc_bad_mark_ctl { + void (*bm_swap)(struct mtd_info *, u8 *buf, int raw); + u32 sec; + u32 pos; +}; + +/* + * FDM: region used to store free OOB data + */ +struct mtk_nfc_fdm { + u32 reg_size; + u32 ecc_size; +}; + +struct mtk_nfc_nand_chip { + struct list_head node; + struct nand_chip nand; + + struct mtk_nfc_bad_mark_ctl bad_mark; + struct mtk_nfc_fdm fdm; + u32 spare_per_sector; + + int nsels; + u8 sels[0]; + /* nothing after this field */ +}; + +struct mtk_nfc_clk { + struct clk *nfi_clk; + struct clk *pad_clk; +}; + +struct mtk_nfc { + struct nand_hw_control controller; + struct mtk_ecc_config ecc_cfg; + struct mtk_nfc_clk clk; + struct mtk_ecc *ecc; + +
struct device *dev; + void __iomem *regs; + + struct completion done; + struct list_head chips; + + u8 *buffer; +}; + +static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) +{ + return container_of(nand, struct mtk_nfc_nand_chip, nand); +} + +static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i) +{ + return (u8 *)p + i * chip->ecc.size; +} + +static inline u8 *oob_ptr(struct nand_chip *chip, int i) +{ + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + u8 *poi; + + /* map the sector's FDM data to free oob: + * the beginning of the oob area stores the FDM data of bad mark sectors + */ + + if (i < mtk_nand->bad_mark.sec) + poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size; + else if (i == mtk_nand->bad_mark.sec) + poi = chip->oob_poi; + else + poi = chip->oob_poi + i * mtk_nand->fdm.reg_size; + + return poi; +} + +static inline int mtk_data_len(struct nand_chip *chip) +{ + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + + return chip->ecc.size + mtk_nand->spare_per_sector; +} + +static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->buffer + i * mtk_data_len(chip); +} + +static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size; +} + +static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg) +{ + writel(val, nfc->regs + reg); +} + +static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg) +{ + writew(val, nfc->regs + reg); +} + +static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg) +{ + writeb(val, nfc->regs + reg); +} + +static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg) +{ + return readl_relaxed(nfc->regs + reg); +} + +static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg) +{ + return readw_relaxed(nfc->regs + reg); +} + +static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg) +{ + return readb_relaxed(nfc->regs + reg); +} + +static void mtk_nfc_hw_reset(struct mtk_nfc *nfc) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + /* reset all registers and force the NFI master to terminate */ + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); + + /* wait for the master to finish the last transaction */ + ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val, + !(val & MASTER_STA_MASK), 50, + MTK_RESET_TIMEOUT); + if (ret) + dev_warn(dev, "master active in reset [0x%x] = 0x%x\n", + NFI_MASTER_STA, val); + + /* ensure any status register affected by the NFI master is reset */ + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); + nfi_writew(nfc, STAR_DE, NFI_STRDATA); +} + +static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + nfi_writel(nfc, command, NFI_CMD); + + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, + !(val & STA_CMD), 10, MTK_TIMEOUT); + if (ret) { + dev_warn(dev, "nfi core timed out entering command mode\n"); + return -EIO; + } + + return 0; +} + +static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + nfi_writel(nfc, addr, NFI_COLADDR); + nfi_writel(nfc, 0, NFI_ROWADDR); + nfi_writew(nfc, 1, NFI_ADDRNOB); + + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, + !(val & STA_ADDR), 10, MTK_TIMEOUT); + if (ret) { + dev_warn(dev, "nfi core timed out entering address mode\n"); + return -EIO; 
+ } + + return 0; +} + +static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + u32 fmt, spare; + + if (!mtd->writesize) + return 0; + + spare = mtk_nand->spare_per_sector; + + switch (mtd->writesize) { + case 512: + fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512; + break; + case KB(2): + if (chip->ecc.size == 512) + fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512; + else + fmt = PAGEFMT_512_2K; + break; + case KB(4): + if (chip->ecc.size == 512) + fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512; + else + fmt = PAGEFMT_2K_4K; + break; + case KB(8): + if (chip->ecc.size == 512) + fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512; + else + fmt = PAGEFMT_4K_8K; + break; + case KB(16): + fmt = PAGEFMT_8K_16K; + break; + default: + dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize); + return -EINVAL; + } + + /* + * the hardware will double the value for this eccsize, so we need to + * halve it + */ + if (chip->ecc.size == 1024) + spare >>= 1; + + switch (spare) { + case 16: + fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT); + break; + case 26: + fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT); + break; + case 27: + fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT); + break; + case 28: + fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT); + break; + case 32: + fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT); + break; + case 36: + fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT); + break; + case 40: + fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT); + break; + case 44: + fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT); + break; + case 48: + fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT); + break; + case 49: + fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT); + break; + case 50: + fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT); + break; + case 51: + fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT); + break; + case 52: + fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT); + break; + case 62: + fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT); + break; + case 63: + fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT); + break; + case 64: + fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT); + break; + default: + dev_err(nfc->dev, "invalid spare per sector %d\n", spare); + return -EINVAL; + } + + fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT; + fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT; + nfi_writew(nfc, fmt, NFI_PAGEFMT); + + nfc->ecc_cfg.strength = chip->ecc.strength; + nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size; + + return 0; +} + +static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct mtk_nfc *nfc = nand_get_controller_data(nand); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); + + if (chip < 0) + return; + + mtk_nfc_hw_runtime_config(mtd); + + nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL); +} + +static int mtk_nfc_dev_ready(struct mtd_info *mtd) +{ + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); + + if (nfi_readl(nfc, NFI_STA) & STA_BUSY) + return 0; + + return 1; +} + +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) +{ + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); + + if (ctrl & NAND_ALE) { + mtk_nfc_send_address(nfc, dat); + } else if (ctrl & NAND_CLE) { + mtk_nfc_hw_reset(nfc); + + nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); + mtk_nfc_send_command(nfc, dat); + } +} 
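For readers following the control flow: mtk_nfc_cmd_ctrl() above handles a single command or address byte per invocation, and it is the nand_base core's cmdfunc that sequences those invocations. The sketch below is an editorial illustration, not part of this patch; the helper name example_issue_read0 and the fixed 2+3 address-cycle layout are assumptions for the example, while the cmd_ctrl callback signature and the NAND_CLE/NAND_ALE/NAND_CTRL_CHANGE flags are the standard <linux/mtd/nand.h> interface of this kernel generation.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/*
 * Illustrative sketch: how a nand_base-style cmdfunc decomposes
 * NAND_CMD_READ0 into the CLE/ALE phases serviced by a cmd_ctrl
 * implementation such as mtk_nfc_cmd_ctrl(). Hypothetical helper,
 * not code from this patch.
 */
static void example_issue_read0(struct mtd_info *mtd, int column, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* CLE phase: latch the opcode (the mtk hook also resets the NFI here) */
	chip->cmd_ctrl(mtd, NAND_CMD_READ0,
		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	/* ALE phases: one call per address cycle, column bytes then row bytes */
	chip->cmd_ctrl(mtd, column & 0xff,
		       NAND_NCE | NAND_ALE | NAND_CTRL_CHANGE);
	chip->cmd_ctrl(mtd, (column >> 8) & 0xff, NAND_NCE | NAND_ALE);
	chip->cmd_ctrl(mtd, page & 0xff, NAND_NCE | NAND_ALE);
	chip->cmd_ctrl(mtd, (page >> 8) & 0xff, NAND_NCE | NAND_ALE);
	chip->cmd_ctrl(mtd, (page >> 16) & 0xff, NAND_NCE | NAND_ALE);

	/* de-assert CLE/ALE: no more command/address cycles for this op */
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
}

Note that mtk_nfc_cmd_ctrl() keys only on the NAND_ALE and NAND_CLE bits, so the trailing NAND_CMD_NONE phase is a no-op for this controller.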
+ +static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc) +{ + int rc; + u8 val; + + rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val, + val & PIO_DI_RDY, 10, MTK_TIMEOUT); + if (rc < 0) + dev_err(nfc->dev, "data not ready\n"); +} + +static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + u32 reg; + + /* after each byte read, the NFI_STA reg is reset by the hardware */ + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; + if (reg != NFI_FSM_CUSTDATA) { + reg = nfi_readw(nfc, NFI_CNFG); + reg |= CNFG_BYTE_RW | CNFG_READ_EN; + nfi_writew(nfc, reg, NFI_CNFG); + + /* + * set to max sector to allow the HW to continue reading over + * unaligned accesses + */ + reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD; + nfi_writel(nfc, reg, NFI_CON); + + /* trigger to fetch data */ + nfi_writew(nfc, STAR_EN, NFI_STRDATA); + } + + mtk_nfc_wait_ioready(nfc); + + return nfi_readb(nfc, NFI_DATAR); +} + +static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + buf[i] = mtk_nfc_read_byte(mtd); +} + +static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte) +{ + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); + u32 reg; + + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; + + if (reg != NFI_FSM_CUSTDATA) { + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW; + nfi_writew(nfc, reg, NFI_CNFG); + + reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR; + nfi_writel(nfc, reg, NFI_CON); + + nfi_writew(nfc, STAR_EN, NFI_STRDATA); + } + + mtk_nfc_wait_ioready(nfc); + nfi_writeb(nfc, byte, NFI_DATAW); +} + +static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + mtk_nfc_write_byte(mtd, buf[i]); +} + +static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + int size = chip->ecc.size + mtk_nand->fdm.reg_size; + + nfc->ecc_cfg.mode = ECC_DMA_MODE; + nfc->ecc_cfg.op = ECC_ENCODE; + + return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size); +} + +static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c) +{ + /* nop */ +} + +static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip); + u32 bad_pos = nand->bad_mark.pos; + + if (raw) + bad_pos += nand->bad_mark.sec * mtk_data_len(chip); + else + bad_pos += nand->bad_mark.sec * chip->ecc.size; + + swap(chip->oob_poi[0], buf[bad_pos]); +} + +static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset, + u32 len, const u8 *buf) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + u32 start, end; + int i, ret; + + start = offset / chip->ecc.size; + end = DIV_ROUND_UP(offset + len, chip->ecc.size); + + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); + for (i = 0; i < chip->ecc.steps; i++) { + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), + chip->ecc.size); + + if (start > i || i >= end) + continue; + + if (i == mtk_nand->bad_mark.sec) + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); + + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); + + /* program the CRC back to the OOB */ + ret = 
mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i)); + if (ret < 0) + return ret; + } + + return 0; +} + +static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + u32 i; + + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); + for (i = 0; i < chip->ecc.steps; i++) { + if (buf) + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), + chip->ecc.size); + + if (i == mtk_nand->bad_mark.sec) + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); + + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); + } +} + +static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start, + u32 sectors) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + u32 vall, valm; + u8 *oobptr; + int i, j; + + for (i = 0; i < sectors; i++) { + oobptr = oob_ptr(chip, start + i); + vall = nfi_readl(nfc, NFI_FDML(i)); + valm = nfi_readl(nfc, NFI_FDMM(i)); + + for (j = 0; j < fdm->reg_size; j++) + oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); + } +} + +static inline void mtk_nfc_write_fdm(struct nand_chip *chip) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + u32 vall, valm; + u8 *oobptr; + int i, j; + + for (i = 0; i < chip->ecc.steps; i++) { + oobptr = oob_ptr(chip, i); + vall = 0; + valm = 0; + for (j = 0; j < 8; j++) { + if (j < 4) + vall |= (j < fdm->reg_size ? oobptr[j] : 0xff) + << (j * 8); + else + valm |= (j < fdm->reg_size ? 
oobptr[j] : 0xff) + << ((j - 4) * 8); + } + nfi_writel(nfc, vall, NFI_FDML(i)); + nfi_writel(nfc, valm, NFI_FDMM(i)); + } +} + +static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, int page, int len) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct device *dev = nfc->dev; + dma_addr_t addr; + u32 reg; + int ret; + + addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE); + ret = dma_mapping_error(nfc->dev, addr); + if (ret) { + dev_err(nfc->dev, "dma mapping error\n"); + return -EINVAL; + } + + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN; + nfi_writew(nfc, reg, NFI_CNFG); + + nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON); + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); + + init_completion(&nfc->done); + + reg = nfi_readl(nfc, NFI_CON) | CON_BWR; + nfi_writel(nfc, reg, NFI_CON); + nfi_writew(nfc, STAR_EN, NFI_STRDATA); + + ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); + if (!ret) { + dev_err(dev, "program ahb done timeout\n"); + nfi_writew(nfc, 0, NFI_INTR_EN); + ret = -ETIMEDOUT; + goto timeout; + } + + ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, + (reg & CNTR_MASK) >= chip->ecc.steps, + 10, MTK_TIMEOUT); + if (ret) + dev_err(dev, "hwecc write timeout\n"); + +timeout: + + dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE); + nfi_writel(nfc, 0, NFI_CON); + + return ret; +} + +static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, int page, int raw) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + size_t len; + const u8 *bufpoi; + u32 reg; + int ret; + + if (!raw) { + /* OOB => FDM: from register, ECC: from HW */ + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN; + nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG); + + nfc->ecc_cfg.op = ECC_ENCODE; + nfc->ecc_cfg.mode = ECC_NFI_MODE; + ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); + if (ret) { + /* clear NFI config */ + reg = nfi_readw(nfc, NFI_CNFG); + reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); + nfi_writew(nfc, reg, NFI_CNFG); + + return ret; + } + + memcpy(nfc->buffer, buf, mtd->writesize); + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw); + bufpoi = nfc->buffer; + + /* write OOB into the FDM registers (OOB area in MTK NAND) */ + mtk_nfc_write_fdm(chip); + } else { + bufpoi = buf; + } + + len = mtd->writesize + (raw ? 
mtd->oobsize : 0); + ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len); + + if (!raw) + mtk_ecc_disable(nfc->ecc); + + return ret; +} + +static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, const u8 *buf, + int oob_on, int page) +{ + return mtk_nfc_write_page(mtd, chip, buf, page, 0); +} + +static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, int oob_on, int pg) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + + mtk_nfc_format_page(mtd, buf); + return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1); +} + +static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, u32 offset, + u32 data_len, const u8 *buf, + int oob_on, int page) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + int ret; + + ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf); + if (ret < 0) + return ret; + + /* use the data in the private buffer (now with FDM and CRC) */ + return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1); +} + +static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + int ret; + + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); + + ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page); + if (ret < 0) + return -EIO; + + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + ret = chip->waitfunc(mtd, chip); + + return ret & NAND_STATUS_FAIL ? -EIO : 0; +} + +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_ecc_stats stats; + int rc, i; + + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; + if (rc) { + memset(buf, 0xff, sectors * chip->ecc.size); + for (i = 0; i < sectors; i++) + memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); + return 0; + } + + mtk_ecc_get_stats(nfc->ecc, &stats, sectors); + mtd->ecc_stats.corrected += stats.corrected; + mtd->ecc_stats.failed += stats.failed; + + return stats.bitflips; +} + +static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, + u32 data_offs, u32 readlen, + u8 *bufpoi, int page, int raw) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + u32 spare = mtk_nand->spare_per_sector; + u32 column, sectors, start, end, reg; + dma_addr_t addr; + int bitflips; + size_t len; + u8 *buf; + int rc; + + start = data_offs / chip->ecc.size; + end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); + + sectors = end - start; + column = start * (chip->ecc.size + spare); + + len = sectors * chip->ecc.size + (raw ? 
sectors * spare : 0); + buf = bufpoi + start * chip->ecc.size; + + if (column != 0) + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1); + + addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE); + rc = dma_mapping_error(nfc->dev, addr); + if (rc) { + dev_err(nfc->dev, "dma mapping error\n"); + + return -EINVAL; + } + + reg = nfi_readw(nfc, NFI_CNFG); + reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB; + if (!raw) { + reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN; + nfi_writew(nfc, reg, NFI_CNFG); + + nfc->ecc_cfg.mode = ECC_NFI_MODE; + nfc->ecc_cfg.sectors = sectors; + nfc->ecc_cfg.op = ECC_DECODE; + rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); + if (rc) { + dev_err(nfc->dev, "ecc enable\n"); + /* clear NFI_CNFG */ + reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN | + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); + nfi_writew(nfc, reg, NFI_CNFG); + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); + + return rc; + } + } else { + nfi_writew(nfc, reg, NFI_CNFG); + } + + nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON); + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); + + init_completion(&nfc->done); + reg = nfi_readl(nfc, NFI_CON) | CON_BRD; + nfi_writel(nfc, reg, NFI_CON); + nfi_writew(nfc, STAR_EN, NFI_STRDATA); + + rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); + if (!rc) + dev_warn(nfc->dev, "read ahb/dma done timeout\n"); + + rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, + (reg & CNTR_MASK) >= sectors, 10, + MTK_TIMEOUT); + if (rc < 0) { + dev_err(nfc->dev, "subpage done timeout\n"); + bitflips = -EIO; + } else { + bitflips = 0; + if (!raw) { + rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); + bitflips = rc < 0 ? -ETIMEDOUT : + mtk_nfc_update_ecc_stats(mtd, buf, sectors); + mtk_nfc_read_fdm(chip, start, sectors); + } + } + + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); + + if (raw) + goto done; + + mtk_ecc_disable(nfc->ecc); + + if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec) + mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw); +done: + nfi_writel(nfc, 0, NFI_CON); + + return bitflips; +} + +static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, u32 off, + u32 len, u8 *p, int pg) +{ + return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0); +} + +static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, u8 *p, + int oob_on, int pg) +{ + return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0); +} + +static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, int oob_on, int page) +{ + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc *nfc = nand_get_controller_data(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + int i, ret; + + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); + ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer, + page, 1); + if (ret < 0) + return ret; + + for (i = 0; i < chip->ecc.steps; i++) { + memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size); + + if (i == mtk_nand->bad_mark.sec) + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); + + if (buf) + memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i), + chip->ecc.size); + } + + return ret; +} + +static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + + return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page); +} + +static inline void 
mtk_nfc_hw_init(struct mtk_nfc *nfc) +{ + /* + * ACCON: access timing control register + * ------------------------------------- + * 31:28: minimum required time for CS post pulling down after accessing + * the device + * 27:22: minimum required time for CS pre pulling down before accessing + * the device + * 21:16: minimum required time from NCEB low to NREB low + * 15:12: minimum required time from NWEB high to NREB low. + * 11:08: write enable hold time + * 07:04: write wait states + * 03:00: read wait states + */ + nfi_writel(nfc, 0x10804211, NFI_ACCCON); + + /* + * CNRNB: nand ready/busy register + * ------------------------------- + * 7:4: timeout register for polling the NAND busy/ready signal + * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. + */ + nfi_writew(nfc, 0xf1, NFI_CNRNB); + nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); + + mtk_nfc_hw_reset(nfc); + + nfi_readl(nfc, NFI_INTR_STA); + nfi_writel(nfc, 0, NFI_INTR_EN); +} + +static irqreturn_t mtk_nfc_irq(int irq, void *id) +{ + struct mtk_nfc *nfc = id; + u16 sta, ien; + + sta = nfi_readw(nfc, NFI_INTR_STA); + ien = nfi_readw(nfc, NFI_INTR_EN); + + if (!(sta & ien)) + return IRQ_NONE; + + nfi_writew(nfc, ~sta & ien, NFI_INTR_EN); + complete(&nfc->done); + + return IRQ_HANDLED; +} + +static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk) +{ + int ret; + + ret = clk_prepare_enable(clk->nfi_clk); + if (ret) { + dev_err(dev, "failed to enable nfi clk\n"); + return ret; + } + + ret = clk_prepare_enable(clk->pad_clk); + if (ret) { + dev_err(dev, "failed to enable pad clk\n"); + clk_disable_unprepare(clk->nfi_clk); + return ret; + } + + return 0; +} + +static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk) +{ + clk_disable_unprepare(clk->nfi_clk); + clk_disable_unprepare(clk->pad_clk); +} + +static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; + u32 eccsteps; + + eccsteps = mtd->writesize / chip->ecc.size; + + if (section >= eccsteps) + return -ERANGE; + + oob_region->length = fdm->reg_size - fdm->ecc_size; + oob_region->offset = section * fdm->reg_size + fdm->ecc_size; + + return 0; +} + +static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); + u32 eccsteps; + + if (section) + return -ERANGE; + + eccsteps = mtd->writesize / chip->ecc.size; + oob_region->offset = mtk_nand->fdm.reg_size * eccsteps; + oob_region->length = mtd->oobsize - oob_region->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = { + .free = mtk_nfc_ooblayout_free, + .ecc = mtk_nfc_ooblayout_ecc, +}; + +static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand); + u32 ecc_bytes; + + ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); + + fdm->reg_size = chip->spare_per_sector - ecc_bytes; + if (fdm->reg_size > NFI_FDM_MAX_SIZE) + fdm->reg_size = NFI_FDM_MAX_SIZE; + + /* bad block mark storage */ + fdm->ecc_size = 1; +} + +static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl, + struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (mtd->writesize == 512) { + 
bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap; + } else { + bm_ctl->bm_swap = mtk_nfc_bad_mark_swap; + bm_ctl->sec = mtd->writesize / mtk_data_len(nand); + bm_ctl->pos = mtd->writesize % mtk_data_len(nand); + } +} + +static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44, + 48, 49, 50, 51, 52, 62, 63, 64}; + u32 eccsteps, i; + + eccsteps = mtd->writesize / nand->ecc.size; + *sps = mtd->oobsize / eccsteps; + + if (nand->ecc.size == 1024) + *sps >>= 1; + + for (i = 0; i < ARRAY_SIZE(spare); i++) { + if (*sps <= spare[i]) { + if (!i) + *sps = spare[i]; + else if (*sps != spare[i]) + *sps = spare[i - 1]; + break; + } + } + + if (i >= ARRAY_SIZE(spare)) + *sps = spare[ARRAY_SIZE(spare) - 1]; + + if (nand->ecc.size == 1024) + *sps <<= 1; +} + +static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + u32 spare; + int free; + + /* support only ecc hw mode */ + if (nand->ecc.mode != NAND_ECC_HW) { + dev_err(dev, "ecc.mode not supported\n"); + return -EINVAL; + } + + /* if optional dt settings not present */ + if (!nand->ecc.size || !nand->ecc.strength) { + /* use datasheet requirements */ + nand->ecc.strength = nand->ecc_strength_ds; + nand->ecc.size = nand->ecc_step_ds; + + /* + * align eccstrength and eccsize + * this controller only supports 512 and 1024 sizes + */ + if (nand->ecc.size < 1024) { + if (mtd->writesize > 512) { + nand->ecc.size = 1024; + nand->ecc.strength <<= 1; + } else { + nand->ecc.size = 512; + } + } else { + nand->ecc.size = 1024; + } + + mtk_nfc_set_spare_per_sector(&spare, mtd); + + /* calculate oob bytes except ecc parity data */ + free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; + free = spare - free; + + /* + * enhance ecc strength if oob left is bigger than max FDM size + * or reduce ecc strength if oob size is not enough for ecc + * parity data. 
+ */ + if (free > NFI_FDM_MAX_SIZE) { + spare -= NFI_FDM_MAX_SIZE; + nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; + } else if (free < 0) { + spare -= NFI_FDM_MIN_SIZE; + nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; + } + } + + mtk_ecc_adjust_strength(&nand->ecc.strength); + + dev_info(dev, "eccsize %d eccstrength %d\n", + nand->ecc.size, nand->ecc.strength); + + return 0; +} + +static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, + struct device_node *np) +{ + struct mtk_nfc_nand_chip *chip; + struct nand_chip *nand; + struct mtd_info *mtd; + int nsels, len; + u32 tmp; + int ret; + int i; + + if (!of_get_property(np, "reg", &nsels)) + return -ENODEV; + + nsels /= sizeof(u32); + if (!nsels || nsels > MTK_NAND_MAX_NSELS) { + dev_err(dev, "invalid reg property size %d\n", nsels); + return -EINVAL; + } + + chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8), + GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->nsels = nsels; + for (i = 0; i < nsels; i++) { + ret = of_property_read_u32_index(np, "reg", i, &tmp); + if (ret) { + dev_err(dev, "reg property failure : %d\n", ret); + return ret; + } + chip->sels[i] = tmp; + } + + nand = &chip->nand; + nand->controller = &nfc->controller; + + nand_set_flash_node(nand, np); + nand_set_controller_data(nand, nfc); + + nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; + nand->dev_ready = mtk_nfc_dev_ready; + nand->select_chip = mtk_nfc_select_chip; + nand->write_byte = mtk_nfc_write_byte; + nand->write_buf = mtk_nfc_write_buf; + nand->read_byte = mtk_nfc_read_byte; + nand->read_buf = mtk_nfc_read_buf; + nand->cmd_ctrl = mtk_nfc_cmd_ctrl; + + /* set default mode in case dt entry is missing */ + nand->ecc.mode = NAND_ECC_HW; + + nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc; + nand->ecc.write_page_raw = mtk_nfc_write_page_raw; + nand->ecc.write_page = mtk_nfc_write_page_hwecc; + nand->ecc.write_oob_raw = mtk_nfc_write_oob_std; + nand->ecc.write_oob = mtk_nfc_write_oob_std; + + nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc; + nand->ecc.read_page_raw = mtk_nfc_read_page_raw; + nand->ecc.read_page = mtk_nfc_read_page_hwecc; + nand->ecc.read_oob_raw = mtk_nfc_read_oob_std; + nand->ecc.read_oob = mtk_nfc_read_oob_std; + + mtd = nand_to_mtd(nand); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + mtd->name = MTK_NAME; + mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops); + + mtk_nfc_hw_init(nfc); + + ret = nand_scan_ident(mtd, nsels, NULL); + if (ret) + return -ENODEV; + + /* store bbt magic in page, because OOB is not protected */ + if (nand->bbt_options & NAND_BBT_USE_FLASH) + nand->bbt_options |= NAND_BBT_NO_OOB; + + ret = mtk_nfc_ecc_init(dev, mtd); + if (ret) + return -EINVAL; + + if (nand->options & NAND_BUSWIDTH_16) { + dev_err(dev, "16-bit buswidth not supported"); + return -EINVAL; + } + + mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); + mtk_nfc_set_fdm(&chip->fdm, mtd); + mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd); + + len = mtd->writesize + mtd->oobsize; + nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL); + if (!nfc->buffer) + return -ENOMEM; + + ret = nand_scan_tail(mtd); + if (ret) + return -ENODEV; + + ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); + if (ret) { + dev_err(dev, "mtd parse partition error\n"); + nand_release(mtd); + return ret; + } + + list_add_tail(&chip->node, &nfc->chips); + + return 0; +} + +static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) +{ + struct device_node *np = dev->of_node; + struct device_node
*nand_np; + int ret; + + for_each_child_of_node(np, nand_np) { + ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); + if (ret) { + of_node_put(nand_np); + return ret; + } + } + + return 0; +} + +static int mtk_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct mtk_nfc *nfc; + struct resource *res; + int ret, irq; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + spin_lock_init(&nfc->controller.lock); + init_waitqueue_head(&nfc->controller.wq); + INIT_LIST_HEAD(&nfc->chips); + + /* probe defer if not ready */ + nfc->ecc = of_mtk_ecc_get(np); + if (IS_ERR(nfc->ecc)) + return PTR_ERR(nfc->ecc); + else if (!nfc->ecc) + return -ENODEV; + + nfc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nfc->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->regs)) { + ret = PTR_ERR(nfc->regs); + dev_err(dev, "no nfi base\n"); + goto release_ecc; + } + + nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk"); + if (IS_ERR(nfc->clk.nfi_clk)) { + dev_err(dev, "no clk\n"); + ret = PTR_ERR(nfc->clk.nfi_clk); + goto release_ecc; + } + + nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk"); + if (IS_ERR(nfc->clk.pad_clk)) { + dev_err(dev, "no pad clk\n"); + ret = PTR_ERR(nfc->clk.pad_clk); + goto release_ecc; + } + + ret = mtk_nfc_enable_clk(dev, &nfc->clk); + if (ret) + goto release_ecc; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no nfi irq resource\n"); + ret = -EINVAL; + goto clk_disable; + } + + ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc); + if (ret) { + dev_err(dev, "failed to request nfi irq\n"); + goto clk_disable; + } + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "failed to set dma mask\n"); + goto clk_disable; + } + + platform_set_drvdata(pdev, nfc); + + ret = mtk_nfc_nand_chips_init(dev, nfc); + if (ret) { + dev_err(dev, "failed to init nand chips\n"); + goto clk_disable; + } + + return 0; + +clk_disable: + mtk_nfc_disable_clk(&nfc->clk); + +release_ecc: + mtk_ecc_release(nfc->ecc); + + return ret; +} + +static int mtk_nfc_remove(struct platform_device *pdev) +{ + struct mtk_nfc *nfc = platform_get_drvdata(pdev); + struct mtk_nfc_nand_chip *chip; + + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, + node); + nand_release(nand_to_mtd(&chip->nand)); + list_del(&chip->node); + } + + mtk_ecc_release(nfc->ecc); + mtk_nfc_disable_clk(&nfc->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_nfc_suspend(struct device *dev) +{ + struct mtk_nfc *nfc = dev_get_drvdata(dev); + + mtk_nfc_disable_clk(&nfc->clk); + + return 0; +} + +static int mtk_nfc_resume(struct device *dev) +{ + struct mtk_nfc *nfc = dev_get_drvdata(dev); + struct mtk_nfc_nand_chip *chip; + struct nand_chip *nand; + struct mtd_info *mtd; + int ret; + u32 i; + + udelay(200); + + ret = mtk_nfc_enable_clk(dev, &nfc->clk); + if (ret) + return ret; + + mtk_nfc_hw_init(nfc); + + /* reset NAND chip if VCC was powered off */ + list_for_each_entry(chip, &nfc->chips, node) { + nand = &chip->nand; + mtd = nand_to_mtd(nand); + for (i = 0; i < chip->nsels; i++) { + nand->select_chip(mtd, i); + nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + } + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); +#endif + +static const struct of_device_id mtk_nfc_id_table[] = { + { .compatible = "mediatek,mt2701-nfc" }, + {} +}; +MODULE_DEVICE_TABLE(of, 
mtk_nfc_id_table); + +static struct platform_driver mtk_nfc_driver = { + .probe = mtk_nfc_probe, + .remove = mtk_nfc_remove, + .driver = { + .name = MTK_NAME, + .of_match_table = mtk_nfc_id_table, +#ifdef CONFIG_PM_SLEEP + .pm = &mtk_nfc_pm_ops, +#endif + }, +}; + +module_platform_driver(mtk_nfc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); +MODULE_DESCRIPTION("MTK Nand Flash Controller Driver"); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 0b0dc29d2af7..77533f7f2429 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, int cached = writelen > bytes && page != blockmask; uint8_t *wbuf = buf; int use_bufpoi; - int part_pagewr = (column || writelen < (mtd->writesize - 1)); + int part_pagewr = (column || writelen < mtd->writesize); if (part_pagewr) use_bufpoi = 1; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index ccc05f5b2695..2af9869a115e 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = { /* Manufacturer IDs */ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, + {NAND_MFR_ESMT, "ESMT"}, {NAND_MFR_SAMSUNG, "Samsung"}, {NAND_MFR_FUJITSU, "Fujitsu"}, {NAND_MFR_NATIONAL, "National"}, diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index a136da8df6fe..a59361c36f40 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -118,8 +118,6 @@ #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) #define STATUS_BUFF_EMPTY 0x00000001 -#define OMAP24XX_DMA_GPMC 4 - #define SECTOR_BYTES 512 /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ #define BCH4_BIT_PAD 4 @@ -1811,7 +1809,6 @@ static int omap_nand_probe(struct platform_device *pdev) struct nand_chip *nand_chip; int err; dma_cap_mask_t mask; - unsigned sig; struct resource *res; struct device *dev = &pdev->dev; int min_oobbytes = BADBLOCK_MARKER_LENGTH; @@ -1924,11 +1921,11 @@ static int omap_nand_probe(struct platform_device *pdev) case NAND_OMAP_PREFETCH_DMA: dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - sig = OMAP24XX_DMA_GPMC; - info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); - if (!info->dma) { + info->dma = dma_request_chan(pdev->dev.parent, "rxtx"); + + if (IS_ERR(info->dma)) { dev_err(&pdev->dev, "DMA engine request failed\n"); - err = -ENXIO; + err = PTR_ERR(info->dma); goto return_error; } else { struct dma_slave_config cfg; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index a83a690688b4..e414b31b71c1 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -39,6 +39,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/iopoll.h> +#include <linux/reset.h> #define NFC_REG_CTL 0x0000 #define NFC_REG_ST 0x0004 @@ -153,6 +154,7 @@ /* define bit use in NFC_ECC_ST */ #define NFC_ECC_ERR(x) BIT(x) +#define NFC_ECC_ERR_MSK GENMASK(15, 0) #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) @@ -269,10 +271,12 @@ struct sunxi_nfc { void __iomem *regs; struct clk *ahb_clk; struct clk *mod_clk; + struct reset_control *reset; unsigned long assigned_cs; unsigned long clk_rate; struct list_head chips; struct completion complete; + struct dma_chan *dmac; }; static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) @@ -365,6 
+369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc) return ret; } +static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, + int chunksize, int nchunks, + enum dma_data_direction ddir, + struct scatterlist *sg) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct dma_async_tx_descriptor *dmad; + enum dma_transfer_direction tdir; + dma_cookie_t dmat; + int ret; + + if (ddir == DMA_FROM_DEVICE) + tdir = DMA_DEV_TO_MEM; + else + tdir = DMA_MEM_TO_DEV; + + sg_init_one(sg, buf, nchunks * chunksize); + ret = dma_map_sg(nfc->dev, sg, 1, ddir); + if (!ret) + return -ENOMEM; + + dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); + if (!dmad) { + ret = -EINVAL; + goto err_unmap_buf; + } + + writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, + nfc->regs + NFC_REG_CTL); + writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); + writel(chunksize, nfc->regs + NFC_REG_CNT); + dmat = dmaengine_submit(dmad); + + ret = dma_submit_error(dmat); + if (ret) + goto err_clr_dma_flag; + + return 0; + +err_clr_dma_flag: + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, + nfc->regs + NFC_REG_CTL); + +err_unmap_buf: + dma_unmap_sg(nfc->dev, sg, 1, ddir); + return ret; +} + +static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, + enum dma_data_direction ddir, + struct scatterlist *sg) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + + dma_unmap_sg(nfc->dev, sg, 1, ddir); + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, + nfc->regs + NFC_REG_CTL); +} + static int sunxi_nfc_dev_ready(struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); @@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, } static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, - int step, bool *erased) + int step, u32 status, bool *erased) { struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; - u32 status, tmp; + u32 tmp; *erased = false; - status = readl(nfc->regs + NFC_REG_ECC_ST); - if (status & NFC_ECC_ERR(step)) return -EBADMSG; @@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, *cur_off = oob_off + ecc->bytes + 4; ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? 
oob : NULL, 0, + readl(nfc->regs + NFC_REG_ECC_ST), &erased); if (erased) return 1; @@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, *cur_off = mtd->oobsize + mtd->writesize; } +static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, + int oob_required, int page, + int nchunks) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + bool randomized = nand->options & NAND_NEED_SCRAMBLING; + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct nand_ecc_ctrl *ecc = &nand->ecc; + unsigned int max_bitflips = 0; + int ret, i, raw_mode = 0; + struct scatterlist sg; + u32 status; + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, + DMA_FROM_DEVICE, &sg); + if (ret) + return ret; + + sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_randomizer_config(mtd, page, false); + sunxi_nfc_randomizer_enable(mtd); + + writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | + NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); + + dma_async_issue_pending(nfc->dmac); + + writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, + nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); + if (ret) + dmaengine_terminate_all(nfc->dmac); + + sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_hw_ecc_disable(mtd); + + sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); + + if (ret) + return ret; + + status = readl(nfc->regs + NFC_REG_ECC_ST); + + for (i = 0; i < nchunks; i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + u8 *data = buf + data_off; + u8 *oob = nand->oob_poi + oob_off; + bool erased; + + ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL, + oob_required ? oob : NULL, + i, status, &erased); + + /* ECC errors are handled in the second loop. */ + if (ret < 0) + continue; + + if (oob_required && !erased) { + /* TODO: use DMA to retrieve OOB */ + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, + mtd->writesize + oob_off, -1); + nand->read_buf(mtd, oob, ecc->bytes + 4); + + sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, + !i, page); + } + + if (erased) + raw_mode = 1; + + sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + } + + if (status & NFC_ECC_ERR_MSK) { + for (i = 0; i < nchunks; i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + u8 *data = buf + data_off; + u8 *oob = nand->oob_poi + oob_off; + + if (!(status & NFC_ECC_ERR(i))) + continue; + + /* + * Re-read the data with the randomizer disabled to + * identify bitflips in erased pages. 
+ */ + if (randomized) { + /* TODO: use DMA to read page in raw mode */ + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, + data_off, -1); + nand->read_buf(mtd, data, ecc->size); + } + + /* TODO: use DMA to retrieve OOB */ + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, + mtd->writesize + oob_off, -1); + nand->read_buf(mtd, oob, ecc->bytes + 4); + + ret = nand_check_erased_ecc_chunk(data, ecc->size, + oob, ecc->bytes + 4, + NULL, 0, + ecc->strength); + if (ret >= 0) + raw_mode = 1; + + sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + } + } + + if (oob_required) + sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, + NULL, !raw_mode, + page); + + return max_bitflips; +} + static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, const u8 *data, int data_off, const u8 *oob, int oob_off, @@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, return max_bitflips; } +static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd, + struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + int ret; + + ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, + chip->ecc.steps); + if (ret >= 0) + return ret; + + /* Fallback to PIO mode */ + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); + + return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page); +} + static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u32 data_offs, u32 readlen, @@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, return max_bitflips; } +static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd, + struct nand_chip *chip, + u32 data_offs, u32 readlen, + u8 *buf, int page) +{ + int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); + int ret; + + ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); + if (ret >= 0) + return ret; + + /* Fallback to PIO mode */ + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); + + return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen, + buf, page); +} + static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, @@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, return 0; } +static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, + struct nand_chip *chip, + u32 data_offs, u32 data_len, + const u8 *buf, int oob_required, + int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off = 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = data_offs / ecc->size; + i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + const u8 *data = buf + data_off; + const u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + oob_off + mtd->writesize, + &cur_off, !i, page); + if (ret) + return ret; + } + + sunxi_nfc_hw_ecc_disable(mtd); + + return 0; +} + +static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, + int oob_required, + int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct nand_ecc_ctrl *ecc = &nand->ecc; + struct scatterlist sg; + int ret, i; + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, + DMA_TO_DEVICE, &sg); + if (ret) + goto pio_fallback; + + for (i = 0; i < ecc->steps; i++) { + const u8 *oob 
= nand->oob_poi + (i * (ecc->bytes + 4)); + + sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); + } + + sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_randomizer_config(mtd, page, false); + sunxi_nfc_randomizer_enable(mtd); + + writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, + nfc->regs + NFC_REG_RCMD_SET); + + dma_async_issue_pending(nfc->dmac); + + writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | + NFC_DATA_TRANS | NFC_ACCESS_DIR, + nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); + if (ret) + dmaengine_terminate_all(nfc->dmac); + + sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_hw_ecc_disable(mtd); + + sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); + + if (ret) + return ret; + + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + /* TODO: use DMA to transfer extra OOB bytes ? */ + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + NULL, page); + + return 0; + +pio_fallback: + return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page); +} + static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, @@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, int ret; int i; + if (ecc->size != 512 && ecc->size != 1024) + return -EINVAL; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + /* Prefer 1k ECC chunk over 512 ones */ + if (ecc->size == 512 && mtd->writesize > 512) { + ecc->size = 1024; + ecc->strength *= 2; + } + /* Add ECC info retrieval from DT */ for (i = 0; i < ARRAY_SIZE(strengths); i++) { if (ecc->strength <= strengths[i]) @@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, struct device_node *np) { + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); int ret; ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); if (ret) return ret; - ecc->read_page = sunxi_nfc_hw_ecc_read_page; - ecc->write_page = sunxi_nfc_hw_ecc_write_page; + if (nfc->dmac) { + ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; + ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; + nand->options |= NAND_USE_BOUNCE_BUFFER; + } else { + ecc->read_page = sunxi_nfc_hw_ecc_read_page; + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; + ecc->write_page = sunxi_nfc_hw_ecc_write_page; + } + + /* TODO: support DMA for raw accesses and subpage write */ + ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; ecc->read_oob_raw = nand_read_oob_std; ecc->write_oob_raw = nand_write_oob_std; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; @@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev) if (ret) goto out_ahb_clk_unprepare; + nfc->reset = devm_reset_control_get_optional(dev, "ahb"); + if (!IS_ERR(nfc->reset)) { + ret = reset_control_deassert(nfc->reset); + if (ret) { + dev_err(dev, "reset err %d\n", ret); + goto out_mod_clk_unprepare; + } + } else if (PTR_ERR(nfc->reset) != -ENOENT) { + ret = PTR_ERR(nfc->reset); + goto out_mod_clk_unprepare; + } + ret = sunxi_nfc_rst(nfc); if (ret) - goto out_mod_clk_unprepare; + goto out_ahb_reset_reassert; writel(0, nfc->regs + NFC_REG_INT); ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, 0, "sunxi-nand", nfc); if (ret) - goto out_mod_clk_unprepare; + goto out_ahb_reset_reassert; + + nfc->dmac = 
dma_request_slave_channel(dev, "rxtx"); + if (nfc->dmac) { + struct dma_slave_config dmac_cfg = { }; + + dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA; + dmac_cfg.dst_addr = dmac_cfg.src_addr; + dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; + dmac_cfg.src_maxburst = 4; + dmac_cfg.dst_maxburst = 4; + dmaengine_slave_config(nfc->dmac, &dmac_cfg); + } else { + dev_warn(dev, "failed to request rxtx DMA channel\n"); + } platform_set_drvdata(pdev, nfc); ret = sunxi_nand_chips_init(dev, nfc); if (ret) { dev_err(dev, "failed to init nand chips\n"); - goto out_mod_clk_unprepare; + goto out_release_dmac; } return 0; +out_release_dmac: + if (nfc->dmac) + dma_release_channel(nfc->dmac); +out_ahb_reset_reassert: + if (!IS_ERR(nfc->reset)) + reset_control_assert(nfc->reset); out_mod_clk_unprepare: clk_disable_unprepare(nfc->mod_clk); out_ahb_clk_unprepare: @@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev) struct sunxi_nfc *nfc = platform_get_drvdata(pdev); sunxi_nand_chips_cleanup(nfc); + + if (!IS_ERR(nfc->reset)) + reset_control_assert(nfc->reset); + + if (nfc->dmac) + dma_release_channel(nfc->dmac); clk_disable_unprepare(nfc->mod_clk); clk_disable_unprepare(nfc->ahb_clk); diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 0cf0ac07a8c2..1f2948c0c458 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -4,6 +4,7 @@ * by the Free Software Foundation. * * Copyright © 2012 John Crispin <blogic@openwrt.org> + * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de> */ #include <linux/mtd/nand.h> @@ -16,20 +17,28 @@ #define EBU_ADDSEL1 0x24 #define EBU_NAND_CON 0xB0 #define EBU_NAND_WAIT 0xB4 +#define NAND_WAIT_RD BIT(0) /* NAND flash status output */ +#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */ #define EBU_NAND_ECC0 0xB8 #define EBU_NAND_ECC_AC 0xBC -/* nand commands */ -#define NAND_CMD_ALE (1 << 2) -#define NAND_CMD_CLE (1 << 3) -#define NAND_CMD_CS (1 << 4) -#define NAND_WRITE_CMD_RESET 0xff +/* + * nand commands + * The pins of the NAND chip are selected based on the address bits of the + * "register" read and write. There are no special registers, but an + * address range and the lower address bits are used to activate the + * correct line. For example when the bit (1 << 2) is set in the address + * the ALE pin will be activated. 
+ */ +#define NAND_CMD_ALE BIT(2) /* address latch enable */ +#define NAND_CMD_CLE BIT(3) /* command latch enable */ +#define NAND_CMD_CS BIT(4) /* chip select */ +#define NAND_CMD_SE BIT(5) /* spare area access latch */ +#define NAND_CMD_WP BIT(6) /* write protect */ #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) #define NAND_WRITE_DATA (NAND_CMD_CS) #define NAND_READ_DATA (NAND_CMD_CS) -#define NAND_WAIT_WR_C (1 << 3) -#define NAND_WAIT_RD (0x1) /* we need to tel the ebu which addr we mapped the nand to */ #define ADDSEL1_MASK(x) (x << 4) @@ -54,31 +63,41 @@ #define NAND_CON_CSMUX (1 << 1) #define NAND_CON_NANDM 1 -static void xway_reset_chip(struct nand_chip *chip) +struct xway_nand_data { + struct nand_chip chip; + unsigned long csflags; + void __iomem *nandaddr; +}; + +static u8 xway_readb(struct mtd_info *mtd, int op) { - unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; - unsigned long flags; + struct nand_chip *chip = mtd_to_nand(mtd); + struct xway_nand_data *data = nand_get_controller_data(chip); - nandaddr &= ~NAND_WRITE_ADDR; - nandaddr |= NAND_WRITE_CMD; + return readb(data->nandaddr + op); +} - /* finish with a reset */ - spin_lock_irqsave(&ebu_lock, flags); - writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); - while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) - ; - spin_unlock_irqrestore(&ebu_lock, flags); +static void xway_writeb(struct mtd_info *mtd, int op, u8 value) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct xway_nand_data *data = nand_get_controller_data(chip); + + writeb(value, data->nandaddr + op); } -static void xway_select_chip(struct mtd_info *mtd, int chip) +static void xway_select_chip(struct mtd_info *mtd, int select) { + struct nand_chip *chip = mtd_to_nand(mtd); + struct xway_nand_data *data = nand_get_controller_data(chip); - switch (chip) { + switch (select) { case -1: ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); + spin_unlock_irqrestore(&ebu_lock, data->csflags); break; case 0: + spin_lock_irqsave(&ebu_lock, data->csflags); ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); break; @@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip) static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { - struct nand_chip *this = mtd_to_nand(mtd); - unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; - unsigned long flags; - - if (ctrl & NAND_CTRL_CHANGE) { - nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); - if (ctrl & NAND_CLE) - nandaddr |= NAND_WRITE_CMD; - else - nandaddr |= NAND_WRITE_ADDR; - this->IO_ADDR_W = (void __iomem *) nandaddr; - } + if (cmd == NAND_CMD_NONE) + return; - if (cmd != NAND_CMD_NONE) { - spin_lock_irqsave(&ebu_lock, flags); - writeb(cmd, this->IO_ADDR_W); - while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) - ; - spin_unlock_irqrestore(&ebu_lock, flags); - } + if (ctrl & NAND_CLE) + xway_writeb(mtd, NAND_WRITE_CMD, cmd); + else if (ctrl & NAND_ALE) + xway_writeb(mtd, NAND_WRITE_ADDR, cmd); + + while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) + ; } static int xway_dev_ready(struct mtd_info *mtd) @@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd) static unsigned char xway_read_byte(struct mtd_info *mtd) { - struct nand_chip *this = mtd_to_nand(mtd); - unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; - unsigned long flags; - int ret; + return 
xway_readb(mtd, NAND_READ_DATA); +} + +static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; - spin_lock_irqsave(&ebu_lock, flags); - ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); - spin_unlock_irqrestore(&ebu_lock, flags); + for (i = 0; i < len; i++) + buf[i] = xway_readb(mtd, NAND_WRITE_DATA); +} - return ret; +static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + xway_writeb(mtd, NAND_WRITE_DATA, buf[i]); } +/* + * Probe for the NAND device. + */ static int xway_nand_probe(struct platform_device *pdev) { - struct nand_chip *this = platform_get_drvdata(pdev); - unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; - const __be32 *cs = of_get_property(pdev->dev.of_node, - "lantiq,cs", NULL); + struct xway_nand_data *data; + struct mtd_info *mtd; + struct resource *res; + int err; + u32 cs; u32 cs_flag = 0; + /* Allocate memory for the device structure (and zero it) */ + data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->nandaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->nandaddr)) + return PTR_ERR(data->nandaddr); + + nand_set_flash_node(&data->chip, pdev->dev.of_node); + mtd = nand_to_mtd(&data->chip); + mtd->dev.parent = &pdev->dev; + + data->chip.cmd_ctrl = xway_cmd_ctrl; + data->chip.dev_ready = xway_dev_ready; + data->chip.select_chip = xway_select_chip; + data->chip.write_buf = xway_write_buf; + data->chip.read_buf = xway_read_buf; + data->chip.read_byte = xway_read_byte; + data->chip.chip_delay = 30; + + data->chip.ecc.mode = NAND_ECC_SOFT; + data->chip.ecc.algo = NAND_ECC_HAMMING; + + platform_set_drvdata(pdev, data); + nand_set_controller_data(&data->chip, data); + /* load our CS from the DT. Either we find a valid 1 or default to 0 */ - if (cs && (*cs == 1)) + err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs); + if (!err && cs == 1) cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; /* setup the EBU to run in NAND mode on our base addr */ - ltq_ebu_w32(CPHYSADDR(nandaddr) - | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); + ltq_ebu_w32(CPHYSADDR(data->nandaddr) + | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 - | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 - | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); + | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 + | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P - | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P - | cs_flag, EBU_NAND_CON); + | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P + | cs_flag, EBU_NAND_CON); - /* finish with a reset */ - xway_reset_chip(this); + /* Scan to find existence of the device */ + err = nand_scan(mtd, 1); + if (err) + return err; - return 0; -} + err = mtd_device_register(mtd, NULL, 0); + if (err) + nand_release(mtd); -static struct platform_nand_data xway_nand_data = { - .chip = { - .nr_chips = 1, - .chip_delay = 30, - }, - .ctrl = { - .probe = xway_nand_probe, - .cmd_ctrl = xway_cmd_ctrl, - .dev_ready = xway_dev_ready, - .select_chip = xway_select_chip, - .read_byte = xway_read_byte, - } -}; + return err; +} /* - * Try to find the node inside the DT. If it is available attach out - * platform_nand_data + * Remove a NAND device. 
 */
-static int __init xway_register_nand(void)
+static int xway_nand_remove(struct platform_device *pdev)
 {
-	struct device_node *node;
-	struct platform_device *pdev;
-
-	node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
-	if (!node)
-		return -ENOENT;
-	pdev = of_find_device_by_node(node);
-	if (!pdev)
-		return -EINVAL;
-	pdev->dev.platform_data = &xway_nand_data;
-	of_node_put(node);
+	struct xway_nand_data *data = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&data->chip));
+
 	return 0;
 }
-subsys_initcall(xway_register_nand);
+
+static const struct of_device_id xway_nand_match[] = {
+	{ .compatible = "lantiq,nand-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xway_nand_match);
+
+static struct platform_driver xway_nand_driver = {
+	.probe = xway_nand_probe,
+	.remove = xway_nand_remove,
+	.driver = {
+		.name = "lantiq,nand-xway",
+		.of_match_table = xway_nand_match,
+	},
+};
+
+module_platform_driver(xway_nand_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index a4b029a417f0..1a6d0e367b89 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -3188,13 +3188,13 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 			size_t tmp_retlen;
 
 			ret = action(mtd, from, len, &tmp_retlen, buf);
+			if (ret)
+				break;
 
 			buf += tmp_retlen;
 			len -= tmp_retlen;
 			*retlen += tmp_retlen;
-
-			if (ret)
-				break;
 		}
 		otp_pages--;
 	}
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index d42c98e1f581..4a682ee0f632 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -29,6 +29,26 @@ config MTD_SPI_NOR_USE_4K_SECTORS
 	  Please note that some tools/drivers/filesystems may not work with
 	  4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
 
+config SPI_ATMEL_QUADSPI
+	tristate "Atmel Quad SPI Controller"
+	depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+	depends on OF && HAS_IOMEM
+	help
+	  This enables support for the Quad SPI controller in master mode.
+	  This driver does not support generic SPI. The implementation only
+	  supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+	tristate "Cadence Quad SPI controller"
+	depends on OF && ARM
+	help
+	  Enable support for the Cadence Quad SPI Flash controller.
+
+	  Cadence QSPI is a specialized controller for connecting an SPI
+	  Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+	  device with a Cadence QSPI controller and want to access the
+	  Flash as an MTD device.
+
 config SPI_FSL_QUADSPI
 	tristate "Freescale Quad SPI controller"
 	depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
@@ -38,6 +58,13 @@ config SPI_FSL_QUADSPI
 	  This controller does not support generic SPI. It only supports
 	  SPI NOR.
 
+config SPI_HISI_SFC
+	tristate "HiSilicon SPI-NOR Flash Controller (SFC)"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on HAS_IOMEM && HAS_DMA
+	help
+	  This enables support for the HiSilicon SPI-NOR flash controller.
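The three controller drivers added above are selected through ordinary Kconfig symbols, so a board configuration enables them like any other MTD driver. As a hypothetical fragment (only the symbol names are taken from the hunks above; the y/m choices are illustrative):

CONFIG_MTD_SPI_NOR=y
CONFIG_SPI_ATMEL_QUADSPI=y
CONFIG_SPI_CADENCE_QUADSPI=m
CONFIG_SPI_HISI_SFC=m

Note that SPI_CADENCE_QUADSPI additionally depends on OF && ARM, and SPI_HISI_SFC on HAS_IOMEM && HAS_DMA, as declared above.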
+ config SPI_NXP_SPIFI tristate "NXP SPI Flash Interface (SPIFI)" depends on OF && (ARCH_LPC18XX || COMPILE_TEST) diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 0bf3a7f81675..121695e83542 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -1,4 +1,7 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o +obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o +obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o +obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c new file mode 100644 index 000000000000..47937d9beec6 --- /dev/null +++ b/drivers/mtd/spi-nor/atmel-quadspi.c @@ -0,0 +1,732 @@ +/* + * Driver for Atmel QSPI Controller + * + * Copyright (C) 2015 Atmel Corporation + * + * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/platform_data/atmel.h> +#include <linux/of.h> + +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/pinctrl/consumer.h> + +/* QSPI register offsets */ +#define QSPI_CR 0x0000 /* Control Register */ +#define QSPI_MR 0x0004 /* Mode Register */ +#define QSPI_RD 0x0008 /* Receive Data Register */ +#define QSPI_TD 0x000c /* Transmit Data Register */ +#define QSPI_SR 0x0010 /* Status Register */ +#define QSPI_IER 0x0014 /* Interrupt Enable Register */ +#define QSPI_IDR 0x0018 /* Interrupt Disable Register */ +#define QSPI_IMR 0x001c /* Interrupt Mask Register */ +#define QSPI_SCR 0x0020 /* Serial Clock Register */ + +#define QSPI_IAR 0x0030 /* Instruction Address Register */ +#define QSPI_ICR 0x0034 /* Instruction Code Register */ +#define QSPI_IFR 0x0038 /* Instruction Frame Register */ + +#define QSPI_SMR 0x0040 /* Scrambling Mode Register */ +#define QSPI_SKR 0x0044 /* Scrambling Key Register */ + +#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */ +#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */ + +#define QSPI_VERSION 0x00FC /* Version Register */ + + +/* Bitfields in QSPI_CR (Control Register) */ +#define QSPI_CR_QSPIEN BIT(0) +#define QSPI_CR_QSPIDIS BIT(1) +#define QSPI_CR_SWRST BIT(7) +#define QSPI_CR_LASTXFER BIT(24) + +/* Bitfields in QSPI_MR (Mode Register) */ +#define QSPI_MR_SSM BIT(0) +#define QSPI_MR_LLB BIT(1) +#define QSPI_MR_WDRBT BIT(2) +#define QSPI_MR_SMRM BIT(3) +#define QSPI_MR_CSMODE_MASK GENMASK(5, 4) +#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4) +#define QSPI_MR_CSMODE_LASTXFER (1 
<< 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 12)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
+
+
+struct atmel_qspi {
+	void __iomem *regs;
+	void __iomem *mem;
+	struct clk *clk;
+	struct platform_device *pdev;
+	u32 pending;
+
+	struct spi_nor nor;
+	u32 clk_rate;
+	struct completion cmd_completion;
+};
+
+struct atmel_qspi_command {
+	union {
+		struct {
+			u32 instruction:1;
+			u32 address:3;
+			u32 mode:1;
+			u32 dummy:1;
+			u32 data:1;
+			u32 reserved:25;
+		} bits;
+		u32 word;
+	} enable;
+	u8 instruction;
+	u8 mode;
+	u8 num_mode_cycles;
+	u8 num_dummy_cycles;
+	u32 address;
+
+	size_t buf_len;
+	const void *tx_buf;
+	void
*rx_buf; +}; + +/* Register access functions */ +static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) +{ + return readl_relaxed(aq->regs + reg); +} + +static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value) +{ + writel_relaxed(value, aq->regs + reg); +} + +static int atmel_qspi_run_transfer(struct atmel_qspi *aq, + const struct atmel_qspi_command *cmd) +{ + void __iomem *ahb_mem; + + /* Then fallback to a PIO transfer (memcpy() DOES NOT work!) */ + ahb_mem = aq->mem; + if (cmd->enable.bits.address) + ahb_mem += cmd->address; + if (cmd->tx_buf) + _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len); + else + _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len); + + return 0; +} + +#ifdef DEBUG +static void atmel_qspi_debug_command(struct atmel_qspi *aq, + const struct atmel_qspi_command *cmd, + u32 ifr) +{ + u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; + size_t len = 0; + int i; + + if (cmd->enable.bits.instruction) + cmd_buf[len++] = cmd->instruction; + + for (i = cmd->enable.bits.address-1; i >= 0; --i) + cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff; + + if (cmd->enable.bits.mode) + cmd_buf[len++] = cmd->mode; + + if (cmd->enable.bits.dummy) { + int num = cmd->num_dummy_cycles; + + switch (ifr & QSPI_IFR_WIDTH_MASK) { + case QSPI_IFR_WIDTH_SINGLE_BIT_SPI: + case QSPI_IFR_WIDTH_DUAL_OUTPUT: + case QSPI_IFR_WIDTH_QUAD_OUTPUT: + num >>= 3; + break; + case QSPI_IFR_WIDTH_DUAL_IO: + case QSPI_IFR_WIDTH_DUAL_CMD: + num >>= 2; + break; + case QSPI_IFR_WIDTH_QUAD_IO: + case QSPI_IFR_WIDTH_QUAD_CMD: + num >>= 1; + break; + default: + return; + } + + for (i = 0; i < num; ++i) + cmd_buf[len++] = 0; + } + + /* Dump the SPI command */ + print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE, + 32, 1, cmd_buf, len, false); + +#ifdef VERBOSE_DEBUG + /* If verbose debug is enabled, also dump the TX data */ + if (cmd->enable.bits.data && cmd->tx_buf) + print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE, + 32, 1, cmd->tx_buf, cmd->buf_len, false); +#endif +} +#else +#define atmel_qspi_debug_command(aq, cmd, ifr) +#endif + +static int atmel_qspi_run_command(struct atmel_qspi *aq, + const struct atmel_qspi_command *cmd, + u32 ifr_tfrtyp, u32 ifr_width) +{ + u32 iar, icr, ifr, sr; + int err = 0; + + iar = 0; + icr = 0; + ifr = ifr_tfrtyp | ifr_width; + + /* Compute instruction parameters */ + if (cmd->enable.bits.instruction) { + icr |= QSPI_ICR_INST(cmd->instruction); + ifr |= QSPI_IFR_INSTEN; + } + + /* Compute address parameters */ + switch (cmd->enable.bits.address) { + case 4: + ifr |= QSPI_IFR_ADDRL; + /* fall through to the 24bit (3 byte) address case. */ + case 3: + iar = (cmd->enable.bits.data) ? 
0 : cmd->address; + ifr |= QSPI_IFR_ADDREN; + break; + case 0: + break; + default: + return -EINVAL; + } + + /* Compute option parameters */ + if (cmd->enable.bits.mode && cmd->num_mode_cycles) { + u32 mode_cycle_bits, mode_bits; + + icr |= QSPI_ICR_OPT(cmd->mode); + ifr |= QSPI_IFR_OPTEN; + + switch (ifr & QSPI_IFR_WIDTH_MASK) { + case QSPI_IFR_WIDTH_SINGLE_BIT_SPI: + case QSPI_IFR_WIDTH_DUAL_OUTPUT: + case QSPI_IFR_WIDTH_QUAD_OUTPUT: + mode_cycle_bits = 1; + break; + case QSPI_IFR_WIDTH_DUAL_IO: + case QSPI_IFR_WIDTH_DUAL_CMD: + mode_cycle_bits = 2; + break; + case QSPI_IFR_WIDTH_QUAD_IO: + case QSPI_IFR_WIDTH_QUAD_CMD: + mode_cycle_bits = 4; + break; + default: + return -EINVAL; + } + + mode_bits = cmd->num_mode_cycles * mode_cycle_bits; + switch (mode_bits) { + case 1: + ifr |= QSPI_IFR_OPTL_1BIT; + break; + + case 2: + ifr |= QSPI_IFR_OPTL_2BIT; + break; + + case 4: + ifr |= QSPI_IFR_OPTL_4BIT; + break; + + case 8: + ifr |= QSPI_IFR_OPTL_8BIT; + break; + + default: + return -EINVAL; + } + } + + /* Set number of dummy cycles */ + if (cmd->enable.bits.dummy) + ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles); + + /* Set data enable */ + if (cmd->enable.bits.data) { + ifr |= QSPI_IFR_DATAEN; + + /* Special case for Continuous Read Mode */ + if (!cmd->tx_buf && !cmd->rx_buf) + ifr |= QSPI_IFR_CRM; + } + + /* Clear pending interrupts */ + (void)qspi_readl(aq, QSPI_SR); + + /* Set QSPI Instruction Frame registers */ + atmel_qspi_debug_command(aq, cmd, ifr); + qspi_writel(aq, QSPI_IAR, iar); + qspi_writel(aq, QSPI_ICR, icr); + qspi_writel(aq, QSPI_IFR, ifr); + + /* Skip to the final steps if there is no data */ + if (!cmd->enable.bits.data) + goto no_data; + + /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ + (void)qspi_readl(aq, QSPI_IFR); + + /* Stop here for continuous read */ + if (!cmd->tx_buf && !cmd->rx_buf) + return 0; + /* Send/Receive data */ + err = atmel_qspi_run_transfer(aq, cmd); + + /* Release the chip-select */ + qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); + + if (err) + return err; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + /* + * If verbose debug is enabled, also dump the RX data in addition to + * the SPI command previously dumped by atmel_qspi_debug_command() + */ + if (cmd->rx_buf) + print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE, + 32, 1, cmd->rx_buf, cmd->buf_len, false); +#endif +no_data: + /* Poll INSTRuction End status */ + sr = qspi_readl(aq, QSPI_SR); + if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) + return err; + + /* Wait for INSTRuction End interrupt */ + reinit_completion(&aq->cmd_completion); + aq->pending = sr & QSPI_SR_CMD_COMPLETED; + qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); + if (!wait_for_completion_timeout(&aq->cmd_completion, + msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); + + return err; +} + +static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct atmel_qspi *aq = nor->priv; + struct atmel_qspi_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.enable.bits.instruction = 1; + cmd.enable.bits.data = 1; + cmd.instruction = opcode; + cmd.rx_buf = buf; + cmd.buf_len = len; + return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ, + QSPI_IFR_WIDTH_SINGLE_BIT_SPI); +} + +static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct atmel_qspi *aq = nor->priv; + struct atmel_qspi_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.enable.bits.instruction = 1; + 
cmd.enable.bits.data = (buf != NULL && len > 0);
+	cmd.instruction = opcode;
+	cmd.tx_buf = buf;
+	cmd.buf_len = len;
+	return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+				      QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+				const u_char *write_buf)
+{
+	struct atmel_qspi *aq = nor->priv;
+	struct atmel_qspi_command cmd;
+	ssize_t ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.enable.bits.instruction = 1;
+	cmd.enable.bits.address = nor->addr_width;
+	cmd.enable.bits.data = 1;
+	cmd.instruction = nor->program_opcode;
+	cmd.address = (u32)to;
+	cmd.tx_buf = write_buf;
+	cmd.buf_len = len;
+	ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
+				     QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+	return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+	struct atmel_qspi *aq = nor->priv;
+	struct atmel_qspi_command cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.enable.bits.instruction = 1;
+	cmd.enable.bits.address = nor->addr_width;
+	cmd.instruction = nor->erase_opcode;
+	cmd.address = (u32)offs;
+	return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+				      QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+			       u_char *read_buf)
+{
+	struct atmel_qspi *aq = nor->priv;
+	struct atmel_qspi_command cmd;
+	u8 num_mode_cycles, num_dummy_cycles;
+	u32 ifr_width;
+	ssize_t ret;
+
+	switch (nor->flash_read) {
+	case SPI_NOR_NORMAL:
+	case SPI_NOR_FAST:
+		ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+		break;
+
+	case SPI_NOR_DUAL:
+		ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT;
+		break;
+
+	case SPI_NOR_QUAD:
+		ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (nor->read_dummy >= 2) {
+		num_mode_cycles = 2;
+		num_dummy_cycles = nor->read_dummy - 2;
+	} else {
+		num_mode_cycles = nor->read_dummy;
+		num_dummy_cycles = 0;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.enable.bits.instruction = 1;
+	cmd.enable.bits.address = nor->addr_width;
+	cmd.enable.bits.mode = (num_mode_cycles > 0);
+	cmd.enable.bits.dummy = (num_dummy_cycles > 0);
+	cmd.enable.bits.data = 1;
+	cmd.instruction = nor->read_opcode;
+	cmd.address = (u32)from;
+	cmd.mode = 0xff; /* This value prevents the flash from entering the 0-4-4 mode */
+	cmd.num_mode_cycles = num_mode_cycles;
+	cmd.num_dummy_cycles = num_dummy_cycles;
+	cmd.rx_buf = read_buf;
+	cmd.buf_len = len;
+	ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
+				     ifr_width);
+	return (ret < 0) ?
ret : len; +} + +static int atmel_qspi_init(struct atmel_qspi *aq) +{ + unsigned long src_rate; + u32 mr, scr, scbr; + + /* Reset the QSPI controller */ + qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); + + /* Set the QSPI controller in Serial Memory Mode */ + mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM; + qspi_writel(aq, QSPI_MR, mr); + + src_rate = clk_get_rate(aq->clk); + if (!src_rate) + return -EINVAL; + + /* Compute the QSPI baudrate */ + scbr = DIV_ROUND_UP(src_rate, aq->clk_rate); + if (scbr > 0) + scbr--; + scr = QSPI_SCR_SCBR(scbr); + qspi_writel(aq, QSPI_SCR, scr); + + /* Enable the QSPI controller */ + qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); + + return 0; +} + +static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) +{ + struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; + u32 status, mask, pending; + + status = qspi_readl(aq, QSPI_SR); + mask = qspi_readl(aq, QSPI_IMR); + pending = status & mask; + + if (!pending) + return IRQ_NONE; + + aq->pending |= pending; + if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) + complete(&aq->cmd_completion); + + return IRQ_HANDLED; +} + +static int atmel_qspi_probe(struct platform_device *pdev) +{ + struct device_node *child, *np = pdev->dev.of_node; + struct atmel_qspi *aq; + struct resource *res; + struct spi_nor *nor; + struct mtd_info *mtd; + int irq, err = 0; + + if (of_get_child_count(np) != 1) + return -ENODEV; + child = of_get_next_child(np, NULL); + + aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL); + if (!aq) { + err = -ENOMEM; + goto exit; + } + + platform_set_drvdata(pdev, aq); + init_completion(&aq->cmd_completion); + aq->pdev = pdev; + + /* Map the registers */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base"); + aq->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(aq->regs)) { + dev_err(&pdev->dev, "missing registers\n"); + err = PTR_ERR(aq->regs); + goto exit; + } + + /* Map the AHB memory */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap"); + aq->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(aq->mem)) { + dev_err(&pdev->dev, "missing AHB memory\n"); + err = PTR_ERR(aq->mem); + goto exit; + } + + /* Get the peripheral clock */ + aq->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(aq->clk)) { + dev_err(&pdev->dev, "missing peripheral clock\n"); + err = PTR_ERR(aq->clk); + goto exit; + } + + /* Enable the peripheral clock */ + err = clk_prepare_enable(aq->clk); + if (err) { + dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); + goto exit; + } + + /* Request the IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "missing IRQ\n"); + err = irq; + goto disable_clk; + } + err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, + 0, dev_name(&pdev->dev), aq); + if (err) + goto disable_clk; + + /* Setup the spi-nor */ + nor = &aq->nor; + mtd = &nor->mtd; + + nor->dev = &pdev->dev; + spi_nor_set_flash_node(nor, child); + nor->priv = aq; + mtd->priv = nor; + + nor->read_reg = atmel_qspi_read_reg; + nor->write_reg = atmel_qspi_write_reg; + nor->read = atmel_qspi_read; + nor->write = atmel_qspi_write; + nor->erase = atmel_qspi_erase; + + err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate); + if (err < 0) + goto disable_clk; + + err = atmel_qspi_init(aq); + if (err) + goto disable_clk; + + err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + if (err) + goto disable_clk; + + err = mtd_device_register(mtd, NULL, 0); + if (err) + goto disable_clk; + + of_node_put(child); + + return 0; + 
+disable_clk: + clk_disable_unprepare(aq->clk); +exit: + of_node_put(child); + + return err; +} + +static int atmel_qspi_remove(struct platform_device *pdev) +{ + struct atmel_qspi *aq = platform_get_drvdata(pdev); + + mtd_device_unregister(&aq->nor.mtd); + qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); + clk_disable_unprepare(aq->clk); + return 0; +} + + +static const struct of_device_id atmel_qspi_dt_ids[] = { + { .compatible = "atmel,sama5d2-qspi" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids); + +static struct platform_driver atmel_qspi_driver = { + .driver = { + .name = "atmel_qspi", + .of_match_table = atmel_qspi_dt_ids, + }, + .probe = atmel_qspi_probe, + .remove = atmel_qspi_remove, +}; +module_platform_driver(atmel_qspi_driver); + +MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>"); +MODULE_DESCRIPTION("Atmel QSPI Controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c new file mode 100644 index 000000000000..d403ba7b8f43 --- /dev/null +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -0,0 +1,1299 @@ +/* + * Driver for Cadence QSPI Controller + * + * Copyright Altera Corporation (C) 2012-2014. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/spi/spi.h> +#include <linux/timer.h> + +#define CQSPI_NAME "cadence-qspi" +#define CQSPI_MAX_CHIPSELECT 16 + +struct cqspi_st; + +struct cqspi_flash_pdata { + struct spi_nor nor; + struct cqspi_st *cqspi; + u32 clk_rate; + u32 read_delay; + u32 tshsl_ns; + u32 tsd2d_ns; + u32 tchsh_ns; + u32 tslch_ns; + u8 inst_width; + u8 addr_width; + u8 data_width; + u8 cs; + bool registered; +}; + +struct cqspi_st { + struct platform_device *pdev; + + struct clk *clk; + unsigned int sclk; + + void __iomem *iobase; + void __iomem *ahb_base; + struct completion transfer_complete; + struct mutex bus_mutex; + + int current_cs; + int current_page_size; + int current_erase_size; + int current_addr_width; + unsigned long master_ref_clk_hz; + bool is_decoded_cs; + u32 fifo_depth; + u32 fifo_width; + u32 trigger_address; + struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; +}; + +/* Operation timeout value */ +#define CQSPI_TIMEOUT_MS 500 +#define CQSPI_READ_TIMEOUT_MS 10 + +/* Instruction type */ +#define CQSPI_INST_TYPE_SINGLE 0 +#define CQSPI_INST_TYPE_DUAL 1 +#define CQSPI_INST_TYPE_QUAD 2 + +#define CQSPI_DUMMY_CLKS_PER_BYTE 8 +#define CQSPI_DUMMY_BYTES_MAX 4 +#define CQSPI_DUMMY_CLKS_MAX 31 + +#define CQSPI_STIG_DATA_LEN_MAX 8 + +/* Register map */ +#define CQSPI_REG_CONFIG 0x00 +#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) +#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) +#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 +#define CQSPI_REG_CONFIG_DMA_MASK BIT(15) +#define CQSPI_REG_CONFIG_BAUD_LSB 19 +#define CQSPI_REG_CONFIG_IDLE_LSB 31 +#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF +#define CQSPI_REG_CONFIG_BAUD_MASK 0xF + +#define CQSPI_REG_RD_INSTR 0x04 +#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 +#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 +#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 +#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 +#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 +#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F + +#define CQSPI_REG_WR_INSTR 0x08 +#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 +#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 +#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 + +#define CQSPI_REG_DELAY 0x0C +#define CQSPI_REG_DELAY_TSLCH_LSB 0 +#define CQSPI_REG_DELAY_TCHSH_LSB 8 +#define CQSPI_REG_DELAY_TSD2D_LSB 16 +#define CQSPI_REG_DELAY_TSHSL_LSB 24 +#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF +#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF +#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF +#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF + +#define CQSPI_REG_READCAPTURE 0x10 +#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 +#define CQSPI_REG_READCAPTURE_DELAY_LSB 1 +#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF + +#define CQSPI_REG_SIZE 0x14 +#define CQSPI_REG_SIZE_ADDRESS_LSB 0 +#define CQSPI_REG_SIZE_PAGE_LSB 4 +#define CQSPI_REG_SIZE_BLOCK_LSB 16 +#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF +#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF +#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F + 
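As an aside on the register map above: every multi-bit field comes as an LSB/MASK pair, and register values are composed by masking each field to its width and shifting it into position. A minimal sketch (not code from this patch; the helper name and its pre-computed cycle-count parameters are hypothetical) using the CQSPI_REG_DELAY fields:

/* Pack four 8-bit device delay counts into CQSPI_REG_DELAY. */
static void cqspi_delay_sketch(void __iomem *iobase, u32 tslch, u32 tchsh,
			       u32 tsd2d, u32 tshsl)
{
	u32 reg;

	reg = (tslch & CQSPI_REG_DELAY_TSLCH_MASK) << CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) << CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) << CQSPI_REG_DELAY_TSD2D_LSB;
	reg |= (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) << CQSPI_REG_DELAY_TSHSL_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}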
+#define CQSPI_REG_SRAMPARTITION 0x18 +#define CQSPI_REG_INDIRECTTRIGGER 0x1C + +#define CQSPI_REG_DMA 0x20 +#define CQSPI_REG_DMA_SINGLE_LSB 0 +#define CQSPI_REG_DMA_BURST_LSB 8 +#define CQSPI_REG_DMA_SINGLE_MASK 0xFF +#define CQSPI_REG_DMA_BURST_MASK 0xFF + +#define CQSPI_REG_REMAP 0x24 +#define CQSPI_REG_MODE_BIT 0x28 + +#define CQSPI_REG_SDRAMLEVEL 0x2C +#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 +#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 +#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF +#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF + +#define CQSPI_REG_IRQSTATUS 0x40 +#define CQSPI_REG_IRQMASK 0x44 + +#define CQSPI_REG_INDIRECTRD 0x60 +#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) +#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) +#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) + +#define CQSPI_REG_INDIRECTRDWATERMARK 0x64 +#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 +#define CQSPI_REG_INDIRECTRDBYTES 0x6C + +#define CQSPI_REG_CMDCTRL 0x90 +#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) +#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) +#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 +#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 +#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 +#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 +#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 +#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 +#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 +#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 +#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 +#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 + +#define CQSPI_REG_INDIRECTWR 0x70 +#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) +#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) +#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) + +#define CQSPI_REG_INDIRECTWRWATERMARK 0x74 +#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 +#define CQSPI_REG_INDIRECTWRBYTES 0x7C + +#define CQSPI_REG_CMDADDRESS 0x94 +#define CQSPI_REG_CMDREADDATALOWER 0xA0 +#define CQSPI_REG_CMDREADDATAUPPER 0xA4 +#define CQSPI_REG_CMDWRITEDATALOWER 0xA8 +#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC + +/* Interrupt status bits */ +#define CQSPI_REG_IRQ_MODE_ERR BIT(0) +#define CQSPI_REG_IRQ_UNDERFLOW BIT(1) +#define CQSPI_REG_IRQ_IND_COMP BIT(2) +#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) +#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) +#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) +#define CQSPI_REG_IRQ_WATERMARK BIT(6) +#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) + +#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ + CQSPI_REG_IRQ_IND_SRAM_FULL | \ + CQSPI_REG_IRQ_IND_COMP) + +#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ + CQSPI_REG_IRQ_WATERMARK | \ + CQSPI_REG_IRQ_UNDERFLOW) + +#define CQSPI_IRQ_STATUS_MASK 0x1FFFF + +static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear) +{ + unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); + u32 val; + + while (1) { + val = readl(reg); + if (clear) + val = ~val; + val &= mask; + + if (val == mask) + return 0; + + if (time_after(jiffies, end)) + return -ETIMEDOUT; + } +} + +static bool cqspi_is_idle(struct cqspi_st *cqspi) +{ + u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); + + return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB); +} + +static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) +{ + u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); + + reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; + return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; +} + +static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) +{ + struct cqspi_st *cqspi = dev; + unsigned int irq_status; + + /* Read interrupt status */ + irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); + 
+	/* Clear interrupt */
+	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+	if (irq_status)
+		complete(&cqspi->transfer_complete);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	u32 rdreg = 0;
+
+	rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+	rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+	rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+	return rdreg;
+}
+
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+	const unsigned int poll_idle_retry = 3;
+	unsigned int count = 0;
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+	while (1) {
+		/*
+		 * Read a few times in succession to ensure the controller
+		 * is indeed idle, that is, the bit does not transition
+		 * low again.
+		 */
+		if (cqspi_is_idle(cqspi))
+			count++;
+		else
+			count = 0;
+
+		if (count >= poll_idle_retry)
+			return 0;
+
+		if (time_after(jiffies, timeout)) {
+			/* Timed out, controller still busy. */
+			dev_err(&cqspi->pdev->dev,
+				"QSPI is still busy after %dms timeout.\n",
+				CQSPI_TIMEOUT_MS);
+			return -ETIMEDOUT;
+		}
+
+		cpu_relax();
+	}
+}
+
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+	void __iomem *reg_base = cqspi->iobase;
+	int ret;
+
+	/* Write the CMDCTRL register without starting execution. */
+	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+	/* Start execution. */
+	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+	/* Polling for completion. */
+	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+	if (ret) {
+		dev_err(&cqspi->pdev->dev,
+			"Flash command execution timed out.\n");
+		return ret;
+	}
+
+	/* Polling QSPI idle status. */
+	return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_command_read(struct spi_nor *nor,
+			      const u8 *txbuf, const unsigned n_tx,
+			      u8 *rxbuf, const unsigned n_rx)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	unsigned int rdreg;
+	unsigned int reg;
+	unsigned int read_len;
+	int status;
+
+	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+		dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+			n_rx, rxbuf);
+		return -EINVAL;
+	}
+
+	reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+	rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+	/* 0 means 1 byte. */
+	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+	status = cqspi_exec_flash_cmd(cqspi, reg);
+	if (status)
+		return status;
+
+	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+	/* Put the read value into rx_buf */
+	read_len = (n_rx > 4) ?
4 : n_rx;
+	memcpy(rxbuf, &reg, read_len);
+	rxbuf += read_len;
+
+	if (n_rx > 4) {
+		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+		read_len = n_rx - read_len;
+		memcpy(rxbuf, &reg, read_len);
+	}
+
+	return 0;
+}
+
+static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
+			       const u8 *txbuf, const unsigned n_tx)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	unsigned int reg;
+	unsigned int data;
+	int ret;
+
+	if (n_tx > 4 || (n_tx && !txbuf)) {
+		dev_err(nor->dev,
+			"Invalid input argument, cmdlen %d txbuf 0x%p\n",
+			n_tx, txbuf);
+		return -EINVAL;
+	}
+
+	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+	if (n_tx) {
+		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+		data = 0;
+		memcpy(&data, txbuf, n_tx);
+		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+	}
+
+	ret = cqspi_exec_flash_cmd(cqspi, reg);
+	return ret;
+}
+
+static int cqspi_command_write_addr(struct spi_nor *nor,
+				    const u8 opcode, const unsigned int addr)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	unsigned int reg;
+
+	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+	reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+	reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+		<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+	writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+	return cqspi_exec_flash_cmd(cqspi, reg);
+}
+
+static int cqspi_indirect_read_setup(struct spi_nor *nor,
+				     const unsigned int from_addr)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	unsigned int dummy_clk = 0;
+	unsigned int reg;
+
+	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+
+	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+
+	/* Setup dummy clock cycles */
+	dummy_clk = nor->read_dummy;
+	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+		dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+
+	if (dummy_clk / 8) {
+		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+		/* Set mode bits high to ensure chip doesn't enter XIP */
+		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+		/* Need to subtract the mode byte (8 clocks). */
+		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+			dummy_clk -= 8;
+
+		if (dummy_clk)
+			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
+	}
+
+	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+	/* Set address width */
+	reg = readl(reg_base + CQSPI_REG_SIZE);
+	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+	reg |= (nor->addr_width - 1);
+	writel(reg, reg_base + CQSPI_REG_SIZE);
+	return 0;
+}
+
+static int cqspi_indirect_read_execute(struct spi_nor *nor,
+				       u8 *rxbuf, const unsigned n_rx)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	void __iomem *ahb_base = cqspi->ahb_base;
+	unsigned int remaining = n_rx;
+	unsigned int bytes_to_read = 0;
+	int ret = 0;
+
+	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+	/* Clear all interrupts.
*/ + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); + + writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); + + reinit_completion(&cqspi->transfer_complete); + writel(CQSPI_REG_INDIRECTRD_START_MASK, + reg_base + CQSPI_REG_INDIRECTRD); + + while (remaining > 0) { + ret = wait_for_completion_timeout(&cqspi->transfer_complete, + msecs_to_jiffies + (CQSPI_READ_TIMEOUT_MS)); + + bytes_to_read = cqspi_get_rd_sram_level(cqspi); + + if (!ret && bytes_to_read == 0) { + dev_err(nor->dev, "Indirect read timeout, no bytes\n"); + ret = -ETIMEDOUT; + goto failrd; + } + + while (bytes_to_read != 0) { + bytes_to_read *= cqspi->fifo_width; + bytes_to_read = bytes_to_read > remaining ? + remaining : bytes_to_read; + readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4)); + rxbuf += bytes_to_read; + remaining -= bytes_to_read; + bytes_to_read = cqspi_get_rd_sram_level(cqspi); + } + + if (remaining > 0) + reinit_completion(&cqspi->transfer_complete); + } + + /* Check indirect done status */ + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, + CQSPI_REG_INDIRECTRD_DONE_MASK, 0); + if (ret) { + dev_err(nor->dev, + "Indirect read completion error (%i)\n", ret); + goto failrd; + } + + /* Disable interrupt */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Clear indirect completion status */ + writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); + + return 0; + +failrd: + /* Disable interrupt */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Cancel the indirect read */ + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, + reg_base + CQSPI_REG_INDIRECTRD); + return ret; +} + +static int cqspi_indirect_write_setup(struct spi_nor *nor, + const unsigned int to_addr) +{ + unsigned int reg; + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + + /* Set opcode. */ + reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; + writel(reg, reg_base + CQSPI_REG_WR_INSTR); + reg = cqspi_calc_rdreg(nor, nor->program_opcode); + writel(reg, reg_base + CQSPI_REG_RD_INSTR); + + writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); + + reg = readl(reg_base + CQSPI_REG_SIZE); + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; + reg |= (nor->addr_width - 1); + writel(reg, reg_base + CQSPI_REG_SIZE); + return 0; +} + +static int cqspi_indirect_write_execute(struct spi_nor *nor, + const u8 *txbuf, const unsigned n_tx) +{ + const unsigned int page_size = nor->page_size; + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int remaining = n_tx; + unsigned int write_bytes; + int ret; + + writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); + + /* Clear all interrupts. */ + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); + + writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); + + reinit_completion(&cqspi->transfer_complete); + writel(CQSPI_REG_INDIRECTWR_START_MASK, + reg_base + CQSPI_REG_INDIRECTWR); + + while (remaining > 0) { + write_bytes = remaining > page_size ? 
page_size : remaining; + writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4)); + + ret = wait_for_completion_timeout(&cqspi->transfer_complete, + msecs_to_jiffies + (CQSPI_TIMEOUT_MS)); + if (!ret) { + dev_err(nor->dev, "Indirect write timeout\n"); + ret = -ETIMEDOUT; + goto failwr; + } + + txbuf += write_bytes; + remaining -= write_bytes; + + if (remaining > 0) + reinit_completion(&cqspi->transfer_complete); + } + + /* Check indirect done status */ + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, + CQSPI_REG_INDIRECTWR_DONE_MASK, 0); + if (ret) { + dev_err(nor->dev, + "Indirect write completion error (%i)\n", ret); + goto failwr; + } + + /* Disable interrupt. */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Clear indirect completion status */ + writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); + + cqspi_wait_idle(cqspi); + + return 0; + +failwr: + /* Disable interrupt. */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Cancel the indirect write */ + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, + reg_base + CQSPI_REG_INDIRECTWR); + return ret; +} + +static void cqspi_chipselect(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int chip_select = f_pdata->cs; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + if (cqspi->is_decoded_cs) { + reg |= CQSPI_REG_CONFIG_DECODE_MASK; + } else { + reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; + + /* Convert CS if without decoder. + * CS0 to 4b'1110 + * CS1 to 4b'1101 + * CS2 to 4b'1011 + * CS3 to 4b'0111 + */ + chip_select = 0xF & ~(1 << chip_select); + } + + reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK + << CQSPI_REG_CONFIG_CHIPSELECT_LSB); + reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) + << CQSPI_REG_CONFIG_CHIPSELECT_LSB; + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_configure_cs_and_sizes(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *iobase = cqspi->iobase; + unsigned int reg; + + /* configure page size and block size. 
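Without an external decoder, cqspi_chipselect() above converts the logical chip-select number into the one-cold pattern the controller drives on its four active-low CS lines (CS0 becomes 4b'1110, and so on). The conversion is simply 0xF & ~(1 << cs); a standalone sketch that prints the full mapping:

```c
#include <stdio.h>

static unsigned int cs_to_onecold(unsigned int cs)
{
	/* active-low: clear only the selected line's bit */
	return 0xF & ~(1u << cs);
}

int main(void)
{
	for (unsigned int cs = 0; cs < 4; cs++) {
		unsigned int v = cs_to_onecold(cs);

		printf("CS%u -> 0b", cs);
		for (int b = 3; b >= 0; b--)
			putchar((v >> b) & 1 ? '1' : '0');
		printf(" (0x%X)\n", v);
	}
	return 0;
}
```

In decoded mode the raw chip-select number is programmed as-is and external logic expands it, which is why the driver only rewrites the field after choosing between the two encodings.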
*/ + reg = readl(iobase + CQSPI_REG_SIZE); + reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); + reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; + reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); + reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); + reg |= (nor->addr_width - 1); + writel(reg, iobase + CQSPI_REG_SIZE); + + /* configure the chip select */ + cqspi_chipselect(nor); + + /* Store the new configuration of the controller */ + cqspi->current_page_size = nor->page_size; + cqspi->current_erase_size = nor->mtd.erasesize; + cqspi->current_addr_width = nor->addr_width; +} + +static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, + const unsigned int ns_val) +{ + unsigned int ticks; + + ticks = ref_clk_hz / 1000; /* kHz */ + ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); + + return ticks; +} + +static void cqspi_delay(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *iobase = cqspi->iobase; + const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; + unsigned int tshsl, tchsh, tslch, tsd2d; + unsigned int reg; + unsigned int tsclk; + + /* calculate the number of ref ticks for one sclk tick */ + tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk); + + tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); + /* this particular value must be at least one sclk */ + if (tshsl < tsclk) + tshsl = tsclk; + + tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); + tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); + tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); + + reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) + << CQSPI_REG_DELAY_TSHSL_LSB; + reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) + << CQSPI_REG_DELAY_TCHSH_LSB; + reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) + << CQSPI_REG_DELAY_TSLCH_LSB; + reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) + << CQSPI_REG_DELAY_TSD2D_LSB; + writel(reg, iobase + CQSPI_REG_DELAY); +} + +static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) +{ + const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; + void __iomem *reg_base = cqspi->iobase; + u32 reg, div; + + /* Recalculate the baudrate divisor based on QSPI specification. 
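calculate_ticks_for_ns() above scales the reference clock down to kHz before multiplying, so the tick count for a nanosecond delay stays comfortably within 32 bits for the values a device tree will supply, and the baud divisor computed just below rounds up so the generated SCLK never exceeds the requested rate. A standalone sketch of both calculations with illustrative clock numbers:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int ticks_for_ns(unsigned int ref_clk_hz, unsigned int ns)
{
	unsigned int ticks = ref_clk_hz / 1000;		/* kHz */

	return DIV_ROUND_UP(ticks * ns, 1000000);
}

int main(void)
{
	unsigned int ref = 400000000;	/* 400 MHz reference, illustrative */
	unsigned int sclk = 50000000;	/* 50 MHz requested SCLK */
	unsigned int div;

	/* 50 ns of tSHSL at 400 MHz comes out as 20 reference ticks */
	printf("tshsl ticks = %u\n", ticks_for_ns(ref, 50));

	/* the DIV_ROUND_UP implies f_sclk = ref / (2 * (div + 1));
	 * rounding up keeps the actual rate at or below the request */
	div = DIV_ROUND_UP(ref, 2 * sclk) - 1;
	printf("baud div = %u -> actual sclk = %u Hz\n",
	       div, ref / (2 * (div + 1)));
	return 0;
}
```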
*/ + div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); + reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_readdata_capture(struct cqspi_st *cqspi, + const unsigned int bypass, + const unsigned int delay) +{ + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_READCAPTURE); + + if (bypass) + reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); + else + reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); + + reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK + << CQSPI_REG_READCAPTURE_DELAY_LSB); + + reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) + << CQSPI_REG_READCAPTURE_DELAY_LSB; + + writel(reg, reg_base + CQSPI_REG_READCAPTURE); +} + +static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) +{ + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + + if (enable) + reg |= CQSPI_REG_CONFIG_ENABLE_MASK; + else + reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; + + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_configure(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + const unsigned int sclk = f_pdata->clk_rate; + int switch_cs = (cqspi->current_cs != f_pdata->cs); + int switch_ck = (cqspi->sclk != sclk); + + if ((cqspi->current_page_size != nor->page_size) || + (cqspi->current_erase_size != nor->mtd.erasesize) || + (cqspi->current_addr_width != nor->addr_width)) + switch_cs = 1; + + if (switch_cs || switch_ck) + cqspi_controller_enable(cqspi, 0); + + /* Switch chip select. */ + if (switch_cs) { + cqspi->current_cs = f_pdata->cs; + cqspi_configure_cs_and_sizes(nor); + } + + /* Setup baudrate divisor and delays */ + if (switch_ck) { + cqspi->sclk = sclk; + cqspi_config_baudrate_div(cqspi); + cqspi_delay(nor); + cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); + } + + if (switch_cs || switch_ck) + cqspi_controller_enable(cqspi, 1); +} + +static int cqspi_set_protocol(struct spi_nor *nor, const int read) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + + f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; + f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; + f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; + + if (read) { + switch (nor->flash_read) { + case SPI_NOR_NORMAL: + case SPI_NOR_FAST: + f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; + break; + case SPI_NOR_DUAL: + f_pdata->data_width = CQSPI_INST_TYPE_DUAL; + break; + case SPI_NOR_QUAD: + f_pdata->data_width = CQSPI_INST_TYPE_QUAD; + break; + default: + return -EINVAL; + } + } + + cqspi_configure(nor); + + return 0; +} + +static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *buf) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (ret) + return ret; + + ret = cqspi_indirect_write_setup(nor, to); + if (ret) + return ret; + + ret = cqspi_indirect_write_execute(nor, buf, len); + if (ret) + return ret; + + return (ret < 0) ? ret : len; +} + +static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, + size_t len, u_char *buf) +{ + int ret; + + ret = cqspi_set_protocol(nor, 1); + if (ret) + return ret; + + ret = cqspi_indirect_read_setup(nor, from); + if (ret) + return ret; + + ret = cqspi_indirect_read_execute(nor, buf, len); + if (ret) + return ret; + + return (ret < 0) ? 
ret : len; +} + +static int cqspi_erase(struct spi_nor *nor, loff_t offs) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (ret) + return ret; + + /* Send write enable, then erase commands. */ + ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); + if (ret) + return ret; + + /* Set up command buffer. */ + ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs); + if (ret) + return ret; + + return 0; +} + +static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + + mutex_lock(&cqspi->bus_mutex); + + return 0; +} + +static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + + mutex_unlock(&cqspi->bus_mutex); +} + +static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (!ret) + ret = cqspi_command_read(nor, &opcode, 1, buf, len); + + return ret; +} + +static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (!ret) + ret = cqspi_command_write(nor, opcode, buf, len); + + return ret; +} + +static int cqspi_of_get_flash_pdata(struct platform_device *pdev, + struct cqspi_flash_pdata *f_pdata, + struct device_node *np) +{ + if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { + dev_err(&pdev->dev, "couldn't determine read-delay\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { + dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { + dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { + dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { + dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { + dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); + return -ENXIO; + } + + return 0; +} + +static int cqspi_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct cqspi_st *cqspi = platform_get_drvdata(pdev); + + cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); + + if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { + dev_err(&pdev->dev, "couldn't determine fifo-depth\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { + dev_err(&pdev->dev, "couldn't determine fifo-width\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,trigger-address", + &cqspi->trigger_address)) { + dev_err(&pdev->dev, "couldn't determine trigger-address\n"); + return -ENXIO; + } + + return 0; +} + +static void cqspi_controller_init(struct cqspi_st *cqspi) +{ + cqspi_controller_enable(cqspi, 0); + + /* Configure the remap address register, no remap */ + writel(0, cqspi->iobase + CQSPI_REG_REMAP); + + /* Disable all interrupts. */ + writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); + + /* Configure the SRAM split to 1:1 . */ + writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); + + /* Load indirect trigger address. 
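cqspi_controller_init() above splits the controller's SRAM evenly between the read and write FIFO partitions (the partition register takes FIFO locations), while the watermark writes that follow are expressed in bytes, fifo_depth locations of fifo_width bytes each. A quick standalone sketch of the resulting values, using illustrative numbers for the cdns,fifo-depth and cdns,fifo-width device-tree properties:

```c
#include <stdio.h>

int main(void)
{
	unsigned int fifo_depth = 128;	/* SRAM locations, illustrative */
	unsigned int fifo_width = 4;	/* bytes per location, illustrative */

	/* 1:1 split, in locations */
	printf("SRAMPARTITION  = %u\n", fifo_depth / 2);
	/* read watermark, programmed as 1/2 of the FIFO in bytes */
	printf("read watermark = %u bytes\n", fifo_depth * fifo_width / 2);
	/* write watermark, programmed as 1/8 of the FIFO in bytes */
	printf("write watermark = %u bytes\n", fifo_depth * fifo_width / 8);
	return 0;
}
```

The exact interrupt semantics of the watermarks are controller-defined; what matters to the driver is that the read mark fires often enough for cqspi_get_rd_sram_level() to keep draining data through the AHB window.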
*/ + writel(cqspi->trigger_address, + cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); + + /* Program read watermark -- 1/2 of the FIFO. */ + writel(cqspi->fifo_depth * cqspi->fifo_width / 2, + cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); + /* Program write watermark -- 1/8 of the FIFO. */ + writel(cqspi->fifo_depth * cqspi->fifo_width / 8, + cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); + + cqspi_controller_enable(cqspi, 1); +} + +static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) +{ + struct platform_device *pdev = cqspi->pdev; + struct device *dev = &pdev->dev; + struct cqspi_flash_pdata *f_pdata; + struct spi_nor *nor; + struct mtd_info *mtd; + unsigned int cs; + int i, ret; + + /* Get flash device data */ + for_each_available_child_of_node(dev->of_node, np) { + if (of_property_read_u32(np, "reg", &cs)) { + dev_err(dev, "Couldn't determine chip select.\n"); + goto err; + } + + if (cs > CQSPI_MAX_CHIPSELECT) { + dev_err(dev, "Chip select %d out of range.\n", cs); + goto err; + } + + f_pdata = &cqspi->f_pdata[cs]; + f_pdata->cqspi = cqspi; + f_pdata->cs = cs; + + ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); + if (ret) + goto err; + + nor = &f_pdata->nor; + mtd = &nor->mtd; + + mtd->priv = nor; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + nor->priv = f_pdata; + + nor->read_reg = cqspi_read_reg; + nor->write_reg = cqspi_write_reg; + nor->read = cqspi_read; + nor->write = cqspi_write; + nor->erase = cqspi_erase; + nor->prepare = cqspi_prep; + nor->unprepare = cqspi_unprep; + + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", + dev_name(dev), cs); + if (!mtd->name) { + ret = -ENOMEM; + goto err; + } + + ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + if (ret) + goto err; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + goto err; + + f_pdata->registered = true; + } + + return 0; + +err: + for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) + if (cqspi->f_pdata[i].registered) + mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); + return ret; +} + +static int cqspi_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct cqspi_st *cqspi; + struct resource *res; + struct resource *res_ahb; + int ret; + int irq; + + cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); + if (!cqspi) + return -ENOMEM; + + mutex_init(&cqspi->bus_mutex); + cqspi->pdev = pdev; + platform_set_drvdata(pdev, cqspi); + + /* Obtain configuration from OF. */ + ret = cqspi_of_get_pdata(pdev); + if (ret) { + dev_err(dev, "Cannot get mandatory OF data.\n"); + return -ENODEV; + } + + /* Obtain QSPI clock. */ + cqspi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(cqspi->clk)) { + dev_err(dev, "Cannot claim QSPI clock.\n"); + return PTR_ERR(cqspi->clk); + } + + /* Obtain and remap controller address. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cqspi->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(cqspi->iobase)) { + dev_err(dev, "Cannot remap controller address.\n"); + return PTR_ERR(cqspi->iobase); + } + + /* Obtain and remap AHB address. */ + res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb); + if (IS_ERR(cqspi->ahb_base)) { + dev_err(dev, "Cannot remap AHB address.\n"); + return PTR_ERR(cqspi->ahb_base); + } + + init_completion(&cqspi->transfer_complete); + + /* Obtain IRQ line. 
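cqspi_setup_flash() above registers one MTD device per available child node and, on the first failure, unwinds every chip-select it had already registered, so the driver never leaves a half-populated set of devices behind. The same all-or-nothing pattern in a self-contained sketch; register_cs() and the failing chip-select are hypothetical:

```c
#include <stdio.h>

#define MAX_CS 16	/* stands in for the driver's CQSPI_MAX_CHIPSELECT */

static int registered[MAX_CS];

static int register_cs(unsigned int cs)
{
	if (cs == 2)		/* simulated probe failure on CS2 */
		return -1;
	registered[cs] = 1;
	printf("registered CS%u\n", cs);
	return 0;
}

int main(void)
{
	int ret = 0;

	for (unsigned int cs = 0; cs < 4 && !ret; cs++)
		ret = register_cs(cs);

	if (ret)		/* unwind everything that made it */
		for (unsigned int cs = 0; cs < MAX_CS; cs++)
			if (registered[cs]) {
				registered[cs] = 0;
				printf("unregistered CS%u\n", cs);
			}
	return ret ? 1 : 0;
}
```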
*/ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot obtain IRQ.\n"); + return -ENXIO; + } + + ret = clk_prepare_enable(cqspi->clk); + if (ret) { + dev_err(dev, "Cannot enable QSPI clock.\n"); + return ret; + } + + cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); + + ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, + pdev->name, cqspi); + if (ret) { + dev_err(dev, "Cannot request IRQ.\n"); + goto probe_irq_failed; + } + + cqspi_wait_idle(cqspi); + cqspi_controller_init(cqspi); + cqspi->current_cs = -1; + cqspi->sclk = 0; + + ret = cqspi_setup_flash(cqspi, np); + if (ret) { + dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret); + goto probe_setup_failed; + } + + return ret; +probe_irq_failed: + cqspi_controller_enable(cqspi, 0); +probe_setup_failed: + clk_disable_unprepare(cqspi->clk); + return ret; +} + +static int cqspi_remove(struct platform_device *pdev) +{ + struct cqspi_st *cqspi = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) + if (cqspi->f_pdata[i].registered) + mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); + + cqspi_controller_enable(cqspi, 0); + + clk_disable_unprepare(cqspi->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cqspi_suspend(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + cqspi_controller_enable(cqspi, 0); + return 0; +} + +static int cqspi_resume(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + cqspi_controller_enable(cqspi, 1); + return 0; +} + +static const struct dev_pm_ops cqspi__dev_pm_ops = { + .suspend = cqspi_suspend, + .resume = cqspi_resume, +}; + +#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops) +#else +#define CQSPI_DEV_PM_OPS NULL +#endif + +static struct of_device_id const cqspi_dt_ids[] = { + {.compatible = "cdns,qspi-nor",}, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, cqspi_dt_ids); + +static struct platform_driver cqspi_platform_driver = { + .probe = cqspi_probe, + .remove = cqspi_remove, + .driver = { + .name = CQSPI_NAME, + .pm = CQSPI_DEV_PM_OPS, + .of_match_table = cqspi_dt_ids, + }, +}; + +module_platform_driver(cqspi_platform_driver); + +MODULE_DESCRIPTION("Cadence QSPI Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" CQSPI_NAME); +MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); +MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 9ab2b51d54b8..5c82e4ef1904 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -618,9 +618,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q) qspi_writel(q, reg, q->iobase + QUADSPI_MCR); } -static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, +static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, u8 opcode, unsigned int to, u32 *txbuf, - unsigned count, size_t *retlen) + unsigned count) { int ret, i, j; u32 tmp; @@ -647,8 +647,8 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, /* Trigger it */ ret = fsl_qspi_runcmd(q, opcode, to, count); - if (ret == 0 && retlen) - *retlen += count; + if (ret == 0) + return count; return ret; } @@ -859,7 +859,9 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) } else if (len > 0) { ret = fsl_qspi_nor_write(q, nor, opcode, 0, - (u32 *)buf, len, NULL); + (u32 *)buf, len); + if (ret > 0) + return 0; } else { dev_err(q->dev, "invalid cmd %d\n", opcode); ret = 
-EINVAL; @@ -868,20 +870,20 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return ret; } -static void fsl_qspi_write(struct spi_nor *nor, loff_t to, - size_t len, size_t *retlen, const u_char *buf) +static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *buf) { struct fsl_qspi *q = nor->priv; - - fsl_qspi_nor_write(q, nor, nor->program_opcode, to, - (u32 *)buf, len, retlen); + ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, + (u32 *)buf, len); /* invalid the data in the AHB buffer. */ fsl_qspi_invalid(q); + return ret; } -static int fsl_qspi_read(struct spi_nor *nor, loff_t from, - size_t len, size_t *retlen, u_char *buf) +static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, + size_t len, u_char *buf) { struct fsl_qspi *q = nor->priv; u8 cmd = nor->read_opcode; @@ -923,8 +925,7 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from, memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, len); - *retlen += len; - return 0; + return len; } static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c new file mode 100644 index 000000000000..20378b0d55e9 --- /dev/null +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -0,0 +1,489 @@ +/* + * HiSilicon SPI Nor Flash Controller Driver + * + * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
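The HiSilicon driver in this new file moves all flash data through a single coherent bounce buffer of HIFMC_DMA_MAX_LEN (4 KiB), so its read and write paths chunk every transfer to that size. A standalone sketch of the chunking, with plain memcpy() standing in for the DMA setup and register kick:

```c
#include <stdio.h>
#include <string.h>

#define DMA_MAX_LEN 4096

static unsigned char bounce[DMA_MAX_LEN];

static void chunked_read(unsigned char *dst, const unsigned char *flash,
			 size_t len)
{
	for (size_t off = 0; off < len; off += DMA_MAX_LEN) {
		size_t trans = len - off < DMA_MAX_LEN ? len - off
						       : DMA_MAX_LEN;

		/* in the driver: hisi_spi_nor_dma_transfer(..., FMC_OP_READ) */
		memcpy(bounce, flash + off, trans);
		memcpy(dst + off, bounce, trans);
	}
}

int main(void)
{
	static unsigned char flash[10000], out[10000];

	memset(flash, 0xA5, sizeof(flash));
	chunked_read(out, flash, sizeof(out));
	printf("full chunks: %zu, tail: %zu bytes\n",
	       sizeof(out) / DMA_MAX_LEN, sizeof(out) % DMA_MAX_LEN);
	return 0;
}
```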
+ */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Hardware register offsets and field definitions */ +#define FMC_CFG 0x00 +#define FMC_CFG_OP_MODE_MASK BIT_MASK(0) +#define FMC_CFG_OP_MODE_BOOT 0 +#define FMC_CFG_OP_MODE_NORMAL 1 +#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1) +#define FMC_CFG_FLASH_SEL_MASK 0x6 +#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5) +#define FMC_ECC_TYPE_MASK GENMASK(7, 5) +#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) +#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) +#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) +#define FMC_GLOBAL_CFG 0x04 +#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) +#define FMC_SPI_TIMING_CFG 0x08 +#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8) +#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4) +#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf) +#define CS_HOLD_TIME 0x6 +#define CS_SETUP_TIME 0x6 +#define CS_DESELECT_TIME 0xf +#define FMC_INT 0x18 +#define FMC_INT_OP_DONE BIT(0) +#define FMC_INT_CLR 0x20 +#define FMC_CMD 0x24 +#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff) +#define FMC_ADDRL 0x2c +#define FMC_OP_CFG 0x30 +#define OP_CFG_FM_CS(cs) ((cs) << 11) +#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7) +#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4) +#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf) +#define FMC_DATA_NUM 0x38 +#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0)) +#define FMC_OP 0x3c +#define FMC_OP_DUMMY_EN BIT(8) +#define FMC_OP_CMD1_EN BIT(7) +#define FMC_OP_ADDR_EN BIT(6) +#define FMC_OP_WRITE_DATA_EN BIT(5) +#define FMC_OP_READ_DATA_EN BIT(2) +#define FMC_OP_READ_STATUS_EN BIT(1) +#define FMC_OP_REG_OP_START BIT(0) +#define FMC_DMA_LEN 0x40 +#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0)) +#define FMC_DMA_SADDR_D0 0x4c +#define HIFMC_DMA_MAX_LEN (4096) +#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1) +#define FMC_OP_DMA 0x68 +#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16) +#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8) +#define OP_CTRL_RW_OP(op) ((op) << 1) +#define OP_CTRL_DMA_OP_READY BIT(0) +#define FMC_OP_READ 0x0 +#define FMC_OP_WRITE 0x1 +#define FMC_WAIT_TIMEOUT 1000000 + +enum hifmc_iftype { + IF_TYPE_STD, + IF_TYPE_DUAL, + IF_TYPE_DIO, + IF_TYPE_QUAD, + IF_TYPE_QIO, +}; + +struct hifmc_priv { + u32 chipselect; + u32 clkrate; + struct hifmc_host *host; +}; + +#define HIFMC_MAX_CHIP_NUM 2 +struct hifmc_host { + struct device *dev; + struct mutex lock; + + void __iomem *regbase; + void __iomem *iobase; + struct clk *clk; + void *buffer; + dma_addr_t dma_buffer; + + struct spi_nor *nor[HIFMC_MAX_CHIP_NUM]; + u32 num_chip; +}; + +static inline int wait_op_finish(struct hifmc_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + FMC_INT, reg, + (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); +} + +static int get_if_type(enum read_mode flash_read) +{ + enum hifmc_iftype if_type; + + switch (flash_read) { + case SPI_NOR_DUAL: + if_type = IF_TYPE_DUAL; + break; + case SPI_NOR_QUAD: + if_type = IF_TYPE_QUAD; + break; + case SPI_NOR_NORMAL: + case SPI_NOR_FAST: + default: + if_type = IF_TYPE_STD; + break; + } + + return if_type; +} + +static void hisi_spi_nor_init(struct hifmc_host *host) +{ + u32 reg; + + reg = TIMING_CFG_TCSH(CS_HOLD_TIME) + | TIMING_CFG_TCSS(CS_SETUP_TIME) + | TIMING_CFG_TSHSL(CS_DESELECT_TIME); + writel(reg, host->regbase + 
FMC_SPI_TIMING_CFG); +} + +static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + mutex_lock(&host->lock); + + ret = clk_set_rate(host->clk, priv->clkrate); + if (ret) + goto out; + + ret = clk_prepare_enable(host->clk); + if (ret) + goto out; + + return 0; + +out: + mutex_unlock(&host->lock); + return ret; +} + +static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + clk_disable_unprepare(host->clk); + mutex_unlock(&host->lock); +} + +static int hisi_spi_nor_op_reg(struct spi_nor *nor, + u8 opcode, int len, u8 optype) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u32 reg; + + reg = FMC_CMD_CMD1(opcode); + writel(reg, host->regbase + FMC_CMD); + + reg = FMC_DATA_NUM_CNT(len); + writel(reg, host->regbase + FMC_DATA_NUM); + + reg = OP_CFG_FM_CS(priv->chipselect); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; + writel(reg, host->regbase + FMC_OP); + + return wait_op_finish(host); +} + +static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + int len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); + if (ret) + return ret; + + memcpy_fromio(buf, host->iobase, len); + return 0; +} + +static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + if (len) + memcpy_toio(host->iobase, buf, len); + + return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); +} + +static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, + dma_addr_t dma_buf, size_t len, u8 op_type) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u8 if_type = 0; + u32 reg; + + reg = readl(host->regbase + FMC_CFG); + reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); + reg |= FMC_CFG_OP_MODE_NORMAL; + reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES + : SPI_NOR_ADDR_MODE_3BYTES; + writel(reg, host->regbase + FMC_CFG); + + writel(start_off, host->regbase + FMC_ADDRL); + writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); + writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); + + reg = OP_CFG_FM_CS(priv->chipselect); + if_type = get_if_type(nor->flash_read); + reg |= OP_CFG_MEM_IF_TYPE(if_type); + if (op_type == FMC_OP_READ) + reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY; + reg |= (op_type == FMC_OP_READ) + ? 
OP_CTRL_RD_OPCODE(nor->read_opcode) + : OP_CTRL_WR_OPCODE(nor->program_opcode); + writel(reg, host->regbase + FMC_OP_DMA); + + return wait_op_finish(host); +} + +static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + ret = hisi_spi_nor_dma_transfer(nor, + from + offset, host->dma_buffer, trans, FMC_OP_READ); + if (ret) { + dev_warn(nor->dev, "DMA read timeout\n"); + return ret; + } + memcpy(read_buf + offset, host->buffer, trans); + } + + return len; +} + +static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + memcpy(host->buffer, write_buf + offset, trans); + ret = hisi_spi_nor_dma_transfer(nor, + to + offset, host->dma_buffer, trans, FMC_OP_WRITE); + if (ret) { + dev_warn(nor->dev, "DMA write timeout\n"); + return ret; + } + } + + return len; +} + +/** + * Get spi flash device information and register it as a mtd device. + */ +static int hisi_spi_nor_register(struct device_node *np, + struct hifmc_host *host) +{ + struct device *dev = host->dev; + struct spi_nor *nor; + struct hifmc_priv *priv; + struct mtd_info *mtd; + int ret; + + nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "reg", &priv->chipselect); + if (ret) { + dev_err(dev, "There's no reg property for %s\n", + np->full_name); + return ret; + } + + ret = of_property_read_u32(np, "spi-max-frequency", + &priv->clkrate); + if (ret) { + dev_err(dev, "There's no spi-max-frequency property for %s\n", + np->full_name); + return ret; + } + priv->host = host; + nor->priv = priv; + + nor->prepare = hisi_spi_nor_prep; + nor->unprepare = hisi_spi_nor_unprep; + nor->read_reg = hisi_spi_nor_read_reg; + nor->write_reg = hisi_spi_nor_write_reg; + nor->read = hisi_spi_nor_read; + nor->write = hisi_spi_nor_write; + nor->erase = NULL; + ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + if (ret) + return ret; + + mtd = &nor->mtd; + mtd->name = np->name; + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + return ret; + + host->nor[host->num_chip] = nor; + host->num_chip++; + return 0; +} + +static void hisi_spi_nor_unregister_all(struct hifmc_host *host) +{ + int i; + + for (i = 0; i < host->num_chip; i++) + mtd_device_unregister(&host->nor[i]->mtd); +} + +static int hisi_spi_nor_register_all(struct hifmc_host *host) +{ + struct device *dev = host->dev; + struct device_node *np; + int ret; + + for_each_available_child_of_node(dev->of_node, np) { + ret = hisi_spi_nor_register(np, host); + if (ret) + goto fail; + + if (host->num_chip == HIFMC_MAX_CHIP_NUM) { + dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); + break; + } + } + + return 0; + +fail: + hisi_spi_nor_unregister_all(host); + return ret; +} + +static int hisi_spi_nor_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct 
hifmc_host *host; + int ret; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + platform_set_drvdata(pdev, host); + host->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); + host->regbase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->regbase)) + return PTR_ERR(host->regbase); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); + host->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->iobase)) + return PTR_ERR(host->iobase); + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) + return PTR_ERR(host->clk); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(dev, "Unable to set dma mask\n"); + return ret; + } + + host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN, + &host->dma_buffer, GFP_KERNEL); + if (!host->buffer) + return -ENOMEM; + + mutex_init(&host->lock); + clk_prepare_enable(host->clk); + hisi_spi_nor_init(host); + ret = hisi_spi_nor_register_all(host); + if (ret) + mutex_destroy(&host->lock); + + clk_disable_unprepare(host->clk); + return ret; +} + +static int hisi_spi_nor_remove(struct platform_device *pdev) +{ + struct hifmc_host *host = platform_get_drvdata(pdev); + + hisi_spi_nor_unregister_all(host); + mutex_destroy(&host->lock); + clk_disable_unprepare(host->clk); + return 0; +} + +static const struct of_device_id hisi_spi_nor_dt_ids[] = { + { .compatible = "hisilicon,fmc-spi-nor"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids); + +static struct platform_driver hisi_spi_nor_driver = { + .driver = { + .name = "hisi-sfc", + .of_match_table = hisi_spi_nor_dt_ids, + }, + .probe = hisi_spi_nor_probe, + .remove = hisi_spi_nor_remove, +}; +module_platform_driver(hisi_spi_nor_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver"); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 8bed1a4cb79c..e661877c23de 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -21,7 +21,6 @@ #include <linux/ioport.h> #include <linux/math64.h> #include <linux/module.h> -#include <linux/mtd/mtd.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> @@ -243,8 +242,8 @@ static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr) writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG); } -static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, - size_t *retlen, u_char *buffer) +static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, + u_char *buffer) { int i, ret; int addr = (int)from; @@ -255,13 +254,13 @@ static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, mt8173_nor_set_read_mode(mt8173_nor); mt8173_nor_set_addr(mt8173_nor, addr); - for (i = 0; i < length; i++, (*retlen)++) { + for (i = 0; i < length; i++) { ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD); if (ret < 0) return ret; buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG); } - return 0; + return length; } static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor, @@ -297,36 +296,44 @@ static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr, return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD); } -static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len, - size_t *retlen, const u_char *buf) +static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len, 
+ const u_char *buf) { int ret; struct mt8173_nor *mt8173_nor = nor->priv; + size_t i; ret = mt8173_nor_write_buffer_enable(mt8173_nor); - if (ret < 0) + if (ret < 0) { dev_warn(mt8173_nor->dev, "write buffer enable failed!\n"); + return ret; + } - while (len >= SFLASH_WRBUF_SIZE) { + for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) { ret = mt8173_nor_write_buffer(mt8173_nor, to, buf); - if (ret < 0) + if (ret < 0) { dev_err(mt8173_nor->dev, "write buffer failed!\n"); - len -= SFLASH_WRBUF_SIZE; + return ret; + } to += SFLASH_WRBUF_SIZE; buf += SFLASH_WRBUF_SIZE; - (*retlen) += SFLASH_WRBUF_SIZE; } ret = mt8173_nor_write_buffer_disable(mt8173_nor); - if (ret < 0) + if (ret < 0) { dev_warn(mt8173_nor->dev, "write buffer disable failed!\n"); + return ret; + } - if (len) { - ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len, - (u8 *)buf); - if (ret < 0) + if (i < len) { + ret = mt8173_nor_write_single_byte(mt8173_nor, to, + (int)(len - i), (u8 *)buf); + if (ret < 0) { dev_err(mt8173_nor->dev, "write single byte failed!\n"); - (*retlen) += len; + return ret; + } } + + return len; } static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c index ae428cb0e04b..73a14f40928b 100644 --- a/drivers/mtd/spi-nor/nxp-spifi.c +++ b/drivers/mtd/spi-nor/nxp-spifi.c @@ -172,8 +172,8 @@ static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) return nxp_spifi_wait_for_cmd(spifi); } -static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, - size_t *retlen, u_char *buf) +static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) { struct nxp_spifi *spifi = nor->priv; int ret; @@ -183,24 +183,23 @@ static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, return ret; memcpy_fromio(buf, spifi->flash_base + from, len); - *retlen += len; - return 0; + return len; } -static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, - size_t *retlen, const u_char *buf) +static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) { struct nxp_spifi *spifi = nor->priv; u32 cmd; int ret; + size_t i; ret = nxp_spifi_set_memory_mode_off(spifi); if (ret) - return; + return ret; writel(to, spifi->io_base + SPIFI_ADDR); - *retlen += len; cmd = SPIFI_CMD_DOUT | SPIFI_CMD_DATALEN(len) | @@ -209,10 +208,14 @@ static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); writel(cmd, spifi->io_base + SPIFI_CMD); - while (len--) - writeb(*buf++, spifi->io_base + SPIFI_DATA); + for (i = 0; i < len; i++) + writeb(buf[i], spifi->io_base + SPIFI_DATA); + + ret = nxp_spifi_wait_for_cmd(spifi); + if (ret) + return ret; - nxp_spifi_wait_for_cmd(spifi); + return len; } static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index c52e45594bfd..d0fc165d7d66 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -661,7 +661,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) status_new = (status_old & ~mask & ~SR_TB) | val; /* Don't protect status register if we're fully unlocked */ - if (lock_len == mtd->size) + if (lock_len == 0) status_new &= ~SR_SRWD; if (!use_top) @@ -830,10 +830,26 @@ static const struct flash_info spi_nor_ids[] = { { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) 
}, /* GigaDevice */ - { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, - { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, - { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, + { + "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, /* Intel/Numonyx -- xxxs33b */ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, @@ -871,6 +887,7 @@ static const struct flash_info spi_nor_ids[] = { { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, /* PMC */ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, @@ -1031,8 +1048,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, if (ret) return ret; - ret = nor->read(nor, from, len, retlen, buf); + while (len) { + ret = nor->read(nor, from, len, buf); + if (ret == 0) { + /* We shouldn't see 0-length reads */ + ret = -EIO; + goto read_err; + } + if (ret < 0) + goto read_err; + + WARN_ON(ret > len); + *retlen += ret; + buf += ret; + from += ret; + len -= ret; + } + ret = 0; +read_err: spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); return ret; } @@ -1060,10 +1094,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, nor->program_opcode = SPINOR_OP_BP; /* write one byte. */ - nor->write(nor, to, 1, retlen, buf); + ret = nor->write(nor, to, 1, buf); + if (ret < 0) + goto sst_write_err; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", + (int)ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto time_out; + goto sst_write_err; } to += actual; @@ -1072,10 +1110,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, nor->program_opcode = SPINOR_OP_AAI_WP; /* write two bytes. */ - nor->write(nor, to, 2, retlen, buf + actual); + ret = nor->write(nor, to, 2, buf + actual); + if (ret < 0) + goto sst_write_err; + WARN(ret != 2, "While writing 2 bytes written %i bytes\n", + (int)ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto time_out; + goto sst_write_err; to += 2; nor->sst_write_second = true; } @@ -1084,21 +1126,26 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, write_disable(nor); ret = spi_nor_wait_till_ready(nor); if (ret) - goto time_out; + goto sst_write_err; /* Write out trailing byte if it exists. 
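The sst_write() rework above keeps the SST-specific programming sequence (one SPINOR_OP_BP byte when the start address is odd, SPINOR_OP_AAI_WP two-byte cycles for the bulk, then one trailing BP byte, handled just below) but now accumulates `actual` so that *retlen is updated exactly once, including on the error path. A quick sketch of how a transfer splits, with an illustrative odd start address:

```c
#include <stdio.h>

int main(void)
{
	unsigned long to = 0x1001;	/* illustrative odd start address */
	size_t len = 10;

	size_t lead = to % 2;		 /* odd start: one BP byte first */
	size_t pairs = (len - lead) / 2; /* AAI word-program cycles */
	size_t trail = (len - lead) % 2; /* leftover: one trailing BP byte */

	printf("lead=%zu pairs=%zu trail=%zu (total %zu)\n",
	       lead, pairs, trail, lead + 2 * pairs + trail);
	return 0;
}
```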
*/ if (actual != len) { write_enable(nor); nor->program_opcode = SPINOR_OP_BP; - nor->write(nor, to, 1, retlen, buf + actual); - + ret = nor->write(nor, to, 1, buf + actual); + if (ret < 0) + goto sst_write_err; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", + (int)ret); ret = spi_nor_wait_till_ready(nor); if (ret) - goto time_out; + goto sst_write_err; write_disable(nor); + actual += 1; } -time_out: +sst_write_err: + *retlen += actual; spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); return ret; } @@ -1112,8 +1159,8 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct spi_nor *nor = mtd_to_spi_nor(mtd); - u32 page_offset, page_size, i; - int ret; + size_t page_offset, page_remain, i; + ssize_t ret; dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); @@ -1121,35 +1168,37 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, if (ret) return ret; - write_enable(nor); - - page_offset = to & (nor->page_size - 1); + for (i = 0; i < len; ) { + ssize_t written; - /* do all the bytes fit onto one page? */ - if (page_offset + len <= nor->page_size) { - nor->write(nor, to, len, retlen, buf); - } else { + page_offset = (to + i) & (nor->page_size - 1); + WARN_ONCE(page_offset, + "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.", + page_offset); /* the size of data remaining on the first page */ - page_size = nor->page_size - page_offset; - nor->write(nor, to, page_size, retlen, buf); - - /* write everything in nor->page_size chunks */ - for (i = page_size; i < len; i += page_size) { - page_size = len - i; - if (page_size > nor->page_size) - page_size = nor->page_size; + page_remain = min_t(size_t, + nor->page_size - page_offset, len - i); - ret = spi_nor_wait_till_ready(nor); - if (ret) - goto write_err; - - write_enable(nor); + write_enable(nor); + ret = nor->write(nor, to + i, page_remain, buf + i); + if (ret < 0) + goto write_err; + written = ret; - nor->write(nor, to + i, page_size, retlen, buf + i); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto write_err; + *retlen += written; + i += written; + if (written != page_remain) { + dev_err(nor->dev, + "While writing %zu bytes written %zd bytes\n", + page_remain, written); + ret = -EIO; + goto write_err; } } - ret = spi_nor_wait_till_ready(nor); write_err: spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); return ret; diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index daf82ba7aba0..41b13d1cdcc4 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -380,8 +380,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, block_address); - if (block_address >= ssfdc->map_len) - BUG(); + BUG_ON(block_address >= ssfdc->map_len); block_address = ssfdc->logic_block_map[block_address]; diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c index 09a4ccac53a2..f26dec896afa 100644 --- a/drivers/mtd/tests/nandbiterrs.c +++ b/drivers/mtd/tests/nandbiterrs.c @@ -290,7 +290,7 @@ static int overwrite_test(void) while (opno < max_overwrite) { - err = rewrite_page(0); + err = write_page(0); if (err) break; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 4721948a92f6..3a529fbe539f 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -185,8 +185,8 @@ static ssize_t print_frame(char *buf, size_t size, char *frm, /* Fast forward. 
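The spi_nor_write() rework above replaces the old two-branch page handling with a single loop: each iteration programs at most up to the next page boundary and advances by however many bytes the controller reports written. The chunk arithmetic in a standalone sketch:

```c
#include <stdio.h>

int main(void)
{
	size_t page_size = 256;
	unsigned long to = 0x12345;	/* deliberately unaligned */
	size_t len = 700, i = 0;

	while (i < len) {
		size_t page_offset = (to + i) & (page_size - 1);
		size_t page_remain = page_size - page_offset;

		if (page_remain > len - i)
			page_remain = len - i;	/* min_t() in the driver */

		printf("program %3zu bytes at 0x%lx\n", page_remain, to + i);
		i += page_remain;	/* driver advances by bytes written */
	}
	return 0;
}
```

Because the loop trusts the returned byte count, a short write by the controller now surfaces as -EIO instead of silently corrupting the offset bookkeeping.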
*/ i = count - cut; len += snprintf((buf + len), (size - len), - "--- %u bytes skipped ---\n", - (int)(count - (cut * 2))); + "--- %zu bytes skipped ---\n", + count - (cut * 2)); } if ((!(i % 10)) && i) { diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 21f1068b0804..77ffc4312808 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -233,8 +233,7 @@ static int b53_mmap_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - if (pdata) - dev->pdata = pdata; + dev->pdata = pdata; platform_set_drvdata(pdev, dev); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index cd1d630ae3a9..b2b838724a9b 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1622,7 +1622,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); - goto out_unmap; + goto out_mdio; } ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, @@ -1679,6 +1679,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) out_free_irq0: free_irq(priv->irq0, priv); +out_mdio: + bcm_sf2_mdio_unregister(priv); out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { @@ -1686,7 +1688,6 @@ out_unmap: iounmap(*base); base++; } - bcm_sf2_mdio_unregister(priv); return ret; } diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 5698f5354c0b..39ca9350d1b2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -910,7 +910,8 @@ static int ax_probe(struct platform_device *pdev) iounmap(ax->map2); exit_mem2: - release_mem_region(mem2->start, mem2_size); + if (mem2) + release_mem_region(mem2->start, mem2_size); exit_mem1: iounmap(ei_local->mem); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 49025e99fb0e..bda31f308cc2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev) phydev = of_phy_connect(dev, phynode, &altera_tse_adjust_link, 0, priv->phy_iface); } + of_node_put(phynode); if (!phydev) { netdev_err(dev, "Could not find the PHY\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ebf9224b2d31..a9b2709567ec 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) goto err_rx_ring; for (i = 0, channel = channel_mem; i < count; i++, channel++) { - snprintf(channel->name, sizeof(channel->name), "channel-%d", i); + snprintf(channel->name, sizeof(channel->name), "channel-%u", i); channel->pdata = pdata; channel->queue_index = i; channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 7714b7d4026a..37a0f463b8de 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -772,6 +772,7 @@ int xgene_enet_phy_connect(struct net_device *ndev) phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, 0, pdata->phy_mode); + of_node_put(np); if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 586bedac457d..4bff0f3040df 100644 --- 
a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -749,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = of_address_to_resource(dev->of_node, 0, &res_regs); if (err) { dev_err(dev, "failed to retrieve registers base from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } /* Get IRQ from device tree */ irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { dev_err(dev, "failed to retrieve <irq> value from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } ndev->netdev_ops = &arc_emac_netdev_ops; @@ -778,7 +780,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = clk_prepare_enable(priv->clk); if (err) { dev_err(dev, "failed to enable clock\n"); - return err; + goto out_put_node; } clock_frequency = clk_get_rate(priv->clk); @@ -787,7 +789,8 @@ int arc_emac_probe(struct net_device *ndev, int interface) if (of_property_read_u32(dev->of_node, "clock-frequency", &clock_frequency)) { dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n"); - return -EINVAL; + err = -EINVAL; + goto out_put_node; } } @@ -867,6 +870,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) goto out_netif_api; } + of_node_put(phy_node); return 0; out_netif_api: @@ -877,6 +881,9 @@ out_mdio: out_clken: if (priv->clk) clk_disable_unprepare(priv->clk); +out_put_node: + of_node_put(phy_node); + return err; } EXPORT_SYMBOL_GPL(arc_emac_probe); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e708e360a9e3..6453148d066a 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct alx_priv *alx; struct alx_hw *hw; bool phy_configured; - int bars, err; + int err; err = pci_enable_device_mem(pdev); if (err) @@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_request_selected_regions(pdev, bars, alx_drv_name); + err = pci_request_mem_regions(pdev, alx_drv_name); if (err) { dev_err(&pdev->dev, - "pci_request_selected_regions failed(bars:%d)\n", bars); + "pci_request_mem_regions failed\n"); goto out_pci_disable; } @@ -1401,7 +1400,7 @@ out_unmap: out_free_netdev: free_netdev(netdev); out_pci_release: - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); out_pci_disable: pci_disable_device(pdev); return err; @@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev) unregister_netdev(alx->dev); iounmap(hw->hw_addr); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 0d4ea92a0d37..b047fd607b83 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1504,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev) err_free_dma: nb8800_dma_free(dev); err_free_bus: + of_node_put(priv->phy_node); mdiobus_unregister(bus); err_disable_clk: clk_disable_unprepare(priv->clk); @@ -1519,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev) struct nb8800_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); + of_node_put(priv->phy_node); mdiobus_unregister(priv->mii_bus); diff 
--git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 87c6b5bdd616..6c8bc5fadac7 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev) } else { /* run platform code to initialize PHY device */ - if (pd->mii_config && + if (pd && pd->mii_config && pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii)) { dev_err(&pdev->dev, "unable to configure mdio bus\n"); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 8fc246ea1fb8..05c1c1dd7751 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bnad_debug_info *regrd_debug = file->private_data; struct bnad *bnad = (struct bnad *)regrd_debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, len, rc, i; + int rc, i; + u32 addr, len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, struct bnad_debug_info *debug = file->private_data; struct bnad *bnad = (struct bnad *)debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, val, rc; + int rc; + u32 addr, val; void __iomem *reg_addr; unsigned long flags; void *kern_buf; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index e8bc15bcde70..4ab404f45b21 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1513,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) return 0; err: + of_node_put(p->phy_np); free_netdev(netdev); return result; } @@ -1520,8 +1521,10 @@ err: static int octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); + struct octeon_mgmt *p = netdev_priv(netdev); unregister_netdev(netdev); + of_node_put(p->phy_np); free_netdev(netdev); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index bad253beb8c8..ad3552df0545 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb); /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1bb57d3fbbe8..c8fd4f8fe1fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f15560a06718..48f82ab6c25b 100644 --- 
a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_rq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-rx-%d", netdev->name, i); + "%.11s-rx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[i]; } @@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_wq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-tx-%d", netdev->name, i); + "%.11s-tx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[wq]; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index cbe84972ff7a..f0e9e2ef62a0 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev) if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { - printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); + printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq); if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { printk("\n Cannot get IRQ- reconfigure your hardware.\n"); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 92fd5c0bf4df..c865135f3cb9 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -444,6 +444,8 @@ struct bufdesc_ex { #define FEC_QUIRK_HAS_RACC (1 << 12) /* Controller supports interrupt coalesc */ #define FEC_QUIRK_HAS_COALESCE (1 << 13) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 14) struct bufdesc_prop { int qid; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4040003a74f9..01f7e811739b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -60,6 +60,7 @@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <soc/imx/cpuidle.h> #include <asm/cacheflush.h> @@ -2825,6 +2826,9 @@ fec_enet_open(struct net_device *ndev) if (ret) goto err_enet_mii_probe; + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@ -2860,6 +2864,9 @@ fec_enet_close(struct net_device *ndev) phy_disconnect(ndev->phydev); + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@ -3304,6 +3311,11 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 3fb87e233c49..5c8afe1a5ccb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -795,6 +795,7 @@ 
static int hns_mac_get_info(struct hns_mac_cb *mac_cb) dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", mac_cb->mac_id, np->name); } + of_node_put(np); return 0; } @@ -812,10 +813,12 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", mac_cb->mac_id, np->name); } + of_node_put(np); - syscon = syscon_node_to_regmap( - of_parse_phandle(to_of_node(mac_cb->fw_port), - "serdes-syscon", 0)); + np = of_parse_phandle(to_of_node(mac_cb->fw_port), + "serdes-syscon", 0); + syscon = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR_OR_NULL(syscon)) { dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 2ef4277d00b3..afb5daa3721d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -51,7 +51,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) const char *mode_str; struct regmap *syscon; struct resource *res; - struct device_node *np = dsaf_dev->dev->of_node; + struct device_node *np = dsaf_dev->dev->of_node, *np_temp; struct platform_device *pdev = to_platform_device(dsaf_dev->dev); if (dev_of_node(dsaf_dev->dev)) { @@ -102,8 +102,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; if (dev_of_node(dsaf_dev->dev)) { - syscon = syscon_node_to_regmap( - of_parse_phandle(np, "subctrl-syscon", 0)); + np_temp = of_parse_phandle(np, "subctrl-syscon", 0); + syscon = syscon_node_to_regmap(np_temp); + of_node_put(np_temp); if (IS_ERR_OR_NULL(syscon)) { res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 41f32c0b341e..02f443958f31 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7330,8 +7330,7 @@ err_flashmap: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -7398,8 +7397,7 @@ static void e1000_remove(struct pci_dev *pdev) if ((adapter->hw.flash_address) && (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b8245c734c96..774a5654bf42 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1963,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_dma; } - err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, - IORESOURCE_MEM), - fm10k_driver_name); + err = pci_request_mem_regions(pdev, fm10k_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed: %d\n", err); @@ -2070,8 +2067,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_netdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -2119,8 +2115,7 @@ static void fm10k_remove(struct pci_dev *pdev) free_netdev(netdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, 
IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 339d99be4702..81c99e1be708 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10710,8 +10710,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@ -11208,8 +11207,7 @@ err_ioremap: kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -11320,8 +11318,7 @@ static void i40e_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9bcba42abb91..942a89fb0090 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2324,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); + err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg; @@ -2750,8 +2748,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -2916,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev) pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); kfree(adapter->shadow_vfta); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7871f538f0ad..5418c69a7463 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9353,8 +9353,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_using_dac = 0; } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@ -9740,8 +9739,7 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@ -9808,8 +9806,7 @@ static void ixgbe_remove(struct pci_dev *pdev) #endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev); e_dev_info("complete\n"); diff --git 
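The alx, e1000e, fm10k, i40e, igb and ixgbe hunks in this stretch are one mechanical conversion: the pci_select_bars(pdev, IORESOURCE_MEM) plus pci_request_selected_regions() pairing collapses into the new pci_request_mem_regions() helper, with pci_release_mem_regions() as the counterpart. To a first approximation the helpers are the following inline wrappers; this is sketched from memory of include/linux/pci.h, not copied from it.

#include <linux/pci.h>

static inline int pci_request_mem_regions(struct pci_dev *pdev,
                                          const char *name)
{
        /* request every BAR that decodes memory space, under one name */
        return pci_request_selected_regions(pdev,
                        pci_select_bars(pdev, IORESOURCE_MEM), name);
}

static inline void pci_release_mem_regions(struct pci_dev *pdev)
{
        pci_release_selected_regions(pdev,
                        pci_select_bars(pdev, IORESOURCE_MEM));
}

Besides shedding boilerplate, the helper lets alx_probe() drop the local 'bars' value it previously had to carry from probe into its error unwind.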
a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f92018b13d28..d41c28d00b57 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4118,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->bm_priv = NULL; } } + of_node_put(bm_node); err = mvneta_init(&pdev->dev, pp); if (err < 0) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 0b047178cda1..60227a3452a4 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6234,6 +6234,7 @@ err_free_stats: err_free_irq: irq_dispose_mapping(port->irq); err_free_netdev: + of_node_put(phy_node); free_netdev(dev); return err; } @@ -6244,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + of_node_put(port->phy_node); free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index aeeb2e79a91a..5d5000c8edf1 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1506,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); + of_node_put(np); } /* Hardware supports only 3 ports */ diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index b3cc3ab63799..6fc156a3918d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, + uar->index << PAGE_SHIFT, + PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 2874dffe77de..eaa37c079a7c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7412,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || - (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && + (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) && (dev->features & NETIF_F_RXCSUM)) { l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b26fe267a150..0e4f4a9306b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -509,6 +509,7 @@ int qed_resc_alloc(struct qed_dev *cdev) DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", n_eqes, 0xFFFF); + rc = -EINVAL; goto alloc_err; } @@ -888,7 +889,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, if (hw_mode & (1 << MODE_MF_SI)) { u8 pf_id = 0; - u32 val; + u32 val = 0; if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { if (p_hwfn->rel_pf_id == pf_id) { @@ -2539,7 +2540,7 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); - if (!rc) { + if (rc) { qed_ptt_release(p_hwfn, p_ptt); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index a12c6caa6c66..401e738543b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -590,7 +590,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_hw_cid_data *p_rx_cid; - u64 init_prod_val = 0; + u32 init_prod_val = 0; u16 abs_l2_queue = 0; u8 abs_stats_id = 0; int rc; @@ -618,7 +618,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); /* Allocate a CID for the queue */ @@ -1664,6 +1664,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_tc = 1; if (IS_PF(cdev)) { + int max_vf_vlan_filters = 0; + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) info->num_queues += @@ -1676,7 +1678,12 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_queues = cdev->num_hwfns; } - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + if (IS_QED_SRIOV(cdev)) + max_vf_vlan_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_VLAN_FILTERS; + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - + max_vf_vlan_filters; + ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1f13abb5c316..c7dc34bfdd0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -659,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, struct qed_sb_cnt_info sb_cnt_info; int rc; int i; - memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = int_mode; for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 4d161c751c12..15399da268d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1404,7 +1404,7 @@ static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, params.anti_spoofing_en = val; rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); - if (rc) { + if (!rc) { p_vf->spoof_chk = val; p_vf->req_spoofchk_val = p_vf->spoof_chk; DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 9819230947bf..9b780b31b15c 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -388,7 +388,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, /* Learn the address of the producer from the response */ if (pp_prod) { - u64 init_prod_val = 0; + u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -396,7 +396,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, rx_qid, *pp_prod, resp->offset); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 91e7bb0b85c8..e4bd02e46e57 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2064,10 +2064,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) } /* Remove vlan */ - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); - if (rc) { - DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - return -EINVAL; + if (vlan->configured) { + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, + vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + return -EINVAL; + } } qede_del_vlan_from_list(edev, vlan); @@ -3268,6 +3271,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) start.vport_id = 0; start.drop_ttl0 = true; start.remove_inner_vlan = vlan_removal_en; + start.clear_stats = clear_stats; rc = edev->ops->vport_start(cdev, &start); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 9777e5713525..f4aa6331b367 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -45,7 +45,6 @@ struct qlcnic_dcb { static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) { kfree(dcb); - dcb = NULL; } static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index fd5d1c93b55b..fd4a8e473f11 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb->len += length; skb->data_len += length; skb->truesize += length; - length -= length; ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, &hlen); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ef668d300800..da4c2d8a4173 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) int i; u8 tmp8; + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", RTL_R8(ChipCmd), RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus)); @@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) spin_unlock_irq(&tp->lock); /* ...and finally, reset everything */ - if (netif_running(dev)) { - rtl8139_hw_start (dev); - netif_wake_queue (dev); - } + napi_enable(&tp->napi); + rtl8139_hw_start(dev); + netif_wake_queue(dev); + spin_unlock_bh(&tp->rx_lock); } diff --git 
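The 8139too change just above reorders the tx-timeout worker so the datapath is quiescent before the chip is reset: NAPI polling and the transmit queue are stopped first, synchronize_sched() waits for any handler already executing to finish, and both are re-enabled unconditionally once the reset is done (the old code only restarted them if netif_running() happened to be true). A condensed sketch of that ordering follows; example_priv and the reset placeholder are hypothetical, not the driver's actual code.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct example_priv {
        struct napi_struct napi;
};

static void example_tx_timeout_task(struct net_device *dev)
{
        struct example_priv *tp = netdev_priv(dev);

        /* quiesce the datapath before touching the hardware */
        napi_disable(&tp->napi);
        netif_stop_queue(dev);
        synchronize_sched();    /* wait out handlers already running */

        /* ... reset the chip here ... */

        /* then bring everything back up unconditionally */
        napi_enable(&tp->napi);
        netif_wake_queue(dev);
}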
a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0e62d74b09b3..e55638c7505a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1749,13 +1749,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); wol->supported = WAKE_ANY; - wol->wolopts = __rtl8169_get_wol(tp); + if (pm_runtime_active(d)) + wol->wolopts = __rtl8169_get_wol(tp); + else + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); + + pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1845,6 +1853,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); @@ -1852,12 +1863,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; - __rtl8169_set_wol(tp, wol->wolopts); + if (pm_runtime_active(d)) + __rtl8169_set_wol(tp, wol->wolopts); + else + tp->saved_wolopts = wol->wolopts; rtl_unlock_work(tp); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + pm_runtime_put_noidle(d); + return 0; } @@ -2292,11 +2308,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; struct rtl8169_counters *counters = tp->counters; ASSERT_RTNL(); - rtl8169_update_counters(dev); + pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl8169_update_counters(dev); + + pm_runtime_put_noidle(d); data[0] = le64_to_cpu(counters->tx_packets); data[1] = le64_to_cpu(counters->rx_packets); @@ -4458,6 +4480,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) static int rtl_set_mac_address(struct net_device *dev, void *p) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -4465,7 +4488,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rtl_rar_set(tp, dev->dev_addr); + pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl_rar_set(tp, dev->dev_addr); + + pm_runtime_put_noidle(d); return 0; } @@ -7868,6 +7896,7 @@ static int rtl8169_runtime_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); + rtl_rar_set(tp, dev->dev_addr); if (!tp->TxDescArray) return 0; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8377d0220fa8..1e1cc0fad17f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1005,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev) } phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, priv->phy_interface); + of_node_put(pn); if (!phydev) { netdev_err(ndev, "failed to connect PHY\n"); return -ENOENT; diff --git a/drivers/net/ethernet/renesas/sh_eth.c 
b/drivers/net/ethernet/renesas/sh_eth.c index 7bd910ce8b34..799d58d86e6d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1780,6 +1780,7 @@ static int sh_eth_phy_init(struct net_device *ndev) sh_eth_adjust_link, 0, mdp->phy_interface); + of_node_put(pn); if (!phydev) phydev = ERR_PTR(-ENOENT); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index edd20c3b2b3d..bec6963ac71e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -135,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); if (np_splitter) { - if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + ret = of_address_to_resource(np_splitter, 0, &res_splitter); + of_node_put(np_splitter); + if (ret) { dev_info(dev, "Missing emac splitter address\n"); return -EINVAL; } @@ -159,14 +161,17 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * dev_err(dev, "%s: ERROR: missing emac splitter address\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_node_put; } dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter); - if (IS_ERR(dwmac->splitter_base)) - return PTR_ERR(dwmac->splitter_base); + if (IS_ERR(dwmac->splitter_base)) { + ret = PTR_ERR(dwmac->splitter_base); + goto err_node_put; + } } index = of_property_match_string(np_sgmii_adapter, "reg-names", @@ -178,14 +183,17 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * dev_err(dev, "%s: ERROR: failed mapping adapter\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_node_put; } dwmac->pcs.sgmii_adapter_base = devm_ioremap_resource(dev, &res_sgmii_adapter); - if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) - return PTR_ERR(dwmac->pcs.sgmii_adapter_base); + if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) { + ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base); + goto err_node_put; + } } index = of_property_match_string(np_sgmii_adapter, "reg-names", @@ -197,22 +205,30 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * dev_err(dev, "%s: ERROR: failed mapping tse control port\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_node_put; } dwmac->pcs.tse_pcs_base = devm_ioremap_resource(dev, &res_tse_pcs); - if (IS_ERR(dwmac->pcs.tse_pcs_base)) - return PTR_ERR(dwmac->pcs.tse_pcs_base); + if (IS_ERR(dwmac->pcs.tse_pcs_base)) { + ret = PTR_ERR(dwmac->pcs.tse_pcs_base); + goto err_node_put; + } } } dwmac->reg_offset = reg_offset; dwmac->reg_shift = reg_shift; dwmac->sys_mgr_base_addr = sys_mgr_base_addr; dwmac->dev = dev; + of_node_put(np_sgmii_adapter); return 0; + +err_node_put: + of_node_put(np_sgmii_adapter); + return ret; } static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c23ccabc2d8a..4c8c60af7985 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3397,6 +3397,7 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); + of_node_put(priv->plat->phy_node); if (priv->stmmac_rst) reset_control_assert(priv->stmmac_rst); clk_disable_unprepare(priv->pclk); diff --git 
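The r8169 hunks a little further up all apply one guard: each ethtool or ndo entry point that would touch chip registers first pins the runtime-PM state with pm_runtime_get_noresume(), performs the register access only if pm_runtime_active() says the device is powered, and otherwise caches the request (saved_wolopts here) for the runtime-resume handler to replay; that is also why rtl8169_runtime_resume() gains the rtl_rar_set() call. The guard looks roughly like this; example_hw_set_wol() and the private struct are hypothetical stand-ins, not the driver's real symbols.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

struct example_priv {
        struct device *d;       /* e.g. &pci_dev->dev */
        u32 saved_wolopts;
};

static void example_hw_set_wol(struct net_device *dev, u32 wolopts)
{
        /* hypothetical register write, done under the driver's lock */
}

static int example_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct example_priv *tp = netdev_priv(dev);

        pm_runtime_get_noresume(tp->d); /* pin the PM state, don't wake */

        if (pm_runtime_active(tp->d))
                example_hw_set_wol(dev, wol->wolopts);
        else
                tp->saved_wolopts = wol->wolopts; /* replay on resume */

        pm_runtime_put_noidle(tp->d);
        return 0;
}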
a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index f7dfc0ae8e9c..756bb548e81a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) return NULL; axi = kzalloc(sizeof(*axi), GFP_KERNEL); - if (!axi) + if (!axi) { + of_node_put(np); return ERR_PTR(-ENOMEM); + } axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); @@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + of_node_put(np); return axi; } @@ -302,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - of_node_put(np); + of_node_put(plat->phy_node); return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1a93a1f28433..c51f34693eae 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2564,19 +2564,17 @@ clean_ndev_ret: return ret; } -static int cpsw_remove_child_device(struct device *dev, void *c) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - - return 0; -} - static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + return ret; + } if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); @@ -2584,8 +2582,9 @@ static int cpsw_remove(struct platform_device *pdev) cpsw_ale_destroy(priv->ale); cpdma_ctlr_destroy(priv->dma); + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); if (priv->data.dual_emac) free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 73638f7a55d4..19e5f32a8a64 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -357,13 +357,11 @@ EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { - unsigned long flags; int ret = 0, i; if (!ctlr) return -EINVAL; - spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); @@ -371,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); - spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 6e305a82ed43..727a79f3c7dd 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1964,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_ctlr_destroy(priv->dma); unregister_netdev(ndev); + of_node_put(priv->phy_node); 
pm_runtime_disable(&pdev->dev); free_netdev(ndev); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 2d0beb1b801c..d13e6e15d7b5 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -344,7 +344,6 @@ static void free_rxsa(struct rcu_head *head) crypto_free_aead(sa->key.tfm); free_percpu(sa->stats); - macsec_rxsc_put(sa->sc); kfree(sa); } @@ -863,6 +862,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; + struct macsec_rx_sc *rx_sc = rx_sa->sc; int len, ret; u32 pn; @@ -891,6 +891,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) out: macsec_rxsa_put(rx_sa); + macsec_rxsc_put(rx_sc); dev_put(dev); } @@ -1106,6 +1107,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); + sc = sc ? macsec_rxsc_get(sc) : NULL; if (sc) { secy = &macsec->secy; @@ -1180,8 +1182,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) if (IS_ERR(skb)) { /* the decrypt callback needs the reference */ - if (PTR_ERR(skb) != -EINPROGRESS) + if (PTR_ERR(skb) != -EINPROGRESS) { macsec_rxsa_put(rx_sa); + macsec_rxsc_put(rx_sc); + } rcu_read_unlock(); *pskb = NULL; return RX_HANDLER_CONSUMED; @@ -1197,6 +1201,7 @@ deliver: if (rx_sa) macsec_rxsa_put(rx_sa); + macsec_rxsc_put(rx_sc); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) @@ -1212,6 +1217,7 @@ deliver: drop: macsec_rxsa_put(rx_sa); drop_nosa: + macsec_rxsc_put(rx_sc); rcu_read_unlock(); drop_direct: kfree_skb(skb); @@ -1646,7 +1652,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) rtnl_lock(); rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); - if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) { + if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } @@ -3173,6 +3179,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) return err; + dev_hold(real_dev); + /* need to be already registered so that ->init has run and * the MAC addr is set */ @@ -3201,8 +3209,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev, macsec_generation++; - dev_hold(real_dev); - return 0; del_dev: diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index d94a978024d9..775674808249 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -345,10 +345,8 @@ static int xgene_mdio_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); csr_base = devm_ioremap_resource(dev, res); - if (IS_ERR(csr_base)) { - dev_err(dev, "Unable to retrieve mac CSR region\n"); + if (IS_ERR(csr_base)) return PTR_ERR(csr_base); - } pdata->mac_csr_addr = csr_base; pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET; pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 059f13b60fe0..1882d9828c99 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -869,7 +869,7 @@ static struct phy_driver ksphy_driver[] = { }, { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", - .phy_id_mask = 0x00ffffff, + .phy_id_mask = 0x00fffffc, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .driver_data = 
&ksz8041_type, @@ -993,7 +993,7 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000ffffe }, { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK }, - { PHY_ID_KSZ8001, 0x00ffffff }, + { PHY_ID_KSZ8001, 0x00fffffc }, { PHY_ID_KS8737, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8021, 0x00ffffff }, { PHY_ID_KSZ8031, 0x00ffffff }, diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 2fc50ec453d0..6f044450b702 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -862,7 +862,7 @@ static int uhdlc_suspend(struct device *dev) static int uhdlc_resume(struct device *dev) { struct ucc_hdlc_private *priv = dev_get_drvdata(dev); - struct ucc_tdm *utdm = priv->utdm; + struct ucc_tdm *utdm; struct ucc_tdm_info *ut_info; struct ucc_fast __iomem *uf_regs; struct ucc_fast_private *uccf; @@ -877,6 +877,7 @@ static int uhdlc_resume(struct device *dev) if (!netif_running(priv->ndev)) return 0; + utdm = priv->utdm; ut_info = priv->ut_info; uf_info = &ut_info->uf_info; uf_regs = priv->uf_regs; diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 7c8a3bf07884..124c2432ac9c 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -1,6 +1,7 @@ menuconfig LIBNVDIMM tristate "NVDIMM (Non-Volatile Memory Device) Support" depends on PHYS_ADDR_T_64BIT + depends on HAS_IOMEM depends on BLK_DEV help Generic support for non-volatile memory devices including @@ -19,7 +20,6 @@ if LIBNVDIMM config BLK_DEV_PMEM tristate "PMEM: Persistent memory block device support" default LIBNVDIMM - depends on HAS_IOMEM select ND_BTT if BTT select ND_PFN if NVDIMM_PFN help diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 7e262ef06ede..9faaa9694d87 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -267,10 +267,8 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) q = blk_alloc_queue(GFP_KERNEL); if (!q) return -ENOMEM; - if (devm_add_action(dev, nd_blk_release_queue, q)) { - blk_cleanup_queue(q); + if (devm_add_action_or_reset(dev, nd_blk_release_queue, q)) return -ENOMEM; - } blk_queue_make_request(q, nd_blk_make_request); blk_queue_max_hw_sectors(q, UINT_MAX); @@ -282,10 +280,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) disk = alloc_disk(0); if (!disk) return -ENOMEM; - if (devm_add_action(dev, nd_blk_release_disk, disk)) { - put_disk(disk); - return -ENOMEM; - } disk->first_minor = 0; disk->fops = &nd_blk_fops; @@ -295,6 +289,9 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) set_capacity(disk, 0); device_add_disk(dev, disk); + if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) + return -ENOMEM; + if (nsblk_meta_size(nsblk)) { int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk)); diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 816d0dae6398..3fa7919f94a8 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -198,8 +198,7 @@ struct device *nd_btt_create(struct nd_region *nd_region) { struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL); - if (dev) - __nd_device_register(dev); + __nd_device_register(dev); return dev; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 5e4e5c772ea5..458daf927336 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -31,6 +31,7 @@ int nvdimm_major; static int nvdimm_bus_major; static struct class *nd_class; +static DEFINE_IDA(nd_ida); static int to_nd_device_type(struct device *dev) { @@ -60,20 +61,13 @@ static int 
nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) to_nd_device_type(dev)); } -static int nvdimm_bus_match(struct device *dev, struct device_driver *drv) -{ - struct nd_device_driver *nd_drv = to_nd_device_driver(drv); - - return !!test_bit(to_nd_device_type(dev), &nd_drv->type); -} - static struct module *to_bus_provider(struct device *dev) { /* pin bus providers while regions are enabled */ if (is_nd_pmem(dev) || is_nd_blk(dev)) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - return nvdimm_bus->module; + return nvdimm_bus->nd_desc->module; } return NULL; } @@ -136,6 +130,21 @@ static int nvdimm_bus_remove(struct device *dev) return rc; } +static void nvdimm_bus_shutdown(struct device *dev) +{ + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); + struct nd_device_driver *nd_drv = NULL; + + if (dev->driver) + nd_drv = to_nd_device_driver(dev->driver); + + if (nd_drv && nd_drv->shutdown) { + nd_drv->shutdown(dev); + dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n", + dev->driver->name, dev_name(dev)); + } +} + void nd_device_notify(struct device *dev, enum nvdimm_event event) { device_lock(dev); @@ -208,14 +217,187 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, } EXPORT_SYMBOL_GPL(nvdimm_clear_poison); +static int nvdimm_bus_match(struct device *dev, struct device_driver *drv); + static struct bus_type nvdimm_bus_type = { .name = "nd", .uevent = nvdimm_bus_uevent, .match = nvdimm_bus_match, .probe = nvdimm_bus_probe, .remove = nvdimm_bus_remove, + .shutdown = nvdimm_bus_shutdown, +}; + +static void nvdimm_bus_release(struct device *dev) +{ + struct nvdimm_bus *nvdimm_bus; + + nvdimm_bus = container_of(dev, struct nvdimm_bus, dev); + ida_simple_remove(&nd_ida, nvdimm_bus->id); + kfree(nvdimm_bus); +} + +static bool is_nvdimm_bus(struct device *dev) +{ + return dev->release == nvdimm_bus_release; +} + +struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev) +{ + struct device *dev; + + for (dev = nd_dev; dev; dev = dev->parent) + if (is_nvdimm_bus(dev)) + break; + dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n"); + if (dev) + return to_nvdimm_bus(dev); + return NULL; +} + +struct nvdimm_bus *to_nvdimm_bus(struct device *dev) +{ + struct nvdimm_bus *nvdimm_bus; + + nvdimm_bus = container_of(dev, struct nvdimm_bus, dev); + WARN_ON(!is_nvdimm_bus(dev)); + return nvdimm_bus; +} +EXPORT_SYMBOL_GPL(to_nvdimm_bus); + +struct nvdimm_bus *nvdimm_bus_register(struct device *parent, + struct nvdimm_bus_descriptor *nd_desc) +{ + struct nvdimm_bus *nvdimm_bus; + int rc; + + nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL); + if (!nvdimm_bus) + return NULL; + INIT_LIST_HEAD(&nvdimm_bus->list); + INIT_LIST_HEAD(&nvdimm_bus->mapping_list); + INIT_LIST_HEAD(&nvdimm_bus->poison_list); + init_waitqueue_head(&nvdimm_bus->probe_wait); + nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); + mutex_init(&nvdimm_bus->reconfig_mutex); + if (nvdimm_bus->id < 0) { + kfree(nvdimm_bus); + return NULL; + } + nvdimm_bus->nd_desc = nd_desc; + nvdimm_bus->dev.parent = parent; + nvdimm_bus->dev.release = nvdimm_bus_release; + nvdimm_bus->dev.groups = nd_desc->attr_groups; + nvdimm_bus->dev.bus = &nvdimm_bus_type; + dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id); + rc = device_register(&nvdimm_bus->dev); + if (rc) { + dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc); + goto err; + } + + return nvdimm_bus; + err: + put_device(&nvdimm_bus->dev); + return NULL; +} +EXPORT_SYMBOL_GPL(nvdimm_bus_register); + +void 
nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus) +{ + if (!nvdimm_bus) + return; + device_unregister(&nvdimm_bus->dev); +} +EXPORT_SYMBOL_GPL(nvdimm_bus_unregister); + +static int child_unregister(struct device *dev, void *data) +{ + /* + * the singular ndctl class device per bus needs to be + * "device_destroy"ed, so skip it here + * + * i.e. remove classless children + */ + if (dev->class) + /* pass */; + else + nd_device_unregister(dev, ND_SYNC); + return 0; +} + +static void free_poison_list(struct list_head *poison_list) +{ + struct nd_poison *pl, *next; + + list_for_each_entry_safe(pl, next, poison_list, list) { + list_del(&pl->list); + kfree(pl); + } + list_del_init(poison_list); +} + +static int nd_bus_remove(struct device *dev) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + + mutex_lock(&nvdimm_bus_list_mutex); + list_del_init(&nvdimm_bus->list); + mutex_unlock(&nvdimm_bus_list_mutex); + + nd_synchronize(); + device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); + + nvdimm_bus_lock(&nvdimm_bus->dev); + free_poison_list(&nvdimm_bus->poison_list); + nvdimm_bus_unlock(&nvdimm_bus->dev); + + nvdimm_bus_destroy_ndctl(nvdimm_bus); + + return 0; +} + +static int nd_bus_probe(struct device *dev) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + int rc; + + rc = nvdimm_bus_create_ndctl(nvdimm_bus); + if (rc) + return rc; + + mutex_lock(&nvdimm_bus_list_mutex); + list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list); + mutex_unlock(&nvdimm_bus_list_mutex); + + /* enable bus provider attributes to look up their local context */ + dev_set_drvdata(dev, nvdimm_bus->nd_desc); + + return 0; +} + +static struct nd_device_driver nd_bus_driver = { + .probe = nd_bus_probe, + .remove = nd_bus_remove, + .drv = { + .name = "nd_bus", + .suppress_bind_attrs = true, + .bus = &nvdimm_bus_type, + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, }; +static int nvdimm_bus_match(struct device *dev, struct device_driver *drv) +{ + struct nd_device_driver *nd_drv = to_nd_device_driver(drv); + + if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver) + return true; + + return !!test_bit(to_nd_device_type(dev), &nd_drv->type); +} + static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain); void nd_synchronize(void) @@ -395,12 +577,10 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus) dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus, "ndctl%d", nvdimm_bus->id); - if (IS_ERR(dev)) { + if (IS_ERR(dev)) dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n", nvdimm_bus->id, PTR_ERR(dev)); - return PTR_ERR(dev); - } - return 0; + return PTR_ERR_OR_ZERO(dev); } void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus) @@ -850,8 +1030,14 @@ int __init nvdimm_bus_init(void) goto err_class; } + rc = driver_register(&nd_bus_driver.drv); + if (rc) + goto err_nd_bus; + return 0; + err_nd_bus: + class_destroy(nd_class); err_class: unregister_chrdev(nvdimm_major, "dimmctl"); err_dimm_chrdev: @@ -864,8 +1050,10 @@ int __init nvdimm_bus_init(void) void nvdimm_bus_exit(void) { + driver_unregister(&nd_bus_driver.drv); class_destroy(nd_class); unregister_chrdev(nvdimm_bus_major, "ndctl"); unregister_chrdev(nvdimm_major, "dimmctl"); bus_unregister(&nvdimm_bus_type); + ida_destroy(&nd_ida); } diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 8b2e3c4fb0ad..d5dc80c48b4c 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -240,7 +240,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, return memcpy_from_pmem(buf, nsio->addr + 
offset, size); } else { memcpy_to_pmem(nsio->addr + offset, buf, size); - wmb_pmem(); + nvdimm_flush(to_nd_region(ndns->dev.parent)); } return 0; @@ -266,9 +266,8 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio) nsio->addr = devm_memremap(dev, res->start, resource_size(res), ARCH_MEMREMAP_PMEM); - if (IS_ERR(nsio->addr)) - return PTR_ERR(nsio->addr); - return 0; + + return PTR_ERR_OR_ZERO(nsio->addr); } EXPORT_SYMBOL_GPL(devm_nsio_enable); diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index be89764315c2..715583f69d28 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -20,12 +20,12 @@ #include <linux/ndctl.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/io.h> #include "nd-core.h" #include "nd.h" LIST_HEAD(nvdimm_bus_list); DEFINE_MUTEX(nvdimm_bus_list_mutex); -static DEFINE_IDA(nd_ida); void nvdimm_bus_lock(struct device *dev) { @@ -57,6 +57,127 @@ bool is_nvdimm_bus_locked(struct device *dev) } EXPORT_SYMBOL(is_nvdimm_bus_locked); +struct nvdimm_map { + struct nvdimm_bus *nvdimm_bus; + struct list_head list; + resource_size_t offset; + unsigned long flags; + size_t size; + union { + void *mem; + void __iomem *iomem; + }; + struct kref kref; +}; + +static struct nvdimm_map *find_nvdimm_map(struct device *dev, + resource_size_t offset) +{ + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); + struct nvdimm_map *nvdimm_map; + + list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list) + if (nvdimm_map->offset == offset) + return nvdimm_map; + return NULL; +} + +static struct nvdimm_map *alloc_nvdimm_map(struct device *dev, + resource_size_t offset, size_t size, unsigned long flags) +{ + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); + struct nvdimm_map *nvdimm_map; + + nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL); + if (!nvdimm_map) + return NULL; + + INIT_LIST_HEAD(&nvdimm_map->list); + nvdimm_map->nvdimm_bus = nvdimm_bus; + nvdimm_map->offset = offset; + nvdimm_map->flags = flags; + nvdimm_map->size = size; + kref_init(&nvdimm_map->kref); + + if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) + goto err_request_region; + + if (flags) + nvdimm_map->mem = memremap(offset, size, flags); + else + nvdimm_map->iomem = ioremap(offset, size); + + if (!nvdimm_map->mem) + goto err_map; + + dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!", + __func__); + list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list); + + return nvdimm_map; + + err_map: + release_mem_region(offset, size); + err_request_region: + kfree(nvdimm_map); + return NULL; +} + +static void nvdimm_map_release(struct kref *kref) +{ + struct nvdimm_bus *nvdimm_bus; + struct nvdimm_map *nvdimm_map; + + nvdimm_map = container_of(kref, struct nvdimm_map, kref); + nvdimm_bus = nvdimm_map->nvdimm_bus; + + dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset); + list_del(&nvdimm_map->list); + if (nvdimm_map->flags) + memunmap(nvdimm_map->mem); + else + iounmap(nvdimm_map->iomem); + release_mem_region(nvdimm_map->offset, nvdimm_map->size); + kfree(nvdimm_map); +} + +static void nvdimm_map_put(void *data) +{ + struct nvdimm_map *nvdimm_map = data; + struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus; + + nvdimm_bus_lock(&nvdimm_bus->dev); + kref_put(&nvdimm_map->kref, nvdimm_map_release); + nvdimm_bus_unlock(&nvdimm_bus->dev); +} + +/** + * devm_nvdimm_memremap - map a resource that is shared across regions + * @dev: device that will own a reference to the shared mapping + * @offset: 
physical base address of the mapping + * @size: mapping size + * @flags: memremap flags, or, if zero, perform an ioremap instead + */ +void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset, + size_t size, unsigned long flags) +{ + struct nvdimm_map *nvdimm_map; + + nvdimm_bus_lock(dev); + nvdimm_map = find_nvdimm_map(dev, offset); + if (!nvdimm_map) + nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); + else + kref_get(&nvdimm_map->kref); + nvdimm_bus_unlock(dev); + + if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map)) + return NULL; + + return nvdimm_map->mem; +} +EXPORT_SYMBOL_GPL(devm_nvdimm_memremap); + u64 nd_fletcher64(void *addr, size_t len, bool le) { u32 *buf = addr; @@ -73,25 +194,6 @@ u64 nd_fletcher64(void *addr, size_t len, bool le) } EXPORT_SYMBOL_GPL(nd_fletcher64); -static void nvdimm_bus_release(struct device *dev) -{ - struct nvdimm_bus *nvdimm_bus; - - nvdimm_bus = container_of(dev, struct nvdimm_bus, dev); - ida_simple_remove(&nd_ida, nvdimm_bus->id); - kfree(nvdimm_bus); -} - -struct nvdimm_bus *to_nvdimm_bus(struct device *dev) -{ - struct nvdimm_bus *nvdimm_bus; - - nvdimm_bus = container_of(dev, struct nvdimm_bus, dev); - WARN_ON(nvdimm_bus->dev.release != nvdimm_bus_release); - return nvdimm_bus; -} -EXPORT_SYMBOL_GPL(to_nvdimm_bus); - struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus) { /* struct nvdimm_bus definition is private to libnvdimm */ @@ -99,18 +201,12 @@ struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus) } EXPORT_SYMBOL_GPL(to_nd_desc); -struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev) +struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus) { - struct device *dev; - - for (dev = nd_dev; dev; dev = dev->parent) - if (dev->release == nvdimm_bus_release) - break; - dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n"); - if (dev) - return to_nvdimm_bus(dev); - return NULL; + /* struct nvdimm_bus definition is private to libnvdimm */ + return &nvdimm_bus->dev; } +EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev); static bool is_uuid_sep(char sep) { @@ -325,51 +421,6 @@ struct attribute_group nvdimm_bus_attribute_group = { }; EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group); -struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, - struct nvdimm_bus_descriptor *nd_desc, struct module *module) -{ - struct nvdimm_bus *nvdimm_bus; - int rc; - - nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL); - if (!nvdimm_bus) - return NULL; - INIT_LIST_HEAD(&nvdimm_bus->list); - INIT_LIST_HEAD(&nvdimm_bus->poison_list); - init_waitqueue_head(&nvdimm_bus->probe_wait); - nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); - mutex_init(&nvdimm_bus->reconfig_mutex); - if (nvdimm_bus->id < 0) { - kfree(nvdimm_bus); - return NULL; - } - nvdimm_bus->nd_desc = nd_desc; - nvdimm_bus->module = module; - nvdimm_bus->dev.parent = parent; - nvdimm_bus->dev.release = nvdimm_bus_release; - nvdimm_bus->dev.groups = nd_desc->attr_groups; - dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id); - rc = device_register(&nvdimm_bus->dev); - if (rc) { - dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc); - goto err; - } - - rc = nvdimm_bus_create_ndctl(nvdimm_bus); - if (rc) - goto err; - - mutex_lock(&nvdimm_bus_list_mutex); - list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list); - mutex_unlock(&nvdimm_bus_list_mutex); - - return nvdimm_bus; - err: - put_device(&nvdimm_bus->dev); - return NULL; -} -EXPORT_SYMBOL_GPL(__nvdimm_bus_register); - static void set_badblock(struct 
badblocks *bb, sector_t s, int num) { dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n", @@ -545,54 +596,6 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) } EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison); -static void free_poison_list(struct list_head *poison_list) -{ - struct nd_poison *pl, *next; - - list_for_each_entry_safe(pl, next, poison_list, list) { - list_del(&pl->list); - kfree(pl); - } - list_del_init(poison_list); -} - -static int child_unregister(struct device *dev, void *data) -{ - /* - * the singular ndctl class device per bus needs to be - * "device_destroy"ed, so skip it here - * - * i.e. remove classless children - */ - if (dev->class) - /* pass */; - else - nd_device_unregister(dev, ND_SYNC); - return 0; -} - -void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus) -{ - if (!nvdimm_bus) - return; - - mutex_lock(&nvdimm_bus_list_mutex); - list_del_init(&nvdimm_bus->list); - mutex_unlock(&nvdimm_bus_list_mutex); - - nd_synchronize(); - device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); - - nvdimm_bus_lock(&nvdimm_bus->dev); - free_poison_list(&nvdimm_bus->poison_list); - nvdimm_bus_unlock(&nvdimm_bus->dev); - - nvdimm_bus_destroy_ndctl(nvdimm_bus); - - device_unregister(&nvdimm_bus->dev); -} -EXPORT_SYMBOL_GPL(nvdimm_bus_unregister); - #ifdef CONFIG_BLK_DEV_INTEGRITY int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) { @@ -601,7 +604,8 @@ int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) if (meta_size == 0) return 0; - bi.profile = NULL; + memset(&bi, 0, sizeof(bi)); + bi.tuple_size = meta_size; bi.tag_size = meta_size; @@ -650,7 +654,6 @@ static __exit void libnvdimm_exit(void) nvdimm_bus_exit(); nd_region_devs_exit(); nvdimm_devs_exit(); - ida_destroy(&nd_ida); } MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index bbde28d3dec5..d9bba5edd8dc 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -346,7 +346,8 @@ EXPORT_SYMBOL_GPL(nvdimm_attribute_group); struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, - unsigned long cmd_mask) + unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq) { struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); struct device *dev; @@ -362,6 +363,8 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, nvdimm->provider_data = provider_data; nvdimm->flags = flags; nvdimm->cmd_mask = cmd_mask; + nvdimm->num_flush = num_flush; + nvdimm->flush_wpq = flush_wpq; atomic_set(&nvdimm->busy, 0); dev = &nvdimm->dev; dev_set_name(dev, "nmem%d", nvdimm->id); diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index 95825b38559a..11ea90120542 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -47,6 +47,7 @@ static int e820_pmem_probe(struct platform_device *pdev) nd_desc.attr_groups = e820_pmem_attribute_groups; nd_desc.provider_name = "e820"; + nd_desc.module = THIS_MODULE; nvdimm_bus = nvdimm_bus_register(dev, &nd_desc); if (!nvdimm_bus) goto err; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 284cdaa268cf..38ce6bbbc170 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -26,11 +26,11 @@ extern int nvdimm_major; struct nvdimm_bus { struct nvdimm_bus_descriptor *nd_desc; wait_queue_head_t probe_wait; - struct module *module; struct list_head list; struct device dev; int id, probe_active; struct 
list_head poison_list; + struct list_head mapping_list; struct mutex reconfig_mutex; }; @@ -40,7 +40,8 @@ struct nvdimm { unsigned long cmd_mask; struct device dev; atomic_t busy; - int id; + int id, num_flush; + struct resource *flush_wpq; }; bool is_nvdimm(struct device *dev); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index d0ac93c31dda..40476399d227 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -49,9 +49,11 @@ struct nvdimm_drvdata { struct kref kref; }; -struct nd_region_namespaces { - int count; - int active; +struct nd_region_data { + int ns_count; + int ns_active; + unsigned int flush_mask; + void __iomem *flush_wpq[0][0]; }; static inline struct nd_namespace_index *to_namespace_index( @@ -119,7 +121,6 @@ struct nd_region { struct nd_blk_region { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); void *blk_provider_data; @@ -325,6 +326,7 @@ static inline void devm_nsio_disable(struct device *dev, } #endif int nd_blk_region_init(struct nd_region *nd_region); +int nd_region_activate(struct nd_region *nd_region); void __nd_iostat_start(struct bio *bio, unsigned long *start); static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 36cb39047d5b..b511099457db 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -29,27 +29,28 @@ #include <linux/slab.h> #include <linux/pmem.h> #include <linux/nd.h> +#include "pmem.h" #include "pfn.h" #include "nd.h" -struct pmem_device { - /* One contiguous memory region per device */ - phys_addr_t phys_addr; - /* when non-zero this device is hosting a 'pfn' instance */ - phys_addr_t data_offset; - u64 pfn_flags; - void __pmem *virt_addr; - /* immutable base size of the namespace */ - size_t size; - /* trim size when namespace capacity has been section aligned */ - u32 pfn_pad; - struct badblocks bb; -}; +static struct device *to_dev(struct pmem_device *pmem) +{ + /* + * nvdimm bus services need a 'dev' parameter, and we record the device + * at init in bb.dev. + */ + return pmem->bb.dev; +} + +static struct nd_region *to_region(struct pmem_device *pmem) +{ + return to_nd_region(to_dev(pmem)->parent); +} static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { - struct device *dev = pmem->bb.dev; + struct device *dev = to_dev(pmem); sector_t sector; long cleared; @@ -57,7 +58,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); if (cleared > 0 && cleared / 512) { - dev_dbg(dev, "%s: %llx clear %ld sector%s\n", + dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, (unsigned long long) sector, cleared / 512, cleared / 512 > 1 ? 
"s" : ""); badblocks_clear(&pmem->bb, sector, cleared / 512); @@ -73,7 +74,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, bool bad_pmem = false; void *mem = kmap_atomic(page); phys_addr_t pmem_off = sector * 512 + pmem->data_offset; - void __pmem *pmem_addr = pmem->virt_addr + pmem_off; + void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) bad_pmem = true; @@ -112,6 +113,11 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, return rc; } +/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */ +#ifndef REQ_FLUSH +#define REQ_FLUSH REQ_PREFLUSH +#endif + static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) { int rc = 0; @@ -120,6 +126,10 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) struct bio_vec bvec; struct bvec_iter iter; struct pmem_device *pmem = q->queuedata; + struct nd_region *nd_region = to_region(pmem); + + if (bio->bi_rw & REQ_FLUSH) + nvdimm_flush(nd_region); do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { @@ -134,8 +144,8 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) if (do_acct) nd_iostat_end(bio, start); - if (bio_data_dir(bio)) - wmb_pmem(); + if (bio->bi_rw & REQ_FUA) + nvdimm_flush(nd_region); bio_endio(bio); return BLK_QC_T_NONE; @@ -148,8 +158,6 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, int rc; rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector); - if (rw & WRITE) - wmb_pmem(); /* * The ->rw_page interface is subtle and tricky. The core @@ -163,8 +171,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, return rc; } -static long pmem_direct_access(struct block_device *bdev, sector_t sector, - void __pmem **kaddr, pfn_t *pfn, long size) +/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ +__weak long pmem_direct_access(struct block_device *bdev, sector_t sector, + void **kaddr, pfn_t *pfn, long size) { struct pmem_device *pmem = bdev->bd_queue->queuedata; resource_size_t offset = sector * 512 + pmem->data_offset; @@ -195,7 +204,7 @@ static void pmem_release_queue(void *q) blk_cleanup_queue(q); } -void pmem_release_disk(void *disk) +static void pmem_release_disk(void *disk) { del_gendisk(disk); put_disk(disk); @@ -205,6 +214,7 @@ static int pmem_attach_disk(struct device *dev, struct nd_namespace_common *ndns) { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct nd_region *nd_region = to_nd_region(dev->parent); struct vmem_altmap __altmap, *altmap = NULL; struct resource *res = &nsio->res; struct nd_pfn *nd_pfn = NULL; @@ -234,7 +244,7 @@ static int pmem_attach_disk(struct device *dev, dev_set_drvdata(dev, pmem); pmem->phys_addr = res->start; pmem->size = resource_size(res); - if (!arch_has_wmb_pmem()) + if (nvdimm_has_flush(nd_region) < 0) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (!devm_request_mem_region(dev, res->start, resource_size(res), @@ -269,15 +279,14 @@ static int pmem_attach_disk(struct device *dev, * At release time the queue must be dead before * devm_memremap_pages is unwound */ - if (devm_add_action(dev, pmem_release_queue, q)) { - blk_cleanup_queue(q); + if (devm_add_action_or_reset(dev, pmem_release_queue, q)) return -ENOMEM; - } if (IS_ERR(addr)) return PTR_ERR(addr); - pmem->virt_addr = (void __pmem *) addr; + pmem->virt_addr = addr; + blk_queue_write_cache(q, true, true); blk_queue_make_request(q, 
pmem_make_request); blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_max_hw_sectors(q, UINT_MAX); @@ -289,10 +298,6 @@ static int pmem_attach_disk(struct device *dev, disk = alloc_disk_node(0, nid); if (!disk) return -ENOMEM; - if (devm_add_action(dev, pmem_release_disk, disk)) { - put_disk(disk); - return -ENOMEM; - } disk->fops = &pmem_fops; disk->queue = q; @@ -302,9 +307,13 @@ static int pmem_attach_disk(struct device *dev, / 512); if (devm_init_badblocks(dev, &pmem->bb)) return -ENOMEM; - nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res); + nvdimm_badblocks_populate(nd_region, &pmem->bb, res); disk->bb = &pmem->bb; device_add_disk(dev, disk); + + if (devm_add_action_or_reset(dev, pmem_release_disk, disk)) + return -ENOMEM; + revalidate_disk(disk); return 0; @@ -340,13 +349,20 @@ static int nd_pmem_remove(struct device *dev) { if (is_nd_btt(dev)) nvdimm_namespace_detach_btt(to_nd_btt(dev)); + nvdimm_flush(to_nd_region(dev->parent)); + return 0; } +static void nd_pmem_shutdown(struct device *dev) +{ + nvdimm_flush(to_nd_region(dev->parent)); +} + static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) { - struct nd_region *nd_region = to_nd_region(dev->parent); struct pmem_device *pmem = dev_get_drvdata(dev); + struct nd_region *nd_region = to_region(pmem); resource_size_t offset = 0, end_trunc = 0; struct nd_namespace_common *ndns; struct nd_namespace_io *nsio; @@ -382,6 +398,7 @@ static struct nd_device_driver nd_pmem_driver = { .probe = nd_pmem_probe, .remove = nd_pmem_remove, .notify = nd_pmem_notify, + .shutdown = nd_pmem_shutdown, .drv = { .name = "nd_pmem", }, diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h new file mode 100644 index 000000000000..b4ee4f71b4a1 --- /dev/null +++ b/drivers/nvdimm/pmem.h @@ -0,0 +1,24 @@ +#ifndef __NVDIMM_PMEM_H__ +#define __NVDIMM_PMEM_H__ +#include <linux/badblocks.h> +#include <linux/types.h> +#include <linux/pfn_t.h> +#include <linux/fs.h> + +long pmem_direct_access(struct block_device *bdev, sector_t sector, + void **kaddr, pfn_t *pfn, long size); +/* this definition is in it's own header for tools/testing/nvdimm to consume */ +struct pmem_device { + /* One contiguous memory region per device */ + phys_addr_t phys_addr; + /* when non-zero this device is hosting a 'pfn' instance */ + phys_addr_t data_offset; + u64 pfn_flags; + void *virt_addr; + /* immutable base size of the namespace */ + size_t size; + /* trim size when namespace capacity has been section aligned */ + u32 pfn_pad; + struct badblocks bb; +}; +#endif /* __NVDIMM_PMEM_H__ */ diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index 05a912359939..8f241772ec0b 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -20,7 +20,7 @@ static int nd_region_probe(struct device *dev) { int err, rc; static unsigned long once; - struct nd_region_namespaces *num_ns; + struct nd_region_data *ndrd; struct nd_region *nd_region = to_nd_region(dev); if (nd_region->num_lanes > num_online_cpus() @@ -33,21 +33,21 @@ static int nd_region_probe(struct device *dev) nd_region->num_lanes); } + rc = nd_region_activate(nd_region); + if (rc) + return rc; + rc = nd_blk_region_init(nd_region); if (rc) return rc; rc = nd_region_register_namespaces(nd_region, &err); - num_ns = devm_kzalloc(dev, sizeof(*num_ns), GFP_KERNEL); - if (!num_ns) - return -ENOMEM; - if (rc < 0) return rc; - num_ns->active = rc; - num_ns->count = rc + err; - dev_set_drvdata(dev, num_ns); + ndrd = dev_get_drvdata(dev); + ndrd->ns_active = rc; + ndrd->ns_count = 
rc + err; if (rc && err && rc == err) return -ENODEV; @@ -82,6 +82,8 @@ static int nd_region_remove(struct device *dev) { struct nd_region *nd_region = to_nd_region(dev); + device_for_each_child(dev, NULL, child_unregister); + /* flush attribute readers and disable */ nvdimm_bus_lock(dev); nd_region->ns_seed = NULL; @@ -91,7 +93,6 @@ static int nd_region_remove(struct device *dev) dev_set_drvdata(dev, NULL); nvdimm_bus_unlock(dev); - device_for_each_child(dev, NULL, child_unregister); return 0; } diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 40fcfea26fbb..e8d5ba7b29af 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -14,13 +14,97 @@ #include <linux/highmem.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/hash.h> +#include <linux/pmem.h> #include <linux/sort.h> #include <linux/io.h> #include <linux/nd.h> #include "nd-core.h" #include "nd.h" +/* + * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is + * irrelevant. + */ +#include <linux/io-64-nonatomic-hi-lo.h> + static DEFINE_IDA(region_ida); +static DEFINE_PER_CPU(int, flush_idx); + +static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm, + struct nd_region_data *ndrd) +{ + int i, j; + + dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm), + nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es"); + for (i = 0; i < nvdimm->num_flush; i++) { + struct resource *res = &nvdimm->flush_wpq[i]; + unsigned long pfn = PHYS_PFN(res->start); + void __iomem *flush_page; + + /* check if flush hints share a page */ + for (j = 0; j < i; j++) { + struct resource *res_j = &nvdimm->flush_wpq[j]; + unsigned long pfn_j = PHYS_PFN(res_j->start); + + if (pfn == pfn_j) + break; + } + + if (j < i) + flush_page = (void __iomem *) ((unsigned long) + ndrd->flush_wpq[dimm][j] & PAGE_MASK); + else + flush_page = devm_nvdimm_ioremap(dev, + PHYS_PFN(pfn), PAGE_SIZE); + if (!flush_page) + return -ENXIO; + ndrd->flush_wpq[dimm][i] = flush_page + + (res->start & ~PAGE_MASK); + } + + return 0; +} + +int nd_region_activate(struct nd_region *nd_region) +{ + int i, num_flush = 0; + struct nd_region_data *ndrd; + struct device *dev = &nd_region->dev; + size_t flush_data_size = sizeof(void *); + + nvdimm_bus_lock(&nd_region->dev); + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + /* at least one null hint slot per-dimm for the "no-hint" case */ + flush_data_size += sizeof(void *); + num_flush = min_not_zero(num_flush, nvdimm->num_flush); + if (!nvdimm->num_flush) + continue; + flush_data_size += nvdimm->num_flush * sizeof(void *); + } + nvdimm_bus_unlock(&nd_region->dev); + + ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL); + if (!ndrd) + return -ENOMEM; + dev_set_drvdata(dev, ndrd); + + ndrd->flush_mask = (1 << ilog2(num_flush)) - 1; + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd); + + if (rc) + return rc; + } + + return 0; +} static void nd_region_release(struct device *dev) { @@ -242,12 +326,12 @@ static DEVICE_ATTR_RO(available_size); static ssize_t init_namespaces_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nd_region_namespaces *num_ns = dev_get_drvdata(dev); + struct nd_region_data *ndrd = dev_get_drvdata(dev); 
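/*
 * Illustrative sketch, not part of the patch: the flush-hint table that
 * nd_region_activate() sizes above. Each mapped DIMM gets a row of
 * ioremapped hint addresses in nd_region_data.flush_wpq plus at least
 * one NULL slot, so flush_wpq[dimm][0] == NULL encodes the "no flush
 * hints" case. example_flush_table_bytes() is a made-up helper name.
 */
static size_t example_flush_table_bytes(int mappings, const int *num_flush)
{
	size_t sz = sizeof(void *);	/* initial seed, as in the patch */
	int i;

	for (i = 0; i < mappings; i++) {
		/* one NULL "no-hint" slot per DIMM ... */
		sz += sizeof(void *);
		/* ... plus one pointer per advertised flush hint */
		sz += num_flush[i] * sizeof(void *);
	}
	return sz;
}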
ssize_t rc; nvdimm_bus_lock(dev); - if (num_ns) - rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count); + if (ndrd) + rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); else rc = -ENXIO; nvdimm_bus_unlock(dev); @@ -433,8 +517,6 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, if (is_nd_pmem(dev)) return; - - to_nd_blk_region(dev)->disable(nvdimm_bus, dev); } if (dev->parent && is_nd_blk(dev->parent) && probe) { nd_region = to_nd_region(dev->parent); @@ -698,7 +780,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, if (ndbr) { nd_region = &ndbr->nd_region; ndbr->enable = ndbr_desc->enable; - ndbr->disable = ndbr_desc->disable; ndbr->do_io = ndbr_desc->do_io; } region_buf = ndbr; @@ -794,6 +875,67 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, } EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create); +/** + * nvdimm_flush - flush any posted write queues between the cpu and pmem media + * @nd_region: blk or interleaved pmem region + */ +void nvdimm_flush(struct nd_region *nd_region) +{ + struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); + int i, idx; + + /* + * Try to encourage some diversity in flush hint addresses + * across cpus assuming a limited number of flush hints. + */ + idx = this_cpu_read(flush_idx); + idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8)); + + /* + * The first wmb() is needed to 'sfence' all previous writes + * such that they are architecturally visible for the platform + * buffer flush. Note that we've already arranged for pmem + * writes to avoid the cache via arch_memcpy_to_pmem(). The + * final wmb() ensures ordering for the NVDIMM flush write. + */ + wmb(); + for (i = 0; i < nd_region->ndr_mappings; i++) + if (ndrd->flush_wpq[i][0]) + writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]); + wmb(); +} +EXPORT_SYMBOL_GPL(nvdimm_flush); + +/** + * nvdimm_has_flush - determine write flushing requirements + * @nd_region: blk or interleaved pmem region + * + * Returns 1 if writes require flushing + * Returns 0 if writes do not require flushing + * Returns -ENXIO if flushing capability can not be determined + */ +int nvdimm_has_flush(struct nd_region *nd_region) +{ + struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); + int i; + + /* no nvdimm == flushing capability unknown */ + if (nd_region->ndr_mappings == 0) + return -ENXIO; + + for (i = 0; i < nd_region->ndr_mappings; i++) + /* flush hints present, flushing required */ + if (ndrd->flush_wpq[i][0]) + return 1; + + /* + * The platform defines dimm devices without hints, assume + * platform persistence mechanism like ADR + */ + return 0; +} +EXPORT_SYMBOL_GPL(nvdimm_has_flush); + void __exit nd_region_devs_exit(void) { ida_destroy(®ion_ida); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4cb9b156cab7..d7c33f9361aa 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1661,14 +1661,9 @@ static int nvme_pci_enable(struct nvme_dev *dev) static void nvme_dev_unmap(struct nvme_dev *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); - int bars; - if (dev->bar) iounmap(dev->bar); - - bars = pci_select_bars(pdev, IORESOURCE_MEM); - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(to_pci_dev(dev->dev)); } static void nvme_pci_disable(struct nvme_dev *dev) @@ -1897,13 +1892,9 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { static int nvme_dev_map(struct nvme_dev *dev) { - int bars; struct pci_dev *pdev = 
to_pci_dev(dev->dev); - bars = pci_select_bars(pdev, IORESOURCE_MEM); - if (!bars) - return -ENODEV; - if (pci_request_selected_regions(pdev, bars, "nvme")) + if (pci_request_mem_regions(pdev, "nvme")) return -ENODEV; dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); @@ -1912,7 +1903,7 @@ static int nvme_dev_map(struct nvme_dev *dev) return 0; release: - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); return -ENODEV; } diff --git a/drivers/of/address.c b/drivers/of/address.c index 0a553c084a81..02b2903fe9d2 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -1,4 +1,6 @@ +#define pr_fmt(fmt) "OF: " fmt + #include <linux/device.h> #include <linux/io.h> #include <linux/ioport.h> @@ -24,10 +26,10 @@ static int __of_address_to_resource(struct device_node *dev, #ifdef DEBUG static void of_dump_addr(const char *s, const __be32 *addr, int na) { - printk(KERN_DEBUG "%s", s); + pr_debug("%s", s); while (na--) - printk(" %08x", be32_to_cpu(*(addr++))); - printk("\n"); + pr_cont(" %08x", be32_to_cpu(*(addr++))); + pr_cont("\n"); } #else static void of_dump_addr(const char *s, const __be32 *addr, int na) { } @@ -68,7 +70,7 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range, s = of_read_number(range + na + pna, ns); da = of_read_number(addr, na); - pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n", + pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", (unsigned long long)cp, (unsigned long long)s, (unsigned long long)da); @@ -156,7 +158,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, s = of_read_number(range + na + pna, ns); da = of_read_number(addr + 1, na - 1); - pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n", + pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", (unsigned long long)cp, (unsigned long long)s, (unsigned long long)da); @@ -381,7 +383,7 @@ static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns, s = of_read_number(range + na + pna, ns); da = of_read_number(addr + 1, na - 1); - pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n", + pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", (unsigned long long)cp, (unsigned long long)s, (unsigned long long)da); @@ -504,17 +506,17 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, */ ranges = of_get_property(parent, rprop, &rlen); if (ranges == NULL && !of_empty_ranges_quirk(parent)) { - pr_debug("OF: no ranges; cannot translate\n"); + pr_debug("no ranges; cannot translate\n"); return 1; } if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); - pr_debug("OF: empty ranges; 1:1 translation\n"); + pr_debug("empty ranges; 1:1 translation\n"); goto finish; } - pr_debug("OF: walking ranges...\n"); + pr_debug("walking ranges...\n"); /* Now walk through the ranges */ rlen /= 4; @@ -525,14 +527,14 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, break; } if (offset == OF_BAD_ADDR) { - pr_debug("OF: not found !\n"); + pr_debug("not found !\n"); return 1; } memcpy(addr, ranges + na, 4 * pna); finish: - of_dump_addr("OF: parent translation for:", addr, pna); - pr_debug("OF: with offset: %llx\n", (unsigned long long)offset); + of_dump_addr("parent translation for:", addr, pna); + pr_debug("with offset: %llx\n", (unsigned long long)offset); /* Translate it into parent bus space */ return pbus->translate(addr, offset, pna); @@ -557,7 +559,7 @@ static u64 __of_translate_address(struct device_node *dev, int na, ns, pna, pns; u64 result = OF_BAD_ADDR; - 
pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev)); + pr_debug("** translation for device %s **\n", of_node_full_name(dev)); /* Increase refcount at current level */ of_node_get(dev); @@ -571,14 +573,14 @@ static u64 __of_translate_address(struct device_node *dev, /* Count address cells & copy address locally */ bus->count_cells(dev, &na, &ns); if (!OF_CHECK_COUNTS(na, ns)) { - pr_debug("OF: Bad cell count for %s\n", of_node_full_name(dev)); + pr_debug("Bad cell count for %s\n", of_node_full_name(dev)); goto bail; } memcpy(addr, in_addr, na * 4); - pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n", + pr_debug("bus is %s (na=%d, ns=%d) on %s\n", bus->name, na, ns, of_node_full_name(parent)); - of_dump_addr("OF: translating address:", addr, na); + of_dump_addr("translating address:", addr, na); /* Translate */ for (;;) { @@ -589,7 +591,7 @@ static u64 __of_translate_address(struct device_node *dev, /* If root, we have finished */ if (parent == NULL) { - pr_debug("OF: reached root node\n"); + pr_debug("reached root node\n"); result = of_read_number(addr, na); break; } @@ -598,12 +600,12 @@ static u64 __of_translate_address(struct device_node *dev, pbus = of_match_bus(parent); pbus->count_cells(dev, &pna, &pns); if (!OF_CHECK_COUNTS(pna, pns)) { - pr_err("prom_parse: Bad cell count for %s\n", + pr_err("Bad cell count for %s\n", of_node_full_name(dev)); break; } - pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n", + pr_debug("parent bus is %s (na=%d, ns=%d) on %s\n", pbus->name, pna, pns, of_node_full_name(parent)); /* Apply bus translation */ @@ -615,7 +617,7 @@ static u64 __of_translate_address(struct device_node *dev, ns = pns; bus = pbus; - of_dump_addr("OF: one level translation:", addr, na); + of_dump_addr("one level translation:", addr, na); } bail: of_node_put(parent); @@ -853,8 +855,7 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz } if (!ranges) { - pr_debug("%s: no dma-ranges found for node(%s)\n", - __func__, np->full_name); + pr_debug("no dma-ranges found for node(%s)\n", np->full_name); ret = -ENODEV; goto out; } @@ -871,8 +872,8 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz dmaaddr = of_read_number(ranges, naddr); *paddr = of_translate_dma_address(np, ranges); if (*paddr == OF_BAD_ADDR) { - pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n", - __func__, dma_addr, np->full_name); + pr_err("translation of DMA address(%pad) to CPU address failed node(%s)\n", + dma_addr, np->full_name); ret = -EINVAL; goto out; } diff --git a/drivers/of/base.c b/drivers/of/base.c index ebf84e3b56d5..7792266db259 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -17,6 +17,9 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ + +#define pr_fmt(fmt) "OF: " fmt + #include <linux/console.h> #include <linux/ctype.h> #include <linux/cpu.h> @@ -112,6 +115,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); } +/* always return newly allocated name, caller must free after use */ static const char *safe_name(struct kobject *kobj, const char *orig_name) { const char *name = orig_name; @@ -126,9 +130,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name) name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); } - if (name != orig_name) - pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n", + if (name == orig_name) { + name = kstrdup(orig_name, GFP_KERNEL); + } else { + pr_warn("Duplicate name in %s, renamed to \"%s\"\n", kobject_name(kobj), name); + } return name; } @@ -159,6 +166,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) int __of_attach_node_sysfs(struct device_node *np) { const char *name; + struct kobject *parent; struct property *pp; int rc; @@ -171,15 +179,16 @@ int __of_attach_node_sysfs(struct device_node *np) np->kobj.kset = of_kset; if (!np->parent) { /* Nodes without parents are new top level trees */ - rc = kobject_add(&np->kobj, NULL, "%s", - safe_name(&of_kset->kobj, "base")); + name = safe_name(&of_kset->kobj, "base"); + parent = NULL; } else { name = safe_name(&np->parent->kobj, kbasename(np->full_name)); - if (!name || !name[0]) - return -EINVAL; - - rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name); + parent = &np->parent->kobj; } + if (!name) + return -ENOMEM; + rc = kobject_add(&np->kobj, parent, "%s", name); + kfree(name); if (rc) return rc; @@ -198,7 +207,7 @@ void __init of_core_init(void) of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { mutex_unlock(&of_mutex); - pr_err("devicetree: failed to register existing nodes\n"); + pr_err("failed to register existing nodes\n"); return; } for_each_of_allnodes(np) @@ -493,6 +502,28 @@ int of_device_is_compatible(const struct device_node *device, } EXPORT_SYMBOL(of_device_is_compatible); +/** Checks if the device is compatible with any of the entries in + * a NULL terminated array of strings. Returns the best match + * score or 0. + */ +int of_device_compatible_match(struct device_node *device, + const char *const *compat) +{ + unsigned int tmp, score = 0; + + if (!compat) + return 0; + + while (*compat) { + tmp = of_device_is_compatible(device, *compat); + if (tmp > score) + score = tmp; + compat++; + } + + return score; +} + /** * of_machine_is_compatible - Test root of device tree for a given compatible value * @compat: compatible string to look for in root node's compatible property. 
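/*
 * Illustrative usage, not part of the patch: scoring a device node
 * against a NULL-terminated compatible list with the
 * of_device_compatible_match() helper added above. The "acme,..."
 * strings and example_* names are made up.
 */
static const char * const example_compats[] = {
	"acme,widget-v2",	/* preferred, more specific entry */
	"acme,widget",
	NULL,
};

static bool example_node_matches(struct device_node *np)
{
	/* non-zero means at least one entry matched; higher is better */
	return of_device_compatible_match(np, example_compats) > 0;
}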
@@ -1815,6 +1846,12 @@ int __of_remove_property(struct device_node *np, struct property *prop) return 0; } +void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) +{ + sysfs_remove_bin_file(&np->kobj, &prop->attr); + kfree(prop->attr.attr.name); +} + void __of_remove_property_sysfs(struct device_node *np, struct property *prop) { if (!IS_ENABLED(CONFIG_SYSFS)) @@ -1822,7 +1859,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop) /* at early boot, bail here and defer setup to of_init() */ if (of_kset && of_node_is_attached(np)) - sysfs_remove_bin_file(&np->kobj, &prop->attr); + __of_sysfs_remove_bin_file(np, prop); } /** @@ -1895,7 +1932,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop return; if (oldprop) - sysfs_remove_bin_file(&np->kobj, &oldprop->attr); + __of_sysfs_remove_bin_file(np, oldprop); __of_add_property_sysfs(np, newprop); } @@ -2257,8 +2294,8 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, of_node_put(node); if (!port) { - pr_err("%s(): no port node found in %s\n", - __func__, parent->full_name); + pr_err("graph: no port node found in %s\n", + parent->full_name); return NULL; } } else { diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 3033fa3250dc..888fdbc09992 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -6,6 +6,8 @@ * device tree nodes. */ +#define pr_fmt(fmt) "OF: " fmt + #include <linux/of.h> #include <linux/spinlock.h> #include <linux/slab.h> @@ -55,7 +57,7 @@ void __of_detach_node_sysfs(struct device_node *np) /* only remove properties if on sysfs */ if (of_node_is_attached(np)) { for_each_property_of_node(np, pp) - sysfs_remove_bin_file(&np->kobj, &pp->attr); + __of_sysfs_remove_bin_file(np, pp); kobject_del(&np->kobj); } @@ -96,13 +98,13 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) switch (action) { case OF_RECONFIG_ATTACH_NODE: case OF_RECONFIG_DETACH_NODE: - pr_debug("of/notify %-15s %s\n", action_names[action], + pr_debug("notify %-15s %s\n", action_names[action], pr->dn->full_name); break; case OF_RECONFIG_ADD_PROPERTY: case OF_RECONFIG_REMOVE_PROPERTY: case OF_RECONFIG_UPDATE_PROPERTY: - pr_debug("of/notify %-15s %s:%s\n", action_names[action], + pr_debug("notify %-15s %s:%s\n", action_names[action], pr->dn->full_name, pr->prop->name); break; @@ -460,12 +462,12 @@ static void __of_changeset_entry_dump(struct of_changeset_entry *ce) case OF_RECONFIG_ADD_PROPERTY: case OF_RECONFIG_REMOVE_PROPERTY: case OF_RECONFIG_UPDATE_PROPERTY: - pr_debug("of/cset<%p> %-15s %s/%s\n", ce, action_names[ce->action], + pr_debug("cset<%p> %-15s %s/%s\n", ce, action_names[ce->action], ce->np->full_name, ce->prop->name); break; case OF_RECONFIG_ATTACH_NODE: case OF_RECONFIG_DETACH_NODE: - pr_debug("of/cset<%p> %-15s %s\n", ce, action_names[ce->action], + pr_debug("cset<%p> %-15s %s\n", ce, action_names[ce->action], ce->np->full_name); break; } @@ -531,13 +533,13 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop); break; default: - pr_err("%s: invalid devicetree changeset action: %i\n", __func__, + pr_err("invalid devicetree changeset action: %i\n", (int)ce->action); return; } if (ret) - pr_err("%s: notifier error @%s\n", __func__, ce->np->full_name); + pr_err("changeset notifier error @%s\n", ce->np->full_name); } static int __of_changeset_entry_apply(struct of_changeset_entry *ce) @@ -568,8 
+570,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) ret = __of_add_property(ce->np, ce->prop); if (ret) { - pr_err("%s: add_property failed @%s/%s\n", - __func__, ce->np->full_name, + pr_err("changeset: add_property failed @%s/%s\n", + ce->np->full_name, ce->prop->name); break; } @@ -577,8 +579,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); if (ret) { - pr_err("%s: remove_property failed @%s/%s\n", - __func__, ce->np->full_name, + pr_err("changeset: remove_property failed @%s/%s\n", + ce->np->full_name, ce->prop->name); break; } @@ -596,8 +598,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) ret = __of_update_property(ce->np, ce->prop, &old_prop); if (ret) { - pr_err("%s: update_property failed @%s/%s\n", - __func__, ce->np->full_name, + pr_err("changeset: update_property failed @%s/%s\n", + ce->np->full_name, ce->prop->name); break; } @@ -677,24 +679,24 @@ int __of_changeset_apply(struct of_changeset *ocs) int ret; /* perform the rest of the work */ - pr_debug("of_changeset: applying...\n"); + pr_debug("changeset: applying...\n"); list_for_each_entry(ce, &ocs->entries, node) { ret = __of_changeset_entry_apply(ce); if (ret) { - pr_err("%s: Error applying changeset (%d)\n", __func__, ret); + pr_err("Error applying changeset (%d)\n", ret); list_for_each_entry_continue_reverse(ce, &ocs->entries, node) __of_changeset_entry_revert(ce); return ret; } } - pr_debug("of_changeset: applied, emitting notifiers.\n"); + pr_debug("changeset: applied, emitting notifiers.\n"); /* drop the global lock while emitting notifiers */ mutex_unlock(&of_mutex); list_for_each_entry(ce, &ocs->entries, node) __of_changeset_entry_notify(ce, 0); mutex_lock(&of_mutex); - pr_debug("of_changeset: notifiers sent.\n"); + pr_debug("changeset: notifiers sent.\n"); return 0; } @@ -728,24 +730,24 @@ int __of_changeset_revert(struct of_changeset *ocs) struct of_changeset_entry *ce; int ret; - pr_debug("of_changeset: reverting...\n"); + pr_debug("changeset: reverting...\n"); list_for_each_entry_reverse(ce, &ocs->entries, node) { ret = __of_changeset_entry_revert(ce); if (ret) { - pr_err("%s: Error reverting changeset (%d)\n", __func__, ret); + pr_err("Error reverting changeset (%d)\n", ret); list_for_each_entry_continue(ce, &ocs->entries, node) __of_changeset_entry_apply(ce); return ret; } } - pr_debug("of_changeset: reverted, emitting notifiers.\n"); + pr_debug("changeset: reverted, emitting notifiers.\n"); /* drop the global lock while emitting notifiers */ mutex_unlock(&of_mutex); list_for_each_entry_reverse(ce, &ocs->entries, node) __of_changeset_entry_notify(ce, 1); mutex_lock(&of_mutex); - pr_debug("of_changeset: notifiers sent.\n"); + pr_debug("changeset: notifiers sent.\n"); return 0; } @@ -795,10 +797,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, struct of_changeset_entry *ce; ce = kzalloc(sizeof(*ce), GFP_KERNEL); - if (!ce) { - pr_err("%s: Failed to allocate\n", __func__); + if (!ce) return -ENOMEM; - } + /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 0e02947a8a7a..55f1b8391149 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -9,6 +9,8 @@ * version 2 as published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) "OF: fdt:" fmt + #include <linux/crc32.h> #include <linux/kernel.h> #include <linux/initrd.h> @@ -182,14 +184,12 @@ static void populate_properties(const void *blob, val = fdt_getprop_by_offset(blob, cur, &pname, &sz); if (!val) { - pr_warn("%s: Cannot locate property at 0x%x\n", - __func__, cur); + pr_warn("Cannot locate property at 0x%x\n", cur); continue; } if (!pname) { - pr_warn("%s: Cannot find property name at 0x%x\n", - __func__, cur); + pr_warn("Cannot find property name at 0x%x\n", cur); continue; } @@ -439,7 +439,7 @@ static int unflatten_dt_nodes(const void *blob, } if (offset < 0 && offset != -FDT_ERR_NOTFOUND) { - pr_err("%s: Error %d processing FDT\n", __func__, offset); + pr_err("Error %d processing FDT\n", offset); return -EINVAL; } @@ -472,7 +472,8 @@ static int unflatten_dt_nodes(const void *blob, static void *__unflatten_device_tree(const void *blob, struct device_node *dad, struct device_node **mynodes, - void *(*dt_alloc)(u64 size, u64 align)) + void *(*dt_alloc)(u64 size, u64 align), + bool detached) { int size; void *mem; @@ -516,6 +517,11 @@ static void *__unflatten_device_tree(const void *blob, pr_warning("End of tree marker overwritten: %08x\n", be32_to_cpup(mem + size)); + if (detached) { + of_node_set_flag(*mynodes, OF_DETACHED); + pr_debug("unflattened tree is detached\n"); + } + pr_debug(" <- unflatten_device_tree()\n"); return mem; } @@ -548,7 +554,8 @@ void *of_fdt_unflatten_tree(const unsigned long *blob, void *mem; mutex_lock(&of_fdt_unflatten_mutex); - mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc); + mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc, + true); mutex_unlock(&of_fdt_unflatten_mutex); return mem; @@ -1237,7 +1244,7 @@ bool __init early_init_dt_scan(void *params) void __init unflatten_device_tree(void) { __unflatten_device_tree(initial_boot_params, NULL, &of_root, - early_init_dt_alloc_memory_arch); + early_init_dt_alloc_memory_arch, false); /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ of_alias_scan(early_init_dt_alloc_memory_arch); @@ -1294,7 +1301,7 @@ static int __init of_fdt_raw_init(void) if (of_fdt_crc32 != crc32_be(~0, initial_boot_params, fdt_totalsize(initial_boot_params))) { - pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n"); + pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n"); return 0; } of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params); diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c index dca8f9b93745..843a542dac7d 100644 --- a/drivers/of/fdt_address.c +++ b/drivers/of/fdt_address.c @@ -12,6 +12,9 @@ * the Free Software Foundation; either version 2, or (at your option) * any later version. 
*/ + +#define pr_fmt(fmt) "OF: fdt: " fmt + #include <linux/kernel.h> #include <linux/libfdt.h> #include <linux/of.h> @@ -30,7 +33,7 @@ static void __init of_dump_addr(const char *s, const __be32 *addr, int na) pr_debug("%s", s); while(na--) pr_cont(" %08x", *(addr++)); - pr_debug("\n"); + pr_cont("\n"); } #else static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { } @@ -77,7 +80,7 @@ static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range, s = of_read_number(range + na + pna, ns); da = of_read_number(addr, na); - pr_debug("FDT: default map, cp=%llx, s=%llx, da=%llx\n", + pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da); if (da < cp || da >= (cp + s)) @@ -123,11 +126,11 @@ static int __init fdt_translate_one(const void *blob, int parent, if (rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); - pr_debug("FDT: empty ranges, 1:1 translation\n"); + pr_debug("empty ranges, 1:1 translation\n"); goto finish; } - pr_debug("FDT: walking ranges...\n"); + pr_debug("walking ranges...\n"); /* Now walk through the ranges */ rlen /= 4; @@ -138,14 +141,14 @@ static int __init fdt_translate_one(const void *blob, int parent, break; } if (offset == OF_BAD_ADDR) { - pr_debug("FDT: not found !\n"); + pr_debug("not found !\n"); return 1; } memcpy(addr, ranges + na, 4 * pna); finish: - of_dump_addr("FDT: parent translation for:", addr, pna); - pr_debug("FDT: with offset: %llx\n", offset); + of_dump_addr("parent translation for:", addr, pna); + pr_debug("with offset: %llx\n", offset); /* Translate it into parent bus space */ return pbus->translate(addr, offset, pna); @@ -170,12 +173,12 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset) int na, ns, pna, pns; u64 result = OF_BAD_ADDR; - pr_debug("FDT: ** translation for device %s **\n", + pr_debug("** translation for device %s **\n", fdt_get_name(blob, node_offset, NULL)); reg = fdt_getprop(blob, node_offset, "reg", &len); if (!reg) { - pr_err("FDT: warning: device tree node '%s' has no address.\n", + pr_err("warning: device tree node '%s' has no address.\n", fdt_get_name(blob, node_offset, NULL)); goto bail; } @@ -189,15 +192,15 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset) /* Cound address cells & copy address locally */ bus->count_cells(blob, parent, &na, &ns); if (!OF_CHECK_COUNTS(na, ns)) { - pr_err("FDT: Bad cell count for %s\n", + pr_err("Bad cell count for %s\n", fdt_get_name(blob, node_offset, NULL)); goto bail; } memcpy(addr, reg, na * 4); - pr_debug("FDT: bus (na=%d, ns=%d) on %s\n", + pr_debug("bus (na=%d, ns=%d) on %s\n", na, ns, fdt_get_name(blob, parent, NULL)); - of_dump_addr("OF: translating address:", addr, na); + of_dump_addr("translating address:", addr, na); /* Translate */ for (;;) { @@ -207,7 +210,7 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset) /* If root, we have finished */ if (parent < 0) { - pr_debug("FDT: reached root node\n"); + pr_debug("reached root node\n"); result = of_read_number(addr, na); break; } @@ -216,12 +219,12 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset) pbus = &of_busses[0]; pbus->count_cells(blob, parent, &pna, &pns); if (!OF_CHECK_COUNTS(pna, pns)) { - pr_err("FDT: Bad cell count for %s\n", + pr_err("Bad cell count for %s\n", fdt_get_name(blob, node_offset, NULL)); break; } - pr_debug("FDT: parent bus (na=%d, ns=%d) on %s\n", + pr_debug("parent bus (na=%d, ns=%d) on %s\n", pna, pns, fdt_get_name(blob, parent, NULL)); /* 
Apply bus translation */ @@ -234,7 +237,7 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset) ns = pns; bus = pbus; - of_dump_addr("FDT: one level translation:", addr, na); + of_dump_addr("one level translation:", addr, na); } bail: return result; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 6ec743faabe8..89a71c6074fc 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -18,6 +18,8 @@ * driver. */ +#define pr_fmt(fmt) "OF: " fmt + #include <linux/device.h> #include <linux/errno.h> #include <linux/list.h> @@ -557,6 +559,8 @@ void __init of_irq_init(const struct of_device_id *matches) * its children can get processed in a subsequent pass. */ list_add_tail(&desc->list, &intc_parent_list); + + of_node_set_flag(desc->dev, OF_POPULATED); } /* Get the next pending parent that might have children */ diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index 13f4fed38048..589b30c68e14 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) "OF: PCI: " fmt + #include <linux/kernel.h> #include <linux/export.h> #include <linux/of.h> @@ -138,7 +140,7 @@ void of_pci_check_probe_only(void) else pci_clear_flags(PCI_PROBE_ONLY); - pr_info("PCI: PROBE_ONLY %sabled\n", val ? "en" : "dis"); + pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis"); } EXPORT_SYMBOL_GPL(of_pci_check_probe_only); @@ -181,7 +183,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, if (!bus_range) return -ENOMEM; - pr_info("PCI host bridge %s ranges:\n", dev->full_name); + pr_info("host bridge %s ranges:\n", dev->full_name); err = of_pci_parse_bus_range(dev, bus_range); if (err) { diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 829469faeb23..18bbb4517e25 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -83,6 +83,9 @@ extern int __of_attach_node_sysfs(struct device_node *np); extern void __of_detach_node(struct device_node *np); extern void __of_detach_node_sysfs(struct device_node *np); +extern void __of_sysfs_remove_bin_file(struct device_node *np, + struct property *prop); + /* iterators for transactions, used for overlays */ /* forward iterator */ #define for_each_transaction_entry(_oft, _te) \ diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 06af99f64ad8..366d8c3c7989 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -13,6 +13,8 @@ * License or (at your optional) any later version of the license. 
*/ +#define pr_fmt(fmt) "OF: reserved mem: " fmt + #include <linux/err.h> #include <linux/of.h> #include <linux/of_fdt.h> @@ -76,7 +78,7 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname, struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) { - pr_err("Reserved memory: not enough space all defined regions.\n"); + pr_err("not enough space all defined regions.\n"); return; } @@ -109,8 +111,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, return -EINVAL; if (len != dt_root_size_cells * sizeof(__be32)) { - pr_err("Reserved memory: invalid size property in '%s' node.\n", - uname); + pr_err("invalid size property in '%s' node.\n", uname); return -EINVAL; } size = dt_mem_next_cell(dt_root_size_cells, &prop); @@ -120,7 +121,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, prop = of_get_flat_dt_prop(node, "alignment", &len); if (prop) { if (len != dt_root_addr_cells * sizeof(__be32)) { - pr_err("Reserved memory: invalid alignment property in '%s' node.\n", + pr_err("invalid alignment property in '%s' node.\n", uname); return -EINVAL; } @@ -142,7 +143,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, if (prop) { if (len % t_len != 0) { - pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n", + pr_err("invalid alloc-ranges property in '%s', skipping node.\n", uname); return -EINVAL; } @@ -157,7 +158,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, ret = early_init_dt_alloc_reserved_memory_arch(size, align, start, end, nomap, &base); if (ret == 0) { - pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", + pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n", uname, &base, (unsigned long)size / SZ_1M); break; @@ -169,13 +170,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, ret = early_init_dt_alloc_reserved_memory_arch(size, align, 0, 0, nomap, &base); if (ret == 0) - pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", + pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n", uname, &base, (unsigned long)size / SZ_1M); } if (base == 0) { - pr_info("Reserved memory: failed to allocate memory for node '%s'\n", - uname); + pr_info("failed to allocate memory for node '%s'\n", uname); return -ENOMEM; } @@ -204,7 +204,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) continue; if (initfn(rmem) == 0) { - pr_info("Reserved memory: initialized node %s, compatible id %s\n", + pr_info("initialized node %s, compatible id %s\n", rmem->name, compat); return 0; } @@ -246,7 +246,7 @@ static void __init __rmem_check_for_overlap(void) this_end = this->base + this->size; next_end = next->base + next->size; - pr_err("Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", + pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", this->name, &this->base, &this_end, next->name, &next->base, &next_end); } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 82250815e9a5..318dbb51e7a2 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -8,7 +8,9 @@ * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. 
*/ -#undef DEBUG + +#define pr_fmt(fmt) "OF: overlay: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> @@ -137,8 +139,8 @@ static int of_overlay_apply_one(struct of_overlay *ov, for_each_property_of_node(overlay, prop) { ret = of_overlay_apply_single_property(ov, target, prop); if (ret) { - pr_err("%s: Failed to apply prop @%s/%s\n", - __func__, target->full_name, prop->name); + pr_err("Failed to apply prop @%s/%s\n", + target->full_name, prop->name); return ret; } } @@ -146,9 +148,8 @@ static int of_overlay_apply_one(struct of_overlay *ov, for_each_child_of_node(overlay, child) { ret = of_overlay_apply_single_device_node(ov, target, child); if (ret != 0) { - pr_err("%s: Failed to apply single node @%s/%s\n", - __func__, target->full_name, - child->name); + pr_err("Failed to apply single node @%s/%s\n", + target->full_name, child->name); of_node_put(child); return ret; } @@ -176,8 +177,7 @@ static int of_overlay_apply(struct of_overlay *ov) err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay); if (err != 0) { - pr_err("%s: overlay failed '%s'\n", - __func__, ovinfo->target->full_name); + pr_err("apply failed '%s'\n", ovinfo->target->full_name); return err; } } @@ -208,7 +208,7 @@ static struct device_node *find_target_node(struct device_node *info_node) if (ret == 0) return of_find_node_by_path(path); - pr_err("%s: Failed to find target for node %p (%s)\n", __func__, + pr_err("Failed to find target for node %p (%s)\n", info_node, info_node->name); return NULL; @@ -355,8 +355,6 @@ int of_overlay_create(struct device_node *tree) id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL); if (id < 0) { - pr_err("%s: idr_alloc() failed for tree@%s\n", - __func__, tree->full_name); err = id; goto err_destroy_trans; } @@ -365,26 +363,21 @@ int of_overlay_create(struct device_node *tree) /* build the overlay info structures */ err = of_build_overlay_info(ov, tree); if (err) { - pr_err("%s: of_build_overlay_info() failed for tree@%s\n", - __func__, tree->full_name); + pr_err("of_build_overlay_info() failed for tree@%s\n", + tree->full_name); goto err_free_idr; } /* apply the overlay */ err = of_overlay_apply(ov); - if (err) { - pr_err("%s: of_overlay_apply() failed for tree@%s\n", - __func__, tree->full_name); + if (err) goto err_abort_trans; - } /* apply the changeset */ err = __of_changeset_apply(&ov->cset); - if (err) { - pr_err("%s: __of_changeset_apply() failed for tree@%s\n", - __func__, tree->full_name); + if (err) goto err_revert_overlay; - } + /* add to the tail of the overlay list */ list_add_tail(&ov->node, &ov_list); @@ -469,8 +462,7 @@ static int overlay_removal_is_ok(struct of_overlay *ov) list_for_each_entry(ce, &ov->cset.entries, node) { if (!overlay_is_topmost(ov, ce->np)) { - pr_err("%s: overlay #%d is not topmost\n", - __func__, ov->id); + pr_err("overlay #%d is not topmost\n", ov->id); return 0; } } @@ -496,16 +488,13 @@ int of_overlay_destroy(int id) ov = idr_find(&ov_idr, id); if (ov == NULL) { err = -ENODEV; - pr_err("%s: Could not find overlay #%d\n", - __func__, id); + pr_err("destroy: Could not find overlay #%d\n", id); goto out; } /* check whether the overlay is safe to remove */ if (!overlay_removal_is_ok(ov)) { err = -EBUSY; - pr_err("%s: removal check failed for overlay #%d\n", - __func__, id); goto out; } diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 16e8daffac06..765390e3ed8d 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -11,6 +11,9 @@ * 2 of the License, or (at your option) any later version. 
* */ + +#define pr_fmt(fmt) "OF: " fmt + #include <linux/errno.h> #include <linux/module.h> #include <linux/amba/bus.h> @@ -31,7 +34,6 @@ const struct of_device_id of_default_bus_match_table[] = { #endif /* CONFIG_ARM_AMBA */ {} /* Empty terminated list */ }; -EXPORT_SYMBOL(of_default_bus_match_table); static int of_dev_node_match(struct device *dev, void *data) { @@ -234,11 +236,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node, return NULL; dev = amba_device_alloc(NULL, 0, 0); - if (!dev) { - pr_err("%s(): amba_device_alloc() failed for %s\n", - __func__, node->full_name); + if (!dev) goto err_clear_flag; - } /* setup generic device info */ dev->dev.of_node = of_node_get(node); @@ -261,15 +260,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node, ret = of_address_to_resource(node, 0, &dev->res); if (ret) { - pr_err("%s(): of_address_to_resource() failed (%d) for %s\n", - __func__, ret, node->full_name); + pr_err("amba: of_address_to_resource() failed (%d) for %s\n", + ret, node->full_name); goto err_free; } ret = amba_device_add(dev, &iomem_resource); if (ret) { - pr_err("%s(): amba_device_add() failed (%d) for %s\n", - __func__, ret, node->full_name); + pr_err("amba_device_add() failed (%d) for %s\n", + ret, node->full_name); goto err_free; } @@ -363,6 +362,12 @@ static int of_platform_bus_create(struct device_node *bus, return 0; } + if (of_node_check_flag(bus, OF_POPULATED_BUS)) { + pr_debug("%s() - skipping %s, already populated\n", + __func__, bus->full_name); + return 0; + } + auxdata = of_dev_lookup(lookup, bus); if (auxdata) { bus_id = auxdata->name; @@ -414,7 +419,7 @@ int of_platform_bus_probe(struct device_node *root, if (!root) return -EINVAL; - pr_debug("of_platform_bus_probe()\n"); + pr_debug("%s()\n", __func__); pr_debug(" starting at: %s\n", root->full_name); /* Do a self check of bus type, if there's a match, create children */ @@ -466,6 +471,9 @@ int of_platform_populate(struct device_node *root, if (!root) return -EINVAL; + pr_debug("%s()\n", __func__); + pr_debug(" starting at: %s\n", root->full_name); + for_each_child_of_node(root, child) { rc = of_platform_bus_create(child, matches, lookup, parent, true); if (rc) { @@ -489,6 +497,15 @@ int of_platform_default_populate(struct device_node *root, } EXPORT_SYMBOL_GPL(of_platform_default_populate); +static int __init of_platform_default_populate_init(void) +{ + if (of_have_populated_dt()) + of_platform_default_populate(NULL, NULL, NULL); + + return 0; +} +arch_initcall_sync(of_platform_default_populate_init); + static int of_platform_device_destroy(struct device *dev, void *data) { /* Do not touch devices not populated from the device tree */ diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index d313d492f278..46325d6394cf 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -9,6 +9,8 @@ * version 2 as published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) "OF: resolver: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> @@ -313,6 +315,11 @@ int of_resolve_phandles(struct device_node *resolve) phandle phandle, phandle_delta; int err; + if (!resolve) + pr_err("%s: null node\n", __func__); + if (resolve && !of_node_check_flag(resolve, OF_DETACHED)) + pr_err("%s: node %s not detached\n", __func__, + resolve->full_name); /* the resolve node must exist, and be detached */ if (!resolve || !of_node_check_flag(resolve, OF_DETACHED)) return -EINVAL; @@ -369,6 +376,7 @@ int of_resolve_phandles(struct device_node *resolve) /* we need to fixup, but no root symbols... */ if (!root_sym) { + pr_err("%s: no symbols in root of device tree.\n", __func__); err = -EINVAL; goto out; } diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index f34ed9310323..53c83d66eb7e 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -771,7 +771,7 @@ static void __init of_unittest_platform_populate(void) }; np = of_find_node_by_path("/testcase-data"); - of_platform_populate(np, of_default_bus_match_table, NULL, NULL); + of_platform_default_populate(np, NULL, NULL); /* Test that a missing irq domain returns -EPROBE_DEFER */ np = of_find_node_by_path("/testcase-data/testcase-device1"); @@ -1871,8 +1871,7 @@ static void __init of_unittest_overlay(void) goto out; } - ret = of_platform_populate(bus_np, of_default_bus_match_table, - NULL, NULL); + ret = of_platform_default_populate(bus_np, NULL, NULL); if (ret != 0) { unittest(0, "could not populate bus @ \"%s\"\n", bus_path); goto out; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 56389be5d08b..67f9916ff14d 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -25,7 +25,7 @@ config PCI_MSI If you don't know what to do here, say Y. 
config PCI_MSI_IRQ_DOMAIN - bool + def_bool ARM || ARM64 || X86 depends on PCI_MSI select GENERIC_MSI_IRQ_DOMAIN diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index dd7cdbee8029..c288e5a52575 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -91,6 +91,35 @@ void pci_bus_remove_resources(struct pci_bus *bus) } } +int devm_request_pci_bus_resources(struct device *dev, + struct list_head *resources) +{ + struct resource_entry *win; + struct resource *parent, *res; + int err; + + resource_list_for_each_entry(win, resources) { + res = win->res; + switch (resource_type(res)) { + case IORESOURCE_IO: + parent = &ioport_resource; + break; + case IORESOURCE_MEM: + parent = &iomem_resource; + break; + default: + continue; + } + + err = devm_request_resource(dev, parent, res); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources); + static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT static struct pci_bus_region pci_64_bit = {0, @@ -291,6 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev) pci_fixup_device(pci_fixup_final, dev); pci_create_sysfs_dev_files(dev); pci_proc_attach_device(dev); + pci_bridge_d3_device_changed(dev); dev->match_driver = true; retval = device_attach(&dev->dev); @@ -397,4 +427,3 @@ void pci_bus_put(struct pci_bus *bus) put_device(&bus->dev); } EXPORT_SYMBOL(pci_bus_put); - diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index f9832ad8efe2..66e0d718472f 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -19,10 +19,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/pci-ecam.h> #include <linux/slab.h> -#include "ecam.h" - /* * On 64-bit systems, we do a single ioremap for the whole config space * since we have enough virtual address range available. On 32-bit, we @@ -52,6 +51,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev, if (!cfg) return ERR_PTR(-ENOMEM); + cfg->parent = dev; cfg->ops = ops; cfg->busr.start = busr->start; cfg->busr.end = busr->end; @@ -95,7 +95,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev, } if (ops->init) { - err = ops->init(dev, cfg); + err = ops->init(cfg); if (err) goto err_exit; } diff --git a/drivers/pci/ecam.h b/drivers/pci/ecam.h deleted file mode 100644 index 9878bebd45bb..000000000000 --- a/drivers/pci/ecam.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2016 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation (the "GPL"). - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 (GPLv2) for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 (GPLv2) along with this source code. - */ -#ifndef DRIVERS_PCI_ECAM_H -#define DRIVERS_PCI_ECAM_H - -#include <linux/kernel.h> -#include <linux/platform_device.h> - -/* - * struct to hold pci ops and bus shift of the config window - * for a PCI controller. - */ -struct pci_config_window; -struct pci_ecam_ops { - unsigned int bus_shift; - struct pci_ops pci_ops; - int (*init)(struct device *, - struct pci_config_window *); -}; - -/* - * struct to hold the mappings of a config space window. 
This - * is expected to be used as sysdata for PCI controllers that - * use ECAM. - */ -struct pci_config_window { - struct resource res; - struct resource busr; - void *priv; - struct pci_ecam_ops *ops; - union { - void __iomem *win; /* 64-bit single mapping */ - void __iomem **winp; /* 32-bit per-bus mapping */ - }; -}; - -/* create and free pci_config_window */ -struct pci_config_window *pci_ecam_create(struct device *dev, - struct resource *cfgres, struct resource *busr, - struct pci_ecam_ops *ops); -void pci_ecam_free(struct pci_config_window *cfg); - -/* map_bus when ->sysdata is an instance of pci_config_window */ -void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, - int where); -/* default ECAM ops */ -extern struct pci_ecam_ops pci_generic_ecam_ops; - -#ifdef CONFIG_PCI_HOST_GENERIC -/* for DT-based PCI controllers that support ECAM */ -int pci_host_common_probe(struct platform_device *pdev, - struct pci_ecam_ops *ops); -#endif -#endif diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 5d2374e4ee7f..9b485d873b0d 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -3,8 +3,9 @@ menu "PCI host controller drivers" config PCI_DRA7XX bool "TI DRA7xx PCIe controller" - select PCIE_DW depends on OF && HAS_IOMEM && TI_PIPE3 + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW help Enables support for the PCIe controller in the DRA7xx SoC. There are two instances of PCIe controller in DRA7xx. This controller can @@ -16,11 +17,20 @@ config PCI_MVEBU depends on ARM depends on OF +config PCI_AARDVARK + bool "Aardvark PCIe controller" + depends on ARCH_MVEBU && ARM64 + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + help + Add support for Aardvark 64bit PCIe Host Controller. This + controller is part of the South Bridge of the Marvell Armada + 3700 SoC. config PCIE_XILINX_NWL bool "NWL PCIe Core" depends on ARCH_ZYNQMP - select PCI_MSI_IRQ_DOMAIN if PCI_MSI + depends on PCI_MSI_IRQ_DOMAIN help Say 'Y' here if you want kernel support for Xilinx NWL PCIe controller. The controller can act as Root Port @@ -29,6 +39,7 @@ config PCIE_DW_PLAT bool "Platform bus based DesignWare PCIe Controller" + depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW ---help--- This selects the DesignWare PCIe controller support. Select this if @@ -40,16 +51,19 @@ config PCIE_DW bool + depends on PCI_MSI_IRQ_DOMAIN config PCI_EXYNOS bool "Samsung Exynos PCIe controller" depends on SOC_EXYNOS5440 + depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW config PCI_IMX6 bool "Freescale i.MX6 PCIe controller" depends on SOC_IMX6Q + depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW @@ -72,8 +86,7 @@ config PCI_RCAR_GEN2 config PCIE_RCAR bool "Renesas R-Car PCIe controller" depends on ARCH_RENESAS || (ARM && COMPILE_TEST) - select PCI_MSI - select PCI_MSI_IRQ_DOMAIN + depends on PCI_MSI_IRQ_DOMAIN help Say Y here if you want PCIe controller support on R-Car SoCs. @@ -85,6 +98,7 @@ config PCI_HOST_GENERIC bool "Generic PCI host controller" depends on (ARM || ARM64) && OF select PCI_HOST_COMMON + select IRQ_DOMAIN help Say Y here if you want to support a simple generic PCI host controller, such as the one emulated by kvmtool. 
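/*
 * Illustrative sketch, not part of the patch: the offset math behind
 * pci_ecam_map_bus() from the ECAM declarations moved above. With the
 * standard ECAM bus_shift of 20 each function gets 4K of config space;
 * the helper name is made up for illustration.
 */
static unsigned int example_ecam_offset(unsigned int busn,
					unsigned int devfn, int where,
					unsigned int bus_shift)
{
	/* bus number, then device/function, then register offset */
	return (busn << bus_shift) | (devfn << (bus_shift - 8)) | where;
}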
@@ -92,6 +106,7 @@ config PCI_HOST_GENERIC config PCIE_SPEAR13XX bool "STMicroelectronics SPEAr PCIe controller" depends on ARCH_SPEAR13XX + depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW help @@ -100,6 +115,7 @@ config PCIE_SPEAR13XX config PCI_KEYSTONE bool "TI Keystone PCIe controller" depends on ARCH_KEYSTONE + depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW select PCIEPORTBUS help @@ -120,7 +136,6 @@ config PCI_XGENE depends on ARCH_XGENE depends on OF select PCIEPORTBUS - select PCI_MSI_IRQ_DOMAIN if PCI_MSI help Say Y here if you want internal PCI support on APM X-Gene SoC. There are 5 internal PCIe ports available. Each port is GEN3 capable @@ -128,7 +143,8 @@ config PCI_XGENE config PCI_XGENE_MSI bool "X-Gene v1 PCIe MSI feature" - depends on PCI_XGENE && PCI_MSI + depends on PCI_XGENE + depends on PCI_MSI_IRQ_DOMAIN default y help Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. @@ -137,6 +153,7 @@ config PCI_XGENE_MSI config PCI_LAYERSCAPE bool "Freescale Layerscape PCIe controller" depends on OF && (ARM || ARCH_LAYERSCAPE) + depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW select MFD_SYSCON help @@ -177,8 +194,7 @@ config PCIE_IPROC_BCMA config PCIE_IPROC_MSI bool "Broadcom iProc PCIe MSI support" depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA - depends on PCI_MSI - select PCI_MSI_IRQ_DOMAIN + depends on PCI_MSI_IRQ_DOMAIN default ARCH_BCM_IPROC help Say Y here if you want to enable MSI support for Broadcom's iProc @@ -195,8 +211,8 @@ config PCIE_ALTERA config PCIE_ALTERA_MSI bool "Altera PCIe MSI feature" - depends on PCIE_ALTERA && PCI_MSI - select PCI_MSI_IRQ_DOMAIN + depends on PCIE_ALTERA + depends on PCI_MSI_IRQ_DOMAIN help Say Y here if you want PCIe MSI support for the Altera FPGA. This MSI driver supports Altera MSI to GIC controller IP. @@ -204,6 +220,7 @@ config PCIE_ALTERA_MSI config PCI_HISI depends on OF && ARM64 bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" + depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW help @@ -213,6 +230,7 @@ config PCI_HISI config PCIE_QCOM bool "Qualcomm PCIe controller" depends on ARCH_QCOM && OF + depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW select PCIEPORTBUS help @@ -237,6 +255,7 @@ config PCI_HOST_THUNDER_ECAM config PCIE_ARMADA_8K bool "Marvell Armada-8K PCIe controller" depends on ARCH_MVEBU + depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW select PCIEPORTBUS help @@ -245,4 +264,14 @@ config PCIE_ARMADA_8K Designware hardware and therefore the driver re-uses the Designware core functions to implement the driver. +config PCIE_ARTPEC6 + bool "Axis ARTPEC-6 PCIe controller" + depends on MACH_ARTPEC6 + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW + select PCIEPORTBUS + help + Say Y here to enable PCIe controller support on Axis ARTPEC-6 + SoCs. This PCIe controller uses the DesignWare core. 
+ endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 9c8698e89e96..88434101e4c4 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o +obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o @@ -29,3 +30,4 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o +obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c new file mode 100644 index 000000000000..ef9893fa3176 --- /dev/null +++ b/drivers/pci/host/pci-aardvark.c @@ -0,0 +1,1001 @@ +/* + * Driver for the Aardvark PCIe controller, used on Marvell Armada + * 3700. + * + * Copyright (C) 2016 Marvell + * + * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> + +/* PCIe core registers */ +#define PCIE_CORE_CMD_STATUS_REG 0x4 +#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) +#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) +#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) +#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 +#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 +#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 +#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) +#define PCIE_CORE_LINK_TRAINING BIT(5) +#define PCIE_CORE_LINK_WIDTH_SHIFT 20 +#define PCIE_CORE_ERR_CAPCTL_REG 0x118 +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) + +/* PIO registers base address and register offsets */ +#define PIO_BASE_ADDR 0x4000 +#define PIO_CTRL (PIO_BASE_ADDR + 0x0) +#define PIO_CTRL_TYPE_MASK GENMASK(3, 0) +#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) +#define PIO_STAT (PIO_BASE_ADDR + 0x4) +#define PIO_COMPLETION_STATUS_SHIFT 7 +#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) +#define PIO_COMPLETION_STATUS_OK 0 +#define PIO_COMPLETION_STATUS_UR 1 +#define PIO_COMPLETION_STATUS_CRS 2 +#define PIO_COMPLETION_STATUS_CA 4 +#define PIO_NON_POSTED_REQ BIT(0) +#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) +#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) +#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) +#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) +#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) +#define PIO_START (PIO_BASE_ADDR + 0x1c) +#define PIO_ISR (PIO_BASE_ADDR + 0x20) +#define PIO_ISRM (PIO_BASE_ADDR + 0x24) + +/* Aardvark Control registers */ +#define CONTROL_BASE_ADDR 0x4800 +#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) +#define PCIE_GEN_SEL_MSK 0x3 
+#define PCIE_GEN_SEL_SHIFT 0x0 +#define SPEED_GEN_1 0 +#define SPEED_GEN_2 1 +#define SPEED_GEN_3 2 +#define IS_RC_MSK 1 +#define IS_RC_SHIFT 2 +#define LANE_CNT_MSK 0x18 +#define LANE_CNT_SHIFT 0x3 +#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) +#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) +#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) +#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) +#define LINK_TRAINING_EN BIT(6) +#define LEGACY_INTA BIT(28) +#define LEGACY_INTB BIT(29) +#define LEGACY_INTC BIT(30) +#define LEGACY_INTD BIT(31) +#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) +#define HOT_RESET_GEN BIT(0) +#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) +#define PCIE_CORE_CTRL2_RESERVED 0x7 +#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) +#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) +#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) +#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) +#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) +#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) +#define PCIE_ISR0_MSI_INT_PENDING BIT(24) +#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) +#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) +#define PCIE_ISR0_ALL_MASK GENMASK(26, 0) +#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) +#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) +#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) +#define PCIE_ISR1_FLUSH BIT(5) +#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) +#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) +#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) +#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) +#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) +#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) + +/* PCIe window configuration */ +#define OB_WIN_BASE_ADDR 0x4c00 +#define OB_WIN_BLOCK_SIZE 0x20 +#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ + OB_WIN_BLOCK_SIZE * (win) + \ + (offset)) +#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) +#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) +#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) +#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) +#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) +#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) +#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) + +/* PCIe window types */ +#define OB_PCIE_MEM 0x0 +#define OB_PCIE_IO 0x4 + +/* LMI registers base address and register offsets */ +#define LMI_BASE_ADDR 0x6000 +#define CFG_REG (LMI_BASE_ADDR + 0x0) +#define LTSSM_SHIFT 24 +#define LTSSM_MASK 0x3f +#define LTSSM_L0 0x10 +#define RC_BAR_CONFIG 0x300 + +/* PCIe core controller registers */ +#define CTRL_CORE_BASE_ADDR 0x18000 +#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) +#define CTRL_MODE_SHIFT 0x0 +#define CTRL_MODE_MASK 0x1 +#define PCIE_CORE_MODE_DIRECT 0x0 +#define PCIE_CORE_MODE_COMMAND 0x1 + +/* PCIe Central Interrupts Registers */ +#define CENTRAL_INT_BASE_ADDR 0x1b000 +#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) +#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4) +#define PCIE_IRQ_CMDQ_INT BIT(0) +#define PCIE_IRQ_MSI_STATUS_INT BIT(1) +#define PCIE_IRQ_CMD_SENT_DONE BIT(3) +#define PCIE_IRQ_DMA_INT BIT(4) +#define PCIE_IRQ_IB_DXFERDONE BIT(5) +#define PCIE_IRQ_OB_DXFERDONE BIT(6) +#define PCIE_IRQ_OB_RXFERDONE BIT(7) +#define PCIE_IRQ_COMPQ_INT BIT(12) +#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) +#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) +#define PCIE_IRQ_CORE_INT BIT(16) +#define PCIE_IRQ_CORE_INT_PIO BIT(17) +#define PCIE_IRQ_DPMU_INT BIT(18) 
+#define PCIE_IRQ_PCIE_MIS_INT BIT(19) +#define PCIE_IRQ_MSI_INT1_DET BIT(20) +#define PCIE_IRQ_MSI_INT2_DET BIT(21) +#define PCIE_IRQ_RC_DBELL_DET BIT(22) +#define PCIE_IRQ_EP_STATUS BIT(23) +#define PCIE_IRQ_ALL_MASK 0xfff0fb +#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT + +/* Transaction types */ +#define PCIE_CONFIG_RD_TYPE0 0x8 +#define PCIE_CONFIG_RD_TYPE1 0x9 +#define PCIE_CONFIG_WR_TYPE0 0xa +#define PCIE_CONFIG_WR_TYPE1 0xb + +/* PCI_BDF shifts 8bit, so we need extra 4bit shift */ +#define PCIE_BDF(dev) (dev << 4) +#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) +#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) +#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) +#define PCIE_CONF_REG(reg) ((reg) & 0xffc) +#define PCIE_CONF_ADDR(bus, devfn, where) \ + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) + +#define PIO_TIMEOUT_MS 1 + +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +#define LEGACY_IRQ_NUM 4 +#define MSI_IRQ_NUM 32 + +struct advk_pcie { + struct platform_device *pdev; + void __iomem *base; + struct list_head resources; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + struct msi_controller msi; + struct irq_domain *msi_domain; + struct irq_chip msi_irq_chip; + DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM); + struct mutex msi_used_lock; + u16 msi_msg; + int root_bus_nr; +}; + +static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) +{ + writel(val, pcie->base + reg); +} + +static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) +{ + return readl(pcie->base + reg); +} + +static int advk_pcie_link_up(struct advk_pcie *pcie) +{ + u32 val, ltssm_state; + + val = advk_readl(pcie, CFG_REG); + ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; + return ltssm_state >= LTSSM_L0; +} + +static int advk_pcie_wait_for_link(struct advk_pcie *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (advk_pcie_link_up(pcie)) { + dev_info(&pcie->pdev->dev, "link up\n"); + return 0; + } + + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(&pcie->pdev->dev, "link never came up\n"); + + return -ETIMEDOUT; +} + +/* + * Set PCIe address window register which could be used for memory + * mapping. 
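+ * Each of the eight outbound windows is described by MATCH/MASK/REMAP
+ * low/high register pairs plus an ACTIONS word that selects the target
+ * transaction type (OB_PCIE_MEM or OB_PCIE_IO); the final write below
+ * re-posts MATCH_LS with bit 0 set, which arms the window.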
+ */ +static void advk_pcie_set_ob_win(struct advk_pcie *pcie, + u32 win_num, u32 match_ms, + u32 match_ls, u32 mask_ms, + u32 mask_ls, u32 remap_ms, + u32 remap_ls, u32 action) +{ + advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); + advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); + advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); + advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); + advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); + advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); + advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); + advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); +} + +static void advk_pcie_setup_hw(struct advk_pcie *pcie) +{ + u32 reg; + int i; + + /* Point PCIe unit MBUS decode windows to DRAM space */ + for (i = 0; i < 8; i++) + advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); + + /* Set to Direct mode */ + reg = advk_readl(pcie, CTRL_CONFIG_REG); + reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); + reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); + advk_writel(pcie, reg, CTRL_CONFIG_REG); + + /* Set PCI global control register to RC mode */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= (IS_RC_MSK << IS_RC_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set Advanced Error Capabilities and Control PF0 register */ + reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | + PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; + advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); + + /* Set PCIe Device Control and Status 1 PF0 register */ + reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | + (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | + PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; + advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); + + /* Program PCIe Control 2 to disable strict ordering */ + reg = PCIE_CORE_CTRL2_RESERVED | + PCIE_CORE_CTRL2_TD_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Set GEN2 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~PCIE_GEN_SEL_MSK; + reg |= SPEED_GEN_2; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set lane X1 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~LANE_CNT_MSK; + reg |= LANE_COUNT_1; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable link training */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= LINK_TRAINING_EN; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable MSI */ + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_MSI_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Clear all interrupts */ + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + + /* Disable All ISR0/1 Sources */ + reg = PCIE_ISR0_ALL_MASK; + reg &= ~PCIE_ISR0_MSI_INT_PENDING; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSI's */ + advk_writel(pcie, 0, PCIE_MSI_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); + + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Bypass the address window mapping for PIO */ + reg = advk_readl(pcie, PIO_CTRL); 
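+	/*
+	 * Setting ADDR_WIN_DISABLE makes PIO requests carry the raw config
+	 * address written to PIO_ADDR_LS/MS instead of being translated
+	 * through the outbound windows configured above.
+	 */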
+ reg |= PIO_CTRL_ADDR_WIN_DISABLE; + advk_writel(pcie, reg, PIO_CTRL); + + /* Start link training */ + reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG); + reg |= PCIE_CORE_LINK_TRAINING; + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + advk_pcie_wait_for_link(pcie); + + reg = PCIE_CORE_LINK_L0S_ENTRY | + (1 << PCIE_CORE_LINK_WIDTH_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | + PCIE_CORE_CMD_MEM_IO_REQ_EN; + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); +} + +static void advk_pcie_check_pio_status(struct advk_pcie *pcie) +{ + u32 reg; + unsigned int status; + char *strcomp_status, *str_posted; + + reg = advk_readl(pcie, PIO_STAT); + status = (reg & PIO_COMPLETION_STATUS_MASK) >> + PIO_COMPLETION_STATUS_SHIFT; + + if (!status) + return; + + switch (status) { + case PIO_COMPLETION_STATUS_UR: + strcomp_status = "UR"; + break; + case PIO_COMPLETION_STATUS_CRS: + strcomp_status = "CRS"; + break; + case PIO_COMPLETION_STATUS_CA: + strcomp_status = "CA"; + break; + default: + strcomp_status = "Unknown"; + break; + } + + if (reg & PIO_NON_POSTED_REQ) + str_posted = "Non-posted"; + else + str_posted = "Posted"; + + dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n", + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); +} + +static int advk_pcie_wait_pio(struct advk_pcie *pcie) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); + + while (time_before(jiffies, timeout)) { + u32 start, isr; + + start = advk_readl(pcie, PIO_START); + isr = advk_readl(pcie, PIO_ISR); + if (!start && isr) + return 0; + } + + dev_err(&pcie->pdev->dev, "config read/write timed out\n"); + return -ETIMEDOUT; +} + +static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + int ret; + + if (PCI_SLOT(devfn) != 0) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_RD_TYPE0; + else + reg |= PCIE_CONFIG_RD_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Program the data strobe */ + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + /* Get the read result */ + *val = advk_readl(pcie, PIO_RD_DATA); + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + u32 data_strobe = 0x0; + int offset; + int ret; + + if (PCI_SLOT(devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (where % size) + return PCIBIOS_SET_FAILED; + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, 
PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_WR_TYPE0; + else + reg |= PCIE_CONFIG_WR_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_CONF_ADDR(bus->number, devfn, where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Calculate the write strobe */ + offset = where & 0x3; + reg = val << (8 * offset); + data_strobe = GENMASK(size - 1, 0) << offset; + + /* Program the data register */ + advk_writel(pcie, reg, PIO_WR_DATA); + + /* Program the data strobe */ + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops advk_pcie_ops = { + .read = advk_pcie_rd_conf, + .write = advk_pcie_wr_conf, +}; + +static int advk_pcie_alloc_msi(struct advk_pcie *pcie) +{ + int hwirq; + + mutex_lock(&pcie->msi_used_lock); + hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM); + if (hwirq >= MSI_IRQ_NUM) + hwirq = -ENOSPC; + else + set_bit(hwirq, pcie->msi_irq_in_use); + mutex_unlock(&pcie->msi_used_lock); + + return hwirq; +} + +static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq) +{ + mutex_lock(&pcie->msi_used_lock); + if (!test_bit(hwirq, pcie->msi_irq_in_use)) + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n", + hwirq); + else + clear_bit(hwirq, pcie->msi_irq_in_use); + mutex_unlock(&pcie->msi_used_lock); +} + +static int advk_pcie_setup_msi_irq(struct msi_controller *chip, + struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct advk_pcie *pcie = pdev->bus->sysdata; + struct msi_msg msg; + int virq, hwirq; + phys_addr_t msi_msg_phys; + + /* We support MSI, but not MSI-X */ + if (desc->msi_attrib.is_msix) + return -EINVAL; + + hwirq = advk_pcie_alloc_msi(pcie); + if (hwirq < 0) + return hwirq; + + virq = irq_create_mapping(pcie->msi_domain, hwirq); + if (!virq) { + advk_pcie_free_msi(pcie, hwirq); + return -EINVAL; + } + + irq_set_msi_desc(virq, desc); + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + msg.address_lo = lower_32_bits(msi_msg_phys); + msg.address_hi = upper_32_bits(msi_msg_phys); + msg.data = virq; + + pci_write_msi_msg(virq, &msg); + + return 0; +} + +static void advk_pcie_teardown_msi_irq(struct msi_controller *chip, + unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + struct msi_desc *msi = irq_data_get_msi_desc(d); + struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi); + unsigned long hwirq = d->hwirq; + + irq_dispose_mapping(irq); + advk_pcie_free_msi(pcie, hwirq); +} + +static int advk_pcie_msi_map(struct irq_domain *domain, + unsigned int virq, irq_hw_number_t hw) +{ + struct advk_pcie *pcie = domain->host_data; + + irq_set_chip_and_handler(virq, &pcie->msi_irq_chip, + handle_simple_irq); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_msi_irq_ops = { + .map = advk_pcie_msi_map, +}; + +static void advk_pcie_irq_mask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + + mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + mask |= PCIE_ISR0_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); +} + +static void advk_pcie_irq_unmask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + 
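+	/*
+	 * Each INTx line owns an assert bit at ISR0[16 + hwirq] (see
+	 * PCIE_ISR0_INTX_ASSERT); clearing the matching bit in the mask
+	 * register re-enables delivery for that line.
+	 */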
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); +} + +static int advk_pcie_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct advk_pcie *pcie = h->host_data; + + advk_pcie_irq_mask(irq_get_irq_data(virq)); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pcie->irq_chip, + handle_level_irq); + irq_set_chip_data(virq, pcie); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_irq_domain_ops = { + .map = advk_pcie_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct irq_chip *msi_irq_chip; + struct msi_controller *msi; + phys_addr_t msi_msg_phys; + int ret; + + msi_irq_chip = &pcie->msi_irq_chip; + + msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", + dev_name(dev)); + if (!msi_irq_chip->name) + return -ENOMEM; + + msi_irq_chip->irq_enable = pci_msi_unmask_irq; + msi_irq_chip->irq_disable = pci_msi_mask_irq; + msi_irq_chip->irq_mask = pci_msi_mask_irq; + msi_irq_chip->irq_unmask = pci_msi_unmask_irq; + + msi = &pcie->msi; + + msi->setup_irq = advk_pcie_setup_msi_irq; + msi->teardown_irq = advk_pcie_teardown_msi_irq; + msi->of_node = node; + + mutex_init(&pcie->msi_used_lock); + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + advk_writel(pcie, lower_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_LOW_REG); + advk_writel(pcie, upper_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_HIGH_REG); + + pcie->msi_domain = + irq_domain_add_linear(NULL, MSI_IRQ_NUM, + &advk_pcie_msi_irq_ops, pcie); + if (!pcie->msi_domain) + return -ENOMEM; + + ret = of_pci_msi_chip_add(msi); + if (ret < 0) { + irq_domain_remove(pcie->msi_domain); + return ret; + } + + return 0; +} + +static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) +{ + of_pci_msi_chip_remove(&pcie->msi); + irq_domain_remove(pcie->msi_domain); +} + +static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + struct irq_chip *irq_chip; + + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + irq_chip = &pcie->irq_chip; + + irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", + dev_name(dev)); + if (!irq_chip->name) { + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + irq_chip->irq_mask = advk_pcie_irq_mask; + irq_chip->irq_mask_ack = advk_pcie_irq_mask; + irq_chip->irq_unmask = advk_pcie_irq_unmask; + + pcie->irq_domain = + irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM, + &advk_pcie_irq_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + return 0; +} + +static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) +{ + irq_domain_remove(pcie->irq_domain); +} + +static void advk_pcie_handle_msi(struct advk_pcie *pcie) +{ + u32 msi_val, msi_mask, msi_status, msi_idx; + u16 msi_data; + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); + msi_status = msi_val & ~msi_mask; + + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { + if (!(BIT(msi_idx) & msi_status)) + continue; + + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); + 
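+	/*
+	 * The payload register carries the MSI data word, which the setup
+	 * path programmed to the Linux virq, so the value read below can be
+	 * handed straight to generic_handle_irq().
+	 */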
msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; + generic_handle_irq(msi_data); + } + + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, + PCIE_ISR0_REG); +} + +static void advk_pcie_handle_int(struct advk_pcie *pcie) +{ + u32 val, mask, status; + int i, virq; + + val = advk_readl(pcie, PCIE_ISR0_REG); + mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + status = val & ((~mask) & PCIE_ISR0_ALL_MASK); + + if (!status) { + advk_writel(pcie, val, PCIE_ISR0_REG); + return; + } + + /* Process MSI interrupts */ + if (status & PCIE_ISR0_MSI_INT_PENDING) + advk_pcie_handle_msi(pcie); + + /* Process legacy interrupts */ + for (i = 0; i < LEGACY_IRQ_NUM; i++) { + if (!(status & PCIE_ISR0_INTX_ASSERT(i))) + continue; + + advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), + PCIE_ISR0_REG); + + virq = irq_find_mapping(pcie->irq_domain, i); + generic_handle_irq(virq); + } +} + +static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) +{ + struct advk_pcie *pcie = arg; + u32 status; + + status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); + if (!(status & PCIE_IRQ_CORE_INT)) + return IRQ_NONE; + + advk_pcie_handle_int(pcie); + + /* Clear interrupt */ + advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); + + return IRQ_HANDLED; +} + +static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) +{ + int err, res_valid = 0; + struct device *dev = &pcie->pdev->dev; + struct device_node *np = dev->of_node; + struct resource_entry *win; + resource_size_t iobase; + + INIT_LIST_HEAD(&pcie->resources); + + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, + &iobase); + if (err) + return err; + + err = devm_request_pci_bus_resources(dev, &pcie->resources); + if (err) + goto out_release_res; + + resource_list_for_each_entry(win, &pcie->resources) { + struct resource *res = win->res; + + switch (resource_type(res)) { + case IORESOURCE_IO: + advk_pcie_set_ob_win(pcie, 1, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0, 0xF8000000, 0, + lower_32_bits(res->start), + OB_PCIE_IO); + err = pci_remap_iospace(res, iobase); + if (err) + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, res); + break; + case IORESOURCE_MEM: + advk_pcie_set_ob_win(pcie, 0, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0x0, 0xF8000000, 0, + lower_32_bits(res->start), + (2 << 20) | OB_PCIE_MEM); + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + break; + case IORESOURCE_BUS: + pcie->root_bus_nr = res->start; + break; + } + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + pci_free_resource_list(&pcie->resources); + return err; +} + +static int advk_pcie_probe(struct platform_device *pdev) +{ + struct advk_pcie *pcie; + struct resource *res; + struct pci_bus *bus, *child; + struct msi_controller *msi; + struct device_node *msi_node; + int ret, irq; + + pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie), + GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pcie->base)) { + dev_err(&pdev->dev, "Failed to map registers\n"); + return PTR_ERR(pcie->base); + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", + pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to register 
interrupt\n"); + return ret; + } + + ret = advk_pcie_parse_request_of_pci_ranges(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to parse resources\n"); + return ret; + } + + advk_pcie_setup_hw(pcie); + + ret = advk_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize irq\n"); + return ret; + } + + ret = advk_pcie_init_msi_irq_domain(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize irq\n"); + advk_pcie_remove_irq_domain(pcie); + return ret; + } + + msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0); + if (msi_node) + msi = of_pci_find_msi_chip_by_node(msi_node); + else + msi = NULL; + + bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops, + pcie, &pcie->resources, &pcie->msi); + if (!bus) { + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return -ENOMEM; + } + + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + + return 0; +} + +static const struct of_device_id advk_pcie_of_match_table[] = { + { .compatible = "marvell,armada-3700-pcie", }, + {}, +}; + +static struct platform_driver advk_pcie_driver = { + .driver = { + .name = "advk-pcie", + .of_match_table = advk_pcie_of_match_table, + /* Driver unloading/unbinding currently not supported */ + .suppress_bind_attrs = true, + }, + .probe = advk_pcie_probe, +}; +builtin_platform_driver(advk_pcie_driver); diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index f441130407e7..81b3949a26db 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -181,14 +181,14 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); - return PTR_ERR(pcie_intc_node); + return -ENODEV; } pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, &intx_domain_ops, pp); if (!pp->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return PTR_ERR(pp->irq_domain); + return -ENODEV; } return 0; diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index 8cba7ab73df9..9d9d34e959b6 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c @@ -20,10 +20,9 @@ #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> +#include <linux/pci-ecam.h> #include <linux/platform_device.h> -#include "../ecam.h" - static int gen_pci_parse_request_of_pci_ranges(struct device *dev, struct list_head *resources, struct resource **bus_range) { @@ -36,44 +35,34 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, if (err) return err; + err = devm_request_pci_bus_resources(dev, resources); + if (err) + return err; + resource_list_for_each_entry(win, resources) { - struct resource *parent, *res = win->res; + struct resource *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: - parent = &ioport_resource; err = pci_remap_iospace(res, iobase); - if (err) { + if (err) dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); - continue; - } break; case IORESOURCE_MEM: - parent = &iomem_resource; res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; case IORESOURCE_BUS: *bus_range = res; - default: - continue; + break; } - - err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; - } - - if (!res_valid) { - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - goto out_release_res; } - 
return 0; + if (res_valid) + return 0; -out_release_res: - return err; + dev_err(dev, "non-prefetchable memory resource required\n"); + return -EINVAL; } static void gen_pci_unmap_cfg(void *ptr) @@ -155,7 +144,14 @@ int pci_host_common_probe(struct platform_device *pdev, pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); - if (!pci_has_flag(PCI_PROBE_ONLY)) { + /* + * We insert PCI resources into the iomem_resource and + * ioport_resource trees in either pci_bus_claim_resources() + * or pci_bus_assign_resources(). + */ + if (pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_claim_resources(bus); + } else { pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index 6eaceab1bf04..c05ea9d72f69 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c @@ -20,13 +20,12 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_address.h> #include <linux/of_pci.h> +#include <linux/pci-ecam.h> #include <linux/platform_device.h> -#include "../ecam.h" - static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, .pci_ops = { @@ -46,8 +45,6 @@ static const struct of_device_id gen_pci_of_match[] = { { }, }; -MODULE_DEVICE_TABLE(of, gen_pci_of_match); - static int gen_pci_probe(struct platform_device *pdev) { const struct of_device_id *of_id; @@ -66,8 +63,4 @@ static struct platform_driver gen_pci_driver = { }, .probe = gen_pci_probe, }; -module_platform_driver(gen_pci_driver); - -MODULE_DESCRIPTION("Generic PCI host driver"); -MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(gen_pci_driver); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 7e9b2de2aa24..6955ffdb89f3 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -732,16 +732,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, pdev = msi_desc_to_pci_dev(msi); hbus = info->data; - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); - if (!hpdev) + int_desc = irq_data_get_irq_chip_data(irq_data); + if (!int_desc) return; - int_desc = irq_data_get_irq_chip_data(irq_data); - if (int_desc) { - irq_data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); + irq_data->chip_data = NULL; + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) { + kfree(int_desc); + return; } + hv_int_desc_free(hpdev, int_desc); put_pcichild(hpdev, hv_pcidev_ref_by_slot); } @@ -1657,14 +1659,16 @@ static void hv_pci_onchannelcallback(void *context) continue; } + /* Zero length indicates there are no more packets. */ + if (ret || !bytes_recvd) + break; + /* * All incoming packets must be at least as large as a * response. 
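 * Undersized packets are simply skipped; every path now falls
 * through to the single kfree(buffer) after the receive loop.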
*/ - if (bytes_recvd <= sizeof(struct pci_response)) { - kfree(buffer); - return; - } + if (bytes_recvd <= sizeof(struct pci_response)) + continue; desc = (struct vmpacket_descriptor *)buffer; switch (desc->type) { @@ -1679,8 +1683,7 @@ static void hv_pci_onchannelcallback(void *context) comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_recvd); - kfree(buffer); - return; + break; case VM_PKT_DATA_INBAND: @@ -1727,8 +1730,9 @@ static void hv_pci_onchannelcallback(void *context) desc->type, req_id, bytes_recvd); break; } - break; } + + kfree(buffer); } /** diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 6b8301ef21ca..8ba28834d470 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -17,7 +17,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_irq.h> #include <linux/of.h> @@ -360,7 +360,6 @@ static const struct of_device_id ks_pcie_of_match[] = { }, { }, }; -MODULE_DEVICE_TABLE(of, ks_pcie_of_match); static int __exit ks_pcie_remove(struct platform_device *pdev) { @@ -439,9 +438,4 @@ static struct platform_driver ks_pcie_driver __refdata = { .of_match_table = of_match_ptr(ks_pcie_of_match), }, }; - -module_platform_driver(ks_pcie_driver); - -MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>"); -MODULE_DESCRIPTION("Keystone PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index a21e229d95e0..114ba819277a 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -12,7 +12,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/of_irq.h> @@ -211,7 +211,6 @@ static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, { }, }; -MODULE_DEVICE_TABLE(of, ls_pcie_of_match); static int __init ls_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) @@ -275,9 +274,4 @@ static struct platform_driver ls_pcie_driver = { .of_match_table = ls_pcie_of_match, }, }; - -module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); - -MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>"); -MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 6b451df6502c..307f81d6b479 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -1,6 +1,8 @@ /* * PCIe driver for Marvell Armada 370 and Armada XP SoCs * + * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
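A pattern repeated across this series (keystone and layerscape just above, mvebu here, rcar, tegra, xgene and others below): host drivers whose Kconfig symbols are bool can never be built as modules, so module_platform_driver() and the MODULE_* / MODULE_DEVICE_TABLE boilerplate are dropped in favor of builtin_platform_driver(), and <linux/module.h> becomes <linux/init.h>. The shape of the conversion on a hypothetical driver (foo_* is a placeholder):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static int foo_pcie_probe(struct platform_device *pdev)
	{
		/* controller bring-up would go here */
		return 0;
	}

	static struct platform_driver foo_pcie_driver = {
		.driver = {
			.name = "foo-pcie",
			/* built-in only; unbind is often suppressed as well */
			.suppress_bind_attrs = true,
		},
		.probe	= foo_pcie_probe,
	};
	builtin_platform_driver(foo_pcie_driver);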
@@ -11,7 +13,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/mbus.h> #include <linux/msi.h> #include <linux/slab.h> @@ -839,25 +841,22 @@ static struct pci_ops mvebu_pcie_ops = { static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) { struct mvebu_pcie *pcie = sys_to_pcie(sys); - int i; + int err, i; pcie->mem.name = "PCI MEM"; pcie->realio.name = "PCI I/O"; - if (request_resource(&iomem_resource, &pcie->mem)) - return 0; - - if (resource_size(&pcie->realio) != 0) { - if (request_resource(&ioport_resource, &pcie->realio)) { - release_resource(&pcie->mem); - return 0; - } + if (resource_size(&pcie->realio) != 0) pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset); - } + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); pci_add_resource(&sys->resources, &pcie->busn); + err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources); + if (err) + return 0; + for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; @@ -1298,7 +1297,6 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = { { .compatible = "marvell,kirkwood-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); static const struct dev_pm_ops mvebu_pcie_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) @@ -1314,8 +1312,4 @@ static struct platform_driver mvebu_pcie_driver = { }, .probe = mvebu_pcie_probe, }; -module_platform_driver(mvebu_pcie_driver); - -MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); -MODULE_DESCRIPTION("Marvell EBU PCIe driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(mvebu_pcie_driver); diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 9980a4bdae7e..597566f96f5e 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -4,6 +4,8 @@ * Copyright (C) 2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc. * + * Author: Valentine Barshak <valentine.barshak@cogentembedded.com> + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
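The mvebu setup path above, and the R-Car Gen2 one that follows, now claim their bridge windows through devm_request_pci_bus_resources(), the helper added to drivers/pci/bus.c at the top of this section, instead of open-coded request_resource()/release_resource() pairs; devres then unwinds the claims automatically on probe failure or unbind. A minimal DT-based caller, sketched from the same APIs this diff itself uses (foo_* is a placeholder):

	#include <linux/of_pci.h>
	#include <linux/pci.h>

	static int foo_pcie_request_resources(struct device *dev,
					      struct list_head *resources)
	{
		resource_size_t iobase;
		int err;

		INIT_LIST_HEAD(resources);

		/* Parse the DT "ranges" into a resource_entry list */
		err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
						       resources, &iobase);
		if (err)
			return err;

		/* One call claims every IORESOURCE_IO/IORESOURCE_MEM entry */
		err = devm_request_pci_bus_resources(dev, resources);
		if (err)
			pci_free_resource_list(resources);

		return err;
	}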
@@ -14,7 +16,6 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> @@ -97,7 +98,6 @@ struct rcar_pci_priv { struct device *dev; void __iomem *reg; - struct resource io_res; struct resource mem_res; struct resource *cfg_res; unsigned busnr; @@ -194,6 +194,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) struct rcar_pci_priv *priv = sys->private_data; void __iomem *reg = priv->reg; u32 val; + int ret; pm_runtime_enable(priv->dev); pm_runtime_get_sync(priv->dev); @@ -273,8 +274,10 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) rcar_pci_setup_errirq(priv); /* Add PCI resources */ - pci_add_resource(&sys->resources, &priv->io_res); pci_add_resource(&sys->resources, &priv->mem_res); + ret = devm_request_pci_bus_resources(priv->dev, &sys->resources); + if (ret < 0) + return ret; /* Setup bus number based on platform device id / of bus-range */ sys->busnr = priv->busnr; @@ -371,14 +374,6 @@ static int rcar_pci_probe(struct platform_device *pdev) return -ENOMEM; priv->mem_res = *mem_res; - /* - * The controller does not support/use port I/O, - * so setup a dummy port I/O region here. - */ - priv->io_res.start = priv->mem_res.start; - priv->io_res.end = priv->mem_res.end; - priv->io_res.flags = IORESOURCE_IO; - priv->cfg_res = cfg_res; priv->irq = platform_get_irq(pdev, 0); @@ -421,6 +416,7 @@ static int rcar_pci_probe(struct platform_device *pdev) hw_private[0] = priv; memset(&hw, 0, sizeof(hw)); hw.nr_controllers = ARRAY_SIZE(hw_private); + hw.io_optional = 1; hw.private_data = hw_private; hw.map_irq = rcar_pci_map_irq; hw.ops = &rcar_pci_ops; @@ -437,8 +433,6 @@ static struct of_device_id rcar_pci_of_match[] = { { }, }; -MODULE_DEVICE_TABLE(of, rcar_pci_of_match); - static struct platform_driver rcar_pci_driver = { .driver = { .name = "pci-rcar-gen2", @@ -447,9 +441,4 @@ static struct platform_driver rcar_pci_driver = { }, .probe = rcar_pci_probe, }; - -module_platform_driver(rcar_pci_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI"); -MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>"); +builtin_platform_driver(rcar_pci_driver); diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index c388468c202a..6de0757b11e4 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -9,6 +9,8 @@ * * Bits taken from arch/arm/mach-dove/pcie.c * + * Author: Thierry Reding <treding@nvidia.com> + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -32,7 +34,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -183,26 +185,26 @@ #define AFI_PEXBIAS_CTRL_0 0x168 -#define RP_VEND_XP 0x00000F00 +#define RP_VEND_XP 0x00000f00 #define RP_VEND_XP_DL_UP (1 << 30) -#define RP_PRIV_MISC 0x00000FE0 -#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0) -#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0) +#define RP_PRIV_MISC 0x00000fe0 +#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) #define RP_LINK_CONTROL_STATUS 0x00000090 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 -#define PADS_CTL_SEL 0x0000009C +#define PADS_CTL_SEL 0x0000009c -#define PADS_CTL 0x000000A0 +#define PADS_CTL 0x000000a0 #define PADS_CTL_IDDQ_1L (1 << 0) #define PADS_CTL_TX_DATA_EN_1L (1 << 6) #define PADS_CTL_RX_DATA_EN_1L (1 << 10) -#define PADS_PLL_CTL_TEGRA20 0x000000B8 -#define PADS_PLL_CTL_TEGRA30 0x000000B4 +#define PADS_PLL_CTL_TEGRA20 0x000000b8 +#define PADS_PLL_CTL_TEGRA30 0x000000b4 #define PADS_PLL_CTL_RST_B4SM (1 << 1) #define PADS_PLL_CTL_LOCKDET (1 << 8) #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) @@ -214,9 +216,9 @@ #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) -#define PADS_REFCLK_CFG0 0x000000C8 -#define PADS_REFCLK_CFG1 0x000000CC -#define PADS_REFCLK_BIAS 0x000000D0 +#define PADS_REFCLK_CFG0 0x000000c8 +#define PADS_REFCLK_CFG1 0x000000cc +#define PADS_REFCLK_BIAS 0x000000d0 /* * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit @@ -228,15 +230,6 @@ #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ -/* Default value provided by HW engineering is 0xfa5c */ -#define PADS_REFCLK_CFG_VALUE \ - ( \ - (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \ - (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \ - (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \ - (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \ - ) - struct tegra_msi { struct msi_controller chip; DECLARE_BITMAP(used, INT_PCI_MSI_NR); @@ -252,6 +245,8 @@ struct tegra_pcie_soc_data { unsigned int msi_base_shift; u32 pads_pll_ctl; u32 tx_ref_sel; + u32 pads_refclk_cfg0; + u32 pads_refclk_cfg1; bool has_pex_clkreq_en; bool has_pex_bias_ctrl; bool has_intr_prsnt_sense; @@ -274,7 +269,6 @@ struct tegra_pcie { struct list_head buses; struct resource *cs; - struct resource all; struct resource io; struct resource pio; struct resource mem; @@ -623,30 +617,21 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) sys->mem_offset = pcie->offset.mem; sys->io_offset = pcie->offset.io; - err = devm_request_resource(pcie->dev, &pcie->all, &pcie->io); - if (err < 0) - return err; - - err = devm_request_resource(pcie->dev, &ioport_resource, &pcie->pio); - if (err < 0) - return err; - - err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); + err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io); if (err < 0) return err; - err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch); - if (err) - return err; - pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset); pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); pci_add_resource_offset(&sys->resources, &pcie->prefetch, sys->mem_offset); pci_add_resource(&sys->resources, &pcie->busn); - pci_ioremap_io(pcie->pio.start, pcie->io.start); + err = devm_request_pci_bus_resources(pcie->dev, &sys->resources); + if (err < 0) + return err; + pci_remap_iospace(&pcie->pio, pcie->io.start); return 1; } @@ -838,12 +823,6 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) value |= PADS_PLL_CTL_RST_B4SM; pads_writel(pcie, value, soc->pads_pll_ctl); - /* Configure the reference clock driver */ - value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16); - pads_writel(pcie, value, PADS_REFCLK_CFG0); - if (soc->num_ports > 2) - pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1); - /* wait for the PLL to lock */ err = tegra_pcie_pll_wait(pcie, 500); if (err < 0) { @@ -927,6 +906,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) static int 
tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { + const struct tegra_pcie_soc_data *soc = pcie->soc_data; struct tegra_pcie_port *port; int err; @@ -952,6 +932,12 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) } } + /* Configure the reference clock driver */ + pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); + + if (soc->num_ports > 2) + pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); + return 0; } @@ -1822,12 +1808,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) struct resource res; int err; - memset(&pcie->all, 0, sizeof(pcie->all)); - pcie->all.flags = IORESOURCE_MEM; - pcie->all.name = np->full_name; - pcie->all.start = ~0; - pcie->all.end = 0; - if (of_pci_range_parser_init(&parser, np)) { dev_err(pcie->dev, "missing \"ranges\" property\n"); return -EINVAL; @@ -1880,18 +1860,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) } break; } - - if (res.start <= pcie->all.start) - pcie->all.start = res.start; - - if (res.end >= pcie->all.end) - pcie->all.end = res.end; } - err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all); - if (err < 0) - return err; - err = of_pci_parse_bus_range(np, &pcie->busn); if (err < 0) { dev_err(pcie->dev, "failed to parse ranges property: %d\n", @@ -2078,6 +2048,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = { .msi_base_shift = 0, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, + .pads_refclk_cfg0 = 0xfa5cfa5c, .has_pex_clkreq_en = false, .has_pex_bias_ctrl = false, .has_intr_prsnt_sense = false, @@ -2090,6 +2061,8 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = { .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0xfa5cfa5c, + .pads_refclk_cfg1 = 0xfa5cfa5c, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2102,6 +2075,7 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = { .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x44ac44ac, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2115,7 +2089,6 @@ static const struct of_device_id tegra_pcie_of_match[] = { { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data }, { }, }; -MODULE_DEVICE_TABLE(of, tegra_pcie_of_match); static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) { @@ -2249,8 +2222,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) if (err < 0) return err; - pcibios_min_mem = 0; - err = tegra_pcie_get_resources(pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request resources: %d\n", err); @@ -2306,8 +2277,4 @@ static struct platform_driver tegra_pcie_driver = { }, .probe = tegra_pcie_probe, }; -module_platform_driver(tegra_pcie_driver); - -MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); -MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(tegra_pcie_driver); diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c index 540d030613eb..d50a3dc2d8db 100644 --- a/drivers/pci/host/pci-thunder-ecam.c +++ b/drivers/pci/host/pci-thunder-ecam.c @@ -7,14 +7,13 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/ioport.h> #include <linux/of_pci.h> #include <linux/of.h> +#include <linux/pci-ecam.h> #include <linux/platform_device.h> -#include "../ecam.h" - 
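The Tegra hunks above stop programming a single hard-coded PADS_REFCLK_CFG_VALUE and instead pull the refclk pad configuration from the per-SoC data hung off the OF match table (0xfa5cfa5c for Tegra20/30, 0x44ac44ac for Tegra124), writing it only after the per-port PHYs are powered on. The .data lookup pattern in isolation, as a sketch with invented foo_* names:

	#include <linux/errno.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	#include <linux/types.h>

	struct foo_soc_data {
		u32 pads_refclk_cfg0;	/* value for the PADS_REFCLK_CFG0 register */
	};

	static const struct foo_soc_data foo20_data = { .pads_refclk_cfg0 = 0xfa5cfa5c };
	static const struct foo_soc_data foo124_data = { .pads_refclk_cfg0 = 0x44ac44ac };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo20-pcie", .data = &foo20_data },
		{ .compatible = "vendor,foo124-pcie", .data = &foo124_data },
		{ },
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		const struct foo_soc_data *soc;

		match = of_match_device(foo_of_match, &pdev->dev);
		if (!match)
			return -ENODEV;

		soc = match->data;
		/* ... write soc->pads_refclk_cfg0 into the pads block ... */
		return 0;
	}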
static void set_val(u32 v, int where, int size, u32 *val) { int shift = (where & 3) * 8; @@ -360,7 +359,6 @@ static const struct of_device_id thunder_ecam_of_match[] = { { .compatible = "cavium,pci-host-thunder-ecam" }, { }, }; -MODULE_DEVICE_TABLE(of, thunder_ecam_of_match); static int thunder_ecam_probe(struct platform_device *pdev) { @@ -374,7 +372,4 @@ static struct platform_driver thunder_ecam_driver = { }, .probe = thunder_ecam_probe, }; -module_platform_driver(thunder_ecam_driver); - -MODULE_DESCRIPTION("Thunder ECAM PCI host driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(thunder_ecam_driver); diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index 9b8ab94f3c8c..6abaf80ffb39 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -15,13 +15,12 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_address.h> #include <linux/of_pci.h> +#include <linux/pci-ecam.h> #include <linux/platform_device.h> -#include "../ecam.h" - #define PEM_CFG_WR 0x28 #define PEM_CFG_RD 0x30 @@ -285,8 +284,9 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, return pci_generic_config_write(bus, devfn, where, size, val); } -static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg) +static int thunder_pem_init(struct pci_config_window *cfg) { + struct device *dev = cfg->parent; resource_size_t bar4_start; struct resource *res_pem; struct thunder_pem_pci *pem_pci; @@ -346,7 +346,6 @@ static const struct of_device_id thunder_pem_of_match[] = { { .compatible = "cavium,pci-host-thunder-pem" }, { }, }; -MODULE_DEVICE_TABLE(of, thunder_pem_of_match); static int thunder_pem_probe(struct platform_device *pdev) { @@ -360,7 +359,4 @@ static struct platform_driver thunder_pem_driver = { }, .probe = thunder_pem_probe, }; -module_platform_driver(thunder_pem_driver); - -MODULE_DESCRIPTION("Thunder PEM PCIe host driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(thunder_pem_driver); diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index f843a72dc51c..f234405770ab 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -80,21 +80,21 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, if (err) return err; + err = devm_request_pci_bus_resources(dev, res); + if (err) + goto out_release_res; + resource_list_for_each_entry(win, res) { - struct resource *parent, *res = win->res; + struct resource *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: - parent = &ioport_resource; err = pci_remap_iospace(res, iobase); - if (err) { + if (err) dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); - continue; - } break; case IORESOURCE_MEM: - parent = &iomem_resource; res_valid |= !(res->flags & IORESOURCE_PREFETCH); writel(res->start >> 28, PCI_IMAP(mem)); @@ -102,23 +102,14 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, mem++; break; - case IORESOURCE_BUS: - default: - continue; } - - err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; } - if (!res_valid) { - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - goto out_release_res; - } + if (res_valid) + return 0; - return 0; + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; out_release_res: pci_free_resource_list(res); diff --git a/drivers/pci/host/pci-xgene.c 
b/drivers/pci/host/pci-xgene.c index ae00ce22d5a6..a81273c23341 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -21,7 +21,7 @@ #include <linux/io.h> #include <linux/jiffies.h> #include <linux/memblock.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -540,14 +540,20 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) return ret; + ret = devm_request_pci_bus_resources(&pdev->dev, &res); + if (ret) + goto error; + ret = xgene_pcie_setup(port, &res, iobase); if (ret) - return ret; + goto error; bus = pci_create_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); - if (!bus) - return -ENOMEM; + if (!bus) { + ret = -ENOMEM; + goto error; + } pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); @@ -555,6 +561,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) platform_set_drvdata(pdev, port); return 0; + +error: + pci_free_resource_list(&res); + return ret; } static const struct of_device_id xgene_pcie_match_table[] = { @@ -569,8 +579,4 @@ static struct platform_driver xgene_pcie_driver = { }, .probe = xgene_pcie_probe_bridge, }; -module_platform_driver(xgene_pcie_driver); - -MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>"); -MODULE_DESCRIPTION("APM X-Gene PCIe driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(xgene_pcie_driver); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index dbac6fb3f0bd..2b7837650db8 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -61,6 +61,8 @@ #define TLP_LOOP 500 #define RP_DEVFN 0 +#define LINK_UP_TIMEOUT 5000 + #define INTX_NUM 4 #define DWORD_MASK 3 @@ -81,9 +83,30 @@ struct tlp_rp_regpair_t { u32 reg1; }; +static inline void cra_writel(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) +{ + return readl_relaxed(pcie->cra_base + reg); +} + +static bool altera_pcie_link_is_up(struct altera_pcie *pcie) +{ + return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); +} + static void altera_pcie_retrain(struct pci_dev *dev) { u16 linkcap, linkstat; + struct altera_pcie *pcie = dev->bus->sysdata; + int timeout = 0; + + if (!altera_pcie_link_is_up(pcie)) + return; /* * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but @@ -95,9 +118,16 @@ static void altera_pcie_retrain(struct pci_dev *dev) return; pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat); - if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) + if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { pcie_capability_set_word(dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); + while (!altera_pcie_link_is_up(pcie)) { + timeout++; + if (timeout > LINK_UP_TIMEOUT) + break; + udelay(5); + } + } } DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain); @@ -120,17 +150,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, return false; } -static inline void cra_writel(struct altera_pcie *pcie, const u32 value, - const u32 reg) -{ - writel_relaxed(value, pcie->cra_base + reg); -} - -static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) -{ - return readl_relaxed(pcie->cra_base + reg); -} - static void tlp_write_tx(struct altera_pcie *pcie, struct tlp_rp_regpair_t *tlp_rp_regdata) { @@ -139,11 +158,6 @@ static void tlp_write_tx(struct 
altera_pcie *pcie, cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); } -static bool altera_pcie_link_is_up(struct altera_pcie *pcie) -{ - return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); -} - static bool altera_pcie_valid_config(struct altera_pcie *pcie, struct pci_bus *bus, int dev) { @@ -415,11 +429,6 @@ static void altera_pcie_isr(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie) -{ - pci_free_resource_list(&pcie->resources); -} - static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) { int err, res_valid = 0; @@ -432,33 +441,25 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) if (err) return err; + err = devm_request_pci_bus_resources(dev, &pcie->resources); + if (err) + goto out_release_res; + resource_list_for_each_entry(win, &pcie->resources) { - struct resource *parent, *res = win->res; + struct resource *res = win->res; - switch (resource_type(res)) { - case IORESOURCE_MEM: - parent = &iomem_resource; + if (resource_type(res) == IORESOURCE_MEM) res_valid |= !(res->flags & IORESOURCE_PREFETCH); - break; - default: - continue; - } - - err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; } - if (!res_valid) { - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - goto out_release_res; - } + if (res_valid) + return 0; - return 0; + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; out_release_res: - altera_pcie_release_of_pci_ranges(pcie); + pci_free_resource_list(&pcie->resources); return err; } diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c index 55723567b5d4..0f4f570068e3 100644 --- a/drivers/pci/host/pcie-armada8k.c +++ b/drivers/pci/host/pcie-armada8k.c @@ -5,6 +5,9 @@ * * Copyright (C) 2016 Marvell Technology Group Ltd. * + * Author: Yehuda Yitshak <yehuday@marvell.com> + * Author: Shadi Ammouri <shadi@marvell.com> + * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
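The versatile, xgene, and altera hunks above all converge on the same shape: claim every host bridge window with a single devm_request_pci_bus_resources() call and free the whole resource list if anything later fails, instead of open-coding per-type devm_request_resource() loops. A minimal sketch of that pattern (the foo_ name is a placeholder, not a real driver):

#include <linux/of_pci.h>
#include <linux/pci.h>

static int foo_pcie_request_resources(struct device *dev,
				      struct list_head *res,
				      resource_size_t *iobase)
{
	int err;

	err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
					       res, iobase);
	if (err)
		return err;

	/* Claim all windows at once instead of per-type loops */
	err = devm_request_pci_bus_resources(dev, res);
	if (err)
		pci_free_resource_list(res);	/* unwind on failure */

	return err;
}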
@@ -14,7 +17,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> @@ -244,7 +247,6 @@ static const struct of_device_id armada8k_pcie_of_match[] = { { .compatible = "marvell,armada8k-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match); static struct platform_driver armada8k_pcie_driver = { .probe = armada8k_pcie_probe, @@ -253,10 +255,4 @@ static struct platform_driver armada8k_pcie_driver = { .of_match_table = of_match_ptr(armada8k_pcie_of_match), }, }; - -module_platform_driver(armada8k_pcie_driver); - -MODULE_DESCRIPTION("Armada 8k PCIe host controller driver"); -MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>"); -MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(armada8k_pcie_driver); diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c new file mode 100644 index 000000000000..16ba70b7ec65 --- /dev/null +++ b/drivers/pci/host/pcie-artpec6.c @@ -0,0 +1,280 @@ +/* + * PCIe host controller driver for Axis ARTPEC-6 SoC + * + * Author: Niklas Cassel <niklas.cassel@axis.com> + * + * Based on work done by Phil Edworthy <phil@edworthys.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include "pcie-designware.h" + +#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp) + +struct artpec6_pcie { + struct pcie_port pp; + struct regmap *regmap; + void __iomem *phy_base; +}; + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 +#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) +#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) + +#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc) +#define DBI_RO_WR_EN 1 + +/* ARTPEC-6 specific registers */ +#define PCIECFG 0x18 +#define PCIECFG_DBG_OEN (1 << 24) +#define PCIECFG_CORE_RESET_REQ (1 << 21) +#define PCIECFG_LTSSM_ENABLE (1 << 20) +#define PCIECFG_CLKREQ_B (1 << 11) +#define PCIECFG_REFCLK_ENABLE (1 << 10) +#define PCIECFG_PLL_ENABLE (1 << 9) +#define PCIECFG_PCLK_ENABLE (1 << 8) +#define PCIECFG_RISRCREN (1 << 4) +#define PCIECFG_MODE_TX_DRV_EN (1 << 3) +#define PCIECFG_CISRREN (1 << 2) +#define PCIECFG_MACRO_ENABLE (1 << 0) + +#define NOCCFG 0x40 +#define NOCCFG_ENABLE_CLK_PCIE (1 << 4) +#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3) +#define NOCCFG_POWER_PCIE_IDLE (1 << 2) +#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1) + +#define PHY_STATUS 0x118 +#define PHY_COSPLLLOCK (1 << 0) + +#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff + +static int artpec6_pcie_establish_link(struct pcie_port *pp) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp); + u32 val; + unsigned int retries; + + /* Hold DW core in reset */ + regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val |= PCIECFG_CORE_RESET_REQ; + regmap_write(artpec6_pcie->regmap, PCIECFG, val); + + regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ + PCIECFG_MODE_TX_DRV_EN | + PCIECFG_CISRREN | /* Reference clock term. 
100 Ohm */ + PCIECFG_MACRO_ENABLE; + val |= PCIECFG_REFCLK_ENABLE; + val &= ~PCIECFG_DBG_OEN; + val &= ~PCIECFG_CLKREQ_B; + regmap_write(artpec6_pcie->regmap, PCIECFG, val); + usleep_range(5000, 6000); + + regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + val |= NOCCFG_ENABLE_CLK_PCIE; + regmap_write(artpec6_pcie->regmap, NOCCFG, val); + usleep_range(20, 30); + + regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; + regmap_write(artpec6_pcie->regmap, PCIECFG, val); + usleep_range(6000, 7000); + + regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + val &= ~NOCCFG_POWER_PCIE_IDLEREQ; + regmap_write(artpec6_pcie->regmap, NOCCFG, val); + + retries = 50; + do { + usleep_range(1000, 2000); + regmap_read(artpec6_pcie->regmap, NOCCFG, &val); + retries--; + } while (retries && + (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); + + retries = 50; + do { + usleep_range(1000, 2000); + val = readl(artpec6_pcie->phy_base + PHY_STATUS); + retries--; + } while (retries && !(val & PHY_COSPLLLOCK)); + + /* Take DW core out of reset */ + regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val &= ~PCIECFG_CORE_RESET_REQ; + regmap_write(artpec6_pcie->regmap, PCIECFG, val); + usleep_range(100, 200); + + /* + * Enable writing to config regs. This is required as the Synopsys + * driver changes the class code. That register needs DBI write enable. + */ + writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF); + + pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR; + pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR; + pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR; + pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR; + + /* setup root complex */ + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + regmap_read(artpec6_pcie->regmap, PCIECFG, &val); + val |= PCIECFG_LTSSM_ENABLE; + regmap_write(artpec6_pcie->regmap, PCIECFG, val); + + /* check if the link is up or not */ + if (!dw_pcie_wait_for_link(pp)) + return 0; + + dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), + readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + + return -ETIMEDOUT; +} + +static void artpec6_pcie_enable_interrupts(struct pcie_port *pp) +{ + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); +} + +static void artpec6_pcie_host_init(struct pcie_port *pp) +{ + artpec6_pcie_establish_link(pp); + artpec6_pcie_enable_interrupts(pp); +} + +static int artpec6_pcie_link_up(struct pcie_port *pp) +{ + u32 rc; + + /* + * Get status from Synopsys IP + * link is debug bit 36, debug register 1 starts at bit 32 + */ + rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32)); + if (rc) + return 1; + + return 0; +} + +static struct pcie_host_ops artpec6_pcie_host_ops = { + .link_up = artpec6_pcie_link_up, + .host_init = artpec6_pcie_host_init, +}; + +static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + return dw_handle_msi_irq(pp); +} + +static int __init artpec6_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) +{ + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq <= 0) { + dev_err(&pdev->dev, "failed to get MSI irq\n"); + return -ENODEV; + } + + ret = devm_request_irq(&pdev->dev, pp->msi_irq, + artpec6_pcie_msi_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "artpec6-pcie-msi", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request MSI irq\n"); + return ret; + } + } + + pp->root_bus_nr = -1; + pp->ops = &artpec6_pcie_host_ops; + + ret = 
dw_pcie_host_init(pp); + if (ret) { + dev_err(&pdev->dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int artpec6_pcie_probe(struct platform_device *pdev) +{ + struct artpec6_pcie *artpec6_pcie; + struct pcie_port *pp; + struct resource *dbi_base; + struct resource *phy_base; + int ret; + + artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie), + GFP_KERNEL); + if (!artpec6_pcie) + return -ENOMEM; + + pp = &artpec6_pcie->pp; + pp->dev = &pdev->dev; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base); + if (IS_ERR(pp->dbi_base)) + return PTR_ERR(pp->dbi_base); + + phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); + if (IS_ERR(artpec6_pcie->phy_base)) + return PTR_ERR(artpec6_pcie->phy_base); + + artpec6_pcie->regmap = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "axis,syscon-pcie"); + if (IS_ERR(artpec6_pcie->regmap)) + return PTR_ERR(artpec6_pcie->regmap); + + ret = artpec6_add_pcie_port(pp, pdev); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, artpec6_pcie); + return 0; +} + +static const struct of_device_id artpec6_pcie_of_match[] = { + { .compatible = "axis,artpec6-pcie", }, + {}, +}; + +static struct platform_driver artpec6_pcie_driver = { + .probe = artpec6_pcie_probe, + .driver = { + .name = "artpec6-pcie", + .of_match_table = artpec6_pcie_of_match, + }, +}; +builtin_platform_driver(artpec6_pcie_driver); diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index b3500994d08a..c8079dc81c10 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -14,7 +14,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -121,7 +121,6 @@ static const struct of_device_id dw_plat_pcie_of_match[] = { { .compatible = "snps,dw-pcie", }, {}, }; -MODULE_DEVICE_TABLE(of, dw_plat_pcie_of_match); static struct platform_driver dw_plat_pcie_driver = { .driver = { @@ -130,9 +129,4 @@ static struct platform_driver dw_plat_pcie_driver = { }, .probe = dw_plat_pcie_probe, }; - -module_platform_driver(dw_plat_pcie_driver); - -MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); -MODULE_DESCRIPTION("Synopsys PCIe host controller glue platform driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(dw_plat_pcie_driver); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index aafd766546f3..12afce19890b 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -452,6 +452,10 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) return ret; + ret = devm_request_pci_bus_resources(&pdev->dev, &res); + if (ret) + goto error; + /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &res) { switch (resource_type(win->res)) { @@ -461,11 +465,9 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->io_size = resource_size(pp->io); pp->io_bus_addr = pp->io->start - win->offset; ret = pci_remap_iospace(pp->io, pp->io_base); - if (ret) { + if (ret) dev_warn(pp->dev, "error %d: failed to map resource %pR\n", ret, pp->io); - continue; - } break; case IORESOURCE_MEM: pp->mem = win->res; @@ -483,8 +485,6 @@ int dw_pcie_host_init(struct pcie_port *pp) 
case IORESOURCE_BUS: pp->busn = win->res; break; - default: - continue; } } @@ -493,7 +493,8 @@ int dw_pcie_host_init(struct pcie_port *pp) resource_size(pp->cfg)); if (!pp->dbi_base) { dev_err(pp->dev, "error with ioremap\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } } @@ -504,7 +505,8 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg0_size); if (!pp->va_cfg0_base) { dev_err(pp->dev, "error with ioremap in function\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } } @@ -513,7 +515,8 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg1_size); if (!pp->va_cfg1_base) { dev_err(pp->dev, "error with ioremap\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } } @@ -528,7 +531,8 @@ int dw_pcie_host_init(struct pcie_port *pp) &dw_pcie_msi_chip); if (!pp->irq_domain) { dev_err(pp->dev, "irq domain init failed\n"); - return -ENXIO; + ret = -ENXIO; + goto error; } for (i = 0; i < MAX_MSI_IRQS; i++) @@ -536,7 +540,7 @@ int dw_pcie_host_init(struct pcie_port *pp) } else { ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip); if (ret < 0) - return ret; + goto error; } } @@ -552,8 +556,10 @@ int dw_pcie_host_init(struct pcie_port *pp) } else bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, pp, &res); - if (!bus) - return -ENOMEM; + if (!bus) { + ret = -ENOMEM; + goto error; + } if (pp->ops->scan_bus) pp->ops->scan_bus(pp); @@ -571,6 +577,10 @@ int dw_pcie_host_init(struct pcie_port *pp) pci_bus_add_devices(bus); return 0; + +error: + pci_free_resource_list(&res); + return ret; } static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 3e98d4edae2d..7ee9dfcc45fb 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -12,7 +12,7 @@ * published by the Free Software Foundation. 
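The new ARTPEC-6 driver above brings the NoC clock, power-idle handshake, and PHY PLL up by polling status bits with a bounded retry count rather than waiting forever. The idiom reduced to a sketch; the register, bit, and 50/1000 numbers here are illustrative only, not taken from the driver:

#include <linux/delay.h>
#include <linux/io.h>

static bool foo_wait_ready(void __iomem *status_reg, u32 ready_bit)
{
	unsigned int retries = 50;

	do {
		if (readl(status_reg) & ready_bit)
			return true;
		usleep_range(1000, 2000);	/* sleep between reads */
	} while (--retries);

	return false;	/* caller decides how to report the timeout */
}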
*/ #include <linux/interrupt.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/mfd/syscon.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -235,9 +235,6 @@ static const struct of_device_id hisi_pcie_of_match[] = { {}, }; - -MODULE_DEVICE_TABLE(of, hisi_pcie_of_match); - static struct platform_driver hisi_pcie_driver = { .probe = hisi_pcie_probe, .driver = { @@ -245,10 +242,4 @@ static struct platform_driver hisi_pcie_driver = { .of_match_table = hisi_pcie_of_match, }, }; - -module_platform_driver(hisi_pcie_driver); - -MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); -MODULE_AUTHOR("Dacai Zhu <zhudacai@hisilicon.com>"); -MODULE_AUTHOR("Gabriele Paoloni <gabriele.paoloni@huawei.com>"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(hisi_pcie_driver); diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index a576aeeb22da..e167b2f0098d 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -462,6 +462,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) if (!pcie || !pcie->dev || !pcie->base) return -EINVAL; + ret = devm_request_pci_bus_resources(pcie->dev, res); + if (ret) + return ret; + ret = phy_init(pcie->phy); if (ret) { dev_err(pcie->dev, "unable to initialize PCIe PHY\n"); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 35092188039b..65db7a221509 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -7,6 +7,8 @@ * arch/sh/drivers/pci/ops-sh7786.c * Copyright (C) 2009 - 2011 Paul Mundt * + * Author: Phil Edworthy <phil.edworthy@renesas.com> + * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
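Most conversions in this series drop module_platform_driver() and the MODULE_* boilerplate in favor of builtin_platform_driver(), because these host controller drivers are built-in only. The macro expands roughly as below (per include/linux/platform_device.h and include/linux/device.h): it registers the driver from a device_initcall() and provides no unregister path, which is also why the MODULE_DEVICE_TABLE() lines can go:

/* Approximate expansion of the helpers used above */
#define builtin_platform_driver(__platform_driver) \
	builtin_driver(__platform_driver, platform_driver_register)

#define builtin_driver(__driver, __register, ...) \
static int __init __driver##_init(void) \
{ \
	return __register(&(__driver), ##__VA_ARGS__); \
} \
device_initcall(__driver##_init);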
@@ -18,7 +20,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -936,12 +938,6 @@ static const struct of_device_id rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init }, {}, }; -MODULE_DEVICE_TABLE(of, rcar_pcie_of_match); - -static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci) -{ - pci_free_resource_list(&pci->resources); -} static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) { @@ -955,37 +951,25 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) if (err) return err; + err = devm_request_pci_bus_resources(dev, &pci->resources); + if (err) + goto out_release_res; + resource_list_for_each_entry(win, &pci->resources) { - struct resource *parent, *res = win->res; + struct resource *res = win->res; - switch (resource_type(res)) { - case IORESOURCE_IO: - parent = &ioport_resource; + if (resource_type(res) == IORESOURCE_IO) { err = pci_remap_iospace(res, iobase); - if (err) { + if (err) dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); - continue; - } - break; - case IORESOURCE_MEM: - parent = &iomem_resource; - break; - - case IORESOURCE_BUS: - default: - continue; } - - err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; } return 0; out_release_res: - rcar_pcie_release_of_pci_ranges(pci); + pci_free_resource_list(&pci->resources); return err; } @@ -1073,8 +1057,4 @@ static struct platform_driver rcar_pcie_driver = { }, .probe = rcar_pcie_probe, }; -module_platform_driver(rcar_pcie_driver); - -MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); -MODULE_DESCRIPTION("Renesas R-Car PCIe driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(rcar_pcie_driver); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 3479d30e2be8..0b597d9190b4 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -825,27 +825,33 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase); if (err) { - pr_err("Getting bridge resources failed\n"); + dev_err(pcie->dev, "Getting bridge resources failed\n"); return err; } + err = devm_request_pci_bus_resources(pcie->dev, &res); + if (err) + goto error; + err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(pcie->dev, "Failed creating IRQ Domain\n"); - return err; + goto error; } bus = pci_create_root_bus(&pdev->dev, pcie->root_busno, &nwl_pcie_ops, pcie, &res); - if (!bus) - return -ENOMEM; + if (!bus) { + err = -ENOMEM; + goto error; + } if (IS_ENABLED(CONFIG_PCI_MSI)) { err = nwl_pcie_enable_msi(pcie, bus); if (err < 0) { dev_err(&pdev->dev, "failed to enable MSI support: %d\n", err); - return err; + goto error; } } pci_scan_child_bus(bus); @@ -855,6 +861,10 @@ static int nwl_pcie_probe(struct platform_device *pdev) pci_bus_add_devices(bus); platform_set_drvdata(pdev, pcie); return 0; + +error: + pci_free_resource_list(&res); + return err; } static int nwl_pcie_remove(struct platform_device *pdev) diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 65f0fe0c2eaf..a30e01639557 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -550,7 +550,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) pcie_intc_node = 
of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); - return PTR_ERR(pcie_intc_node); + return -ENODEV; } port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, @@ -558,7 +558,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) port); if (!port->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return PTR_ERR(port->irq_domain); + return -ENODEV; } /* Setup MSI */ @@ -569,7 +569,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) &xilinx_pcie_msi_chip); if (!port->irq_domain) { dev_err(dev, "Failed to get a MSI IRQ domain\n"); - return PTR_ERR(port->irq_domain); + return -ENODEV; } xilinx_pcie_enable_msi(port); @@ -660,7 +660,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) struct xilinx_pcie_port *port; struct device *dev = &pdev->dev; struct pci_bus *bus; - int err; resource_size_t iobase = 0; LIST_HEAD(res); @@ -694,10 +693,17 @@ static int xilinx_pcie_probe(struct platform_device *pdev) dev_err(dev, "Getting bridge resources failed\n"); return err; } + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto error; + bus = pci_create_root_bus(&pdev->dev, 0, &xilinx_pcie_ops, port, &res); - if (!bus) - return -ENOMEM; + if (!bus) { + err = -ENOMEM; + goto error; + } #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = port->dev; @@ -712,6 +718,10 @@ static int xilinx_pcie_probe(struct platform_device *pdev) platform_set_drvdata(pdev, port); return 0; + +error: + pci_free_resource_list(&res); + return err; } /** diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index df8caec59789..aadce45a9b4a 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -113,6 +113,19 @@ config HOTPLUG_PCI_SHPC When in doubt, say N. +config HOTPLUG_PCI_POWERNV + tristate "PowerPC PowerNV PCI Hotplug driver" + depends on PPC_POWERNV && EEH + select OF_DYNAMIC + help + Say Y here if you run a PowerPC PowerNV platform that supports + PCI hotplug. + + To compile this driver as a module, choose M here: the + module will be called pnv-php. + + When in doubt, say N.
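The pcie-xilinx hunks above fix a classic mix-up: irq_domain_add_linear() and of_get_next_child() return NULL on failure, so feeding them to PTR_ERR() yields 0, which callers read as success. A sketch of the corrected convention, with an explicit errno on the NULL path:

#include <linux/err.h>
#include <linux/irqdomain.h>

static int foo_init_irq_domain(struct device_node *node, void *host_data)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(node, 4, &irq_domain_simple_ops,
				       host_data);
	if (!domain)		/* NULL, not an ERR_PTR() value */
		return -ENODEV;	/* so pick an errno explicitly */

	return 0;
}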
+ config HOTPLUG_PCI_RPA tristate "RPA PCI Hotplug driver" depends on PPC_PSERIES && EEH diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index b616e7588ff4..e33cdda45a4d 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o +obj-$(CONFIG_HOTPLUG_PCI_POWERNV) += pnv-php.o obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o @@ -50,6 +51,8 @@ ibmphp-objs := ibmphp_core.o \ acpiphp-objs := acpiphp_core.o \ acpiphp_glue.o +pnv-php-objs := pnv_php.o + rpaphp-objs := rpaphp_core.o \ rpaphp_pci.o \ rpaphp_slot.o diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index fa49f9143b80..6a33ddcfa20b 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -675,6 +675,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) if (bridge->is_going_away) return; + pm_runtime_get_sync(&bridge->pci_dev->dev); + list_for_each_entry(slot, &bridge->slots, node) { struct pci_bus *bus = slot->bus; struct pci_dev *dev, *tmp; @@ -694,6 +696,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) disable_slot(slot); } } + + pm_runtime_put(&bridge->pci_dev->dev); } /* diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 5c24e938042f..08e84d61874e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -546,6 +546,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) u8 present; bool link; + /* Interrupts cannot originate from a controller that's asleep */ + if (pdev->current_state == PCI_D3cold) + return IRQ_NONE; + /* * In order to guarantee that all interrupt events are * serviced, we need to re-inspect Slot Status register after diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c new file mode 100644 index 000000000000..e6245b03f0a1 --- /dev/null +++ b/drivers/pci/hotplug/pnv_php.c @@ -0,0 +1,711 @@ +/* + * PCI Hotplug Driver for PowerPC PowerNV platform. + * + * Copyright Gavin Shan, IBM Corporation 2016. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
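The acpiphp_glue.c hunk above wraps the bridge re-scan in a runtime-PM reference so the bridge cannot drop to D3 while its slots' config space is being probed, matching the new pcie_isr() check that ignores interrupts from a controller in D3cold. The bracket pattern in isolation:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void foo_rescan(struct pci_dev *bridge)
{
	pm_runtime_get_sync(&bridge->dev);	/* resume and pin in D0 */

	/* ... safe to touch the bridge and its children here ... */

	pm_runtime_put(&bridge->dev);		/* allow suspend again */
}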
+ */ + +#include <linux/libfdt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pci_hotplug.h> + +#include <asm/opal.h> +#include <asm/pnv-pci.h> +#include <asm/ppc-pci.h> + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" +#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver" + +static LIST_HEAD(pnv_php_slot_list); +static DEFINE_SPINLOCK(pnv_php_lock); + +static void pnv_php_register(struct device_node *dn); +static void pnv_php_unregister_one(struct device_node *dn); +static void pnv_php_unregister(struct device_node *dn); + +static void pnv_php_free_slot(struct kref *kref) +{ + struct pnv_php_slot *php_slot = container_of(kref, + struct pnv_php_slot, kref); + + WARN_ON(!list_empty(&php_slot->children)); + kfree(php_slot->name); + kfree(php_slot); +} + +static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot) +{ + + if (WARN_ON(!php_slot)) + return; + + kref_put(&php_slot->kref, pnv_php_free_slot); +} + +static struct pnv_php_slot *pnv_php_match(struct device_node *dn, + struct pnv_php_slot *php_slot) +{ + struct pnv_php_slot *target, *tmp; + + if (php_slot->dn == dn) { + kref_get(&php_slot->kref); + return php_slot; + } + + list_for_each_entry(tmp, &php_slot->children, link) { + target = pnv_php_match(dn, tmp); + if (target) + return target; + } + + return NULL; +} + +struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn) +{ + struct pnv_php_slot *php_slot, *tmp; + unsigned long flags; + + spin_lock_irqsave(&pnv_php_lock, flags); + list_for_each_entry(tmp, &pnv_php_slot_list, link) { + php_slot = pnv_php_match(dn, tmp); + if (php_slot) { + spin_unlock_irqrestore(&pnv_php_lock, flags); + return php_slot; + } + } + spin_unlock_irqrestore(&pnv_php_lock, flags); + + return NULL; +} +EXPORT_SYMBOL_GPL(pnv_php_find_slot); + +/* + * Remove pdn for all children of the indicated device node. + * The function should remove pdn in a depth-first manner. + */ +static void pnv_php_rmv_pdns(struct device_node *dn) +{ + struct device_node *child; + + for_each_child_of_node(dn, child) { + pnv_php_rmv_pdns(child); + + pci_remove_device_node_info(child); + } +} + +/* + * Detach all child nodes of the indicated device nodes. The + * function should handle device nodes in depth-first manner. + * + * We should not invoke of_node_release() as the memory for + * individual device node is part of large memory block. The + * large block is allocated from memblock (system bootup) or + * kmalloc() when unflattening the device tree by OF changeset. + * We can not free the large block allocated from memblock. For + * later case, it should be released at once. + */ +static void pnv_php_detach_device_nodes(struct device_node *parent) +{ + struct device_node *dn; + int refcount; + + for_each_child_of_node(parent, dn) { + pnv_php_detach_device_nodes(dn); + + of_node_put(dn); + refcount = atomic_read(&dn->kobj.kref.refcount); + if (unlikely(refcount != 1)) + pr_warn("Invalid refcount %d on <%s>\n", + refcount, of_node_full_name(dn)); + + of_detach_node(dn); + } +} + +static void pnv_php_rmv_devtree(struct pnv_php_slot *php_slot) +{ + pnv_php_rmv_pdns(php_slot->dn); + + /* + * Decrease the refcount if the device nodes were created + * through OF changeset before detaching them. 
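pnv_php ties slot lifetime to a kref: every successful lookup (pnv_php_match() above calls kref_get()) takes a reference, every user drops one, and the release callback frees the object only when the count reaches zero. The same pattern on a stripped-down object:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo_slot {
	struct kref kref;
	char *name;
};

static void foo_slot_free(struct kref *kref)
{
	struct foo_slot *slot = container_of(kref, struct foo_slot, kref);

	kfree(slot->name);
	kfree(slot);
}

static struct foo_slot *foo_slot_alloc(const char *label)
{
	struct foo_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);

	if (!slot)
		return NULL;

	slot->name = kstrdup(label, GFP_KERNEL);
	if (!slot->name) {
		kfree(slot);
		return NULL;
	}

	kref_init(&slot->kref);		/* refcount starts at 1 */
	return slot;
}

/* The last put runs foo_slot_free() and the object disappears */
static void foo_slot_put(struct foo_slot *slot)
{
	kref_put(&slot->kref, foo_slot_free);
}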
+ */ + if (php_slot->fdt) + of_changeset_destroy(&php_slot->ocs); + pnv_php_detach_device_nodes(php_slot->dn); + + if (php_slot->fdt) { + kfree(php_slot->dt); + kfree(php_slot->fdt); + php_slot->dt = NULL; + php_slot->dn->child = NULL; + php_slot->fdt = NULL; + } +} + +/* + * As the nodes in OF changeset are applied in reverse order, we + * need revert the nodes in advance so that we have correct node + * order after the changeset is applied. + */ +static void pnv_php_reverse_nodes(struct device_node *parent) +{ + struct device_node *child, *next; + + /* In-depth first */ + for_each_child_of_node(parent, child) + pnv_php_reverse_nodes(child); + + /* Reverse the nodes in the child list */ + child = parent->child; + parent->child = NULL; + while (child) { + next = child->sibling; + + child->sibling = parent->child; + parent->child = child; + child = next; + } +} + +static int pnv_php_populate_changeset(struct of_changeset *ocs, + struct device_node *dn) +{ + struct device_node *child; + int ret = 0; + + for_each_child_of_node(dn, child) { + ret = of_changeset_attach_node(ocs, child); + if (unlikely(ret)) + break; + + ret = pnv_php_populate_changeset(ocs, child); + if (unlikely(ret)) + break; + } + + return ret; +} + +static void *pnv_php_add_one_pdn(struct device_node *dn, void *data) +{ + struct pci_controller *hose = (struct pci_controller *)data; + struct pci_dn *pdn; + + pdn = pci_add_device_node_info(hose, dn); + if (unlikely(!pdn)) + return ERR_PTR(-ENOMEM); + + return NULL; +} + +static void pnv_php_add_pdns(struct pnv_php_slot *slot) +{ + struct pci_controller *hose = pci_bus_to_host(slot->bus); + + pci_traverse_device_nodes(slot->dn, pnv_php_add_one_pdn, hose); +} + +static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) +{ + void *fdt, *fdt1, *dt; + int ret; + + /* We don't know the FDT blob size. We try to get it through + * maximal memory chunk and then copy it to another chunk that + * fits the real size. 
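pnv_php_reverse_nodes() above is the classic in-place reversal of a singly linked list, applied to the device node ->sibling chain so the changeset, which attaches in reverse, restores the original order. The generic form of the same loop:

struct foo_node {
	struct foo_node *sibling;
};

static struct foo_node *foo_reverse(struct foo_node *head)
{
	struct foo_node *prev = NULL, *next;

	while (head) {
		next = head->sibling;
		head->sibling = prev;	/* point back at the new list */
		prev = head;
		head = next;
	}

	return prev;	/* new head: the old tail */
}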
+ */ + fdt1 = kzalloc(0x10000, GFP_KERNEL); + if (unlikely(!fdt1)) { + ret = -ENOMEM; + dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n"); + goto out; + } + + ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000); + if (unlikely(ret)) { + dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n", + ret); + goto free_fdt1; + } + + fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL); + if (unlikely(!fdt)) { + ret = -ENOMEM; + dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n", + fdt_totalsize(fdt1)); + goto free_fdt1; + } + + /* Unflatten device tree blob */ + memcpy(fdt, fdt1, fdt_totalsize(fdt1)); + dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL); + if (unlikely(!dt)) { + ret = -EINVAL; + dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n"); + goto free_fdt; + } + + /* Initialize and apply the changeset */ + of_changeset_init(&php_slot->ocs); + pnv_php_reverse_nodes(php_slot->dn); + ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn); + if (unlikely(ret)) { + pnv_php_reverse_nodes(php_slot->dn); + dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n", + ret); + goto free_dt; + } + + php_slot->dn->child = NULL; + ret = of_changeset_apply(&php_slot->ocs); + if (unlikely(ret)) { + dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n", + ret); + goto destroy_changeset; + } + + /* Add device node firmware data */ + pnv_php_add_pdns(php_slot); + php_slot->fdt = fdt; + php_slot->dt = dt; + kfree(fdt1); + goto out; + +destroy_changeset: + of_changeset_destroy(&php_slot->ocs); +free_dt: + kfree(dt); + php_slot->dn->child = NULL; +free_fdt: + kfree(fdt); +free_fdt1: + kfree(fdt1); +out: + return ret; +} + +int pnv_php_set_slot_power_state(struct hotplug_slot *slot, + uint8_t state) +{ + struct pnv_php_slot *php_slot = slot->private; + struct opal_msg msg; + int ret; + + ret = pnv_pci_set_power_state(php_slot->id, state, &msg); + if (likely(ret > 0)) { + if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle || + be64_to_cpu(msg.params[2]) != state || + be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) { + dev_warn(&php_slot->pdev->dev, "Wrong msg (%lld, %lld, %lld)\n", + be64_to_cpu(msg.params[1]), + be64_to_cpu(msg.params[2]), + be64_to_cpu(msg.params[3])); + return -ENOMSG; + } + } else if (unlikely(ret < 0)) { + dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n", + ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off"); + return ret; + } + + if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE) + pnv_php_rmv_devtree(php_slot); + else + ret = pnv_php_add_devtree(php_slot); + + return ret; +} +EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state); + +static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state) +{ + struct pnv_php_slot *php_slot = slot->private; + uint8_t power_state = OPAL_PCI_SLOT_POWER_ON; + int ret; + + /* + * Retrieve power status from firmware. If we fail + * getting that, the power status fails back to + * be on. + */ + ret = pnv_pci_get_power_state(php_slot->id, &power_state); + if (unlikely(ret)) { + dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n", + ret); + } else { + *state = power_state; + slot->info->power_status = power_state; + } + + return 0; +} + +static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) +{ + struct pnv_php_slot *php_slot = slot->private; + uint8_t presence = OPAL_PCI_SLOT_EMPTY; + int ret; + + /* + * Retrieve presence status from firmware. If we can't + * get that, it will fail back to be empty. 
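pnv_php_add_devtree() above cannot ask firmware for the blob size, so it reads into a worst-case 64KB buffer and then keeps an exact-size copy measured with fdt_totalsize(). The shape of that two-step fetch; foo_fetch() stands in for the firmware call and is hypothetical:

#include <linux/libfdt.h>
#include <linux/slab.h>

static void *foo_get_fdt(int (*foo_fetch)(void *buf, size_t len))
{
	void *tmp, *fdt = NULL;

	tmp = kzalloc(0x10000, GFP_KERNEL);	/* worst-case size */
	if (!tmp)
		return NULL;

	if (foo_fetch(tmp, 0x10000))
		goto out;

	/* keep only as many bytes as the blob header says it needs */
	fdt = kmemdup(tmp, fdt_totalsize(tmp), GFP_KERNEL);
out:
	kfree(tmp);
	return fdt;
}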
+ */ + ret = pnv_pci_get_presence_state(php_slot->id, &presence); + if (likely(ret >= 0)) { + *state = presence; + slot->info->adapter_status = presence; + ret = 0; + } else { + dev_warn(&php_slot->pdev->dev, "Error %d getting presence\n", + ret); + } + + return ret; +} + +static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) +{ + /* FIXME: Make it real once firmware supports it */ + slot->info->attention_status = state; + + return 0; +} + +static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) +{ + struct hotplug_slot *slot = &php_slot->slot; + uint8_t presence = OPAL_PCI_SLOT_EMPTY; + uint8_t power_status = OPAL_PCI_SLOT_POWER_ON; + int ret; + + /* Check if the slot has been configured */ + if (php_slot->state != PNV_PHP_STATE_REGISTERED) + return 0; + + /* Retrieve slot presence status */ + ret = pnv_php_get_adapter_state(slot, &presence); + if (unlikely(ret)) + return ret; + + /* Proceed if there have nothing behind the slot */ + if (presence == OPAL_PCI_SLOT_EMPTY) + goto scan; + + /* + * If the power supply to the slot is off, we can't detect + * adapter presence state. That means we have to turn the + * slot on before going to probe slot's presence state. + * + * On the first time, we don't change the power status to + * boost system boot with assumption that the firmware + * supplies consistent slot power status: empty slot always + * has its power off and non-empty slot has its power on. + */ + if (!php_slot->power_state_check) { + php_slot->power_state_check = true; + + ret = pnv_php_get_power_state(slot, &power_status); + if (unlikely(ret)) + return ret; + + if (power_status != OPAL_PCI_SLOT_POWER_ON) + return 0; + } + + /* Check the power status. Scan the slot if it is already on */ + ret = pnv_php_get_power_state(slot, &power_status); + if (unlikely(ret)) + return ret; + + if (power_status == OPAL_PCI_SLOT_POWER_ON) + goto scan; + + /* Power is off, turn it on and then scan the slot */ + ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON); + if (unlikely(ret)) + return ret; + +scan: + if (presence == OPAL_PCI_SLOT_PRESENT) { + if (rescan) { + pci_lock_rescan_remove(); + pci_hp_add_devices(php_slot->bus); + pci_unlock_rescan_remove(); + } + + /* Rescan for child hotpluggable slots */ + php_slot->state = PNV_PHP_STATE_POPULATED; + if (rescan) + pnv_php_register(php_slot->dn); + } else { + php_slot->state = PNV_PHP_STATE_POPULATED; + } + + return 0; +} + +static int pnv_php_enable_slot(struct hotplug_slot *slot) +{ + struct pnv_php_slot *php_slot = container_of(slot, + struct pnv_php_slot, slot); + + return pnv_php_enable(php_slot, true); +} + +static int pnv_php_disable_slot(struct hotplug_slot *slot) +{ + struct pnv_php_slot *php_slot = slot->private; + int ret; + + if (php_slot->state != PNV_PHP_STATE_POPULATED) + return 0; + + /* Remove all devices behind the slot */ + pci_lock_rescan_remove(); + pci_hp_remove_devices(php_slot->bus); + pci_unlock_rescan_remove(); + + /* Detach the child hotpluggable slots */ + pnv_php_unregister(php_slot->dn); + + /* Notify firmware and remove device nodes */ + ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_OFF); + + php_slot->state = PNV_PHP_STATE_REGISTERED; + return ret; +} + +static struct hotplug_slot_ops php_slot_ops = { + .get_power_status = pnv_php_get_power_state, + .get_adapter_status = pnv_php_get_adapter_state, + .set_attention_status = pnv_php_set_attention_state, + .enable_slot = pnv_php_enable_slot, + .disable_slot = pnv_php_disable_slot, +}; + +static void 
pnv_php_release(struct hotplug_slot *slot) +{ + struct pnv_php_slot *php_slot = slot->private; + unsigned long flags; + + /* Remove from global or child list */ + spin_lock_irqsave(&pnv_php_lock, flags); + list_del(&php_slot->link); + spin_unlock_irqrestore(&pnv_php_lock, flags); + + /* Detach from parent */ + pnv_php_put_slot(php_slot); + pnv_php_put_slot(php_slot->parent); +} + +static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn) +{ + struct pnv_php_slot *php_slot; + struct pci_bus *bus; + const char *label; + uint64_t id; + + label = of_get_property(dn, "ibm,slot-label", NULL); + if (unlikely(!label)) + return NULL; + + if (pnv_pci_get_slot_id(dn, &id)) + return NULL; + + bus = pci_find_bus_by_node(dn); + if (unlikely(!bus)) + return NULL; + + php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL); + if (unlikely(!php_slot)) + return NULL; + + php_slot->name = kstrdup(label, GFP_KERNEL); + if (unlikely(!php_slot->name)) { + kfree(php_slot); + return NULL; + } + + if (likely(dn->child && PCI_DN(dn->child))) + php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn); + else + php_slot->slot_no = -1; /* Placeholder slot */ + + kref_init(&php_slot->kref); + php_slot->state = PNV_PHP_STATE_INITIALIZED; + php_slot->dn = dn; + php_slot->pdev = bus->self; + php_slot->bus = bus; + php_slot->id = id; + php_slot->power_state_check = false; + php_slot->slot.ops = &php_slot_ops; + php_slot->slot.info = &php_slot->slot_info; + php_slot->slot.release = pnv_php_release; + php_slot->slot.private = php_slot; + + INIT_LIST_HEAD(&php_slot->children); + INIT_LIST_HEAD(&php_slot->link); + + return php_slot; +} + +static int pnv_php_register_slot(struct pnv_php_slot *php_slot) +{ + struct pnv_php_slot *parent; + struct device_node *dn = php_slot->dn; + unsigned long flags; + int ret; + + /* Check if the slot is registered or not */ + parent = pnv_php_find_slot(php_slot->dn); + if (unlikely(parent)) { + pnv_php_put_slot(parent); + return -EEXIST; + } + + /* Register PCI slot */ + ret = pci_hp_register(&php_slot->slot, php_slot->bus, + php_slot->slot_no, php_slot->name); + if (unlikely(ret)) { + dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n", + ret); + return ret; + } + + /* Attach to the parent's child list or global list */ + while ((dn = of_get_parent(dn))) { + if (!PCI_DN(dn)) { + of_node_put(dn); + break; + } + + parent = pnv_php_find_slot(dn); + if (parent) { + of_node_put(dn); + break; + } + + of_node_put(dn); + } + + spin_lock_irqsave(&pnv_php_lock, flags); + php_slot->parent = parent; + if (parent) + list_add_tail(&php_slot->link, &parent->children); + else + list_add_tail(&php_slot->link, &pnv_php_slot_list); + spin_unlock_irqrestore(&pnv_php_lock, flags); + + php_slot->state = PNV_PHP_STATE_REGISTERED; + return 0; +} + +static int pnv_php_register_one(struct device_node *dn) +{ + struct pnv_php_slot *php_slot; + const __be32 *prop32; + int ret; + + /* Check if it's hotpluggable slot */ + prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL); + if (!prop32 || !of_read_number(prop32, 1)) + return -ENXIO; + + prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL); + if (!prop32 || !of_read_number(prop32, 1)) + return -ENXIO; + + php_slot = pnv_php_alloc_slot(dn); + if (unlikely(!php_slot)) + return -ENODEV; + + ret = pnv_php_register_slot(php_slot); + if (unlikely(ret)) + goto free_slot; + + ret = pnv_php_enable(php_slot, false); + if (unlikely(ret)) + goto unregister_slot; + + return 0; + +unregister_slot: + pnv_php_unregister_one(php_slot->dn); +free_slot: + 
pnv_php_put_slot(php_slot); + return ret; +} + +static void pnv_php_register(struct device_node *dn) +{ + struct device_node *child; + + /* + * The parent slots should be registered before their + * child slots. + */ + for_each_child_of_node(dn, child) { + pnv_php_register_one(child); + pnv_php_register(child); + } +} + +static void pnv_php_unregister_one(struct device_node *dn) +{ + struct pnv_php_slot *php_slot; + + php_slot = pnv_php_find_slot(dn); + if (!php_slot) + return; + + php_slot->state = PNV_PHP_STATE_OFFLINE; + pnv_php_put_slot(php_slot); + pci_hp_deregister(&php_slot->slot); +} + +static void pnv_php_unregister(struct device_node *dn) +{ + struct device_node *child; + + /* The child slots should go before their parent slots */ + for_each_child_of_node(dn, child) { + pnv_php_unregister(child); + pnv_php_unregister_one(child); + } +} + +static int __init pnv_php_init(void) +{ + struct device_node *dn; + + pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); + for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") + pnv_php_register(dn); + + return 0; +} + +static void __exit pnv_php_exit(void) +{ + struct device_node *dn; + + for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") + pnv_php_unregister(dn); +} + +module_init(pnv_php_init); +module_exit(pnv_php_exit); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 6937c725b00b..388c4d8fcdd1 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -117,8 +117,10 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot); int rpaphp_register_slot(struct slot *slot) { struct hotplug_slot *php_slot = slot->hotplug_slot; + struct device_node *child; + u32 my_index; int retval; - int slotno; + int slotno = -1; dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, @@ -130,10 +132,15 @@ int rpaphp_register_slot(struct slot *slot) return -EAGAIN; } - if (slot->dn->child) - slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); - else - slotno = -1; + for_each_child_of_node(slot->dn, child) { + retval = of_property_read_u32(child, "ibm,my-drc-index", &my_index); + if (my_index == slot->index) { + slotno = PCI_SLOT(PCI_DN(child)->devfn); + of_node_put(child); + break; + } + } + retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name); if (retval) { err("pci_hp_register failed with error %d\n", retval); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index a080f4496fe2..a02981efdad5 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -4,6 +4,7 @@ * * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + * Copyright (C) 2016 Christoph Hellwig. */ #include <linux/err.h> @@ -207,6 +208,12 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); } +static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +{ + return desc->mask_base + + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; +} + /* * This internal function does not flush PCI writes to the device. 
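pnv_php_register() and pnv_php_unregister() above are mirror-image depth-first walks: parents are registered before their children, and children are torn down before their parents. The skeleton, with hypothetical foo_register_one()/foo_unregister_one() helpers:

#include <linux/of.h>

static void foo_register_one(struct device_node *dn);	/* hypothetical */
static void foo_unregister_one(struct device_node *dn);	/* hypothetical */

static void foo_register(struct device_node *dn)
{
	struct device_node *child;

	for_each_child_of_node(dn, child) {
		foo_register_one(child);	/* parent first ... */
		foo_register(child);		/* ... then descend */
	}
}

static void foo_unregister(struct device_node *dn)
{
	struct device_node *child;

	for_each_child_of_node(dn, child) {
		foo_unregister(child);		/* leaves first ... */
		foo_unregister_one(child);	/* ... parent last */
	}
}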
* All users must ensure that they read from the device before either @@ -217,8 +224,6 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) { u32 mask_bits = desc->masked; - unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; if (pci_msi_ignore_mask) return 0; @@ -226,7 +231,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; if (flag) mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; - writel(mask_bits, desc->mask_base + offset); + writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL); return mask_bits; } @@ -284,8 +289,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) BUG_ON(dev->current_state != PCI_D0); if (entry->msi_attrib.is_msix) { - void __iomem *base = entry->mask_base + - entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; + void __iomem *base = pci_msix_desc_addr(entry); msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); @@ -315,9 +319,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) if (dev->current_state != PCI_D0) { /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { - void __iomem *base; - base = entry->mask_base + - entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; + void __iomem *base = pci_msix_desc_addr(entry); writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); @@ -567,6 +569,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); entry->nvec_used = nvec; + entry->affinity = dev->irq_affinity; if (control & PCI_MSI_FLAGS_64BIT) entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; @@ -678,10 +681,18 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msix_entry *entries, int nvec) { + const struct cpumask *mask = NULL; struct msi_desc *entry; - int i; + int cpu = -1, i; for (i = 0; i < nvec; i++) { + if (dev->irq_affinity) { + cpu = cpumask_next(cpu, dev->irq_affinity); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(dev->irq_affinity); + mask = cpumask_of(cpu); + } + entry = alloc_msi_entry(&dev->dev); if (!entry) { if (!i) @@ -694,10 +705,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.is_msix = 1; entry->msi_attrib.is_64 = 1; - entry->msi_attrib.entry_nr = entries[i].entry; + if (entries) + entry->msi_attrib.entry_nr = entries[i].entry; + else + entry->msi_attrib.entry_nr = i; entry->msi_attrib.default_irq = dev->irq; entry->mask_base = base; entry->nvec_used = 1; + entry->affinity = mask; list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); } @@ -712,13 +727,11 @@ static void msix_program_entries(struct pci_dev *dev, int i = 0; for_each_pci_msi_entry(entry, dev) { - int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; - - entries[i].vector = entry->irq; - entry->masked = readl(entry->mask_base + offset); + if (entries) + entries[i++].vector = entry->irq; + entry->masked = readl(pci_msix_desc_addr(entry) + + PCI_MSIX_ENTRY_VECTOR_CTRL); msix_mask_irq(entry, 1); - i++; } } @@ -931,7 +944,7 @@ EXPORT_SYMBOL(pci_msix_vec_count); /** * 
pci_enable_msix - configure device's MSI-X capability structure * @dev: pointer to the pci_dev data structure of MSI-X device function - * @entries: pointer to an array of MSI-X entries + * @entries: pointer to an array of MSI-X entries (optional) * @nvec: number of MSI-X irqs requested for allocation by device driver * * Setup the MSI-X capability structure of device function with the number @@ -951,22 +964,21 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) if (!pci_msi_supported(dev, nvec)) return -EINVAL; - if (!entries) - return -EINVAL; - nr_entries = pci_msix_vec_count(dev); if (nr_entries < 0) return nr_entries; if (nvec > nr_entries) return nr_entries; - /* Check for any invalid entries */ - for (i = 0; i < nvec; i++) { - if (entries[i].entry >= nr_entries) - return -EINVAL; /* invalid entry */ - for (j = i + 1; j < nvec; j++) { - if (entries[i].entry == entries[j].entry) - return -EINVAL; /* duplicate entry */ + if (entries) { + /* Check for any invalid entries */ + for (i = 0; i < nvec; i++) { + if (entries[i].entry >= nr_entries) + return -EINVAL; /* invalid entry */ + for (j = i + 1; j < nvec; j++) { + if (entries[i].entry == entries[j].entry) + return -EINVAL; /* duplicate entry */ + } } } WARN_ON(!!dev->msix_enabled); @@ -1026,19 +1038,8 @@ int pci_msi_enabled(void) } EXPORT_SYMBOL(pci_msi_enabled); -/** - * pci_enable_msi_range - configure device's MSI capability structure - * @dev: device to configure - * @minvec: minimal number of interrupts to configure - * @maxvec: maximum number of interrupts to configure - * - * This function tries to allocate a maximum possible number of interrupts in a - * range between @minvec and @maxvec. It returns a negative errno if an error - * occurs. If it succeeds, it returns the actual number of interrupts allocated - * and updates the @dev's irq member to the lowest new interrupt number; - * the other interrupt numbers allocated to this device are consecutive. - **/ -int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, + unsigned int flags) { int nvec; int rc; @@ -1061,25 +1062,85 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) nvec = pci_msi_vec_count(dev); if (nvec < 0) return nvec; - else if (nvec < minvec) + if (nvec < minvec) return -EINVAL; - else if (nvec > maxvec) + + if (nvec > maxvec) nvec = maxvec; - do { + for (;;) { + if (!(flags & PCI_IRQ_NOAFFINITY)) { + dev->irq_affinity = irq_create_affinity_mask(&nvec); + if (nvec < minvec) + return -ENOSPC; + } + rc = msi_capability_init(dev, nvec); - if (rc < 0) { + if (rc == 0) + return nvec; + + kfree(dev->irq_affinity); + dev->irq_affinity = NULL; + + if (rc < 0) return rc; - } else if (rc > 0) { - if (rc < minvec) + if (rc < minvec) + return -ENOSPC; + + nvec = rc; + } +} + +/** + * pci_enable_msi_range - configure device's MSI capability structure + * @dev: device to configure + * @minvec: minimal number of interrupts to configure + * @maxvec: maximum number of interrupts to configure + * + * This function tries to allocate a maximum possible number of interrupts in a + * range between @minvec and @maxvec. It returns a negative errno if an error + * occurs. If it succeeds, it returns the actual number of interrupts allocated + * and updates the @dev's irq member to the lowest new interrupt number; + * the other interrupt numbers allocated to this device are consecutive. 
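With the msix_setup_entries() change above, a NULL @entries array becomes legal: Linux vector i simply maps to MSI-X table entry i, and the duplicate/range validation is skipped. A driver that only wants n sequential vectors can therefore do this (and this is the hook the new pci_alloc_irq_vectors() below builds on):

static int foo_enable_msix(struct pci_dev *pdev, int nvec)
{
	/* NULL table: Linux vector i uses MSI-X table entry i */
	return pci_enable_msix(pdev, NULL, nvec);
}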
+ **/ +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY); +} +EXPORT_SYMBOL(pci_enable_msi_range); + +static int __pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, int minvec, int maxvec, + unsigned int flags) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + for (;;) { + if (!(flags & PCI_IRQ_NOAFFINITY)) { + dev->irq_affinity = irq_create_affinity_mask(&nvec); + if (nvec < minvec) return -ENOSPC; - nvec = rc; } - } while (rc); - return nvec; + rc = pci_enable_msix(dev, entries, nvec); + if (rc == 0) + return nvec; + + kfree(dev->irq_affinity); + dev->irq_affinity = NULL; + + if (rc < 0) + return rc; + if (rc < minvec) + return -ENOSPC; + + nvec = rc; + } } -EXPORT_SYMBOL(pci_enable_msi_range); /** * pci_enable_msix_range - configure device's MSI-X capability structure @@ -1097,28 +1158,101 @@ EXPORT_SYMBOL(pci_enable_msi_range); * with new allocated MSI-X interrupts. **/ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, - int minvec, int maxvec) + int minvec, int maxvec) { - int nvec = maxvec; - int rc; + return __pci_enable_msix_range(dev, entries, minvec, maxvec, + PCI_IRQ_NOAFFINITY); +} +EXPORT_SYMBOL(pci_enable_msix_range); - if (maxvec < minvec) - return -ERANGE; +/** + * pci_alloc_irq_vectors - allocate multiple IRQs for a device + * @dev: PCI device to operate on + * @min_vecs: minimum number of vectors required (must be >= 1) + * @max_vecs: maximum (desired) number of vectors + * @flags: flags or quirks for the allocation + * + * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI + * vectors if available, and fall back to a single legacy vector + * if neither is available. Return the number of vectors allocated, + * (which might be smaller than @max_vecs) if successful, or a negative + * error code on error. If less than @min_vecs interrupt vectors are + * available for @dev the function will fail with -ENOSPC. + * + * To get the Linux IRQ number used for a vector that can be passed to + * request_irq() use the pci_irq_vector() helper. + */ +int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + int vecs = -ENOSPC; - do { - rc = pci_enable_msix(dev, entries, nvec); - if (rc < 0) { - return rc; - } else if (rc > 0) { - if (rc < minvec) - return -ENOSPC; - nvec = rc; + if (!(flags & PCI_IRQ_NOMSIX)) { + vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, + flags); + if (vecs > 0) + return vecs; + } + + if (!(flags & PCI_IRQ_NOMSI)) { + vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); + if (vecs > 0) + return vecs; + } + + /* use legacy irq if allowed */ + if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1) + return 1; + return vecs; +} +EXPORT_SYMBOL(pci_alloc_irq_vectors); + +/** + * pci_free_irq_vectors - free previously allocated IRQs for a device + * @dev: PCI device to operate on + * + * Undoes the allocations and enabling in pci_alloc_irq_vectors(). + */ +void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msix(dev); + pci_disable_msi(dev); +} +EXPORT_SYMBOL(pci_free_irq_vectors); + +/** + * pci_irq_vector - return Linux IRQ number of a device vector + * @dev: PCI device to operate on + * @nr: device-relative interrupt vector index (0-based). 
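A typical consumer of the new interface, sketched under the assumption of one handler per vector (foo_handler, foo_dev, and the 1..8 range are placeholders): allocate vectors by whatever mechanism is available, then map each vector index to a Linux IRQ number with pci_irq_vector() before calling request_irq():

#include <linux/interrupt.h>
#include <linux/pci.h>

static int foo_setup_irqs(struct pci_dev *pdev, void *foo_dev,
			  irq_handler_t foo_handler)
{
	int i, err, nvec;

	/* MSI-X, MSI, or legacy INTx, whichever works first */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, 0);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), foo_handler,
				  0, "foo", foo_dev);
		if (err)
			goto free;
	}
	return 0;

free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), foo_dev);
	pci_free_irq_vectors(pdev);
	return err;
}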
+ */ +int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (dev->msix_enabled) { + struct msi_desc *entry; + int i = 0; + + for_each_pci_msi_entry(entry, dev) { + if (i == nr) + return entry->irq; + i++; } - } while (rc); + WARN_ON_ONCE(1); + return -EINVAL; + } - return nvec; + if (dev->msi_enabled) { + struct msi_desc *entry = first_pci_msi_entry(dev); + + if (WARN_ON_ONCE(nr >= entry->nvec_used)) + return -EINVAL; + } else { + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + } + + return dev->irq + nr; } -EXPORT_SYMBOL(pci_enable_msix_range); +EXPORT_SYMBOL(pci_irq_vector); struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d7ffd66814bb..e39a67c8ef39 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -777,7 +777,7 @@ static int pci_pm_suspend_noirq(struct device *dev) if (!pci_dev->state_saved) { pci_save_state(pci_dev); - if (!pci_has_subordinate(pci_dev)) + if (pci_power_manageable(pci_dev)) pci_prepare_to_sleep(pci_dev); } @@ -1144,7 +1144,6 @@ static int pci_pm_runtime_suspend(struct device *dev) return -ENOSYS; pci_dev->state_saved = false; - pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); if (error) { /* @@ -1161,8 +1160,6 @@ static int pci_pm_runtime_suspend(struct device *dev) return error; } - if (!pci_dev->d3cold_allowed) - pci_dev->no_d3cold = true; pci_fixup_device(pci_fixup_suspend, pci_dev); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index d319a9ca9b7b..bcd10c795284 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -406,6 +406,11 @@ static ssize_t d3cold_allowed_store(struct device *dev, return -EINVAL; pdev->d3cold_allowed = !!val; + if (pdev->d3cold_allowed) + pci_d3cold_enable(pdev); + else + pci_d3cold_disable(pdev); + pm_runtime_resume(dev); return count; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index badbddc683f0..aab9d5115a5f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -7,8 +7,10 @@ * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> */ +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/delay.h> +#include <linux/dmi.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_pci.h> @@ -25,7 +27,9 @@ #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> +#include <linux/vmalloc.h> #include <asm/setup.h> +#include <asm/dma.h> #include <linux/aer.h> #include "pci.h" @@ -81,6 +85,9 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; +#define DEFAULT_HOTPLUG_BUS_SIZE 1 +unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; + enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; /* @@ -101,6 +108,21 @@ unsigned int pcibios_max_latency = 255; /* If set, the PCIe ARI capability will not be used. 
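The pci-sysfs hunk above routes the user-visible d3cold_allowed attribute through the new pci_d3cold_enable()/pci_d3cold_disable() helpers so the upstream bridge's D3 capability is recomputed on every change. A driver that knows its device mishandles D3cold can opt out the same way:

#include <linux/pci.h>

static void foo_quirk_no_d3cold(struct pci_dev *pdev)
{
	/* sets no_d3cold and propagates the change upstream */
	pci_d3cold_disable(pdev);
}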
*/ static bool pcie_ari_disabled; +/* Disable bridge_d3 for all PCIe ports */ +static bool pci_bridge_d3_disable; +/* Force bridge_d3 for all PCIe ports */ +static bool pci_bridge_d3_force; + +static int __init pcie_port_pm_setup(char *str) +{ + if (!strcmp(str, "off")) + pci_bridge_d3_disable = true; + else if (!strcmp(str, "force")) + pci_bridge_d3_force = true; + return 1; +} +__setup("pcie_port_pm=", pcie_port_pm_setup); + /** * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children * @bus: pointer to PCI bus structure to search @@ -2156,6 +2178,164 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) } /** + * pci_bridge_d3_possible - Is it possible to put the bridge into D3 + * @bridge: Bridge to check + * + * This function checks if it is possible to move the bridge to D3. + * Currently we only allow D3 for recent enough PCIe ports. + */ +static bool pci_bridge_d3_possible(struct pci_dev *bridge) +{ + unsigned int year; + + if (!pci_is_pcie(bridge)) + return false; + + switch (pci_pcie_type(bridge)) { + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + if (pci_bridge_d3_disable) + return false; + if (pci_bridge_d3_force) + return true; + + /* + * It should be safe to put PCIe ports from 2015 or newer + * to D3. + */ + if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && + year >= 2015) { + return true; + } + break; + } + + return false; +} + +static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) +{ + bool *d3cold_ok = data; + bool no_d3cold; + + /* + * The device needs to be allowed to go to D3cold and, if it is wake + * capable, to signal wakeup from D3cold. + */ + no_d3cold = dev->no_d3cold || !dev->d3cold_allowed || + (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) || + !pci_power_manageable(dev); + + *d3cold_ok = !no_d3cold; + + return no_d3cold; +} + +/* + * pci_bridge_d3_update - Update bridge D3 capabilities + * @dev: PCI device which is changed + * @remove: Is the device being removed + * + * Update upstream bridge PM capabilities depending on whether the + * device PM configuration was changed or the device is being removed. The + * change is also propagated upstream. + */ +static void pci_bridge_d3_update(struct pci_dev *dev, bool remove) +{ + struct pci_dev *bridge; + bool d3cold_ok = true; + + bridge = pci_upstream_bridge(dev); + if (!bridge || !pci_bridge_d3_possible(bridge)) + return; + + pci_dev_get(bridge); + /* + * If the device is removed we do not care about its D3cold + * capabilities. + */ + if (!remove) + pci_dev_check_d3cold(dev, &d3cold_ok); + + if (d3cold_ok) { + /* + * We need to go through all children to find out if all of + * them can still go to D3cold. + */ + pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, + &d3cold_ok); + } + + if (bridge->bridge_d3 != d3cold_ok) { + bridge->bridge_d3 = d3cold_ok; + /* Propagate change to upstream bridges */ + pci_bridge_d3_update(bridge, false); + } + + pci_dev_put(bridge); } + +/** + * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change + * @dev: PCI device that was changed + * + * If a device is added or its PM configuration, such as whether it is + * allowed to enter D3cold, is changed, this function updates upstream + * bridge PM capabilities accordingly.
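+ * + * pci_d3cold_enable() and pci_d3cold_disable() below use this entry point to + * propagate a driver's D3cold policy change to the bridges above the device.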
+ */ +void pci_bridge_d3_device_changed(struct pci_dev *dev) +{ + pci_bridge_d3_update(dev, false); +} + +/** + * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove + * @dev: PCI device being removed + * + * This function updates upstream bridge PM capabilities based on the other + * devices still left on the bus. + */ +void pci_bridge_d3_device_removed(struct pci_dev *dev) +{ + pci_bridge_d3_update(dev, true); +} + +/** + * pci_d3cold_enable - Enable D3cold for device + * @dev: PCI device to handle + * + * This function can be used in drivers to enable D3cold for the device + * they handle. It also updates upstream PCI bridge PM capabilities + * accordingly. + */ +void pci_d3cold_enable(struct pci_dev *dev) +{ + if (dev->no_d3cold) { + dev->no_d3cold = false; + pci_bridge_d3_device_changed(dev); + } +} +EXPORT_SYMBOL_GPL(pci_d3cold_enable); + +/** + * pci_d3cold_disable - Disable D3cold for device + * @dev: PCI device to handle + * + * This function can be used in drivers to disable D3cold for the device + * they handle. It also updates upstream PCI bridge PM capabilities + * accordingly. + */ +void pci_d3cold_disable(struct pci_dev *dev) +{ + if (!dev->no_d3cold) { + dev->no_d3cold = true; + pci_bridge_d3_device_changed(dev); + } +} +EXPORT_SYMBOL_GPL(pci_d3cold_disable); + +/** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. */ @@ -2189,6 +2369,7 @@ void pci_pm_init(struct pci_dev *dev) dev->pm_cap = pm; dev->d3_delay = PCI_PM_D3_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; + dev->bridge_d3 = pci_bridge_d3_possible(dev); dev->d3cold_allowed = true; dev->d1_support = false; @@ -3165,6 +3346,23 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) #endif } +/** + * pci_unmap_iospace - Unmap the memory mapped I/O space + * @res: resource to be unmapped + * + * Unmap the CPU virtual address @res from virtual address space. + * Only architectures that have memory mapped IO functions defined + * (and the PCI_IOBASE value defined) should call this function.
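+ * + * This reverses a mapping previously established with pci_remap_iospace().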
+ */ +void pci_unmap_iospace(struct resource *res) +{ +#if defined(PCI_IOBASE) && defined(CONFIG_MMU) + unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; + + unmap_kernel_range(vaddr, resource_size(res)); +#endif +} + static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; @@ -4755,6 +4953,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock); static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) { int seg, bus, slot, func, align_order, count; + unsigned short vendor, device, subsystem_vendor, subsystem_device; resource_size_t align = 0; char *p; @@ -4768,28 +4967,55 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) } else { align_order = -1; } - if (sscanf(p, "%x:%x:%x.%x%n", - &seg, &bus, &slot, &func, &count) != 4) { - seg = 0; - if (sscanf(p, "%x:%x.%x%n", - &bus, &slot, &func, &count) != 3) { - /* Invalid format */ - printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", - p); + if (strncmp(p, "pci:", 4) == 0) { + /* PCI vendor/device (subvendor/subdevice) ids are specified */ + p += 4; + if (sscanf(p, "%hx:%hx:%hx:%hx%n", + &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) { + if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) { + printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n", + p); + break; + } + subsystem_vendor = subsystem_device = 0; + } + p += count; + if ((!vendor || (vendor == dev->vendor)) && + (!device || (device == dev->device)) && + (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) && + (!subsystem_device || (subsystem_device == dev->subsystem_device))) { + if (align_order == -1) + align = PAGE_SIZE; + else + align = 1 << align_order; + /* Found */ break; } } - p += count; - if (seg == pci_domain_nr(dev->bus) && - bus == dev->bus->number && - slot == PCI_SLOT(dev->devfn) && - func == PCI_FUNC(dev->devfn)) { - if (align_order == -1) - align = PAGE_SIZE; - else - align = 1 << align_order; - /* Found */ - break; + else { + if (sscanf(p, "%x:%x:%x.%x%n", + &seg, &bus, &slot, &func, &count) != 4) { + seg = 0; + if (sscanf(p, "%x:%x.%x%n", + &bus, &slot, &func, &count) != 3) { + /* Invalid format */ + printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n", + p); + break; + } + } + p += count; + if (seg == pci_domain_nr(dev->bus) && + bus == dev->bus->number && + slot == PCI_SLOT(dev->devfn) && + func == PCI_FUNC(dev->devfn)) { + if (align_order == -1) + align = PAGE_SIZE; + else + align = 1 << align_order; + /* Found */ + break; + } } if (*p != ';' && *p != ',') { /* End of param or invalid format */ @@ -4897,7 +5123,7 @@ static ssize_t pci_resource_alignment_store(struct bus_type *bus, return pci_set_resource_alignment_param(buf, count); } -BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, +static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, pci_resource_alignment_store); static int __init pci_resource_alignment_sysfs_init(void) @@ -4923,7 +5149,7 @@ int pci_get_new_domain_nr(void) } #ifdef CONFIG_PCI_DOMAINS_GENERIC -void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) +static int of_pci_bus_find_domain_nr(struct device *parent) { static int use_dt_domains = -1; int domain = -1; @@ -4967,7 +5193,13 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) domain = -1; } - bus->domain_nr = domain; + return domain; +} + +int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) +{ + return acpi_disabled 
? of_pci_bus_find_domain_nr(parent) : + acpi_pci_bus_find_domain_nr(bus); } #endif #endif @@ -5021,6 +5253,11 @@ static int __init pci_setup(char *str) pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mem_size = memparse(str + 10, &str); + } else if (!strncmp(str, "hpbussize=", 10)) { + pci_hotplug_bus_size = + simple_strtoul(str + 10, &str, 0); + if (pci_hotplug_bus_size > 0xff) + pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { pcie_bus_config = PCIE_BUS_TUNE_OFF; } else if (!strncmp(str, "pcie_bus_safe", 13)) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a814bbb80fcb..9730c474b016 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -82,6 +82,8 @@ void pci_pm_init(struct pci_dev *dev); void pci_ea_init(struct pci_dev *dev); void pci_allocate_cap_save_buffers(struct pci_dev *dev); void pci_free_cap_save_buffers(struct pci_dev *dev); +void pci_bridge_d3_device_changed(struct pci_dev *dev); +void pci_bridge_d3_device_removed(struct pci_dev *dev); static inline void pci_wakeup_event(struct pci_dev *dev) { @@ -94,6 +96,15 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev) return !!(pci_dev->subordinate); } +static inline bool pci_power_manageable(struct pci_dev *pci_dev) +{ + /* + * Currently we allow normal PCI devices and PCI bridges transition + * into D3 if their bridge_d3 is set. + */ + return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; +} + struct pci_vpd_ops { ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 22ca6412bd15..7fcea75afa4c 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -83,7 +83,7 @@ config PCIE_PME depends on PCIEPORTBUS && PM config PCIE_DPC - tristate "PCIe Downstream Port Containment support" + bool "PCIe Downstream Port Containment support" depends on PCIEPORTBUS default n help @@ -92,6 +92,3 @@ config PCIE_DPC will be handled by the DPC driver. If your system doesn't have this capability or you do not want to use this feature, it is safe to answer N. - - To compile this driver as a module, choose M here: the module - will be called pcie-dpc. 
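As a reference for the IRQ allocation interface added to drivers/pci/msi.c above, here is a minimal sketch of how a driver might consume pci_alloc_irq_vectors(), pci_irq_vector() and pci_free_irq_vectors(). The foo_* names and the vector count are illustrative only and are not part of this series.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical handler; a real driver would do device work here. */
static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
	int i, err, nvec;

	/*
	 * Ask for 1..4 vectors.  With flags == 0 the core tries MSI-X
	 * first, then MSI, and finally falls back to the single legacy
	 * IRQ, so the driver no longer open-codes that cascade itself.
	 */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4, 0);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), foo_irq, 0,
				  "foo", pdev);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}

pci_free_irq_vectors() tears down whichever of MSI-X or MSI ended up enabled, so the error path does not need to know which mode was chosen.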
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 2dfe7fdb77e7..0ec649d961d7 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -139,7 +139,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { /* Don't enable Clock PM if the link is not Clock PM capable */ - if (!link->clkpm_capable && enable) + if (!link->clkpm_capable) enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index ab552f1bc08f..250f87861786 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c @@ -15,8 +15,8 @@ struct dpc_dev { struct pcie_device *dev; - struct work_struct work; - int cap_pos; + struct work_struct work; + int cap_pos; }; static void dpc_wait_link_inactive(struct pci_dev *pdev) @@ -89,7 +89,7 @@ static int dpc_probe(struct pcie_device *dev) int status; u16 ctl, cap; - dpc = kzalloc(sizeof(*dpc), GFP_KERNEL); + dpc = devm_kzalloc(&dev->device, sizeof(*dpc), GFP_KERNEL); if (!dpc) return -ENOMEM; @@ -98,11 +98,12 @@ static int dpc_probe(struct pcie_device *dev) INIT_WORK(&dpc->work, interrupt_event_handler); set_service_data(dev, dpc); - status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc); + status = devm_request_irq(&dev->device, dev->irq, dpc_irq, IRQF_SHARED, + "pcie-dpc", dpc); if (status) { dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq, status); - goto out; + return status; } pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); @@ -117,9 +118,6 @@ static int dpc_probe(struct pcie_device *dev) FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); return status; - out: - kfree(dpc); - return status; } static void dpc_remove(struct pcie_device *dev) @@ -131,14 +129,11 @@ static void dpc_remove(struct pcie_device *dev) pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN); pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); - - free_irq(dev->irq, dpc); - kfree(dpc); } static struct pcie_port_service_driver dpcdriver = { .name = "dpc", - .port_type = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM, + .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_DPC, .probe = dpc_probe, .remove = dpc_remove, diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 32d4d0a3d20e..e9270b4026f3 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/pcieport_if.h> @@ -342,6 +343,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) return retval; } + pm_runtime_no_callbacks(device); + return 0; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index be35da2e105e..70d7ad8c6d17 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -93,6 +93,26 @@ static int pcie_port_resume_noirq(struct device *dev) return 0; } +static int pcie_port_runtime_suspend(struct device *dev) +{ + return to_pci_dev(dev)->bridge_d3 ? 
0 : -EBUSY; +} + +static int pcie_port_runtime_resume(struct device *dev) +{ + return 0; +} + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* + * Assume the PCI core has set bridge_d3 whenever it thinks the port + * should be good to go to D3. Everything else, including moving + * the port to D3, is handled by the PCI core. + */ + return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY; +} + static const struct dev_pm_ops pcie_portdrv_pm_ops = { .suspend = pcie_port_device_suspend, .resume = pcie_port_device_resume, @@ -101,6 +121,9 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .poweroff = pcie_port_device_suspend, .restore = pcie_port_device_resume, .resume_noirq = pcie_port_resume_noirq, + .runtime_suspend = pcie_port_runtime_suspend, + .runtime_resume = pcie_port_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -134,16 +157,39 @@ static int pcie_portdrv_probe(struct pci_dev *dev, return status; pci_save_state(dev); + /* - * D3cold may not work properly on some PCIe port, so disable - * it by default. + * Prevent runtime PM if the port is advertising support for PCIe + * hotplug. Otherwise the BIOS hotplug SMI code might not be able + * to enumerate devices behind this port properly (the port is + * powered down preventing all config space accesses to the + * subordinate devices). We can't be sure for native PCIe hotplug + * either so prevent that as well. */ - dev->d3cold_allowed = false; + if (!dev->is_hotplug_bridge) { + /* + * Keep the port resumed 100ms to make sure things like + * config space accesses from userspace (lspci) will not + * cause the port to repeatedly suspend and resume. + */ + pm_runtime_set_autosuspend_delay(&dev->dev, 100); + pm_runtime_use_autosuspend(&dev->dev); + pm_runtime_mark_last_busy(&dev->dev); + pm_runtime_put_autosuspend(&dev->dev); + pm_runtime_allow(&dev->dev); + } + return 0; } static void pcie_portdrv_remove(struct pci_dev *dev) { + if (!dev->is_hotplug_bridge) { + pm_runtime_forbid(&dev->dev); + pm_runtime_get_noresume(&dev->dev); + pm_runtime_dont_use_autosuspend(&dev->dev); + } + pcie_port_device_remove(dev); } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8e3ef720997d..93f280df3428 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -16,6 +16,7 @@ #include <linux/aer.h> #include <linux/acpi.h> #include <linux/irqdomain.h> +#include <linux/pm_runtime.h> #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ @@ -832,6 +833,12 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) u8 primary, secondary, subordinate; int broken = 0; + /* + * Make sure the bridge is powered on to be able to access config + * space of devices below it. + */ + pm_runtime_get_sync(&dev->dev); + pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); primary = buses & 0xFF; secondary = (buses >> 8) & 0xFF; @@ -1012,6 +1019,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) out: pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); + pm_runtime_put(&dev->dev); + return max; } EXPORT_SYMBOL(pci_scan_bridge); @@ -2077,6 +2086,15 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) } /* + * Make sure a hotplug bridge has at least the minimum requested + * number of buses. 
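+ * The minimum comes from the "pci=hpbussize=N" option handled in pci_setup() + * above and defaults to DEFAULT_HOTPLUG_BUS_SIZE (1).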
+ */ + if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) { + if (max - bus->busn_res.start < pci_hotplug_bus_size - 1) + max = bus->busn_res.start + pci_hotplug_bus_size - 1; + } + + /* * We've scanned the bus and so we know all about what's on * the other side of any bridges that may be on this bus plus * any devices. @@ -2127,7 +2145,9 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, b->sysdata = sysdata; b->ops = ops; b->number = b->busn_res.start = bus; - pci_bus_assign_domain_nr(b, parent); +#ifdef CONFIG_PCI_DOMAINS_GENERIC + b->domain_nr = pci_bus_find_domain_nr(b, parent); +#endif b2 = pci_find_bus(pci_domain_nr(b), bus); if (b2) { /* If we already got to this bus through a different bridge, ignore it */ diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 3f155e78513f..2408abe4ee8c 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -231,7 +231,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) { struct pci_dev *dev = PDE_DATA(file_inode(file)); struct pci_filp_private *fpriv = file->private_data; - int i, ret; + int i, ret, write_combine; if (!capable(CAP_SYS_RAWIO)) return -EPERM; @@ -245,9 +245,12 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (fpriv->mmap_state == pci_mmap_mem) + write_combine = fpriv->write_combine; + else + write_combine = 0; ret = pci_mmap_page_range(dev, vma, - fpriv->mmap_state, - fpriv->write_combine); + fpriv->mmap_state, write_combine); if (ret < 0) return ret; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ee72ebe18f4b..37ff0158e45f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3189,13 +3189,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev) } /* - * Atheros AR93xx chips do not behave after a bus reset. The device will - * throw a Link Down error on AER-capable systems and regardless of AER, - * config space of the device is never accessible again and typically - * causes the system to hang or reset when access is attempted. + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. + * The device will throw a Link Down error on AER-capable systems and + * regardless of AER, config space of the device is never accessible again + * and typically causes the system to hang or reset when access is attempted. 
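+ * The extra DECLARE_PCI_FIXUP_HEADER entries added below (device IDs 0x0032 + * and 0x003c) extend the quirk to those chips.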
* http://www.spinics.net/lists/linux-pci/msg34797.html */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); static void quirk_no_pm_reset(struct pci_dev *dev) { @@ -3711,6 +3713,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); @@ -3747,6 +3752,9 @@ static const struct pci_device_id fixed_dma_alias_tbl[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */ .driver_data = PCI_DEVFN(1, 0) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, + PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */ + .driver_data = PCI_DEVFN(1, 0) }, { 0 } }; @@ -4087,6 +4095,7 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs }, diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 8982026637d5..d1ef7acf6930 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -96,6 +96,8 @@ static void pci_remove_bus_device(struct pci_dev *dev) dev->subordinate = NULL; } + pci_bridge_d3_device_removed(dev); + pci_destroy_dev(dev); } diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 55641a39a3e9..c74059e10a6d 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -695,11 +695,16 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } +void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type) +{ +} + void pci_setup_bridge(struct pci_bus *bus) { unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; + pcibios_setup_bridge(bus, type); __pci_setup_bridge(bus, type); } @@ -1423,6 +1428,74 @@ void pci_bus_assign_resources(const struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_assign_resources); +static void pci_claim_device_resources(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (!r->flags || r->parent) + continue; + + pci_claim_resource(dev, i); + } +} + +static void pci_claim_bridge_resources(struct pci_dev *dev) +{ + int i; + + for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (!r->flags || r->parent) + continue; + + pci_claim_bridge_resource(dev, i); + } +} + +static void pci_bus_allocate_dev_resources(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child; + + list_for_each_entry(dev, &b->devices, bus_list) { + pci_claim_device_resources(dev); + + child = dev->subordinate; + if (child) + 
pci_bus_allocate_dev_resources(child); + } +} + +static void pci_bus_allocate_resources(struct pci_bus *b) +{ + struct pci_bus *child; + + /* + * Carry out a depth-first search on the PCI bus + * tree to allocate bridge apertures. Read the + * programmed bridge bases and recursively claim + * the respective bridge resources. + */ + if (b->self) { + pci_read_bridge_bases(b); + pci_claim_bridge_resources(b->self); + } + + list_for_each_entry(child, &b->children, node) + pci_bus_allocate_resources(child); +} + +void pci_bus_claim_resources(struct pci_bus *b) +{ + pci_bus_allocate_resources(b); + pci_bus_allocate_dev_resources(b); +} +EXPORT_SYMBOL(pci_bus_claim_resources); + static void __pci_bridge_assign_resources(const struct pci_dev *bridge, struct list_head *add_head, struct list_head *fail_head) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 5f70fee59a94..d6ff5e82377d 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -1086,7 +1086,7 @@ out: return err; } -static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev, +static void __ref pcifront_backend_changed(struct xenbus_device *xdev, enum xenbus_state be_state) { struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 8e4d7f590b06..6ccb994bdfcb 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -688,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } +static DEFINE_MUTEX(arm_pmu_mutex); +static LIST_HEAD(arm_pmu_list); + /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading * junk values out of them. 
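* With the conversion below, the hotplug core calls arm_perf_starting_cpu() * on every CPU_STARTING step; it walks arm_pmu_list and resets each PMU that * supports the incoming CPU, replacing the per-PMU hotplug notifier.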
*/ -static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, - void *hcpu) +static int arm_perf_starting_cpu(unsigned int cpu) { - int cpu = (unsigned long)hcpu; - struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); - - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - - if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) - return NOTIFY_DONE; + struct arm_pmu *pmu; - if (pmu->reset) - pmu->reset(pmu); - else - return NOTIFY_DONE; + mutex_lock(&arm_pmu_mutex); + list_for_each_entry(pmu, &arm_pmu_list, entry) { - return NOTIFY_OK; + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + continue; + if (pmu->reset) + pmu->reset(pmu); + } + mutex_unlock(&arm_pmu_mutex); + return 0; } #ifdef CONFIG_CPU_PM @@ -822,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (!cpu_hw_events) return -ENOMEM; - cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; - err = register_cpu_notifier(&cpu_pmu->hotplug_nb); - if (err) - goto out_hw_events; + mutex_lock(&arm_pmu_mutex); + list_add_tail(&cpu_pmu->entry, &arm_pmu_list); + mutex_unlock(&arm_pmu_mutex); err = cpu_pm_pmu_register(cpu_pmu); if (err) @@ -861,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) return 0; out_unregister: - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); -out_hw_events: + mutex_lock(&arm_pmu_mutex); + list_del(&cpu_pmu->entry); + mutex_unlock(&arm_pmu_mutex); free_percpu(cpu_hw_events); return err; } @@ -870,7 +869,9 @@ out_hw_events: static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) { cpu_pm_pmu_unregister(cpu_pmu); - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); + mutex_lock(&arm_pmu_mutex); + list_del(&cpu_pmu->entry); + mutex_unlock(&arm_pmu_mutex); free_percpu(cpu_pmu->hw_events); } @@ -1061,3 +1062,17 @@ out_free: kfree(pmu); return ret; } + +static int arm_pmu_hp_init(void) +{ + int ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, + "AP_PERF_ARM_STARTING", + arm_perf_starting_cpu, NULL); + if (ret) + pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", + ret); + return ret; +} +subsys_initcall(arm_pmu_hp_init); diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c index 6ebcf3e41c46..fd57345ffed2 100644 --- a/drivers/phy/phy-rockchip-emmc.c +++ b/drivers/phy/phy-rockchip-emmc.c @@ -14,6 +14,7 @@ * GNU General Public License for more details. 
*/ +#include <linux/clk.h> #include <linux/delay.h> #include <linux/mfd/syscon.h> #include <linux/module.h> @@ -31,42 +32,64 @@ ((val) << (shift) | (mask) << ((shift) + 16)) /* Register definition */ -#define GRF_EMMCPHY_CON0 0x0 -#define GRF_EMMCPHY_CON1 0x4 -#define GRF_EMMCPHY_CON2 0x8 -#define GRF_EMMCPHY_CON3 0xc -#define GRF_EMMCPHY_CON4 0x10 -#define GRF_EMMCPHY_CON5 0x14 -#define GRF_EMMCPHY_CON6 0x18 -#define GRF_EMMCPHY_STATUS 0x20 - -#define PHYCTRL_PDB_MASK 0x1 -#define PHYCTRL_PDB_SHIFT 0x0 -#define PHYCTRL_PDB_PWR_ON 0x1 -#define PHYCTRL_PDB_PWR_OFF 0x0 -#define PHYCTRL_ENDLL_MASK 0x1 -#define PHYCTRL_ENDLL_SHIFT 0x1 -#define PHYCTRL_ENDLL_ENABLE 0x1 -#define PHYCTRL_ENDLL_DISABLE 0x0 -#define PHYCTRL_CALDONE_MASK 0x1 -#define PHYCTRL_CALDONE_SHIFT 0x6 -#define PHYCTRL_CALDONE_DONE 0x1 -#define PHYCTRL_CALDONE_GOING 0x0 -#define PHYCTRL_DLLRDY_MASK 0x1 -#define PHYCTRL_DLLRDY_SHIFT 0x5 -#define PHYCTRL_DLLRDY_DONE 0x1 -#define PHYCTRL_DLLRDY_GOING 0x0 +#define GRF_EMMCPHY_CON0 0x0 +#define GRF_EMMCPHY_CON1 0x4 +#define GRF_EMMCPHY_CON2 0x8 +#define GRF_EMMCPHY_CON3 0xc +#define GRF_EMMCPHY_CON4 0x10 +#define GRF_EMMCPHY_CON5 0x14 +#define GRF_EMMCPHY_CON6 0x18 +#define GRF_EMMCPHY_STATUS 0x20 + +#define PHYCTRL_PDB_MASK 0x1 +#define PHYCTRL_PDB_SHIFT 0x0 +#define PHYCTRL_PDB_PWR_ON 0x1 +#define PHYCTRL_PDB_PWR_OFF 0x0 +#define PHYCTRL_ENDLL_MASK 0x1 +#define PHYCTRL_ENDLL_SHIFT 0x1 +#define PHYCTRL_ENDLL_ENABLE 0x1 +#define PHYCTRL_ENDLL_DISABLE 0x0 +#define PHYCTRL_CALDONE_MASK 0x1 +#define PHYCTRL_CALDONE_SHIFT 0x6 +#define PHYCTRL_CALDONE_DONE 0x1 +#define PHYCTRL_CALDONE_GOING 0x0 +#define PHYCTRL_DLLRDY_MASK 0x1 +#define PHYCTRL_DLLRDY_SHIFT 0x5 +#define PHYCTRL_DLLRDY_DONE 0x1 +#define PHYCTRL_DLLRDY_GOING 0x0 +#define PHYCTRL_FREQSEL_200M 0x0 +#define PHYCTRL_FREQSEL_50M 0x1 +#define PHYCTRL_FREQSEL_100M 0x2 +#define PHYCTRL_FREQSEL_150M 0x3 +#define PHYCTRL_FREQSEL_MASK 0x3 +#define PHYCTRL_FREQSEL_SHIFT 0xc +#define PHYCTRL_DR_MASK 0x7 +#define PHYCTRL_DR_SHIFT 0x4 +#define PHYCTRL_DR_50OHM 0x0 +#define PHYCTRL_DR_33OHM 0x1 +#define PHYCTRL_DR_66OHM 0x2 +#define PHYCTRL_DR_100OHM 0x3 +#define PHYCTRL_DR_40OHM 0x4 +#define PHYCTRL_OTAPDLYENA 0x1 +#define PHYCTRL_OTAPDLYENA_MASK 0x1 +#define PHYCTRL_OTAPDLYENA_SHIFT 0xb +#define PHYCTRL_OTAPDLYSEL_MASK 0xf +#define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 struct rockchip_emmc_phy { unsigned int reg_offset; struct regmap *reg_base; + struct clk *emmcclk; }; -static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy, - bool on_off) +static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) { + struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy); unsigned int caldone; unsigned int dllrdy; + unsigned int freqsel = PHYCTRL_FREQSEL_200M; + unsigned long rate; + unsigned long timeout; /* * Keep phyctrl_pdb and phyctrl_endll low to allow @@ -87,6 +110,43 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy, if (on_off == PHYCTRL_PDB_PWR_OFF) return 0; + rate = clk_get_rate(rk_phy->emmcclk); + + if (rate != 0) { + unsigned long ideal_rate; + unsigned long diff; + + switch (rate) { + case 1 ... 74999999: + ideal_rate = 50000000; + freqsel = PHYCTRL_FREQSEL_50M; + break; + case 75000000 ... 124999999: + ideal_rate = 100000000; + freqsel = PHYCTRL_FREQSEL_100M; + break; + case 125000000 ... 174999999: + ideal_rate = 150000000; + freqsel = PHYCTRL_FREQSEL_150M; + break; + default: + ideal_rate = 200000000; + break; + }; + + diff = (rate > ideal_rate) ? 
+ rate - ideal_rate : ideal_rate - rate; + + /* + * In order for tuning delays to be accurate we need to be + * pretty spot on for the DLL range, so warn if we're too + * far off. Also warn if we're above the 200 MHz max. Don't + * warn for really slow rates since we won't be tuning then. + */ + if ((rate > 50000000 && diff > 15000000) || (rate > 200000000)) + dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate); + } + /* * According to the user manual, calpad calibration * cycle takes more than 2us without the minimal recommended @@ -113,20 +173,62 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy, return -ETIMEDOUT; } + /* Set the frequency of the DLL operation */ + regmap_write(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_CON0, + HIWORD_UPDATE(freqsel, PHYCTRL_FREQSEL_MASK, + PHYCTRL_FREQSEL_SHIFT)); + + /* Turn on the DLL */ regmap_write(rk_phy->reg_base, rk_phy->reg_offset + GRF_EMMCPHY_CON6, HIWORD_UPDATE(PHYCTRL_ENDLL_ENABLE, PHYCTRL_ENDLL_MASK, PHYCTRL_ENDLL_SHIFT)); + /* - * After enable analog DLL circuits, we need extra 10.2us - * for dll to be ready for work. + * We turned on the DLL even though the rate was 0 because the + * clock might be turned on later. ...but we can't wait for the DLL + * to lock when the rate is 0 because it will never lock with no + * input clock. + * + * Technically we should be checking the lock later when the clock + * is turned on, but for now we won't. */ - udelay(11); - regmap_read(rk_phy->reg_base, - rk_phy->reg_offset + GRF_EMMCPHY_STATUS, - &dllrdy); - dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK; + if (rate == 0) + return 0; + + /* + * After enabling analog DLL circuits, the docs say that we need 10.2 us if + * our source clock is at 50 MHz and that lock time scales linearly + * with clock speed. If we are powering on the PHY and the card clock + * is super slow (like 100 kHz) this could take as long as 5.1 ms as + * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms + * Hopefully we won't be running at 100 kHz, but we should still make + * sure we wait long enough. + * + * NOTE: There appear to be corner cases where the DLL seems to take + * extra long to lock for reasons that aren't understood. In some + * extreme cases we've seen it take over 10ms (!). We'll be + * generous and give it 50ms. We still busy wait here because: + * - In most cases it should be super fast. + * - This is not called lots during normal operation so it shouldn't + * be a power or performance problem to busy wait. We expect it + * only at boot / resume. In both cases, eMMC is probably on the + * critical path so busy waiting a little extra time should be OK.
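+ * + * The loop below polls GRF_EMMCPHY_STATUS roughly once per microsecond and + * stops as soon as PHYCTRL_DLLRDY_DONE is observed or the 50ms deadline + * passes.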
+ */ + timeout = jiffies + msecs_to_jiffies(50); + do { + udelay(1); + + regmap_read(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_STATUS, + &dllrdy); + dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK; + if (dllrdy == PHYCTRL_DLLRDY_DONE) + break; + } while (!time_after(jiffies, timeout)); + if (dllrdy != PHYCTRL_DLLRDY_DONE) { pr_err("rockchip_emmc_phy_power: dllrdy timeout.\n"); return -ETIMEDOUT; @@ -135,33 +237,82 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy, return 0; } -static int rockchip_emmc_phy_power_off(struct phy *phy) +static int rockchip_emmc_phy_init(struct phy *phy) { struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy); int ret = 0; - /* Power down emmc phy analog blocks */ - ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_OFF); - if (ret) - return ret; + /* + * We purposely get the clock here and not in probe to avoid the + * circular dependency problem. We expect: + * - PHY driver to probe + * - SDHCI driver to start probe + * - SDHCI driver to register its clock + * - SDHCI driver to get the PHY + * - SDHCI driver to init the PHY + * + * The clock is optional, so upon any error we just set it to NULL. + * + * NOTE: we don't do anything special for EPROBE_DEFER here. Given the + * above expected use case, EPROBE_DEFER isn't sensible to expect, so + * it's just like any other error. + */ + rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk"); + if (IS_ERR(rk_phy->emmcclk)) { + dev_dbg(&phy->dev, "Error getting emmcclk: %ld\n", PTR_ERR(rk_phy->emmcclk)); + rk_phy->emmcclk = NULL; + } + + return ret; +} + +static int rockchip_emmc_phy_exit(struct phy *phy) +{ + struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy); + + clk_put(rk_phy->emmcclk); return 0; } +static int rockchip_emmc_phy_power_off(struct phy *phy) +{ + /* Power down emmc phy analog blocks */ + return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_OFF); +} + static int rockchip_emmc_phy_power_on(struct phy *phy) { struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy); - int ret = 0; - /* Power up emmc phy analog blocks */ - ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_ON); - if (ret) - return ret; + /* Drive impedance: 50 Ohm */ + regmap_write(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_CON6, + HIWORD_UPDATE(PHYCTRL_DR_50OHM, + PHYCTRL_DR_MASK, + PHYCTRL_DR_SHIFT)); - return 0; + /* Output tap delay: enable */ + regmap_write(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_CON0, + HIWORD_UPDATE(PHYCTRL_OTAPDLYENA, + PHYCTRL_OTAPDLYENA_MASK, + PHYCTRL_OTAPDLYENA_SHIFT)); + + /* Output tap delay */ + regmap_write(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_CON0, + HIWORD_UPDATE(4, + PHYCTRL_OTAPDLYSEL_MASK, + PHYCTRL_OTAPDLYSEL_SHIFT)); + + /* Power up emmc phy analog blocks */ + return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_ON); } static const struct phy_ops ops = { + .init = rockchip_emmc_phy_init, + .exit = rockchip_emmc_phy_exit, .power_on = rockchip_emmc_phy_power_on, .power_off = rockchip_emmc_phy_power_off, .owner = THIS_MODULE, diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index fb8200b8e8ec..b3fe1d339632 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -35,7 +35,7 @@ config PINCTRL_ADI2 machine and arch are selected to build.
config PINCTRL_AS3722 - bool "Pinctrl and GPIO driver for ams AS3722 PMIC" + tristate "Pinctrl and GPIO driver for ams AS3722 PMIC" depends on MFD_AS3722 && GPIOLIB select PINMUX select GENERIC_PINCONF @@ -129,6 +129,17 @@ config PINCTRL_MESON select OF_GPIO select REGMAP_MMIO +config PINCTRL_OXNAS + bool + depends on OF + select PINMUX + select PINCONF + select GENERIC_PINCONF + select GPIOLIB + select OF_GPIO + select GPIOLIB_IRQCHIP + select MFD_SYSCON + config PINCTRL_ROCKCHIP bool select PINMUX @@ -196,8 +207,19 @@ config PINCTRL_COH901 COH 901 335 and COH 901 571/3. They contain 3, 5 or 7 ports of 8 GPIO pins each. +config PINCTRL_MAX77620 + tristate "MAX77620/MAX20024 Pincontrol support" + depends on MFD_MAX77620 + select PINMUX + select GENERIC_PINCONF + help + Say Yes here to enable pin control support for the Maxim PMIC MAX77620. + This PMIC has 8 GPIO pins that work as GPIOs as well as special + functions in alternate mode. This driver also configures push-pull, + open drain, FPS slots etc. + config PINCTRL_PALMAS - bool "Pinctrl driver for the PALMAS Series MFD devices" + tristate "Pinctrl driver for the PALMAS Series MFD devices" depends on OF && MFD_PALMAS select PINMUX select GENERIC_PINCONF diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 42a5c1dddfef..8ebd7b8e1621 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -16,7 +16,9 @@ obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o +obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o obj-$(CONFIG_PINCTRL_MESON) += meson/ +obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o @@ -35,7 +37,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o -obj-$(CONFIG_ARCH_BCM) += bcm/ +obj-y += bcm/ obj-$(CONFIG_PINCTRL_BERLIN) += berlin/ obj-y += freescale/ obj-$(CONFIG_X86) += intel/ diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index c356223e1c9c..63246770bd74 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -60,6 +60,7 @@ config PINCTRL_IPROC_GPIO config PINCTRL_CYGNUS_MUX bool "Broadcom Cygnus IOMUX driver" depends on (ARCH_BCM_CYGNUS || COMPILE_TEST) + depends on OF select PINMUX select GENERIC_PINCONF default ARCH_BCM_CYGNUS @@ -99,3 +100,17 @@ config PINCTRL_NS2_MUX The Broadcom Northstar2 IOMUX driver supports group based IOMUX configuration. + +config PINCTRL_NSP_MUX + bool "Broadcom NSP IOMUX driver" + depends on (ARCH_BCM_NSP || COMPILE_TEST) + depends on OF + select PINMUX + select GENERIC_PINCONF + default ARCH_BCM_NSP + help + Say yes here to enable the Broadcom NSP SOC IOMUX driver. + + The Broadcom Northstar Plus IOMUX driver supports pin based IOMUX + configuration, where certain individual pins can be overridden + to GPIO function.
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile index 3861a1c1f8ff..2a65111f3c70 100644 --- a/drivers/pinctrl/bcm/Makefile +++ b/drivers/pinctrl/bcm/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o obj-$(CONFIG_PINCTRL_NSP_GPIO) += pinctrl-nsp-gpio.o obj-$(CONFIG_PINCTRL_NS2_MUX) += pinctrl-ns2-mux.o +obj-$(CONFIG_PINCTRL_NSP_MUX) += pinctrl-nsp-mux.o diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 3670f5ea7a12..7f7700716398 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -66,6 +66,14 @@ #define GPIO_DRV_STRENGTH_BITS 3 #define GPIO_DRV_STRENGTH_BIT_MASK ((1 << GPIO_DRV_STRENGTH_BITS) - 1) +enum iproc_pinconf_param { + IPROC_PINCONF_DRIVE_STRENGTH = 0, + IPROC_PINCONF_BIAS_DISABLE, + IPROC_PINCONF_BIAS_PULL_UP, + IPROC_PINCONF_BIAS_PULL_DOWN, + IPROC_PINCON_MAX, +}; + /* * Iproc GPIO core * @@ -78,6 +86,10 @@ * @num_banks: number of GPIO banks, each bank supports up to 32 GPIOs * @pinmux_is_supported: flag to indicate this GPIO controller contains pins * that can be individually muxed to GPIO + * @pinconf_disable: contains a list of PINCONF parameters that need to be + * disabled + * @nr_pinconf_disable: total number of PINCONF parameters that need to be + * disabled * @pctl: pointer to pinctrl_dev * @pctldesc: pinctrl descriptor */ @@ -94,6 +106,9 @@ struct iproc_gpio { bool pinmux_is_supported; + enum pin_config_param *pinconf_disable; + unsigned int nr_pinconf_disable; + struct pinctrl_dev *pctl; struct pinctrl_desc pctldesc; }; @@ -360,6 +375,65 @@ static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio) return !!(readl(chip->base + offset) & BIT(shift)); } +/* + * Mapping of the iProc PINCONF parameters to the generic pin configuration + * parameters + */ +static const enum pin_config_param iproc_pinconf_disable_map[] = { + [IPROC_PINCONF_DRIVE_STRENGTH] = PIN_CONFIG_DRIVE_STRENGTH, + [IPROC_PINCONF_BIAS_DISABLE] = PIN_CONFIG_BIAS_DISABLE, + [IPROC_PINCONF_BIAS_PULL_UP] = PIN_CONFIG_BIAS_PULL_UP, + [IPROC_PINCONF_BIAS_PULL_DOWN] = PIN_CONFIG_BIAS_PULL_DOWN, +}; + +static bool iproc_pinconf_param_is_disabled(struct iproc_gpio *chip, + enum pin_config_param param) +{ + unsigned int i; + + if (!chip->nr_pinconf_disable) + return false; + + for (i = 0; i < chip->nr_pinconf_disable; i++) + if (chip->pinconf_disable[i] == param) + return true; + + return false; +} + +static int iproc_pinconf_disable_map_create(struct iproc_gpio *chip, + unsigned long disable_mask) +{ + unsigned int map_size = ARRAY_SIZE(iproc_pinconf_disable_map); + unsigned int bit, nbits = 0; + + /* figure out total number of PINCONF parameters to disable */ + for_each_set_bit(bit, &disable_mask, map_size) + nbits++; + + if (!nbits) + return 0; + + /* + * Allocate an array to store PINCONF parameters that need to be + * disabled + */ + chip->pinconf_disable = devm_kcalloc(chip->dev, nbits, + sizeof(*chip->pinconf_disable), + GFP_KERNEL); + if (!chip->pinconf_disable) + return -ENOMEM; + + chip->nr_pinconf_disable = nbits; + + /* now store these parameters */ + nbits = 0; + for_each_set_bit(bit, &disable_mask, map_size) + chip->pinconf_disable[nbits++] = iproc_pinconf_disable_map[bit]; + + return 0; +} + static int iproc_get_groups_count(struct pinctrl_dev *pctldev) { return 1; @@ -500,6 +574,9 @@ static int iproc_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, bool disable, pull_up; 
int ret; + if (iproc_pinconf_param_is_disabled(chip, param)) + return -ENOTSUPP; + switch (param) { case PIN_CONFIG_BIAS_DISABLE: iproc_gpio_get_pull(chip, gpio, &disable, &pull_up); @@ -548,6 +625,10 @@ static int iproc_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, for (i = 0; i < num_configs; i++) { param = pinconf_to_config_param(configs[i]); + + if (iproc_pinconf_param_is_disabled(chip, param)) + return -ENOTSUPP; + arg = pinconf_to_config_argument(configs[i]); switch (param) { @@ -633,11 +714,13 @@ static int iproc_gpio_register_pinconf(struct iproc_gpio *chip) } static const struct of_device_id iproc_gpio_of_match[] = { + { .compatible = "brcm,iproc-gpio" }, { .compatible = "brcm,cygnus-ccm-gpio" }, { .compatible = "brcm,cygnus-asiu-gpio" }, { .compatible = "brcm,cygnus-crmu-gpio" }, - { .compatible = "brcm,iproc-gpio" }, - { } + { .compatible = "brcm,iproc-nsp-gpio" }, + { .compatible = "brcm,iproc-stingray-gpio" }, + { /* sentinel */ } }; static int iproc_gpio_probe(struct platform_device *pdev) @@ -646,8 +729,17 @@ static int iproc_gpio_probe(struct platform_device *pdev) struct resource *res; struct iproc_gpio *chip; struct gpio_chip *gc; - u32 ngpios; + u32 ngpios, pinconf_disable_mask = 0; int irq, ret; + bool no_pinconf = false; + + /* NSP does not support drive strength config */ + if (of_device_is_compatible(dev->of_node, "brcm,iproc-nsp-gpio")) + pinconf_disable_mask = BIT(IPROC_PINCONF_DRIVE_STRENGTH); + /* Stingray does not support pinconf in this controller */ + else if (of_device_is_compatible(dev->of_node, + "brcm,iproc-stingray-gpio")) + no_pinconf = true; chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) @@ -702,10 +794,22 @@ static int iproc_gpio_probe(struct platform_device *pdev) return ret; } - ret = iproc_gpio_register_pinconf(chip); - if (ret) { - dev_err(dev, "unable to register pinconf\n"); - goto err_rm_gpiochip; + if (!no_pinconf) { + ret = iproc_gpio_register_pinconf(chip); + if (ret) { + dev_err(dev, "unable to register pinconf\n"); + goto err_rm_gpiochip; + } + + if (pinconf_disable_mask) { + ret = iproc_pinconf_disable_map_create(chip, + pinconf_disable_mask); + if (ret) { + dev_err(dev, + "unable to create pinconf disable map\n"); + goto err_rm_gpiochip; + } + } } /* optional GPIO interrupt support */ diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 3fefd14acc3e..ca817896ed24 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -1044,10 +1044,8 @@ static int ns2_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pinctrl->base0)) { - dev_err(&pdev->dev, "unable to map I/O space\n"); + if (IS_ERR(pinctrl->base0)) return PTR_ERR(pinctrl->base0); - } res = platform_get_resource(pdev, IORESOURCE_MEM, 1); pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, @@ -1059,10 +1057,8 @@ static int ns2_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 2); pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pinctrl->pinconf_base)) { - dev_err(&pdev->dev, "unable to map I/O space\n"); + if (IS_ERR(pinctrl->pinconf_base)) return PTR_ERR(pinctrl->pinconf_base); - } ret = ns2_mux_log_init(pinctrl); if (ret) { @@ -1089,9 +1085,9 @@ static int ns2_pinmux_probe(struct platform_device *pdev) pinctrl->pctl = pinctrl_register(&ns2_pinctrl_desc, &pdev->dev, pinctrl); - if 
(!pinctrl->pctl) { + if (IS_ERR(pinctrl->pctl)) { dev_err(&pdev->dev, "unable to register IOMUX pinctrl\n"); - return -EINVAL; + return PTR_ERR(pinctrl->pctl); } return 0; diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index a8b37a9a8230..35783db1c10b 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -458,13 +458,15 @@ static int nsp_gpio_get_strength(struct nsp_gpio *chip, unsigned gpio, return 0; } -int nsp_pin_config_group_get(struct pinctrl_dev *pctldev, unsigned selector, +static int nsp_pin_config_group_get(struct pinctrl_dev *pctldev, + unsigned selector, unsigned long *config) { return 0; } -int nsp_pin_config_group_set(struct pinctrl_dev *pctldev, unsigned selector, +static int nsp_pin_config_group_set(struct pinctrl_dev *pctldev, + unsigned selector, unsigned long *configs, unsigned num_configs) { return 0; } diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c new file mode 100644 index 000000000000..4149db309c8b --- /dev/null +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c @@ -0,0 +1,642 @@ +/* Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This file contains the Northstar plus (NSP) IOMUX driver that supports + * group based PINMUX configuration. The Northstar plus IOMUX controller + * allows pins to be individually muxed to GPIO function. The NAND and MMC are + * group based selections. The gpio_a 8 - 11 are muxed with gpio_b and pwm. + * To select PWM, one needs to enable the corresponding gpio_b as well.
+ * + * gpio_a (8 - 11) + * +---------- + * | + * gpio_a (8-11) | gpio_b (0 - 3) + * ------------------------+-------+---------- + * | + * | pwm (0 - 3) + * +---------- + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "../core.h" +#include "../pinctrl-utils.h" + +#define NSP_MUX_BASE0 0x00 +#define NSP_MUX_BASE1 0x01 +#define NSP_MUX_BASE2 0x02 +/* + * nsp IOMUX register description + * + * @base: base 0 or base 1 + * @shift: bit shift for mux configuration of a group + * @mask: bit mask of the function + * @alt: alternate function to set to + */ +struct nsp_mux { + unsigned int base; + unsigned int shift; + unsigned int mask; + unsigned int alt; +}; + +/* + * Keep track of nsp IOMUX configuration and prevent double configuration + * + * @nsp_mux: nsp IOMUX register description + * @is_configured: flag to indicate whether a mux setting has already been + * configured + */ +struct nsp_mux_log { + struct nsp_mux mux; + bool is_configured; +}; + +/* + * Group based IOMUX configuration + * + * @name: name of the group + * @pins: array of pins used by this group + * @num_pins: total number of pins used by this group + * @mux: nsp group based IOMUX configuration + */ +struct nsp_pin_group { + const char *name; + const unsigned int *pins; + const unsigned int num_pins; + const struct nsp_mux mux; +}; + +/* + * nsp mux function and supported pin groups + * + * @name: name of the function + * @groups: array of groups that can be supported by this function + * @num_groups: total number of groups that can be supported by this function + */ +struct nsp_pin_function { + const char *name; + const char * const *groups; + const unsigned int num_groups; +}; + +/* + * nsp IOMUX pinctrl core + * + * @pctl: pointer to pinctrl_dev + * @dev: pointer to device + * @base0: first mux register + * @base1: second mux register + * @base2: third mux register + * @groups: pointer to array of groups + * @num_groups: total number of groups + * @functions: pointer to array of functions + * @num_functions: total number of functions + * @mux_log: pointer to the array of mux logs + * @lock: lock to protect register access + */ +struct nsp_pinctrl { + struct pinctrl_dev *pctl; + struct device *dev; + void __iomem *base0; + void __iomem *base1; + void __iomem *base2; + const struct nsp_pin_group *groups; + unsigned int num_groups; + const struct nsp_pin_function *functions; + unsigned int num_functions; + struct nsp_mux_log *mux_log; + spinlock_t lock; +}; + +/* + * Description of a pin in nsp + * + * @pin: pin number + * @name: pin name + * @gpio_select: reg data to select GPIO + */ +struct nsp_pin { + unsigned int pin; + char *name; + unsigned int gpio_select; +}; + +#define NSP_PIN_DESC(p, n, g) \ +{ \ + .pin = p, \ + .name = n, \ + .gpio_select = g, \ +} + +/* + * List of muxable pins in nsp + */ +static struct nsp_pin nsp_pins[] = { + NSP_PIN_DESC(0, "spi_clk", 1), + NSP_PIN_DESC(1, "spi_ss", 1), + NSP_PIN_DESC(2, "spi_mosi", 1), + NSP_PIN_DESC(3, "spi_miso", 1), + NSP_PIN_DESC(4, "scl", 1), + NSP_PIN_DESC(5, "sda", 1), + NSP_PIN_DESC(6, "mdc", 1), + NSP_PIN_DESC(7, "mdio", 1), + NSP_PIN_DESC(8, "pwm0", 1), + NSP_PIN_DESC(9, "pwm1", 1), + NSP_PIN_DESC(10, "pwm2", 1), + NSP_PIN_DESC(11, "pwm3", 1), + NSP_PIN_DESC(12, "uart1_rx", 1), + NSP_PIN_DESC(13, "uart1_tx", 1), + 
NSP_PIN_DESC(14, "uart1_cts", 1), + NSP_PIN_DESC(15, "uart1_rts", 1), + NSP_PIN_DESC(16, "uart2_rx", 1), + NSP_PIN_DESC(17, "uart2_tx", 1), + NSP_PIN_DESC(18, "synce", 0), + NSP_PIN_DESC(19, "sata0_led", 0), + NSP_PIN_DESC(20, "sata1_led", 0), + NSP_PIN_DESC(21, "xtal_out", 1), + NSP_PIN_DESC(22, "sdio_pwr", 1), + NSP_PIN_DESC(23, "sdio_en_1p8v", 1), + NSP_PIN_DESC(24, "gpio_24", 1), + NSP_PIN_DESC(25, "gpio_25", 1), + NSP_PIN_DESC(26, "p5_led0", 0), + NSP_PIN_DESC(27, "p5_led1", 0), + NSP_PIN_DESC(28, "gpio_28", 1), + NSP_PIN_DESC(29, "gpio_29", 1), + NSP_PIN_DESC(30, "gpio_30", 1), + NSP_PIN_DESC(31, "gpio_31", 1), + NSP_PIN_DESC(32, "nand_ale", 0), + NSP_PIN_DESC(33, "nand_ce0", 0), + NSP_PIN_DESC(34, "nand_r/b", 0), + NSP_PIN_DESC(35, "nand_dq0", 0), + NSP_PIN_DESC(36, "nand_dq1", 0), + NSP_PIN_DESC(37, "nand_dq2", 0), + NSP_PIN_DESC(38, "nand_dq3", 0), + NSP_PIN_DESC(39, "nand_dq4", 0), + NSP_PIN_DESC(40, "nand_dq5", 0), + NSP_PIN_DESC(41, "nand_dq6", 0), + NSP_PIN_DESC(42, "nand_dq7", 0), +}; + +/* + * List of groups of pins + */ + +static const unsigned int spi_pins[] = {0, 1, 2, 3}; +static const unsigned int i2c_pins[] = {4, 5}; +static const unsigned int mdio_pins[] = {6, 7}; +static const unsigned int pwm0_pins[] = {8}; +static const unsigned int gpio_b_0_pins[] = {8}; +static const unsigned int pwm1_pins[] = {9}; +static const unsigned int gpio_b_1_pins[] = {9}; +static const unsigned int pwm2_pins[] = {10}; +static const unsigned int gpio_b_2_pins[] = {10}; +static const unsigned int pwm3_pins[] = {11}; +static const unsigned int gpio_b_3_pins[] = {11}; +static const unsigned int uart1_pins[] = {12, 13, 14, 15}; +static const unsigned int uart2_pins[] = {16, 17}; +static const unsigned int synce_pins[] = {18}; +static const unsigned int sata0_led_pins[] = {19}; +static const unsigned int sata1_led_pins[] = {20}; +static const unsigned int xtal_out_pins[] = {21}; +static const unsigned int sdio_pwr_pins[] = {22}; +static const unsigned int sdio_1p8v_pins[] = {23}; +static const unsigned int switch_p05_led0_pins[] = {26}; +static const unsigned int switch_p05_led1_pins[] = {27}; +static const unsigned int nand_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42}; +static const unsigned int emmc_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42}; + +#define NSP_PIN_GROUP(group_name, ba, sh, ma, al) \ +{ \ + .name = __stringify(group_name) "_grp", \ + .pins = group_name ## _pins, \ + .num_pins = ARRAY_SIZE(group_name ## _pins), \ + .mux = { \ + .base = ba, \ + .shift = sh, \ + .mask = ma, \ + .alt = al, \ + } \ +} + +/* + * List of nsp pin groups + */ +static const struct nsp_pin_group nsp_pin_groups[] = { + NSP_PIN_GROUP(spi, NSP_MUX_BASE0, 0, 0x0f, 0x00), + NSP_PIN_GROUP(i2c, NSP_MUX_BASE0, 3, 0x03, 0x00), + NSP_PIN_GROUP(mdio, NSP_MUX_BASE0, 5, 0x03, 0x00), + NSP_PIN_GROUP(gpio_b_0, NSP_MUX_BASE0, 7, 0x01, 0x00), + NSP_PIN_GROUP(pwm0, NSP_MUX_BASE1, 0, 0x01, 0x01), + NSP_PIN_GROUP(gpio_b_1, NSP_MUX_BASE0, 8, 0x01, 0x00), + NSP_PIN_GROUP(pwm1, NSP_MUX_BASE1, 1, 0x01, 0x01), + NSP_PIN_GROUP(gpio_b_2, NSP_MUX_BASE0, 9, 0x01, 0x00), + NSP_PIN_GROUP(pwm2, NSP_MUX_BASE1, 2, 0x01, 0x01), + NSP_PIN_GROUP(gpio_b_3, NSP_MUX_BASE0, 10, 0x01, 0x00), + NSP_PIN_GROUP(pwm3, NSP_MUX_BASE1, 3, 0x01, 0x01), + NSP_PIN_GROUP(uart1, NSP_MUX_BASE0, 11, 0x0f, 0x00), + NSP_PIN_GROUP(uart2, NSP_MUX_BASE0, 15, 0x03, 0x00), + NSP_PIN_GROUP(synce, NSP_MUX_BASE0, 17, 0x01, 0x01), + NSP_PIN_GROUP(sata0_led, NSP_MUX_BASE0, 18, 0x01, 0x01), + NSP_PIN_GROUP(sata1_led, NSP_MUX_BASE0, 19, 0x01, 0x01), + 
NSP_PIN_GROUP(xtal_out, NSP_MUX_BASE0, 20, 0x01, 0x00), + NSP_PIN_GROUP(sdio_pwr, NSP_MUX_BASE0, 21, 0x01, 0x00), + NSP_PIN_GROUP(sdio_1p8v, NSP_MUX_BASE0, 22, 0x01, 0x00), + NSP_PIN_GROUP(switch_p05_led0, NSP_MUX_BASE0, 26, 0x01, 0x01), + NSP_PIN_GROUP(switch_p05_led1, NSP_MUX_BASE0, 27, 0x01, 0x01), + NSP_PIN_GROUP(nand, NSP_MUX_BASE2, 0, 0x01, 0x00), + NSP_PIN_GROUP(emmc, NSP_MUX_BASE2, 0, 0x01, 0x01) +}; + +/* + * List of groups supported by functions + */ + +static const char * const spi_grps[] = {"spi_grp"}; +static const char * const i2c_grps[] = {"i2c_grp"}; +static const char * const mdio_grps[] = {"mdio_grp"}; +static const char * const pwm_grps[] = {"pwm0_grp", "pwm1_grp", "pwm2_grp" + , "pwm3_grp"}; +static const char * const gpio_b_grps[] = {"gpio_b_0_grp", "gpio_b_1_grp", + "gpio_b_2_grp", "gpio_b_3_grp"}; +static const char * const uart1_grps[] = {"uart1_grp"}; +static const char * const uart2_grps[] = {"uart2_grp"}; +static const char * const synce_grps[] = {"synce_grp"}; +static const char * const sata_led_grps[] = {"sata0_led_grp", "sata1_led_grp"}; +static const char * const xtal_out_grps[] = {"xtal_out_grp"}; +static const char * const sdio_grps[] = {"sdio_pwr_grp", "sdio_1p8v_grp"}; +static const char * const switch_led_grps[] = {"switch_p05_led0_grp", + "switch_p05_led1_grp"}; +static const char * const nand_grps[] = {"nand_grp"}; +static const char * const emmc_grps[] = {"emmc_grp"}; + +#define NSP_PIN_FUNCTION(func) \ +{ \ + .name = #func, \ + .groups = func ## _grps, \ + .num_groups = ARRAY_SIZE(func ## _grps), \ +} + +/* + * List of supported functions in nsp + */ +static const struct nsp_pin_function nsp_pin_functions[] = { + NSP_PIN_FUNCTION(spi), + NSP_PIN_FUNCTION(i2c), + NSP_PIN_FUNCTION(mdio), + NSP_PIN_FUNCTION(pwm), + NSP_PIN_FUNCTION(gpio_b), + NSP_PIN_FUNCTION(uart1), + NSP_PIN_FUNCTION(uart2), + NSP_PIN_FUNCTION(synce), + NSP_PIN_FUNCTION(sata_led), + NSP_PIN_FUNCTION(xtal_out), + NSP_PIN_FUNCTION(sdio), + NSP_PIN_FUNCTION(switch_led), + NSP_PIN_FUNCTION(nand), + NSP_PIN_FUNCTION(emmc) +}; + +static int nsp_get_groups_count(struct pinctrl_dev *pctrl_dev) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + return pinctrl->num_groups; +} + +static const char *nsp_get_group_name(struct pinctrl_dev *pctrl_dev, + unsigned int selector) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + return pinctrl->groups[selector].name; +} + +static int nsp_get_group_pins(struct pinctrl_dev *pctrl_dev, + unsigned int selector, const unsigned int **pins, + unsigned int *num_pins) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + *pins = pinctrl->groups[selector].pins; + *num_pins = pinctrl->groups[selector].num_pins; + + return 0; +} + +static void nsp_pin_dbg_show(struct pinctrl_dev *pctrl_dev, + struct seq_file *s, unsigned int offset) +{ + seq_printf(s, " %s", dev_name(pctrl_dev->dev)); +} + +static struct pinctrl_ops nsp_pinctrl_ops = { + .get_groups_count = nsp_get_groups_count, + .get_group_name = nsp_get_group_name, + .get_group_pins = nsp_get_group_pins, + .pin_dbg_show = nsp_pin_dbg_show, + .dt_node_to_map = pinconf_generic_dt_node_to_map_group, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int nsp_get_functions_count(struct pinctrl_dev *pctrl_dev) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + return pinctrl->num_functions; +} + +static const char *nsp_get_function_name(struct pinctrl_dev *pctrl_dev, + unsigned int selector) +{ + struct nsp_pinctrl 
*pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + return pinctrl->functions[selector].name; +} + +static int nsp_get_function_groups(struct pinctrl_dev *pctrl_dev, + unsigned int selector, + const char * const **groups, + unsigned * const num_groups) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + + *groups = pinctrl->functions[selector].groups; + *num_groups = pinctrl->functions[selector].num_groups; + + return 0; +} + +static int nsp_pinmux_set(struct nsp_pinctrl *pinctrl, + const struct nsp_pin_function *func, + const struct nsp_pin_group *grp, + struct nsp_mux_log *mux_log) +{ + const struct nsp_mux *mux = &grp->mux; + int i; + u32 val, mask; + unsigned long flags; + void __iomem *base_address; + + for (i = 0; i < pinctrl->num_groups; i++) { + if ((mux->shift != mux_log[i].mux.shift) || + (mux->base != mux_log[i].mux.base)) + continue; + + /* if this is a new configuration, just do it! */ + if (!mux_log[i].is_configured) + break; + + /* + * IOMUX has been configured previously and one is trying to + * configure it to a different function + */ + if (mux_log[i].mux.alt != mux->alt) { + dev_err(pinctrl->dev, + "double configuration error detected!\n"); + dev_err(pinctrl->dev, "func:%s grp:%s\n", + func->name, grp->name); + return -EINVAL; + } + + return 0; + } + if (i == pinctrl->num_groups) + return -EINVAL; + + mask = mux->mask; + mux_log[i].mux.alt = mux->alt; + mux_log[i].is_configured = true; + + switch (mux->base) { + case NSP_MUX_BASE0: + base_address = pinctrl->base0; + break; + + case NSP_MUX_BASE1: + base_address = pinctrl->base1; + break; + + case NSP_MUX_BASE2: + base_address = pinctrl->base2; + break; + + default: + return -EINVAL; + } + + spin_lock_irqsave(&pinctrl->lock, flags); + val = readl(base_address); + val &= ~(mask << grp->mux.shift); + val |= grp->mux.alt << grp->mux.shift; + writel(val, base_address); + spin_unlock_irqrestore(&pinctrl->lock, flags); + + return 0; +} + +static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev, + unsigned int func_select, unsigned int grp_select) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + const struct nsp_pin_function *func; + const struct nsp_pin_group *grp; + + if (grp_select >= pinctrl->num_groups || + func_select >= pinctrl->num_functions) + return -EINVAL; + + func = &pinctrl->functions[func_select]; + grp = &pinctrl->groups[grp_select]; + + dev_dbg(pctrl_dev->dev, "func:%u name:%s grp:%u name:%s\n", + func_select, func->name, grp_select, grp->name); + + dev_dbg(pctrl_dev->dev, "shift:%u alt:%u\n", grp->mux.shift, + grp->mux.alt); + + return nsp_pinmux_set(pinctrl, func, grp, pinctrl->mux_log); +} + + +static int nsp_gpio_request_enable(struct pinctrl_dev *pctrl_dev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + u32 *gpio_select = pctrl_dev->desc->pins[pin].drv_data; + u32 val; + unsigned long flags; + + spin_lock_irqsave(&pinctrl->lock, flags); + val = readl(pinctrl->base0); + if ((val & BIT(pin)) != (*gpio_select << pin)) { + val &= ~BIT(pin); + val |= *gpio_select << pin; + writel(val, pinctrl->base0); + } + spin_unlock_irqrestore(&pinctrl->lock, flags); + + return 0; +} + +static void nsp_gpio_disable_free(struct pinctrl_dev *pctrl_dev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev); + u32 *gpio_select = pctrl_dev->desc->pins[pin].drv_data; + u32 val; + unsigned long flags; + +
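The write path at the bottom of nsp_pinmux_set() above is a plain read-modify-write of one field in a shared IOMUX register: mask gives the field width, shift places the field, and alt is the function code written into it. A minimal user-space sketch of the same arithmetic, using the uart1 group's parameters from the tables above (a 4-bit field at bit 11 of the BASE0 register); the helper name is illustrative, not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Clear the masked field at 'shift', then OR in the function code,
	 * the same sequence nsp_pinmux_set() performs under its lock. */
	static uint32_t mux_field_update(uint32_t reg, unsigned int shift,
					 uint32_t mask, uint32_t alt)
	{
		reg &= ~(mask << shift);
		reg |= alt << shift;
		return reg;
	}

	int main(void)
	{
		uint32_t base0 = 0xffffffff;	/* pretend register snapshot */

		/* uart1: NSP_PIN_GROUP(uart1, NSP_MUX_BASE0, 11, 0x0f, 0x00) */
		base0 = mux_field_update(base0, 11, 0x0f, 0x00);
		printf("BASE0 after muxing uart1: 0x%08x\n", base0);
		return 0;	/* bits 11..14 are now zero */
	}

Because the mux log is keyed on (base, shift), two groups that share one field cannot be programmed to conflicting alt values; nand and emmc both sit at bit 0 of BASE2 with alt 0x00 and 0x01, which is exactly the conflict the double-configuration check above rejects.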
spin_lock_irqsave(&pinctrl->lock, flags); + val = readl(pinctrl->base0); + if ((val & (1 << pin)) == (*gpio_select << pin)) { + val &= ~(1 << pin); + if (!(*gpio_select)) + val |= (1 << pin); + writel(val, pinctrl->base0); + } + spin_unlock_irqrestore(&pinctrl->lock, flags); +} + +static struct pinmux_ops nsp_pinmux_ops = { + .get_functions_count = nsp_get_functions_count, + .get_function_name = nsp_get_function_name, + .get_function_groups = nsp_get_function_groups, + .set_mux = nsp_pinmux_enable, + .gpio_request_enable = nsp_gpio_request_enable, + .gpio_disable_free = nsp_gpio_disable_free, +}; + +static struct pinctrl_desc nsp_pinctrl_desc = { + .name = "nsp-pinmux", + .pctlops = &nsp_pinctrl_ops, + .pmxops = &nsp_pinmux_ops, +}; + +static int nsp_mux_log_init(struct nsp_pinctrl *pinctrl) +{ + struct nsp_mux_log *log; + unsigned int i; + u32 no_of_groups = ARRAY_SIZE(nsp_pin_groups); + + pinctrl->mux_log = devm_kcalloc(pinctrl->dev, no_of_groups, + sizeof(struct nsp_mux_log), + GFP_KERNEL); + if (!pinctrl->mux_log) + return -ENOMEM; + + for (i = 0; i < no_of_groups; i++) { + log = &pinctrl->mux_log[i]; + log->mux.base = nsp_pin_groups[i].mux.base; + log->mux.shift = nsp_pin_groups[i].mux.shift; + log->mux.alt = 0; + log->is_configured = false; + } + + return 0; +} + +static int nsp_pinmux_probe(struct platform_device *pdev) +{ + struct nsp_pinctrl *pinctrl; + struct resource *res; + int i, ret; + struct pinctrl_pin_desc *pins; + unsigned int num_pins = ARRAY_SIZE(nsp_pins); + + pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL); + if (!pinctrl) + return -ENOMEM; + pinctrl->dev = &pdev->dev; + platform_set_drvdata(pdev, pinctrl); + spin_lock_init(&pinctrl->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pinctrl->base0)) + return PTR_ERR(pinctrl->base0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!pinctrl->base1) { + dev_err(&pdev->dev, "unable to map I/O space\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pinctrl->base2)) + return PTR_ERR(pinctrl->base2); + + ret = nsp_mux_log_init(pinctrl); + if (ret) { + dev_err(&pdev->dev, "unable to initialize IOMUX log\n"); + return ret; + } + + pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + for (i = 0; i < num_pins; i++) { + pins[i].number = nsp_pins[i].pin; + pins[i].name = nsp_pins[i].name; + pins[i].drv_data = &nsp_pins[i].gpio_select; + } + + pinctrl->groups = nsp_pin_groups; + pinctrl->num_groups = ARRAY_SIZE(nsp_pin_groups); + pinctrl->functions = nsp_pin_functions; + pinctrl->num_functions = ARRAY_SIZE(nsp_pin_functions); + nsp_pinctrl_desc.pins = pins; + nsp_pinctrl_desc.npins = num_pins; + + pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &nsp_pinctrl_desc, + pinctrl); + if (IS_ERR(pinctrl->pctl)) { + dev_err(&pdev->dev, "unable to register nsp IOMUX pinctrl\n"); + return PTR_ERR(pinctrl->pctl); + } + + return 0; +} + +static const struct of_device_id nsp_pinmux_of_match[] = { + { .compatible = "brcm,nsp-pinmux" }, + { } +}; + +static struct platform_driver nsp_pinmux_driver = { + .driver = { + .name = "nsp-pinmux", + .of_match_table = nsp_pinmux_of_match, + }, + .probe = nsp_pinmux_probe, +}; + +static int __init nsp_pinmux_init(void) +{ + return 
platform_driver_register(&nsp_pinmux_driver); +} +arch_initcall(nsp_pinmux_init); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 98d2a1bb44cb..fb38e208f32d 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -225,13 +225,14 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev, } static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, - unsigned number, const char *name) + const struct pinctrl_pin_desc *pin) { struct pin_desc *pindesc; - pindesc = pin_desc_get(pctldev, number); + pindesc = pin_desc_get(pctldev, pin->number); if (pindesc != NULL) { - dev_err(pctldev->dev, "pin %d already registered\n", number); + dev_err(pctldev->dev, "pin %d already registered\n", + pin->number); return -EINVAL; } @@ -245,10 +246,10 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, pindesc->pctldev = pctldev; /* Copy basic pin info */ - if (name) { - pindesc->name = name; + if (pin->name) { + pindesc->name = pin->name; } else { - pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number); + pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number); if (pindesc->name == NULL) { kfree(pindesc); return -ENOMEM; @@ -256,9 +257,11 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, pindesc->dynamic_name = true; } - radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc); + pindesc->drv_data = pin->drv_data; + + radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc); pr_debug("registered pin %d (%s) on %s\n", - number, pindesc->name, pctldev->desc->name); + pin->number, pindesc->name, pctldev->desc->name); return 0; } @@ -270,8 +273,7 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev, int ret = 0; for (i = 0; i < num_descs; i++) { - ret = pinctrl_register_one_pin(pctldev, - pins[i].number, pins[i].name); + ret = pinctrl_register_one_pin(pctldev, &pins[i]); if (ret) return ret; } @@ -1367,8 +1369,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) if (desc == NULL) continue; - seq_printf(s, "pin %d (%s) ", pin, - desc->name ? desc->name : "unnamed"); + seq_printf(s, "pin %d (%s) ", pin, desc->name); /* Driver-specific info per pin */ if (ops->pin_dbg_show) diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index ca08723b9ee1..747c423c11f3 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -134,6 +134,7 @@ struct pinctrl_setting { * @name: a name for the pin, e.g. the name of the pin/pad/finger on a * datasheet or such * @dynamic_name: if the name of this pin was dynamically allocated + * @drv_data: driver-defined per-pin data. pinctrl core does not touch this * @mux_usecount: If zero, the pin is not claimed, and @owner should be NULL. * If non-zero, this pin is claimed by @owner. 
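The core changes above give every registered pin an opaque drv_data cookie: pinctrl_register_one_pin() now takes the whole pinctrl_pin_desc and stores the pointer in pin_desc, and the core never dereferences it. A sketch of the round trip, modeled on the nsp driver's gpio_select usage earlier in this diff; the pin table and helper here are hypothetical, and reaching pctldev->desc from a driver requires the internal core.h header, as nsp includes it:

	#include <linux/pinctrl/pinctrl.h>
	#include "../core.h"	/* struct pinctrl_dev, driver-internal */

	/* Registration side: point each pin at its private datum. */
	static u32 my_gpio_select[] = { 1, 1, 0 };

	static const struct pinctrl_pin_desc my_pins[] = {
		{ .number = 0, .name = "pin0", .drv_data = &my_gpio_select[0] },
		{ .number = 1, .name = "pin1", .drv_data = &my_gpio_select[1] },
		{ .number = 2, .name = "pin2", .drv_data = &my_gpio_select[2] },
	};

	/* Callback side: any op that receives a pin number can pull the
	 * cookie back out of the descriptor array. */
	static u32 my_pin_gpio_select(struct pinctrl_dev *pctldev,
				      unsigned int pin)
	{
		u32 *gpio_select = pctldev->desc->pins[pin].drv_data;

		return *gpio_select;
	}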
This field is an integer * rather than a boolean, since pinctrl_get() might process multiple @@ -148,6 +149,7 @@ struct pin_desc { struct pinctrl_dev *pctldev; const char *name; bool dynamic_name; + void *drv_data; /* These fields only added when supporting pinmux drivers */ #ifdef CONFIG_PINMUX unsigned mux_usecount; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index fe04e748dfe4..54dad89fc9bf 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -195,8 +195,13 @@ int pinctrl_dt_to_map(struct pinctrl *p) propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); prop = of_find_property(np, propname, &size); kfree(propname); - if (!prop) + if (!prop) { + if (state == 0) { + of_node_put(np); + return -ENODEV; + } break; + } list = prop->value; size /= sizeof(*list); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index eccb47480e1d..71391757938b 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/mfd/syscon.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> @@ -46,7 +45,7 @@ struct imx_pinctrl { const struct imx_pinctrl_soc_info *info; }; -static const inline struct imx_pin_group *imx_pinctrl_find_group_by_name( +static inline const struct imx_pin_group *imx_pinctrl_find_group_by_name( const struct imx_pinctrl_soc_info *info, const char *name) { @@ -513,13 +512,6 @@ static const struct pinconf_ops imx_pinconf_ops = { .pin_config_group_dbg_show = imx_pinconf_group_dbg_show, }; -static struct pinctrl_desc imx_pinctrl_desc = { - .pctlops = &imx_pctrl_ops, - .pmxops = &imx_pmx_ops, - .confops = &imx_pinconf_ops, - .owner = THIS_MODULE, -}; - /* * Each pin represented in fsl,pins consists of 5 u32 PIN_FUNC_ID and * 1 u32 CONFIG, so 24 bytes in total for each pin.
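The 24-byte figure follows directly from that layout: five PIN_FUNC_ID words plus one CONFIG word is six u32s per entry. A standalone sketch of one entry, with field names taken from the fsl,imx-pinctrl device-tree binding (the struct itself is illustrative; the driver walks the raw words):

	#include <stdint.h>
	#include <stdio.h>

	/* One fsl,pins entry: <mux_reg conf_reg input_reg mux_mode input_val> CONFIG */
	struct fsl_pin_entry {
		uint32_t mux_reg;	/* IOMUXC mux register offset */
		uint32_t conf_reg;	/* IOMUXC pad config register offset */
		uint32_t input_reg;	/* select-input register offset */
		uint32_t mux_mode;	/* mux mode to program */
		uint32_t input_val;	/* select-input value */
		uint32_t config;	/* pad CONFIG word */
	};

	int main(void)
	{
		/* Matches FSL_PIN_SIZE (24) in pinctrl-imx.c */
		printf("%zu bytes per fsl,pins entry\n",
		       sizeof(struct fsl_pin_entry));
		return 0;
	}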
@@ -722,6 +714,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, { struct regmap_config config = { .name = "gpr" }; struct device_node *dev_np = pdev->dev.of_node; + struct pinctrl_desc *imx_pinctrl_desc; struct device_node *np; struct imx_pinctrl *ipctl; struct resource *res; @@ -776,9 +769,18 @@ int imx_pinctrl_probe(struct platform_device *pdev, } } - imx_pinctrl_desc.name = dev_name(&pdev->dev); - imx_pinctrl_desc.pins = info->pins; - imx_pinctrl_desc.npins = info->npins; + imx_pinctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*imx_pinctrl_desc), + GFP_KERNEL); + if (!imx_pinctrl_desc) + return -ENOMEM; + + imx_pinctrl_desc->name = dev_name(&pdev->dev); + imx_pinctrl_desc->pins = info->pins; + imx_pinctrl_desc->npins = info->npins; + imx_pinctrl_desc->pctlops = &imx_pctrl_ops, + imx_pinctrl_desc->pmxops = &imx_pmx_ops, + imx_pinctrl_desc->confops = &imx_pinconf_ops, + imx_pinctrl_desc->owner = THIS_MODULE, ret = imx_pinctrl_probe_dt(pdev, info); if (ret) { @@ -789,7 +791,8 @@ int imx_pinctrl_probe(struct platform_device *pdev, ipctl->info = info; ipctl->dev = info->dev; platform_set_drvdata(pdev, ipctl); - ipctl->pctl = devm_pinctrl_register(&pdev->dev, &imx_pinctrl_desc, ipctl); + ipctl->pctl = devm_pinctrl_register(&pdev->dev, + imx_pinctrl_desc, ipctl); if (IS_ERR(ipctl->pctl)) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); return PTR_ERR(ipctl->pctl); diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index b4400cb19b61..a4e9f430d452 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -19,7 +19,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/machine.h> @@ -157,7 +156,7 @@ static int imx1_read_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, return !!(readl(reg) & BIT(offset)); } -static const inline struct imx1_pin_group *imx1_pinctrl_find_group_by_name( +static inline const struct imx1_pin_group *imx1_pinctrl_find_group_by_name( const struct imx1_pinctrl_soc_info *info, const char *name) { diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c index 04723455db58..fc8efc748734 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1.c @@ -9,7 +9,7 @@ * (at your option) any later version. 
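The imx_pinctrl_probe() rework above trades the single file-scope pinctrl_desc for a devm-allocated copy per device; with one shared static descriptor, a second controller instance would overwrite the .name, .pins and .npins the first had set. A trimmed sketch of the pattern, with illustrative names:

	#include <linux/device.h>
	#include <linux/module.h>
	#include <linux/pinctrl/pinctrl.h>
	#include <linux/slab.h>

	static int my_alloc_pinctrl_desc(struct device *dev,
					 const struct pinctrl_pin_desc *pins,
					 unsigned int npins,
					 struct pinctrl_desc **out)
	{
		struct pinctrl_desc *desc;

		/* One descriptor per instance; freed automatically on unbind. */
		desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
		if (!desc)
			return -ENOMEM;

		desc->name = dev_name(dev);	/* unique for each controller */
		desc->pins = pins;
		desc->npins = npins;
		desc->owner = THIS_MODULE;
		*out = desc;
		return 0;
	}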
*/ -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinctrl.h> @@ -262,7 +262,6 @@ static const struct of_device_id imx1_pinctrl_of_match[] = { { .compatible = "fsl,imx1-iomuxc", }, { } }; -MODULE_DEVICE_TABLE(of, imx1_pinctrl_of_match); static struct platform_driver imx1_pinctrl_driver = { .driver = { @@ -270,8 +269,4 @@ static struct platform_driver imx1_pinctrl_driver = { .of_match_table = imx1_pinctrl_of_match, }, }; -module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe); - -MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); -MODULE_DESCRIPTION("Freescale i.MX1 pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe); diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c index aa1221f4dbb7..73e26bc12f09 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx21.c +++ b/drivers/pinctrl/freescale/pinctrl-imx21.c @@ -9,7 +9,7 @@ * (at your option) any later version. */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinctrl.h> @@ -325,7 +325,6 @@ static const struct of_device_id imx21_pinctrl_of_match[] = { { .compatible = "fsl,imx21-iomuxc", }, { } }; -MODULE_DEVICE_TABLE(of, imx21_pinctrl_of_match); static struct platform_driver imx21_pinctrl_driver = { .driver = { @@ -333,8 +332,4 @@ static struct platform_driver imx21_pinctrl_driver = { .of_match_table = imx21_pinctrl_of_match, }, }; -module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe); - -MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); -MODULE_DESCRIPTION("Freescale i.MX21 pinctrl driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe); diff --git a/drivers/pinctrl/freescale/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c index 955cbf4f094f..89b4f160138f 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx23.c +++ b/drivers/pinctrl/freescale/pinctrl-imx23.c @@ -1,4 +1,7 @@ /* + * Freescale i.MX23 pinctrl driver + * + * Author: Shawn Guo <shawn.guo@linaro.org> * Copyright 2012 Freescale Semiconductor, Inc. 
* * The code contained herein is licensed under the GNU General Public @@ -10,7 +13,6 @@ */ #include <linux/init.h> -#include <linux/module.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-mxs.h" @@ -276,15 +278,14 @@ static const struct of_device_id imx23_pinctrl_of_match[] = { { .compatible = "fsl,imx23-pinctrl", }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, imx23_pinctrl_of_match); static struct platform_driver imx23_pinctrl_driver = { .driver = { .name = "imx23-pinctrl", + .suppress_bind_attrs = true, .of_match_table = imx23_pinctrl_of_match, }, .probe = imx23_pinctrl_probe, - .remove = mxs_pinctrl_remove, }; static int __init imx23_pinctrl_init(void) @@ -292,13 +293,3 @@ static int __init imx23_pinctrl_init(void) return platform_driver_register(&imx23_pinctrl_driver); } postcore_initcall(imx23_pinctrl_init); - -static void __exit imx23_pinctrl_exit(void) -{ - platform_driver_unregister(&imx23_pinctrl_driver); -} -module_exit(imx23_pinctrl_exit); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("Freescale i.MX23 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index 81ad546d74bb..d7367fabe712 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c @@ -18,7 +18,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -338,12 +337,3 @@ static int __init imx25_pinctrl_init(void) return platform_driver_register(&imx25_pinctrl_driver); } arch_initcall(imx25_pinctrl_init); - -static void __exit imx25_pinctrl_exit(void) -{ - platform_driver_unregister(&imx25_pinctrl_driver); -} -module_exit(imx25_pinctrl_exit); -MODULE_AUTHOR("Denis Carikli <denis@eukrea.com>"); -MODULE_DESCRIPTION("Freescale IMX25 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c index f828fbbba4b9..e5992036fc6c 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx27.c +++ b/drivers/pinctrl/freescale/pinctrl-imx27.c @@ -14,7 +14,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -412,12 +411,3 @@ static int __init imx27_pinctrl_init(void) return platform_driver_register(&imx27_pinctrl_driver); } arch_initcall(imx27_pinctrl_init); - -static void __exit imx27_pinctrl_exit(void) -{ - platform_driver_unregister(&imx27_pinctrl_driver); -} -module_exit(imx27_pinctrl_exit); -MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>"); -MODULE_DESCRIPTION("Freescale IMX27 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c index 5082efec4f72..295236dfb0bc 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx28.c +++ b/drivers/pinctrl/freescale/pinctrl-imx28.c @@ -1,4 +1,7 @@ /* + * Freescale i.MX28 pinctrl driver + * + * Author: Shawn Guo <shawn.guo@linaro.org> * Copyright 2012 Freescale Semiconductor, Inc. 
* * The code contained herein is licensed under the GNU General Public @@ -10,7 +13,6 @@ */ #include <linux/init.h> -#include <linux/module.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-mxs.h" @@ -392,15 +394,14 @@ static const struct of_device_id imx28_pinctrl_of_match[] = { { .compatible = "fsl,imx28-pinctrl", }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match); static struct platform_driver imx28_pinctrl_driver = { .driver = { .name = "imx28-pinctrl", + .suppress_bind_attrs = true, .of_match_table = imx28_pinctrl_of_match, }, .probe = imx28_pinctrl_probe, - .remove = mxs_pinctrl_remove, }; static int __init imx28_pinctrl_init(void) @@ -408,13 +409,3 @@ static int __init imx28_pinctrl_init(void) return platform_driver_register(&imx28_pinctrl_driver); } postcore_initcall(imx28_pinctrl_init); - -static void __exit imx28_pinctrl_exit(void) -{ - platform_driver_unregister(&imx28_pinctrl_driver); -} -module_exit(imx28_pinctrl_exit); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("Freescale i.MX28 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c index 13eb224a29a9..6315ba6af431 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx35.c +++ b/drivers/pinctrl/freescale/pinctrl-imx35.c @@ -16,7 +16,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -1028,12 +1027,3 @@ static int __init imx35_pinctrl_init(void) return platform_driver_register(&imx35_pinctrl_driver); } arch_initcall(imx35_pinctrl_init); - -static void __exit imx35_pinctrl_exit(void) -{ - platform_driver_unregister(&imx35_pinctrl_driver); -} -module_exit(imx35_pinctrl_exit); -MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>"); -MODULE_DESCRIPTION("Freescale IMX35 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c index 95a36c88b66a..8e3a17df5c5d 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx50.c +++ b/drivers/pinctrl/freescale/pinctrl-imx50.c @@ -14,7 +14,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -415,11 +414,3 @@ static int __init imx50_pinctrl_init(void) return platform_driver_register(&imx50_pinctrl_driver); } arch_initcall(imx50_pinctrl_init); - -static void __exit imx50_pinctrl_exit(void) -{ - platform_driver_unregister(&imx50_pinctrl_driver); -} -module_exit(imx50_pinctrl_exit); -MODULE_DESCRIPTION("Freescale IMX50 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c index 0863e5279896..eeac64ba2709 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx51.c +++ b/drivers/pinctrl/freescale/pinctrl-imx51.c @@ -15,7 +15,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -791,12 +790,3 @@ static int __init imx51_pinctrl_init(void) return platform_driver_register(&imx51_pinctrl_driver); } arch_initcall(imx51_pinctrl_init); - -static void __exit imx51_pinctrl_exit(void) -{ - platform_driver_unregister(&imx51_pinctrl_driver); -} 
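The same conversion repeats across the i.MX and MXS files in this stretch of the diff: these drivers are only ever built in, so the __exit functions, MODULE_* tags and MODULE_DEVICE_TABLE() entries were dead code. Depending on the initcall level each file needs, it either keeps an explicit arch_initcall()/postcore_initcall() registration or, as imx1 and imx21 do, switches to builtin_platform_driver_probe(), which registers once and never unregisters. A skeleton of the latter form, with placeholder names:

	#include <linux/init.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int my_soc_pinctrl_probe(struct platform_device *pdev)
	{
		return 0;	/* set up pins, register the pin controller */
	}

	static const struct of_device_id my_soc_pinctrl_of_match[] = {
		{ .compatible = "vendor,my-soc-iomuxc" },
		{ /* sentinel */ }
	};

	static struct platform_driver my_soc_pinctrl_driver = {
		.driver = {
			.name = "my-soc-pinctrl",
			.of_match_table = my_soc_pinctrl_of_match,
			/* no remove path, so forbid manual unbind too */
			.suppress_bind_attrs = true,
		},
	};
	builtin_platform_driver_probe(my_soc_pinctrl_driver, my_soc_pinctrl_probe);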
-module_exit(imx51_pinctrl_exit); -MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>"); -MODULE_DESCRIPTION("Freescale IMX51 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c index 64c9cbe2a5df..46a9572f3473 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx53.c +++ b/drivers/pinctrl/freescale/pinctrl-imx53.c @@ -15,7 +15,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -478,12 +477,3 @@ static int __init imx53_pinctrl_init(void) return platform_driver_register(&imx53_pinctrl_driver); } arch_initcall(imx53_pinctrl_init); - -static void __exit imx53_pinctrl_exit(void) -{ - platform_driver_unregister(&imx53_pinctrl_driver); -} -module_exit(imx53_pinctrl_exit); -MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>"); -MODULE_DESCRIPTION("Freescale IMX53 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c index de17bac8ad89..3f25ca5867cc 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c @@ -1,4 +1,7 @@ /* + * Freescale imx6dl pinctrl driver + * + * Author: Shawn Guo <shawn.guo@linaro.org> * Copyright (C) 2013 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify @@ -9,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -484,13 +486,3 @@ static int __init imx6dl_pinctrl_init(void) return platform_driver_register(&imx6dl_pinctrl_driver); } arch_initcall(imx6dl_pinctrl_init); - -static void __exit imx6dl_pinctrl_exit(void) -{ - platform_driver_unregister(&imx6dl_pinctrl_driver); -} -module_exit(imx6dl_pinctrl_exit); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("Freescale imx6dl pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c index 55cd8a0e367d..d61651c40458 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx6q.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c @@ -15,7 +15,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -490,12 +489,3 @@ static int __init imx6q_pinctrl_init(void) return platform_driver_register(&imx6q_pinctrl_driver); } arch_initcall(imx6q_pinctrl_init); - -static void __exit imx6q_pinctrl_exit(void) -{ - platform_driver_unregister(&imx6q_pinctrl_driver); -} -module_exit(imx6q_pinctrl_exit); -MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>"); -MODULE_DESCRIPTION("Freescale IMX6Q pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c index bf455b8e73fc..d023f6b00623 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c @@ -1,4 +1,7 @@ /* + * Freescale imx6sl pinctrl driver + * + * Author: Shawn Guo <shawn.guo@linaro.org> * Copyright (C) 2013 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or modify @@ -9,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -371,7 +373,6 @@ static const struct of_device_id imx6sl_pinctrl_of_match[] = { { .compatible = "fsl,imx6sl-iomuxc", }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, imx6sl_pinctrl_of_match); static int imx6sl_pinctrl_probe(struct platform_device *pdev) { @@ -391,13 +392,3 @@ static int __init imx6sl_pinctrl_init(void) return platform_driver_register(&imx6sl_pinctrl_driver); } arch_initcall(imx6sl_pinctrl_init); - -static void __exit imx6sl_pinctrl_exit(void) -{ - platform_driver_unregister(&imx6sl_pinctrl_driver); -} -module_exit(imx6sl_pinctrl_exit); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("Freescale imx6sl pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c index 84118c388cc5..898b781701e6 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c @@ -1,4 +1,7 @@ /* + * Freescale imx6sx pinctrl driver + * + * Author: Anson Huang <Anson.Huang@freescale.com> * Copyright (C) 2014 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify @@ -9,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -394,13 +396,3 @@ static int __init imx6sx_pinctrl_init(void) return platform_driver_register(&imx6sx_pinctrl_driver); } arch_initcall(imx6sx_pinctrl_init); - -static void __exit imx6sx_pinctrl_exit(void) -{ - platform_driver_unregister(&imx6sx_pinctrl_driver); -} -module_exit(imx6sx_pinctrl_exit); - -MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>"); -MODULE_DESCRIPTION("Freescale imx6sx pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c index c707fdd933ec..1aeb840aae1d 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c @@ -1,4 +1,7 @@ /* + * Freescale imx6ul pinctrl driver + * + * Author: Anson Huang <Anson.Huang@freescale.com> * Copyright (C) 2015 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or modify @@ -9,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -310,13 +312,3 @@ static int __init imx6ul_pinctrl_init(void) return platform_driver_register(&imx6ul_pinctrl_driver); } arch_initcall(imx6ul_pinctrl_init); - -static void __exit imx6ul_pinctrl_exit(void) -{ - platform_driver_unregister(&imx6ul_pinctrl_driver); -} -module_exit(imx6ul_pinctrl_exit); - -MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>"); -MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c index d30d91f80dfd..a465a66c3ef4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx7d.c +++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c @@ -1,4 +1,7 @@ /* + * Freescale imx7d pinctrl driver + * + * Author: Anson Huang <Anson.Huang@freescale.com> * Copyright (C) 2014-2015 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify @@ -9,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -402,13 +404,3 @@ static int __init imx7d_pinctrl_init(void) return platform_driver_register(&imx7d_pinctrl_driver); } arch_initcall(imx7d_pinctrl_init); - -static void __exit imx7d_pinctrl_exit(void) -{ - platform_driver_unregister(&imx7d_pinctrl_driver); -} -module_exit(imx7d_pinctrl_exit); - -MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>"); -MODULE_DESCRIPTION("Freescale imx7d pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index 6bbda6b4ab50..41b5b07d5a2b 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c @@ -12,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pinctrl/machine.h> @@ -553,14 +552,3 @@ err: return ret; } EXPORT_SYMBOL_GPL(mxs_pinctrl_probe); - -int mxs_pinctrl_remove(struct platform_device *pdev) -{ - struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); - - pinctrl_unregister(d->pctl); - iounmap(d->base); - - return 0; -} -EXPORT_SYMBOL_GPL(mxs_pinctrl_remove); diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h index fdd88d0bae22..34dbf75208dc 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.h +++ b/drivers/pinctrl/freescale/pinctrl-mxs.h @@ -86,6 +86,5 @@ struct mxs_pinctrl_soc_data { int mxs_pinctrl_probe(struct platform_device *pdev, struct mxs_pinctrl_soc_data *soc); -int mxs_pinctrl_remove(struct platform_device *pdev); #endif /* __PINCTRL_MXS_H */ diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c index 6d81be096bc0..2b1e198e3092 100644 --- a/drivers/pinctrl/freescale/pinctrl-vf610.c +++ b/drivers/pinctrl/freescale/pinctrl-vf610.c @@ -12,7 +12,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -325,12 +324,3 @@ static int __init vf610_pinctrl_init(void) return 
platform_driver_register(&vf610_pinctrl_driver); } arch_initcall(vf610_pinctrl_init); - -static void __exit vf610_pinctrl_exit(void) -{ - platform_driver_unregister(&vf610_pinctrl_driver); -} -module_exit(vf610_pinctrl_exit); - -MODULE_DESCRIPTION("Freescale VF610 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig index 1c74e038b7b0..00fb055a4897 100644 --- a/drivers/pinctrl/intel/Kconfig +++ b/drivers/pinctrl/intel/Kconfig @@ -29,6 +29,17 @@ config PINCTRL_CHERRYVIEW Cherryview/Braswell pinctrl driver provides an interface that allows configuring of SoC pins and using them as GPIOs. +config PINCTRL_MERRIFIELD + tristate "Intel Merrifield pinctrl driver" + depends on X86_INTEL_MID + select PINMUX + select PINCONF + select GENERIC_PINCONF + help + Merrifield Family-Level Interface Shim (FLIS) driver provides an + interface that allows configuring of SoC pins and using them as + GPIOs. + config PINCTRL_INTEL tristate select PINMUX diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile index 03bc68e3546c..30803078f09e 100644 --- a/drivers/pinctrl/intel/Makefile +++ b/drivers/pinctrl/intel/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o +obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 7abfd42e8ffd..d22a9fe2e6df 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -15,7 +15,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/bitops.h> @@ -1822,17 +1821,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return 0; } -static int byt_pinctrl_remove(struct platform_device *pdev) -{ - struct byt_gpio *vg = platform_get_drvdata(pdev); - - pm_runtime_disable(&pdev->dev); - gpiochip_remove(&vg->chip); - pinctrl_unregister(vg->pctl_dev); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int byt_gpio_suspend(struct device *dev) { @@ -1930,10 +1918,11 @@ static const struct dev_pm_ops byt_gpio_pm_ops = { static struct platform_driver byt_gpio_driver = { .probe = byt_pinctrl_probe, - .remove = byt_pinctrl_remove, .driver = { - .name = "byt_gpio", - .pm = &byt_gpio_pm_ops, + .name = "byt_gpio", + .pm = &byt_gpio_pm_ops, + .suppress_bind_attrs = true, + .acpi_match_table = ACPI_PTR(byt_gpio_acpi_match), }, }; @@ -1943,9 +1932,3 @@ static int __init byt_gpio_init(void) return platform_driver_register(&byt_gpio_driver); } subsys_initcall(byt_gpio_init); - -static void __exit byt_gpio_exit(void) -{ - platform_driver_unregister(&byt_gpio_driver); -} -module_exit(byt_gpio_exit); diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index 5979d38c46b2..59cb7a6fc5be 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c @@ -1,7 +1,7 @@ /* * Intel Broxton SoC pinctrl/GPIO driver * - * Copyright (C) 2015, Intel Corporation + * Copyright (C) 2015, 2016 Intel Corporation * Author: Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it and/or modify @@ -1003,29 +1003,46 @@ static const struct acpi_device_id 
bxt_pinctrl_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match); +static const struct platform_device_id bxt_pinctrl_platform_ids[] = { + { "apl-pinctrl", (kernel_ulong_t)&apl_pinctrl_soc_data }, + { "broxton-pinctrl", (kernel_ulong_t)&bxt_pinctrl_soc_data }, + { }, +}; + static int bxt_pinctrl_probe(struct platform_device *pdev) { const struct intel_pinctrl_soc_data *soc_data = NULL; const struct intel_pinctrl_soc_data **soc_table; - const struct acpi_device_id *id; struct acpi_device *adev; int i; adev = ACPI_COMPANION(&pdev->dev); - if (!adev) - return -ENODEV; + if (adev) { + const struct acpi_device_id *id; - id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev); - if (!id) - return -ENODEV; + id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev); + if (!id) + return -ENODEV; - soc_table = (const struct intel_pinctrl_soc_data **)id->driver_data; + soc_table = (const struct intel_pinctrl_soc_data **) + id->driver_data; - for (i = 0; soc_table[i]; i++) { - if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) { - soc_data = soc_table[i]; - break; + for (i = 0; soc_table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) { + soc_data = soc_table[i]; + break; + } } + } else { + const struct platform_device_id *pid; + + pid = platform_get_device_id(pdev); + if (!pid) + return -ENODEV; + + soc_table = (const struct intel_pinctrl_soc_data **) + pid->driver_data; + soc_data = soc_table[pdev->id]; } if (!soc_data) @@ -1047,6 +1064,7 @@ static struct platform_driver bxt_pinctrl_driver = { .acpi_match_table = bxt_pinctrl_acpi_match, .pm = &bxt_pinctrl_pm_ops, }, + .id_table = bxt_pinctrl_platform_ids, }; static int __init bxt_pinctrl_init(void) @@ -1064,3 +1082,4 @@ module_exit(bxt_pinctrl_exit); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:broxton-pinctrl"); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index ac4f564f1c3e..5749a4eee746 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -160,7 +160,6 @@ struct chv_pin_context { * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller * @regs: MMIO registers - * @lock: Lock to serialize register accesses * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO * offset (in GPIO number space) * @community: Community this pinctrl instance represents @@ -174,7 +173,6 @@ struct chv_pinctrl { struct pinctrl_dev *pctldev; struct gpio_chip chip; void __iomem *regs; - raw_spinlock_t lock; unsigned intr_lines[16]; const struct chv_community *community; u32 saved_intmask; @@ -657,6 +655,17 @@ static const struct chv_community *chv_communities[] = { &southeast_community, }; +/* + * Lock to serialize register accesses + * + * Due to a silicon issue, a shared lock must be used to prevent + * concurrent accesses across the 4 GPIO controllers. + * + * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005), + * errata #CHT34, for further information. 
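Replacing the per-device lock with the file-scope chv_lock means every register accessor now serializes against all four Cherryview communities rather than just its own, which is what the CHT34 erratum calls for. The shape of the pattern, reduced to a single accessor with illustrative names:

	#include <linux/io.h>
	#include <linux/spinlock.h>

	/* One raw spinlock shared by every controller instance. */
	static DEFINE_RAW_SPINLOCK(chip_lock);

	static u32 chip_read_pad(void __iomem *reg)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(&chip_lock, flags);
		val = readl(reg);	/* no other instance touches the hw now */
		raw_spin_unlock_irqrestore(&chip_lock, flags);

		return val;
	}

The lock stays a raw spinlock so the IRQ-path callers remain valid on PREEMPT_RT, where ordinary spinlocks may sleep; the spinlock_t to raw_spinlock_t conversion in pinctrl-intel.c later in this diff follows the same reasoning.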
+ */ +static DEFINE_RAW_SPINLOCK(chv_lock); + static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset, unsigned reg) { @@ -718,13 +727,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, u32 ctrl0, ctrl1; bool locked; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1)); locked = chv_pad_locked(pctrl, offset); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); if (ctrl0 & CHV_PADCTRL0_GPIOEN) { seq_puts(s, "GPIO "); @@ -787,14 +796,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, grp = &pctrl->community->groups[group]; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); /* Check first that the pad is not locked */ for (i = 0; i < grp->npins; i++) { if (chv_pad_locked(pctrl, grp->pins[i])) { dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n", grp->pins[i]); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return -EBUSY; } } @@ -837,7 +846,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, pin, altfunc->mode, altfunc->invert_oe ? "" : "not "); } - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; } @@ -851,13 +860,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, void __iomem *reg; u32 value; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); if (chv_pad_locked(pctrl, offset)) { value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); if (!(value & CHV_PADCTRL0_GPIOEN)) { /* Locked so cannot enable */ - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return -EBUSY; } } else { @@ -897,7 +906,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, chv_writel(value, reg); } - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; } @@ -911,13 +920,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev, void __iomem *reg; u32 value; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); value = readl(reg) & ~CHV_PADCTRL0_GPIOEN; chv_writel(value, reg); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); } static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, @@ -929,7 +938,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, unsigned long flags; u32 ctrl0; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK; if (input) @@ -938,7 +947,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT; chv_writel(ctrl0, reg); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; } @@ -963,10 +972,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin, u16 arg = 0; u32 term; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); - 
raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT; @@ -1040,7 +1049,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, unsigned long flags; u32 ctrl0, pull; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(reg); switch (param) { @@ -1063,7 +1072,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; break; default: - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return -EINVAL; } @@ -1081,7 +1090,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; break; default: - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return -EINVAL; } @@ -1089,12 +1098,33 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, break; default: - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return -EINVAL; } chv_writel(ctrl0, reg); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; +} + +static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin, + bool enable) +{ + void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1); + unsigned long flags; + u32 ctrl1; + + raw_spin_lock_irqsave(&chv_lock, flags); + ctrl1 = readl(reg); + + if (enable) + ctrl1 |= CHV_PADCTRL1_ODEN; + else + ctrl1 &= ~CHV_PADCTRL1_ODEN; + + chv_writel(ctrl1, reg); + raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; } @@ -1123,6 +1153,18 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin, return ret; break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + ret = chv_config_set_oden(pctrl, pin, false); + if (ret) + return ret; + break; + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + ret = chv_config_set_oden(pctrl, pin, true); + if (ret) + return ret; + break; + default: return -ENOTSUPP; } @@ -1134,10 +1176,52 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin, return 0; } +static int chv_config_group_get(struct pinctrl_dev *pctldev, + unsigned int group, + unsigned long *config) +{ + const unsigned int *pins; + unsigned int npins; + int ret; + + ret = chv_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + ret = chv_config_get(pctldev, pins[0], config); + if (ret) + return ret; + + return 0; +} + +static int chv_config_group_set(struct pinctrl_dev *pctldev, + unsigned int group, unsigned long *configs, + unsigned int num_configs) +{ + const unsigned int *pins; + unsigned int npins; + int i, ret; + + ret = chv_get_group_pins(pctldev, group, &pins, &npins); + if (ret) + return ret; + + for (i = 0; i < npins; i++) { + ret = chv_config_set(pctldev, pins[i], configs, num_configs); + if (ret) + return ret; + } + + return 0; +} + static const struct pinconf_ops chv_pinconf_ops = { .is_generic = true, .pin_config_set = chv_config_set, .pin_config_get = chv_config_get, + .pin_config_group_get = chv_config_group_get, + .pin_config_group_set = chv_config_group_set, }; static struct pinctrl_desc chv_pinctrl_desc = { @@ -1160,9 +1244,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset) unsigned long flags; u32 ctrl0, cfg; - raw_spin_lock_irqsave(&pctrl->lock, flags); + 
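With chv_config_set_oden() handling CHV_PADCTRL1_ODEN, the driver now accepts the two generic drive modes, and the new chv_config_group_set() simply fans any config out to each pin of a group. On the consumer side such a request is a packed generic pinconf parameter; a hypothetical config array might look like this:

	#include <linux/pinctrl/pinconf-generic.h>

	/* Each unsigned long packs (param, argument); these correspond to
	 * clearing and setting CHV_PADCTRL1_ODEN respectively. */
	static const unsigned long my_pad_configs[] = {
		PIN_CONF_PACKED(PIN_CONFIG_DRIVE_PUSH_PULL, 0),
		PIN_CONF_PACKED(PIN_CONFIG_DRIVE_OPEN_DRAIN, 0),
	};

In device-tree terms the same knobs surface as the generic "drive-push-pull" and "drive-open-drain" properties handled by pinconf-generic.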
raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT; @@ -1180,7 +1264,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) void __iomem *reg; u32 ctrl0; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); reg = chv_padreg(pctrl, pin, CHV_PADCTRL0); ctrl0 = readl(reg); @@ -1192,7 +1276,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) chv_writel(ctrl0, reg); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); } static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) @@ -1202,9 +1286,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) u32 ctrl0, direction; unsigned long flags; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT; @@ -1242,14 +1326,14 @@ static void chv_gpio_irq_ack(struct irq_data *d) int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d)); u32 intr_line; - raw_spin_lock(&pctrl->lock); + raw_spin_lock(&chv_lock); intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); intr_line &= CHV_PADCTRL0_INTSEL_MASK; intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT); - raw_spin_unlock(&pctrl->lock); + raw_spin_unlock(&chv_lock); } static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) @@ -1260,7 +1344,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) u32 value, intr_line; unsigned long flags; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); intr_line &= CHV_PADCTRL0_INTSEL_MASK; @@ -1273,7 +1357,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) value |= BIT(intr_line); chv_writel(value, pctrl->regs + CHV_INTMASK); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); } static void chv_gpio_irq_mask(struct irq_data *d) @@ -1307,7 +1391,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) unsigned long flags; u32 intsel, value; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; @@ -1322,7 +1406,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) irq_set_handler_locked(d, handler); pctrl->intr_lines[intsel] = offset; } - raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); } chv_gpio_irq_unmask(d); @@ -1338,7 +1422,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) unsigned long flags; u32 value; - raw_spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&chv_lock, flags); /* * Pins which can be used as shared interrupt are configured in @@ -1387,7 +1471,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - 
raw_spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; } @@ -1499,7 +1583,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev) if (i == ARRAY_SIZE(chv_communities)) return -ENODEV; - raw_spin_lock_init(&pctrl->lock); pctrl->dev = &pdev->dev; #ifdef CONFIG_PM_SLEEP diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 3584e50fa2c6..257cab129692 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -89,7 +89,7 @@ struct intel_pinctrl_context { */ struct intel_pinctrl { struct device *dev; - spinlock_t lock; + raw_spinlock_t lock; struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; @@ -318,7 +318,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned long flags; int i; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); /* * All pins in the groups needs to be accessible and writable @@ -326,7 +326,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, */ for (i = 0; i < grp->npins; i++) { if (!intel_pad_usable(pctrl, grp->pins[i])) { - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return -EBUSY; } } @@ -345,7 +345,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, writel(value, padcfg0); } - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -359,10 +359,10 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, unsigned long flags; u32 value; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); if (!intel_pad_usable(pctrl, pin)) { - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return -EBUSY; } @@ -377,7 +377,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, value |= PADCFG0_GPIOTXDIS; writel(value, padcfg0); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -391,7 +391,7 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, unsigned long flags; u32 value; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); @@ -402,7 +402,7 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, value &= ~PADCFG0_GPIOTXDIS; writel(value, padcfg0); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -490,7 +490,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin, int ret = 0; u32 value; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1); value = readl(padcfg1); @@ -544,7 +544,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin, if (!ret) writel(value, padcfg1); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return ret; } @@ -611,14 +611,14 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value) unsigned long flags; u32 padcfg0; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = readl(reg); if (value) padcfg0 |= PADCFG0_GPIOTXSTATE; else padcfg0 &= ~PADCFG0_GPIOTXSTATE; 
writel(padcfg0, reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } } @@ -651,7 +651,7 @@ static void intel_gpio_irq_ack(struct irq_data *d) const struct intel_community *community; unsigned pin = irqd_to_hwirq(d); - spin_lock(&pctrl->lock); + raw_spin_lock(&pctrl->lock); community = intel_get_community(pctrl, pin); if (community) { @@ -662,7 +662,7 @@ static void intel_gpio_irq_ack(struct irq_data *d) writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); } - spin_unlock(&pctrl->lock); + raw_spin_unlock(&pctrl->lock); } static void intel_gpio_irq_enable(struct irq_data *d) @@ -673,7 +673,7 @@ static void intel_gpio_irq_enable(struct irq_data *d) unsigned pin = irqd_to_hwirq(d); unsigned long flags; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); community = intel_get_community(pctrl, pin); if (community) { @@ -691,7 +691,7 @@ static void intel_gpio_irq_enable(struct irq_data *d) writel(value, community->regs + community->ie_offset + gpp * 4); } - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) @@ -702,7 +702,7 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) unsigned pin = irqd_to_hwirq(d); unsigned long flags; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); community = intel_get_community(pctrl, pin); if (community) { @@ -721,7 +721,7 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) writel(value, reg); } - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void intel_gpio_irq_mask(struct irq_data *d) @@ -757,7 +757,7 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) return -EPERM; } - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); value = readl(reg); @@ -784,7 +784,7 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -796,12 +796,15 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) const struct intel_community *community; unsigned pin = irqd_to_hwirq(d); unsigned padno, gpp, gpp_offset; + unsigned long flags; u32 gpe_en; community = intel_get_community(pctrl, pin); if (!community) return -EINVAL; + raw_spin_lock_irqsave(&pctrl->lock, flags); + padno = pin_to_padno(community, pin); gpp = padno / community->gpp_size; gpp_offset = padno % community->gpp_size; @@ -821,6 +824,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) gpe_en &= ~BIT(gpp_offset); writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin); return 0; } @@ -919,7 +924,8 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) * to the irq directly) because on some platforms several GPIO * controllers share the same interrupt line. 
*/ - ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq, IRQF_SHARED, + ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq, + IRQF_SHARED | IRQF_NO_THREAD, dev_name(pctrl->dev), pctrl); if (ret) { dev_err(pctrl->dev, "failed to request interrupt\n"); @@ -995,7 +1001,7 @@ int intel_pinctrl_probe(struct platform_device *pdev, pctrl->dev = &pdev->dev; pctrl->soc = soc_data; - spin_lock_init(&pctrl->lock); + raw_spin_lock_init(&pctrl->lock); /* * Make a copy of the communities which we can use to hold pointers diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c new file mode 100644 index 000000000000..eb4990ff26ca --- /dev/null +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -0,0 +1,911 @@ +/* + * Intel Merrifield SoC pinctrl driver + * + * Copyright (C) 2016, Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include "pinctrl-intel.h" + +#define MRFLD_FAMILY_NR 64 +#define MRFLD_FAMILY_LEN 0x400 + +#define SLEW_OFFSET 0x000 +#define BUFCFG_OFFSET 0x100 +#define MISC_OFFSET 0x300 + +#define BUFCFG_PINMODE_SHIFT 0 +#define BUFCFG_PINMODE_MASK GENMASK(2, 0) +#define BUFCFG_PINMODE_GPIO 0 +#define BUFCFG_PUPD_VAL_SHIFT 4 +#define BUFCFG_PUPD_VAL_MASK GENMASK(5, 4) +#define BUFCFG_PUPD_VAL_2K 0 +#define BUFCFG_PUPD_VAL_20K 1 +#define BUFCFG_PUPD_VAL_50K 2 +#define BUFCFG_PUPD_VAL_910 3 +#define BUFCFG_PU_EN BIT(8) +#define BUFCFG_PD_EN BIT(9) +#define BUFCFG_Px_EN_MASK GENMASK(9, 8) +#define BUFCFG_SLEWSEL BIT(10) +#define BUFCFG_OVINEN BIT(12) +#define BUFCFG_OVINEN_EN BIT(13) +#define BUFCFG_OVINEN_MASK GENMASK(13, 12) +#define BUFCFG_OVOUTEN BIT(14) +#define BUFCFG_OVOUTEN_EN BIT(15) +#define BUFCFG_OVOUTEN_MASK GENMASK(15, 14) +#define BUFCFG_INDATAOV_VAL BIT(16) +#define BUFCFG_INDATAOV_EN BIT(17) +#define BUFCFG_INDATAOV_MASK GENMASK(17, 16) +#define BUFCFG_OUTDATAOV_VAL BIT(18) +#define BUFCFG_OUTDATAOV_EN BIT(19) +#define BUFCFG_OUTDATAOV_MASK GENMASK(19, 18) +#define BUFCFG_OD_EN BIT(21) + +/** + * struct mrfld_family - Intel pin family description + * @barno: MMIO BAR number where registers for this family reside + * @pin_base: Starting pin of pins in this family + * @npins: Number of pins in this family + * @protected: True if family is protected by access + * @regs: family specific common registers + */ +struct mrfld_family { + unsigned int barno; + unsigned int pin_base; + size_t npins; + bool protected; + void __iomem *regs; +}; + +#define MRFLD_FAMILY(b, s, e) \ + { \ + .barno = (b), \ + .pin_base = (s), \ + .npins = (e) - (s) + 1, \ + } + +#define MRFLD_FAMILY_PROTECTED(b, s, e) \ + { \ + .barno = (b), \ + .pin_base = (s), \ + .npins = (e) - (s) + 1, \ + .protected = true, \ + } + +static const struct pinctrl_pin_desc mrfld_pins[] = { + /* Family 0: OCP2SSC (0 pins) */ + /* Family 1: ULPI (13 pins) */ + PINCTRL_PIN(0, "ULPI_CLK"), + PINCTRL_PIN(1, "ULPI_D0"), + PINCTRL_PIN(2, "ULPI_D1"), + PINCTRL_PIN(3, "ULPI_D2"), + PINCTRL_PIN(4, "ULPI_D3"), + PINCTRL_PIN(5, "ULPI_D4"), + PINCTRL_PIN(6, "ULPI_D5"), + PINCTRL_PIN(7, "ULPI_D6"), 
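Adding IRQF_NO_THREAD to the shared interrupt request above keeps the demultiplexing handler in hard interrupt context even when forced interrupt threading is in effect; that matches the raw spinlock conversion, and a shared line additionally requires the handler to report accurately whether its own device fired. A simplified sketch of that contract (the real intel_gpio_irq() also walks each GPP within a community):

static irqreturn_t intel_gpio_irq(int irq, void *data)
{
	struct intel_pinctrl *pctrl = data;
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		const struct intel_community *community = &pctrl->communities[i];
		u32 pending = readl(community->regs + GPI_IS);

		if (pending)
			ret = IRQ_HANDLED;	/* demux bits via generic_handle_irq() */
	}

	/* IRQ_NONE lets the core try the other devices sharing the line */
	return ret;
}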
+ PINCTRL_PIN(8, "ULPI_D7"), + PINCTRL_PIN(9, "ULPI_DIR"), + PINCTRL_PIN(10, "ULPI_NXT"), + PINCTRL_PIN(11, "ULPI_REFCLK"), + PINCTRL_PIN(12, "ULPI_STP"), + /* Family 2: eMMC (24 pins) */ + PINCTRL_PIN(13, "EMMC_CLK"), + PINCTRL_PIN(14, "EMMC_CMD"), + PINCTRL_PIN(15, "EMMC_D0"), + PINCTRL_PIN(16, "EMMC_D1"), + PINCTRL_PIN(17, "EMMC_D2"), + PINCTRL_PIN(18, "EMMC_D3"), + PINCTRL_PIN(19, "EMMC_D4"), + PINCTRL_PIN(20, "EMMC_D5"), + PINCTRL_PIN(21, "EMMC_D6"), + PINCTRL_PIN(22, "EMMC_D7"), + PINCTRL_PIN(23, "EMMC_RST_N"), + PINCTRL_PIN(24, "GP154"), + PINCTRL_PIN(25, "GP155"), + PINCTRL_PIN(26, "GP156"), + PINCTRL_PIN(27, "GP157"), + PINCTRL_PIN(28, "GP158"), + PINCTRL_PIN(29, "GP159"), + PINCTRL_PIN(30, "GP160"), + PINCTRL_PIN(31, "GP161"), + PINCTRL_PIN(32, "GP162"), + PINCTRL_PIN(33, "GP163"), + PINCTRL_PIN(34, "GP97"), + PINCTRL_PIN(35, "GP14"), + PINCTRL_PIN(36, "GP15"), + /* Family 3: SDIO (20 pins) */ + PINCTRL_PIN(37, "GP77_SD_CD"), + PINCTRL_PIN(38, "GP78_SD_CLK"), + PINCTRL_PIN(39, "GP79_SD_CMD"), + PINCTRL_PIN(40, "GP80_SD_D0"), + PINCTRL_PIN(41, "GP81_SD_D1"), + PINCTRL_PIN(42, "GP82_SD_D2"), + PINCTRL_PIN(43, "GP83_SD_D3"), + PINCTRL_PIN(44, "GP84_SD_LS_CLK_FB"), + PINCTRL_PIN(45, "GP85_SD_LS_CMD_DIR"), + PINCTRL_PIN(46, "GP86_SD_LVL_D_DIR"), + PINCTRL_PIN(47, "GP88_SD_LS_SEL"), + PINCTRL_PIN(48, "GP87_SD_PD"), + PINCTRL_PIN(49, "GP89_SD_WP"), + PINCTRL_PIN(50, "GP90_SDIO_CLK"), + PINCTRL_PIN(51, "GP91_SDIO_CMD"), + PINCTRL_PIN(52, "GP92_SDIO_D0"), + PINCTRL_PIN(53, "GP93_SDIO_D1"), + PINCTRL_PIN(54, "GP94_SDIO_D2"), + PINCTRL_PIN(55, "GP95_SDIO_D3"), + PINCTRL_PIN(56, "GP96_SDIO_PD"), + /* Family 4: HSI (8 pins) */ + PINCTRL_PIN(57, "HSI_ACDATA"), + PINCTRL_PIN(58, "HSI_ACFLAG"), + PINCTRL_PIN(59, "HSI_ACREADY"), + PINCTRL_PIN(60, "HSI_ACWAKE"), + PINCTRL_PIN(61, "HSI_CADATA"), + PINCTRL_PIN(62, "HSI_CAFLAG"), + PINCTRL_PIN(63, "HSI_CAREADY"), + PINCTRL_PIN(64, "HSI_CAWAKE"), + /* Family 5: SSP Audio (14 pins) */ + PINCTRL_PIN(65, "GP70"), + PINCTRL_PIN(66, "GP71"), + PINCTRL_PIN(67, "GP32_I2S_0_CLK"), + PINCTRL_PIN(68, "GP33_I2S_0_FS"), + PINCTRL_PIN(69, "GP34_I2S_0_RXD"), + PINCTRL_PIN(70, "GP35_I2S_0_TXD"), + PINCTRL_PIN(71, "GP36_I2S_1_CLK"), + PINCTRL_PIN(72, "GP37_I2S_1_FS"), + PINCTRL_PIN(73, "GP38_I2S_1_RXD"), + PINCTRL_PIN(74, "GP39_I2S_1_TXD"), + PINCTRL_PIN(75, "GP40_I2S_2_CLK"), + PINCTRL_PIN(76, "GP41_I2S_2_FS"), + PINCTRL_PIN(77, "GP42_I2S_2_RXD"), + PINCTRL_PIN(78, "GP43_I2S_2_TXD"), + /* Family 6: GP SSP (22 pins) */ + PINCTRL_PIN(79, "GP120_SPI_3_CLK"), + PINCTRL_PIN(80, "GP121_SPI_3_SS"), + PINCTRL_PIN(81, "GP122_SPI_3_RXD"), + PINCTRL_PIN(82, "GP123_SPI_3_TXD"), + PINCTRL_PIN(83, "GP102_SPI_4_CLK"), + PINCTRL_PIN(84, "GP103_SPI_4_SS_0"), + PINCTRL_PIN(85, "GP104_SPI_4_SS_1"), + PINCTRL_PIN(86, "GP105_SPI_4_SS_2"), + PINCTRL_PIN(87, "GP106_SPI_4_SS_3"), + PINCTRL_PIN(88, "GP107_SPI_4_RXD"), + PINCTRL_PIN(89, "GP108_SPI_4_TXD"), + PINCTRL_PIN(90, "GP109_SPI_5_CLK"), + PINCTRL_PIN(91, "GP110_SPI_5_SS_0"), + PINCTRL_PIN(92, "GP111_SPI_5_SS_1"), + PINCTRL_PIN(93, "GP112_SPI_5_SS_2"), + PINCTRL_PIN(94, "GP113_SPI_5_SS_3"), + PINCTRL_PIN(95, "GP114_SPI_5_RXD"), + PINCTRL_PIN(96, "GP115_SPI_5_TXD"), + PINCTRL_PIN(97, "GP116_SPI_6_CLK"), + PINCTRL_PIN(98, "GP117_SPI_6_SS"), + PINCTRL_PIN(99, "GP118_SPI_6_RXD"), + PINCTRL_PIN(100, "GP119_SPI_6_TXD"), + /* Family 7: I2C (14 pins) */ + PINCTRL_PIN(101, "GP19_I2C_1_SCL"), + PINCTRL_PIN(102, "GP20_I2C_1_SDA"), + PINCTRL_PIN(103, "GP21_I2C_2_SCL"), + PINCTRL_PIN(104, "GP22_I2C_2_SDA"), + PINCTRL_PIN(105, 
"GP17_I2C_3_SCL_HDMI"), + PINCTRL_PIN(106, "GP18_I2C_3_SDA_HDMI"), + PINCTRL_PIN(107, "GP23_I2C_4_SCL"), + PINCTRL_PIN(108, "GP24_I2C_4_SDA"), + PINCTRL_PIN(109, "GP25_I2C_5_SCL"), + PINCTRL_PIN(110, "GP26_I2C_5_SDA"), + PINCTRL_PIN(111, "GP27_I2C_6_SCL"), + PINCTRL_PIN(112, "GP28_I2C_6_SDA"), + PINCTRL_PIN(113, "GP29_I2C_7_SCL"), + PINCTRL_PIN(114, "GP30_I2C_7_SDA"), + /* Family 8: UART (12 pins) */ + PINCTRL_PIN(115, "GP124_UART_0_CTS"), + PINCTRL_PIN(116, "GP125_UART_0_RTS"), + PINCTRL_PIN(117, "GP126_UART_0_RX"), + PINCTRL_PIN(118, "GP127_UART_0_TX"), + PINCTRL_PIN(119, "GP128_UART_1_CTS"), + PINCTRL_PIN(120, "GP129_UART_1_RTS"), + PINCTRL_PIN(121, "GP130_UART_1_RX"), + PINCTRL_PIN(122, "GP131_UART_1_TX"), + PINCTRL_PIN(123, "GP132_UART_2_CTS"), + PINCTRL_PIN(124, "GP133_UART_2_RTS"), + PINCTRL_PIN(125, "GP134_UART_2_RX"), + PINCTRL_PIN(126, "GP135_UART_2_TX"), + /* Family 9: GPIO South (19 pins) */ + PINCTRL_PIN(127, "GP177"), + PINCTRL_PIN(128, "GP178"), + PINCTRL_PIN(129, "GP179"), + PINCTRL_PIN(130, "GP180"), + PINCTRL_PIN(131, "GP181"), + PINCTRL_PIN(132, "GP182_PWM2"), + PINCTRL_PIN(133, "GP183_PWM3"), + PINCTRL_PIN(134, "GP184"), + PINCTRL_PIN(135, "GP185"), + PINCTRL_PIN(136, "GP186"), + PINCTRL_PIN(137, "GP187"), + PINCTRL_PIN(138, "GP188"), + PINCTRL_PIN(139, "GP189"), + PINCTRL_PIN(140, "GP64_FAST_INT0"), + PINCTRL_PIN(141, "GP65_FAST_INT1"), + PINCTRL_PIN(142, "GP66_FAST_INT2"), + PINCTRL_PIN(143, "GP67_FAST_INT3"), + PINCTRL_PIN(144, "GP12_PWM0"), + PINCTRL_PIN(145, "GP13_PWM1"), + /* Family 10: Camera Sideband (12 pins) */ + PINCTRL_PIN(146, "GP0"), + PINCTRL_PIN(147, "GP1"), + PINCTRL_PIN(148, "GP2"), + PINCTRL_PIN(149, "GP3"), + PINCTRL_PIN(150, "GP4"), + PINCTRL_PIN(151, "GP5"), + PINCTRL_PIN(152, "GP6"), + PINCTRL_PIN(153, "GP7"), + PINCTRL_PIN(154, "GP8"), + PINCTRL_PIN(155, "GP9"), + PINCTRL_PIN(156, "GP10"), + PINCTRL_PIN(157, "GP11"), + /* Family 11: Clock (22 pins) */ + PINCTRL_PIN(158, "GP137"), + PINCTRL_PIN(159, "GP138"), + PINCTRL_PIN(160, "GP139"), + PINCTRL_PIN(161, "GP140"), + PINCTRL_PIN(162, "GP141"), + PINCTRL_PIN(163, "GP142"), + PINCTRL_PIN(164, "GP16_HDMI_HPD"), + PINCTRL_PIN(165, "GP68_DSI_A_TE"), + PINCTRL_PIN(166, "GP69_DSI_C_TE"), + PINCTRL_PIN(167, "OSC_CLK_CTRL0"), + PINCTRL_PIN(168, "OSC_CLK_CTRL1"), + PINCTRL_PIN(169, "OSC_CLK0"), + PINCTRL_PIN(170, "OSC_CLK1"), + PINCTRL_PIN(171, "OSC_CLK2"), + PINCTRL_PIN(172, "OSC_CLK3"), + PINCTRL_PIN(173, "OSC_CLK4"), + PINCTRL_PIN(174, "RESETOUT"), + PINCTRL_PIN(175, "PMODE"), + PINCTRL_PIN(176, "PRDY"), + PINCTRL_PIN(177, "PREQ"), + PINCTRL_PIN(178, "GP190"), + PINCTRL_PIN(179, "GP191"), + /* Family 12: MSIC (15 pins) */ + PINCTRL_PIN(180, "I2C_0_SCL"), + PINCTRL_PIN(181, "I2C_0_SDA"), + PINCTRL_PIN(182, "IERR"), + PINCTRL_PIN(183, "JTAG_TCK"), + PINCTRL_PIN(184, "JTAG_TDI"), + PINCTRL_PIN(185, "JTAG_TDO"), + PINCTRL_PIN(186, "JTAG_TMS"), + PINCTRL_PIN(187, "JTAG_TRST"), + PINCTRL_PIN(188, "PROCHOT"), + PINCTRL_PIN(189, "RTC_CLK"), + PINCTRL_PIN(190, "SVID_ALERT"), + PINCTRL_PIN(191, "SVID_CLK"), + PINCTRL_PIN(192, "SVID_D"), + PINCTRL_PIN(193, "THERMTRIP"), + PINCTRL_PIN(194, "STANDBY"), + /* Family 13: Keyboard (20 pins) */ + PINCTRL_PIN(195, "GP44"), + PINCTRL_PIN(196, "GP45"), + PINCTRL_PIN(197, "GP46"), + PINCTRL_PIN(198, "GP47"), + PINCTRL_PIN(199, "GP48"), + PINCTRL_PIN(200, "GP49"), + PINCTRL_PIN(201, "GP50"), + PINCTRL_PIN(202, "GP51"), + PINCTRL_PIN(203, "GP52"), + PINCTRL_PIN(204, "GP53"), + PINCTRL_PIN(205, "GP54"), + PINCTRL_PIN(206, "GP55"), + PINCTRL_PIN(207, "GP56"), + PINCTRL_PIN(208, 
"GP57"), + PINCTRL_PIN(209, "GP58"), + PINCTRL_PIN(210, "GP59"), + PINCTRL_PIN(211, "GP60"), + PINCTRL_PIN(212, "GP61"), + PINCTRL_PIN(213, "GP62"), + PINCTRL_PIN(214, "GP63"), + /* Family 14: GPIO North (13 pins) */ + PINCTRL_PIN(215, "GP164"), + PINCTRL_PIN(216, "GP165"), + PINCTRL_PIN(217, "GP166"), + PINCTRL_PIN(218, "GP167"), + PINCTRL_PIN(219, "GP168_MJTAG_TCK"), + PINCTRL_PIN(220, "GP169_MJTAG_TDI"), + PINCTRL_PIN(221, "GP170_MJTAG_TDO"), + PINCTRL_PIN(222, "GP171_MJTAG_TMS"), + PINCTRL_PIN(223, "GP172_MJTAG_TRST"), + PINCTRL_PIN(224, "GP173"), + PINCTRL_PIN(225, "GP174"), + PINCTRL_PIN(226, "GP175"), + PINCTRL_PIN(227, "GP176"), + /* Family 15: PTI (5 pins) */ + PINCTRL_PIN(228, "GP72_PTI_CLK"), + PINCTRL_PIN(229, "GP73_PTI_D0"), + PINCTRL_PIN(230, "GP74_PTI_D1"), + PINCTRL_PIN(231, "GP75_PTI_D2"), + PINCTRL_PIN(232, "GP76_PTI_D3"), + /* Family 16: USB3 (0 pins) */ + /* Family 17: HSIC (0 pins) */ + /* Family 18: Broadcast (0 pins) */ +}; + +static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; +static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; +static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; +static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; +static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; +static const unsigned int mrfld_pwm0_pins[] = { 144 }; +static const unsigned int mrfld_pwm1_pins[] = { 145 }; +static const unsigned int mrfld_pwm2_pins[] = { 132 }; +static const unsigned int mrfld_pwm3_pins[] = { 133 }; + +static const struct intel_pingroup mrfld_groups[] = { + PIN_GROUP("sdio_grp", mrfld_sdio_pins, 1), + PIN_GROUP("spi5_grp", mrfld_spi5_pins, 1), + PIN_GROUP("uart0_grp", mrfld_uart0_pins, 1), + PIN_GROUP("uart1_grp", mrfld_uart1_pins, 1), + PIN_GROUP("uart2_grp", mrfld_uart2_pins, 1), + PIN_GROUP("pwm0_grp", mrfld_pwm0_pins, 1), + PIN_GROUP("pwm1_grp", mrfld_pwm1_pins, 1), + PIN_GROUP("pwm2_grp", mrfld_pwm2_pins, 1), + PIN_GROUP("pwm3_grp", mrfld_pwm3_pins, 1), +}; + +static const char * const mrfld_sdio_groups[] = { "sdio_grp" }; +static const char * const mrfld_spi5_groups[] = { "spi5_grp" }; +static const char * const mrfld_uart0_groups[] = { "uart0_grp" }; +static const char * const mrfld_uart1_groups[] = { "uart1_grp" }; +static const char * const mrfld_uart2_groups[] = { "uart2_grp" }; +static const char * const mrfld_pwm0_groups[] = { "pwm0_grp" }; +static const char * const mrfld_pwm1_groups[] = { "pwm1_grp" }; +static const char * const mrfld_pwm2_groups[] = { "pwm2_grp" }; +static const char * const mrfld_pwm3_groups[] = { "pwm3_grp" }; + +static const struct intel_function mrfld_functions[] = { + FUNCTION("sdio", mrfld_sdio_groups), + FUNCTION("spi5", mrfld_spi5_groups), + FUNCTION("uart0", mrfld_uart0_groups), + FUNCTION("uart1", mrfld_uart1_groups), + FUNCTION("uart2", mrfld_uart2_groups), + FUNCTION("pwm0", mrfld_pwm0_groups), + FUNCTION("pwm1", mrfld_pwm1_groups), + FUNCTION("pwm2", mrfld_pwm2_groups), + FUNCTION("pwm3", mrfld_pwm3_groups), +}; + +static const struct mrfld_family mrfld_families[] = { + MRFLD_FAMILY(1, 0, 12), + MRFLD_FAMILY(2, 13, 36), + MRFLD_FAMILY(3, 37, 56), + MRFLD_FAMILY(4, 57, 64), + MRFLD_FAMILY(5, 65, 78), + MRFLD_FAMILY(6, 79, 100), + MRFLD_FAMILY_PROTECTED(7, 101, 114), + MRFLD_FAMILY(8, 115, 126), + MRFLD_FAMILY(9, 127, 145), + MRFLD_FAMILY(10, 146, 157), + MRFLD_FAMILY(11, 158, 179), + MRFLD_FAMILY_PROTECTED(12, 180, 194), + MRFLD_FAMILY(13, 195, 214), + MRFLD_FAMILY(14, 215, 227), + MRFLD_FAMILY(15, 228, 232), 
+}; + +/** + * struct mrfld_pinctrl - Intel Merrifield pinctrl private structure + * @dev: Pointer to the device structure + * @lock: Lock to serialize register access + * @pctldesc: Pin controller description + * @pctldev: Pointer to the pin controller device + * @families: Array of families this pinctrl handles + * @nfamilies: Number of families in the array + * @functions: Array of functions + * @nfunctions: Number of functions in the array + * @groups: Array of pin groups + * @ngroups: Number of groups in the array + * @pins: Array of pins this pinctrl controls + * @npins: Number of pins in the array + */ +struct mrfld_pinctrl { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + + /* Pin controller configuration */ + const struct mrfld_family *families; + size_t nfamilies; + const struct intel_function *functions; + size_t nfunctions; + const struct intel_pingroup *groups; + size_t ngroups; + const struct pinctrl_pin_desc *pins; + size_t npins; +}; + +#define pin_to_bufno(f, p) ((p) - (f)->pin_base) + +static const struct mrfld_family *mrfld_get_family(struct mrfld_pinctrl *mp, + unsigned int pin) +{ + const struct mrfld_family *family; + unsigned int i; + + for (i = 0; i < mp->nfamilies; i++) { + family = &mp->families[i]; + if (pin >= family->pin_base && + pin < family->pin_base + family->npins) + return family; + } + + dev_warn(mp->dev, "failed to find family for pin %u\n", pin); + return NULL; +} + +static bool mrfld_buf_available(struct mrfld_pinctrl *mp, unsigned int pin) +{ + const struct mrfld_family *family; + + family = mrfld_get_family(mp, pin); + if (!family) + return false; + + return !family->protected; +} + +static void __iomem *mrfld_get_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin) +{ + const struct mrfld_family *family; + unsigned int bufno; + + family = mrfld_get_family(mp, pin); + if (!family) + return NULL; + + bufno = pin_to_bufno(family, pin); + return family->regs + BUFCFG_OFFSET + bufno * 4; +} + +static int mrfld_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + + return mp->ngroups; +} + +static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + + return mp->groups[group].name; +} + +static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + + *pins = mp->groups[group].pins; + *npins = mp->groups[group].npins; + return 0; +} + +static void mrfld_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, + unsigned int pin) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + void __iomem *bufcfg; + u32 value, mode; + + if (!mrfld_buf_available(mp, pin)) { + seq_puts(s, "not available"); + return; + } + + bufcfg = mrfld_get_bufcfg(mp, pin); + value = readl(bufcfg); + + mode = (value & BUFCFG_PINMODE_MASK) >> BUFCFG_PINMODE_SHIFT; + if (!mode) + seq_puts(s, "GPIO "); + else + seq_printf(s, "mode %d ", mode); + + seq_printf(s, "0x%08x", value); +} + +static const struct pinctrl_ops mrfld_pinctrl_ops = { + .get_groups_count = mrfld_get_groups_count, + .get_group_name = mrfld_get_group_name, + .get_group_pins = mrfld_get_group_pins, + .pin_dbg_show = mrfld_pin_dbg_show, +}; + +static int mrfld_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct mrfld_pinctrl *mp = 
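mrfld_get_bufcfg() above resolves a pin's configuration register in two steps: locate the family, then index into that family's BUFCFG block. A worked example using the tables defined earlier (illustrative function, not part of the patch):

static void __iomem *mrfld_bufcfg_example(void __iomem *regs)
{
	/*
	 * Pin 40 ("GP80_SD_D0") falls in MRFLD_FAMILY(3, 37, 56),
	 * so bufno = 40 - 37 = 3 and barno 3 places the family
	 * window at 3 * MRFLD_FAMILY_LEN = 0xc00.
	 */
	return regs + 3 * MRFLD_FAMILY_LEN	/* 0xc00 */
		    + BUFCFG_OFFSET		/* + 0x100 -> 0xd00 */
		    + 3 * 4;			/* + 0xc -> 0xd0c */
}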
pinctrl_dev_get_drvdata(pctldev); + + return mp->nfunctions; +} + +static const char *mrfld_get_function_name(struct pinctrl_dev *pctldev, + unsigned int function) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + + return mp->functions[function].name; +} + +static int mrfld_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int function, + const char * const **groups, + unsigned int * const ngroups) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + + *groups = mp->functions[function].groups; + *ngroups = mp->functions[function].ngroups; + return 0; +} + +static void mrfld_update_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin, + u32 bits, u32 mask) +{ + void __iomem *bufcfg; + u32 value; + + bufcfg = mrfld_get_bufcfg(mp, pin); + value = readl(bufcfg); + + value &= ~mask; + value |= bits & mask; + + writel(value, bufcfg); +} + +static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned int function, + unsigned int group) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pingroup *grp = &mp->groups[group]; + u32 bits = grp->mode << BUFCFG_PINMODE_SHIFT; + u32 mask = BUFCFG_PINMODE_MASK; + unsigned long flags; + unsigned int i; + + /* + * All pins in the groups needs to be accessible and writable + * before we can enable the mux for this group. + */ + for (i = 0; i < grp->npins; i++) { + if (!mrfld_buf_available(mp, grp->pins[i])) + return -EBUSY; + } + + /* Now enable the mux setting for each pin in the group */ + raw_spin_lock_irqsave(&mp->lock, flags); + for (i = 0; i < grp->npins; i++) + mrfld_update_bufcfg(mp, grp->pins[i], bits, mask); + raw_spin_unlock_irqrestore(&mp->lock, flags); + + return 0; +} + +static int mrfld_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + u32 bits = BUFCFG_PINMODE_GPIO << BUFCFG_PINMODE_SHIFT; + u32 mask = BUFCFG_PINMODE_MASK; + unsigned long flags; + + if (!mrfld_buf_available(mp, pin)) + return -EBUSY; + + raw_spin_lock_irqsave(&mp->lock, flags); + mrfld_update_bufcfg(mp, pin, bits, mask); + raw_spin_unlock_irqrestore(&mp->lock, flags); + + return 0; +} + +static const struct pinmux_ops mrfld_pinmux_ops = { + .get_functions_count = mrfld_get_functions_count, + .get_function_name = mrfld_get_function_name, + .get_function_groups = mrfld_get_function_groups, + .set_mux = mrfld_pinmux_set_mux, + .gpio_request_enable = mrfld_gpio_request_enable, +}; + +static int mrfld_config_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 value, term; + u16 arg = 0; + + if (!mrfld_buf_available(mp, pin)) + return -ENOTSUPP; + + value = readl(mrfld_get_bufcfg(mp, pin)); + term = (value & BUFCFG_PUPD_VAL_MASK) >> BUFCFG_PUPD_VAL_SHIFT; + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + if (value & BUFCFG_Px_EN_MASK) + return -EINVAL; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + if ((value & BUFCFG_Px_EN_MASK) != BUFCFG_PU_EN) + return -EINVAL; + + switch (term) { + case BUFCFG_PUPD_VAL_910: + arg = 910; + break; + case BUFCFG_PUPD_VAL_2K: + arg = 2000; + break; + case BUFCFG_PUPD_VAL_20K: + arg = 20000; + break; + case BUFCFG_PUPD_VAL_50K: + arg = 50000; + break; + } + + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + if ((value & BUFCFG_Px_EN_MASK) != BUFCFG_PD_EN) + return -EINVAL; + + switch 
(term) { + case BUFCFG_PUPD_VAL_910: + arg = 910; + break; + case BUFCFG_PUPD_VAL_2K: + arg = 2000; + break; + case BUFCFG_PUPD_VAL_20K: + arg = 20000; + break; + case BUFCFG_PUPD_VAL_50K: + arg = 50000; + break; + } + + break; + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (!(value & BUFCFG_OD_EN)) + return -EINVAL; + break; + + case PIN_CONFIG_SLEW_RATE: + if (!(value & BUFCFG_SLEWSEL)) + arg = 0; + else + arg = 1; + break; + + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + return 0; +} + +static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, + unsigned long config) +{ + unsigned int param = pinconf_to_config_param(config); + unsigned int arg = pinconf_to_config_argument(config); + u32 bits = 0, mask = 0; + unsigned long flags; + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; + bits |= BUFCFG_PU_EN; + + switch (arg) { + case 50000: + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; + break; + case 20000: + bits |= BUFCFG_PUPD_VAL_20K << BUFCFG_PUPD_VAL_SHIFT; + break; + case 2000: + bits |= BUFCFG_PUPD_VAL_2K << BUFCFG_PUPD_VAL_SHIFT; + break; + default: + return -EINVAL; + } + + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; + bits |= BUFCFG_PD_EN; + + switch (arg) { + case 50000: + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; + break; + case 20000: + bits |= BUFCFG_PUPD_VAL_20K << BUFCFG_PUPD_VAL_SHIFT; + break; + case 2000: + bits |= BUFCFG_PUPD_VAL_2K << BUFCFG_PUPD_VAL_SHIFT; + break; + default: + return -EINVAL; + } + + break; + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + mask |= BUFCFG_OD_EN; + if (arg) + bits |= BUFCFG_OD_EN; + break; + + case PIN_CONFIG_SLEW_RATE: + mask |= BUFCFG_SLEWSEL; + if (arg) + bits |= BUFCFG_SLEWSEL; + break; + } + + raw_spin_lock_irqsave(&mp->lock, flags); + mrfld_update_bufcfg(mp, pin, bits, mask); + raw_spin_unlock_irqrestore(&mp->lock, flags); + + return 0; +} + +static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int nconfigs) +{ + struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev); + unsigned int i; + int ret; + + for (i = 0; i < nconfigs; i++) { + switch (pinconf_to_config_param(configs[i])) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + case PIN_CONFIG_SLEW_RATE: + ret = mrfld_config_set_pin(mp, pin, configs[i]); + if (ret) + return ret; + break; + + default: + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops mrfld_pinconf_ops = { + .is_generic = true, + .pin_config_get = mrfld_config_get, + .pin_config_set = mrfld_config_set, +}; + +static const struct pinctrl_desc mrfld_pinctrl_desc = { + .pctlops = &mrfld_pinctrl_ops, + .pmxops = &mrfld_pinmux_ops, + .confops = &mrfld_pinconf_ops, + .owner = THIS_MODULE, +}; + +static int mrfld_pinctrl_probe(struct platform_device *pdev) +{ + struct mrfld_family *families; + struct mrfld_pinctrl *mp; + struct resource *mem; + void __iomem *regs; + size_t nfamilies; + unsigned int i; + + mp = devm_kzalloc(&pdev->dev, sizeof(*mp), GFP_KERNEL); + if (!mp) + return -ENOMEM; + + mp->dev = &pdev->dev; + raw_spin_lock_init(&mp->lock); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(regs)) + return 
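The pull handling round-trips between ohm values and the two-bit BUFCFG_PUPD_VAL field; note the asymmetry in the code above: mrfld_config_get() can report the 910 ohm setting, but mrfld_config_set_pin() deliberately rejects it. A hypothetical encoder for the writable subset:

static int mrfld_pull_to_bufcfg(unsigned int ohms, u32 *bits)
{
	switch (ohms) {
	case 2000:
		*bits = BUFCFG_PUPD_VAL_2K << BUFCFG_PUPD_VAL_SHIFT;
		return 0;
	case 20000:
		*bits = BUFCFG_PUPD_VAL_20K << BUFCFG_PUPD_VAL_SHIFT;
		return 0;
	case 50000:
		*bits = BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
		return 0;
	default:
		return -EINVAL;	/* 910 ohm is read-only, see above */
	}
}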
PTR_ERR(regs); + + /* + * Make a copy of the families which we can use to hold pointers + * to the registers. + */ + nfamilies = ARRAY_SIZE(mrfld_families); + families = devm_kmemdup(&pdev->dev, mrfld_families, + sizeof(mrfld_families), + GFP_KERNEL); + if (!families) + return -ENOMEM; + + /* Splice memory resource by chunk per family */ + for (i = 0; i < nfamilies; i++) { + struct mrfld_family *family = &families[i]; + + family->regs = regs + family->barno * MRFLD_FAMILY_LEN; + } + + mp->families = families; + mp->nfamilies = nfamilies; + mp->functions = mrfld_functions; + mp->nfunctions = ARRAY_SIZE(mrfld_functions); + mp->groups = mrfld_groups; + mp->ngroups = ARRAY_SIZE(mrfld_groups); + mp->pctldesc = mrfld_pinctrl_desc; + mp->pctldesc.name = dev_name(&pdev->dev); + mp->pctldesc.pins = mrfld_pins; + mp->pctldesc.npins = ARRAY_SIZE(mrfld_pins); + + mp->pctldev = devm_pinctrl_register(&pdev->dev, &mp->pctldesc, mp); + if (IS_ERR(mp->pctldev)) { + dev_err(&pdev->dev, "failed to register pinctrl driver\n"); + return PTR_ERR(mp->pctldev); + } + + platform_set_drvdata(pdev, mp); + return 0; +} + +static struct platform_driver mrfld_pinctrl_driver = { + .probe = mrfld_pinctrl_probe, + .driver = { + .name = "pinctrl-merrifield", + }, +}; + +static int __init mrfld_pinctrl_init(void) +{ + return platform_driver_register(&mrfld_pinctrl_driver); +} +subsys_initcall(mrfld_pinctrl_init); + +static void __exit mrfld_pinctrl_exit(void) +{ + platform_driver_unregister(&mrfld_pinctrl_driver); +} +module_exit(mrfld_pinctrl_exit); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); +MODULE_DESCRIPTION("Intel Merrifield SoC pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:pinctrl-merrifield"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index a607655d7830..ce554e0d6979 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1183,8 +1183,8 @@ static int mtk_eint_resume(struct device *device) } const struct dev_pm_ops mtk_eint_pm_ops = { - .suspend = mtk_eint_suspend, - .resume = mtk_eint_resume, + .suspend_noirq = mtk_eint_suspend, + .resume_noirq = mtk_eint_resume, }; static void mtk_eint_ack(struct irq_data *d) diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index eeabafbbf598..cb4d6ad30530 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -147,6 +147,52 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = { MESON_PIN(GPIO_TEST_N, EE_OFF), }; +static const unsigned int emmc_nand_d07_pins[] = { + PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF), + PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF), + PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF), +}; +static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) }; +static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) }; +static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) }; + +static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) }; +static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) }; +static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) }; +static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) }; +static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) }; +static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) }; + +static
const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) }; +static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) }; +static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) }; +static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) }; + +static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) }; +static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) }; +static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) }; +static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) }; + +static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_13, EE_OFF) }; +static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) }; +static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) }; +static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) }; + +static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) }; +static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) }; +static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) }; +static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) }; +static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) }; +static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) }; +static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) }; +static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) }; +static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) }; +static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) }; +static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) }; +static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) }; +static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) }; +static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) }; + static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = { MESON_PIN(GPIOAO_0, 0), MESON_PIN(GPIOAO_1, 0), @@ -168,6 +214,16 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; +static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), + PIN(GPIOAO_5, 0) }; +static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; +static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; + +static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) }; +static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) }; +static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) }; +static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) }; static struct meson_pmx_group meson_gxbb_periphs_groups[] = { GPIO_GROUP(GPIOZ_0, EE_OFF), @@ -297,6 +353,54 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = { GPIO_GROUP(GPIOCLK_3, EE_OFF), GPIO_GROUP(GPIO_TEST_N, EE_OFF), + + /* Bank X */ + GROUP(uart_tx_a, 4, 13), + GROUP(uart_rx_a, 4, 12), + GROUP(uart_cts_a, 4, 11), + GROUP(uart_rts_a, 4, 10), + + /* Bank Y */ + GROUP(uart_cts_c, 1, 19), + GROUP(uart_rts_c, 1, 18), + GROUP(uart_tx_c, 1, 17), + GROUP(uart_rx_c, 1, 16), + + /* Bank Z */ + GROUP(eth_mdio, 6, 1), + GROUP(eth_mdc, 6, 0), + GROUP(eth_clk_rx_clk, 6, 13), + GROUP(eth_rx_dv, 6, 12), + GROUP(eth_rxd0, 6, 11), + 
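Each GROUP(name, reg, bit) entry above ties one pin group to one mux-enable bit: GROUP(uart_tx_a, 4, 13), for example, means the group is selected by setting bit 13 of 32-bit mux register 4 in the periphs domain. A minimal sketch of the register operation implied by that encoding (hypothetical helper; the real meson pinmux core also disables conflicting groups first):

static void meson_pmx_enable_group(void __iomem *mux_base,
				   unsigned int reg, unsigned int bit)
{
	u32 value = readl(mux_base + reg * 4);

	writel(value | BIT(bit), mux_base + reg * 4);
}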
GROUP(eth_rxd1, 6, 10), + GROUP(eth_rxd2, 6, 9), + GROUP(eth_rxd3, 6, 8), + GROUP(eth_rgmii_tx_clk, 6, 7), + GROUP(eth_tx_en, 6, 6), + GROUP(eth_txd0, 6, 5), + GROUP(eth_txd1, 6, 4), + GROUP(eth_txd2, 6, 3), + GROUP(eth_txd3, 6, 2), + + /* Bank DV */ + GROUP(uart_tx_b, 2, 29), + GROUP(uart_rx_b, 2, 28), + GROUP(uart_cts_b, 2, 27), + GROUP(uart_rts_b, 2, 26), + + /* Bank BOOT */ + GROUP(emmc_nand_d07, 4, 30), + GROUP(emmc_clk, 4, 18), + GROUP(emmc_cmd, 4, 19), + GROUP(emmc_ds, 4, 31), + + /* Bank CARD */ + GROUP(sdcard_d1, 2, 14), + GROUP(sdcard_d0, 2, 15), + GROUP(sdcard_d3, 2, 12), + GROUP(sdcard_d2, 2, 13), + GROUP(sdcard_cmd, 2, 10), + GROUP(sdcard_clk, 2, 11), }; static struct meson_pmx_group meson_gxbb_aobus_groups[] = { @@ -316,10 +420,18 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = { GPIO_GROUP(GPIOAO_13, 0), /* bank AO */ + GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_rx_ao_b, 0, 25), GROUP(uart_tx_ao_a, 0, 12), GROUP(uart_rx_ao_a, 0, 11), GROUP(uart_cts_ao_a, 0, 10), GROUP(uart_rts_ao_a, 0, 9), + GROUP(uart_cts_ao_b, 0, 8), + GROUP(uart_rts_ao_b, 0, 7), + GROUP(i2c_sck_ao, 0, 6), + GROUP(i2c_sda_ao, 0, 5), + GROUP(i2c_slave_sck_ao, 0, 2), + GROUP(i2c_slave_sda_ao, 0, 1), }; static const char * const gpio_periphs_groups[] = { @@ -359,6 +471,34 @@ static const char * const gpio_periphs_groups[] = { "GPIO_TEST_N", }; +static const char * const emmc_groups[] = { + "emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds", +}; + +static const char * const sdcard_groups[] = { + "sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3", + "sdcard_cmd", "sdcard_clk", +}; + +static const char * const uart_a_groups[] = { + "uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a", +}; + +static const char * const uart_b_groups[] = { + "uart_tx_b", "uart_rx_b", "uart_cts_b", "uart_rts_b", +}; + +static const char * const uart_c_groups[] = { + "uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c", +}; + +static const char * const eth_groups[] = { + "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv", + "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3", + "eth_rgmii_tx_clk", "eth_tx_en", + "eth_txd0", "eth_txd1", "eth_txd2", "eth_txd3", +}; + static const char * const gpio_aobus_groups[] = { "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9", @@ -366,16 +506,37 @@ static const char * const gpio_aobus_groups[] = { }; static const char * const uart_ao_groups[] = { - "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a" + "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a", +}; + +static const char * const uart_ao_b_groups[] = { + "uart_tx_ao_b", "uart_rx_ao_b", "uart_cts_ao_b", "uart_rts_ao_b", +}; + +static const char * const i2c_ao_groups[] = { + "i2c_sck_ao", "i2c_sda_ao", +}; + +static const char * const i2c_slave_ao_groups[] = { + "i2c_slave_sck_ao", "i2c_slave_sda_ao", }; static struct meson_pmx_func meson_gxbb_periphs_functions[] = { FUNCTION(gpio_periphs), + FUNCTION(emmc), + FUNCTION(sdcard), + FUNCTION(uart_a), + FUNCTION(uart_b), + FUNCTION(uart_c), + FUNCTION(eth), }; static struct meson_pmx_func meson_gxbb_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao), + FUNCTION(uart_ao_b), + FUNCTION(i2c_ao), + FUNCTION(i2c_slave_ao), }; static struct meson_bank meson_gxbb_periphs_banks[] = { diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c index a78e9a4997ba..5f89c26f3292 100644 --- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c +++
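With the new function tables in place, the AO I2C pads are selected by (function, group) pairs. On these DT platforms the selection normally comes from the device tree, but an equivalent static board map shows how the names line up (the device and controller names here are invented for illustration):

#include <linux/pinctrl/machine.h>

static const struct pinctrl_map gxbb_i2c_ao_map[] __initconst = {
	PIN_MAP_MUX_GROUP_DEFAULT("c1108500.i2c", "pinctrl-aobus",
				  "i2c_sck_ao", "i2c_ao"),
	PIN_MAP_MUX_GROUP_DEFAULT("c1108500.i2c", "pinctrl-aobus",
				  "i2c_sda_ao", "i2c_ao"),
};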
b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c @@ -168,87 +168,87 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = { MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1, 1))), MPP_MODE(20, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1, 0)), - MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0, 0))), + MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 0, 0, 0, 0, 0))), MPP_MODE(21, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1, 0))), MPP_MODE(22, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1, 0))), MPP_MODE(23, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1, 0))), MPP_MODE(24, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1, 0))), MPP_MODE(25, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", 
V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1, 0))), MPP_MODE(26, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1, 0))), MPP_MODE(27, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1, 0))), MPP_MODE(28, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1, 0))), MPP_MODE(29, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 0, 0, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1, 0))), MPP_MODE(30, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)), @@ -280,65 +280,65 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = { MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1, 0))), MPP_MODE(35, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)), MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1, 0)), - MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1, 0))), + MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 1, 1, 1, 1, 0))), MPP_MODE(36, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifi", 
V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1, 0))), MPP_MODE(37, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1, 0))), MPP_MODE(38, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1, 0))), MPP_MODE(39, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1, 0))), MPP_MODE(40, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1, 0))), MPP_MODE(41, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1, 0))), MPP_MODE(42, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1, 0))), MPP_MODE(43, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1, 0))), MPP_MODE(44, - MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)), MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1, 0)), - MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 1, 1, 0)), MPP_VAR_FUNCTION(0xb, "lcd", 
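These kirkwood table changes only flip availability flags: each V(...) packs one boolean per supported SoC variant, and the column order in this file appears to be mv88f6180, mv88f6190, mv88f6192, mv88f6281, mv88f6282, mv98dx4122 (an assumption to verify against the V() macro definition). The net effect is to remove the MPP20-29 settings from mv88f6180 and to enable the MPP35-44 gpio/audio/mii settings there. A hypothetical expansion illustrating that encoding:

#define V(f6180, f6190, f6192, f6281, f6282, dx4122)		\
	((f6180) << 0 | (f6190) << 1 | (f6192) << 2 |		\
	 (f6281) << 3 | (f6282) << 4 | (dx4122) << 5)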
"clk", V(0, 0, 0, 0, 1, 0))), MPP_MODE(45, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)), @@ -371,11 +371,12 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = { }; static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = { - MPP_FUNC_CTRL(0, 29, NULL, kirkwood_mpp_ctrl), + MPP_FUNC_CTRL(0, 44, NULL, kirkwood_mpp_ctrl), }; static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = { - MPP_GPIO_RANGE(0, 0, 0, 30), + MPP_GPIO_RANGE(0, 0, 0, 20), + MPP_GPIO_RANGE(1, 35, 35, 10), }; static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = { diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 38faceff2f08..35f62180db4e 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1033,102 +1033,6 @@ static inline void nmk_gpio_dbg_show_one(struct seq_file *s, #define nmk_gpio_dbg_show NULL #endif -void nmk_gpio_clocks_enable(void) -{ - int i; - - for (i = 0; i < NUM_BANKS; i++) { - struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; - - if (!chip) - continue; - - clk_enable(chip->clk); - } -} - -void nmk_gpio_clocks_disable(void) -{ - int i; - - for (i = 0; i < NUM_BANKS; i++) { - struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; - - if (!chip) - continue; - - clk_disable(chip->clk); - } -} - -/* - * Called from the suspend/resume path to only keep the real wakeup interrupts - * (those that have had set_irq_wake() called on them) as wakeup interrupts, - * and not the rest of the interrupts which we needed to have as wakeups for - * cpuidle. - * - * PM ops are not used since this needs to be done at the end, after all the - * other drivers are done with their suspend callbacks. - */ -void nmk_gpio_wakeups_suspend(void) -{ - int i; - - for (i = 0; i < NUM_BANKS; i++) { - struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; - - if (!chip) - break; - - clk_enable(chip->clk); - - writel(chip->rwimsc & chip->real_wake, - chip->addr + NMK_GPIO_RWIMSC); - writel(chip->fwimsc & chip->real_wake, - chip->addr + NMK_GPIO_FWIMSC); - - clk_disable(chip->clk); - } -} - -void nmk_gpio_wakeups_resume(void) -{ - int i; - - for (i = 0; i < NUM_BANKS; i++) { - struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; - - if (!chip) - break; - - clk_enable(chip->clk); - - writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC); - writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC); - - clk_disable(chip->clk); - } -} - -/* - * Read the pull up/pull down status. - * A bit set in 'pull_up' means that pull up - * is selected if pull is enabled in PDIS register. - * Note: only pull up/down set via this driver can - * be detected due to HW limitations. 
- */ -void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up) -{ - if (gpio_bank < NUM_BANKS) { - struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank]; - - if (!chip) - return; - - *pull_up = chip->pull_up; - } -} - /* * We will allocate memory for the state container using devm* allocators * binding to the first device reaching this point, it doesn't matter if diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index d5bf9fae2ddd..5020ae534479 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -53,7 +53,7 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev, struct seq_file *s, const char *gname, unsigned pin, const struct pin_config_item *items, - int nitems) + int nitems, int *print_sep) { int i; @@ -75,8 +75,10 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev, seq_printf(s, "ERROR READING CONFIG SETTING %d ", i); continue; } - /* Space between multiple configs */ - seq_puts(s, " "); + /* comma between multiple configs */ + if (*print_sep) + seq_puts(s, ", "); + *print_sep = 1; seq_puts(s, items[i].display); /* Print unit if available */ if (items[i].has_arg) { @@ -105,19 +107,21 @@ void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev, struct seq_file *s, const char *gname, unsigned pin) { const struct pinconf_ops *ops = pctldev->desc->confops; + int print_sep = 0; if (!ops->is_generic) return; /* generic parameters */ pinconf_generic_dump_one(pctldev, s, gname, pin, conf_items, - ARRAY_SIZE(conf_items)); + ARRAY_SIZE(conf_items), &print_sep); /* driver-specific parameters */ if (pctldev->desc->num_custom_params && pctldev->desc->custom_conf_items) pinconf_generic_dump_one(pctldev, s, gname, pin, pctldev->desc->custom_conf_items, - pctldev->desc->num_custom_params); + pctldev->desc->num_custom_params, + &print_sep); } void pinconf_generic_dump_config(struct pinctrl_dev *pctldev, @@ -391,4 +395,12 @@ exit: } EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map); +void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, + unsigned num_maps) +{ + pinctrl_utils_free_map(pctldev, map, num_maps); +} +EXPORT_SYMBOL_GPL(pinconf_generic_dt_free_map); + #endif diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index 4dd7722f9935..799048f3c8d4 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c @@ -258,8 +258,7 @@ void pinconf_show_setting(struct seq_file *s, case PIN_MAP_TYPE_CONFIGS_PIN: desc = pin_desc_get(setting->pctldev, setting->data.configs.group_or_pin); - seq_printf(s, "pin %s (%d)", - desc->name ? desc->name : "unnamed", + seq_printf(s, "pin %s (%d)", desc->name, setting->data.configs.group_or_pin); break; case PIN_MAP_TYPE_CONFIGS_GROUP: @@ -311,8 +310,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what) if (desc == NULL) continue; - seq_printf(s, "pin %d (%s):", pin, - desc->name ? 
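pinconf_generic_dt_free_map(), added above, is a thin exported wrapper around pinctrl_utils_free_map(), so generic-pinconf drivers can point their .dt_free_map at a library function instead of rolling their own. A sketch of the wiring (the foo_* names are illustrative):

static const struct pinctrl_ops foo_pctlops = {
	.get_groups_count = foo_get_groups_count,
	.get_group_name = foo_get_group_name,
	.get_group_pins = foo_get_group_pins,
	.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
	.dt_free_map = pinconf_generic_dt_free_map,
};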
desc->name : "unnamed"); + seq_printf(s, "pin %d (%s): ", pin, desc->name); pinconf_dump_pin(pctldev, s, pin); @@ -349,7 +347,7 @@ static int pinconf_groups_show(struct seq_file *s, void *what) while (selector < ngroups) { const char *gname = pctlops->get_group_name(pctldev, selector); - seq_printf(s, "%u (%s):", selector, gname); + seq_printf(s, "%u (%s): ", selector, gname); pinconf_dump_group(pctldev, s, selector, gname); seq_printf(s, "\n"); diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index a025b40d246b..28bbc1bb9e6c 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -20,7 +20,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinconf.h> @@ -421,8 +421,8 @@ static int atmel_pctl_get_group_pins(struct pinctrl_dev *pctldev, return 0; } -struct atmel_group *atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev, - unsigned pin) +static struct atmel_group * +atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev, unsigned pin) { struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev); int i; @@ -879,7 +879,6 @@ static const struct of_device_id atmel_pctrl_of_match[] = { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, atmel_pctrl_of_match); static int atmel_pinctrl_probe(struct platform_device *pdev) { @@ -1074,28 +1073,13 @@ clk_prepare_enable_error: return ret; } -int atmel_pinctrl_remove(struct platform_device *pdev) -{ - struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev); - - irq_domain_remove(atmel_pioctrl->irq_domain); - clk_disable_unprepare(atmel_pioctrl->clk); - gpiochip_remove(atmel_pioctrl->gpio_chip); - - return 0; -} - static struct platform_driver atmel_pinctrl_driver = { .driver = { .name = "pinctrl-at91-pio4", .of_match_table = atmel_pctrl_of_match, .pm = &atmel_pctrl_pm_ops, + .suppress_bind_attrs = true, }, .probe = atmel_pinctrl_probe, - .remove = atmel_pinctrl_remove, }; -module_platform_driver(atmel_pinctrl_driver); - -MODULE_AUTHOR(Ludovic Desroches <ludovic.desroches@atmel.com>); -MODULE_DESCRIPTION("Atmel PIO4 pinctrl driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(atmel_pinctrl_driver); diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index b7c0d6f7c046..80daead3a5a1 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -9,7 +9,6 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/init.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> @@ -189,7 +188,7 @@ struct at91_pinctrl { struct at91_pinctrl_mux_ops *ops; }; -static const inline struct at91_pin_group *at91_pinctrl_find_group_by_name( +static inline const struct at91_pin_group *at91_pinctrl_find_group_by_name( const struct at91_pinctrl *info, const char *name) { @@ -1818,13 +1817,3 @@ static int __init at91_pinctrl_init(void) return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } arch_initcall(at91_pinctrl_init); - -static void __exit at91_pinctrl_exit(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} - -module_exit(at91_pinctrl_exit); -MODULE_AUTHOR("Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>"); -MODULE_DESCRIPTION("Atmel AT91 pinctrl driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-digicolor.c 
b/drivers/pinctrl/pinctrl-digicolor.c index 30ee56427f56..639a57ecc7c2 100644 --- a/drivers/pinctrl/pinctrl-digicolor.c +++ b/drivers/pinctrl/pinctrl-digicolor.c @@ -15,7 +15,7 @@ * - Pin pad configuration (pull up/down, strength) */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_device.h> @@ -335,27 +335,17 @@ static int dc_pinctrl_probe(struct platform_device *pdev) return dc_gpiochip_add(pmap, pdev->dev.of_node); } -static int dc_pinctrl_remove(struct platform_device *pdev) -{ - struct dc_pinmap *pmap = platform_get_drvdata(pdev); - - gpiochip_remove(&pmap->chip); - - return 0; -} - static const struct of_device_id dc_pinctrl_ids[] = { { .compatible = "cnxt,cx92755-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, dc_pinctrl_ids); static struct platform_driver dc_pinctrl_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = dc_pinctrl_ids, + .suppress_bind_attrs = true, }, .probe = dc_pinctrl_probe, - .remove = dc_pinctrl_remove, }; -module_platform_driver(dc_pinctrl_driver); +builtin_platform_driver(dc_pinctrl_driver); diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index 8a931c7ba2ff..e053f1fa5512 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -11,7 +11,7 @@ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -1365,31 +1365,17 @@ static int lpc18xx_scu_probe(struct platform_device *pdev) return 0; } -static int lpc18xx_scu_remove(struct platform_device *pdev) -{ - struct lpc18xx_scu_data *scu = platform_get_drvdata(pdev); - - clk_disable_unprepare(scu->clk); - - return 0; -} - static const struct of_device_id lpc18xx_scu_match[] = { { .compatible = "nxp,lpc1850-scu" }, {}, }; -MODULE_DEVICE_TABLE(of, lpc18xx_scu_match); static struct platform_driver lpc18xx_scu_driver = { .probe = lpc18xx_scu_probe, - .remove = lpc18xx_scu_remove, .driver = { .name = "lpc18xx-scu", .of_match_table = lpc18xx_scu_match, + .suppress_bind_attrs = true, }, }; -module_platform_driver(lpc18xx_scu_driver); - -MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>"); -MODULE_DESCRIPTION("Pinctrl driver for NXP LPC18xx/43xx SCU"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(lpc18xx_scu_driver); diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c new file mode 100644 index 000000000000..d9ff53e8f715 --- /dev/null +++ b/drivers/pinctrl/pinctrl-max77620.c @@ -0,0 +1,673 @@ +/* + * MAX77620 pin control driver. + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Author: + * Chaitanya Bandi <bandik@nvidia.com> + * Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#include <linux/mfd/max77620.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "core.h" +#include "pinconf.h" +#include "pinctrl-utils.h" + +#define MAX77620_PIN_NUM 8 + +enum max77620_pin_ppdrv { + MAX77620_PIN_UNCONFIG_DRV, + MAX77620_PIN_OD_DRV, + MAX77620_PIN_PP_DRV, +}; + +enum max77620_pinconf_param { + MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1, + MAX77620_ACTIVE_FPS_POWER_ON_SLOTS, + MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS, + MAX77620_SUSPEND_FPS_SOURCE, + MAX77620_SUSPEND_FPS_POWER_ON_SLOTS, + MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS, +}; + +struct max77620_pin_function { + const char *name; + const char * const *groups; + unsigned int ngroups; + int mux_option; +}; + +static const struct pinconf_generic_params max77620_cfg_params[] = { + { + .property = "maxim,active-fps-source", + .param = MAX77620_ACTIVE_FPS_SOURCE, + }, { + .property = "maxim,active-fps-power-up-slot", + .param = MAX77620_ACTIVE_FPS_POWER_ON_SLOTS, + }, { + .property = "maxim,active-fps-power-down-slot", + .param = MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS, + }, { + .property = "maxim,suspend-fps-source", + .param = MAX77620_SUSPEND_FPS_SOURCE, + }, { + .property = "maxim,suspend-fps-power-up-slot", + .param = MAX77620_SUSPEND_FPS_POWER_ON_SLOTS, + }, { + .property = "maxim,suspend-fps-power-down-slot", + .param = MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS, + }, +}; + +enum max77620_alternate_pinmux_option { + MAX77620_PINMUX_GPIO = 0, + MAX77620_PINMUX_LOW_POWER_MODE_CONTROL_IN = 1, + MAX77620_PINMUX_FLEXIBLE_POWER_SEQUENCER_OUT = 2, + MAX77620_PINMUX_32K_OUT1 = 3, + MAX77620_PINMUX_SD0_DYNAMIC_VOLTAGE_SCALING_IN = 4, + MAX77620_PINMUX_SD1_DYNAMIC_VOLTAGE_SCALING_IN = 5, + MAX77620_PINMUX_REFERENCE_OUT = 6, +}; + +struct max77620_pingroup { + const char *name; + const unsigned int pins[1]; + unsigned int npins; + enum max77620_alternate_pinmux_option alt_option; +}; + +struct max77620_pin_info { + enum max77620_pin_ppdrv drv_type; + int pull_config; +}; + +struct max77620_fps_config { + int active_fps_src; + int active_power_up_slots; + int active_power_down_slots; + int suspend_fps_src; + int suspend_power_up_slots; + int suspend_power_down_slots; +}; + +struct max77620_pctrl_info { + struct device *dev; + struct pinctrl_dev *pctl; + struct regmap *rmap; + int pins_current_opt[MAX77620_GPIO_NR]; + const struct max77620_pin_function *functions; + unsigned int num_functions; + const struct max77620_pingroup *pin_groups; + int num_pin_groups; + const struct pinctrl_pin_desc *pins; + unsigned int num_pins; + struct max77620_pin_info pin_info[MAX77620_PIN_NUM]; + struct max77620_fps_config fps_config[MAX77620_PIN_NUM]; +}; + +static const struct pinctrl_pin_desc max77620_pins_desc[] = { + PINCTRL_PIN(MAX77620_GPIO0, "gpio0"), + PINCTRL_PIN(MAX77620_GPIO1, "gpio1"), + PINCTRL_PIN(MAX77620_GPIO2, "gpio2"), + PINCTRL_PIN(MAX77620_GPIO3, "gpio3"), + PINCTRL_PIN(MAX77620_GPIO4, "gpio4"), + PINCTRL_PIN(MAX77620_GPIO5, "gpio5"), + PINCTRL_PIN(MAX77620_GPIO6, "gpio6"), + PINCTRL_PIN(MAX77620_GPIO7, "gpio7"), +}; + +static const char * const gpio_groups[] = { + "gpio0", + "gpio1", + "gpio2", + "gpio3", + "gpio4", + "gpio5", + "gpio6", + "gpio7", +}; + +#define FUNCTION_GROUP(fname, mux) \ + { \ + .name = fname, \ + .groups = gpio_groups, \ + .ngroups = ARRAY_SIZE(gpio_groups), \ + .mux_option = 
MAX77620_PINMUX_##mux, \ + } + +static const struct max77620_pin_function max77620_pin_function[] = { + FUNCTION_GROUP("gpio", GPIO), + FUNCTION_GROUP("lpm-control-in", LOW_POWER_MODE_CONTROL_IN), + FUNCTION_GROUP("fps-out", FLEXIBLE_POWER_SEQUENCER_OUT), + FUNCTION_GROUP("32k-out1", 32K_OUT1), + FUNCTION_GROUP("sd0-dvs-in", SD0_DYNAMIC_VOLTAGE_SCALING_IN), + FUNCTION_GROUP("sd1-dvs-in", SD1_DYNAMIC_VOLTAGE_SCALING_IN), + FUNCTION_GROUP("reference-out", REFERENCE_OUT), +}; + +#define MAX77620_PINGROUP(pg_name, pin_id, option) \ + { \ + .name = #pg_name, \ + .pins = {MAX77620_##pin_id}, \ + .npins = 1, \ + .alt_option = MAX77620_PINMUX_##option, \ + } + +static const struct max77620_pingroup max77620_pingroups[] = { + MAX77620_PINGROUP(gpio0, GPIO0, LOW_POWER_MODE_CONTROL_IN), + MAX77620_PINGROUP(gpio1, GPIO1, FLEXIBLE_POWER_SEQUENCER_OUT), + MAX77620_PINGROUP(gpio2, GPIO2, FLEXIBLE_POWER_SEQUENCER_OUT), + MAX77620_PINGROUP(gpio3, GPIO3, FLEXIBLE_POWER_SEQUENCER_OUT), + MAX77620_PINGROUP(gpio4, GPIO4, 32K_OUT1), + MAX77620_PINGROUP(gpio5, GPIO5, SD0_DYNAMIC_VOLTAGE_SCALING_IN), + MAX77620_PINGROUP(gpio6, GPIO6, SD1_DYNAMIC_VOLTAGE_SCALING_IN), + MAX77620_PINGROUP(gpio7, GPIO7, REFERENCE_OUT), +}; + +static int max77620_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + return mpci->num_pin_groups; +} + +static const char *max77620_pinctrl_get_group_name( + struct pinctrl_dev *pctldev, unsigned int group) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + return mpci->pin_groups[group].name; +} + +static int max77620_pinctrl_get_group_pins( + struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *num_pins) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + *pins = mpci->pin_groups[group].pins; + *num_pins = mpci->pin_groups[group].npins; + + return 0; +} + +static const struct pinctrl_ops max77620_pinctrl_ops = { + .get_groups_count = max77620_pinctrl_get_groups_count, + .get_group_name = max77620_pinctrl_get_group_name, + .get_group_pins = max77620_pinctrl_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int max77620_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + return mpci->num_functions; +} + +static const char *max77620_pinctrl_get_func_name(struct pinctrl_dev *pctldev, + unsigned int function) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + return mpci->functions[function].name; +} + +static int max77620_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, + unsigned int function, + const char * const **groups, + unsigned int * const num_groups) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + + *groups = mpci->functions[function].groups; + *num_groups = mpci->functions[function].ngroups; + + return 0; +} + +static int max77620_pinctrl_enable(struct pinctrl_dev *pctldev, + unsigned int function, unsigned int group) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + u8 val; + int ret; + + if (function == MAX77620_PINMUX_GPIO) { + val = 0; + } else if (function == mpci->pin_groups[group].alt_option) { + val = 1 << group; + } else { + dev_err(mpci->dev, "GPIO %u doesn't have function %u\n", + group, function); + return -EINVAL; + } + ret = regmap_update_bits(mpci->rmap, 
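/* one AME bit per pin: set selects the alternate function, clear selects GPIO */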
MAX77620_REG_AME_GPIO, + BIT(group), val); + if (ret < 0) + dev_err(mpci->dev, "REG AME GPIO update failed: %d\n", ret); + + return ret; +} + +static const struct pinmux_ops max77620_pinmux_ops = { + .get_functions_count = max77620_pinctrl_get_funcs_count, + .get_function_name = max77620_pinctrl_get_func_name, + .get_function_groups = max77620_pinctrl_get_func_groups, + .set_mux = max77620_pinctrl_enable, +}; + +static int max77620_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = mpci->dev; + enum pin_config_param param = pinconf_to_config_param(*config); + unsigned int val; + int arg = 0; + int ret; + + switch (param) { + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (mpci->pin_info[pin].drv_type == MAX77620_PIN_OD_DRV) + arg = 1; + break; + + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (mpci->pin_info[pin].drv_type == MAX77620_PIN_PP_DRV) + arg = 1; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + ret = regmap_read(mpci->rmap, MAX77620_REG_PUE_GPIO, &val); + if (ret < 0) { + dev_err(dev, "Reg PUE_GPIO read failed: %d\n", ret); + return ret; + } + if (val & BIT(pin)) + arg = 1; + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + ret = regmap_read(mpci->rmap, MAX77620_REG_PDE_GPIO, &val); + if (ret < 0) { + dev_err(dev, "Reg PDE_GPIO read failed: %d\n", ret); + return ret; + } + if (val & BIT(pin)) + arg = 1; + break; + + default: + dev_err(dev, "Properties not supported\n"); + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, (u16)arg); + + return 0; +} + +static int max77620_get_default_fps(struct max77620_pctrl_info *mpci, + int addr, int *fps) +{ + unsigned int val; + int ret; + + ret = regmap_read(mpci->rmap, addr, &val); + if (ret < 0) { + dev_err(mpci->dev, "Reg PUE_GPIO read failed: %d\n", ret); + return ret; + } + *fps = (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT; + + return 0; +} + +static int max77620_set_fps_param(struct max77620_pctrl_info *mpci, + int pin, int param) +{ + struct max77620_fps_config *fps_config = &mpci->fps_config[pin]; + int addr, ret; + int param_val; + int mask, shift; + + if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3)) + return 0; + + addr = MAX77620_REG_FPS_GPIO1 + pin - 1; + switch (param) { + case MAX77620_ACTIVE_FPS_SOURCE: + case MAX77620_SUSPEND_FPS_SOURCE: + mask = MAX77620_FPS_SRC_MASK; + shift = MAX77620_FPS_SRC_SHIFT; + param_val = fps_config->active_fps_src; + if (param == MAX77620_SUSPEND_FPS_SOURCE) + param_val = fps_config->suspend_fps_src; + break; + + case MAX77620_ACTIVE_FPS_POWER_ON_SLOTS: + case MAX77620_SUSPEND_FPS_POWER_ON_SLOTS: + mask = MAX77620_FPS_PU_PERIOD_MASK; + shift = MAX77620_FPS_PU_PERIOD_SHIFT; + param_val = fps_config->active_power_up_slots; + if (param == MAX77620_SUSPEND_FPS_POWER_ON_SLOTS) + param_val = fps_config->suspend_power_up_slots; + break; + + case MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS: + case MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS: + mask = MAX77620_FPS_PD_PERIOD_MASK; + shift = MAX77620_FPS_PD_PERIOD_SHIFT; + param_val = fps_config->active_power_down_slots; + if (param == MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS) + param_val = fps_config->suspend_power_down_slots; + break; + + default: + dev_err(mpci->dev, "Invalid parameter %d for pin %d\n", + param, pin); + return -EINVAL; + } + + if (param_val < 0) + return 0; + + ret = regmap_update_bits(mpci->rmap, addr, mask, param_val << shift); + if (ret < 0) + dev_err(mpci->dev, "Reg 0x%02x update failed %d\n", addr, ret); + + 
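/* 0 on success, or the negative regmap error from the update above */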
return ret; +} + +static int max77620_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = mpci->dev; + struct max77620_fps_config *fps_config; + int param; + u16 param_val; + unsigned int val; + unsigned int pu_val; + unsigned int pd_val; + int addr, ret; + int i; + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + param_val = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + val = param_val ? 0 : 1; + ret = regmap_update_bits(mpci->rmap, + MAX77620_REG_GPIO0 + pin, + MAX77620_CNFG_GPIO_DRV_MASK, + val); + if (ret < 0) { + dev_err(dev, "Reg 0x%02x update failed %d\n", + MAX77620_REG_GPIO0 + pin, ret); + return ret; + } + mpci->pin_info[pin].drv_type = val ? + MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV; + break; + + case PIN_CONFIG_DRIVE_PUSH_PULL: + val = param_val ? 1 : 0; + ret = regmap_update_bits(mpci->rmap, + MAX77620_REG_GPIO0 + pin, + MAX77620_CNFG_GPIO_DRV_MASK, + val); + if (ret < 0) { + dev_err(dev, "Reg 0x%02x update failed %d\n", + MAX77620_REG_GPIO0 + pin, ret); + return ret; + } + mpci->pin_info[pin].drv_type = val ? + MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV; + break; + + case MAX77620_ACTIVE_FPS_SOURCE: + case MAX77620_ACTIVE_FPS_POWER_ON_SLOTS: + case MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS: + if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3)) + return -EINVAL; + + fps_config = &mpci->fps_config[pin]; + + if ((param == MAX77620_ACTIVE_FPS_SOURCE) && + (param_val == MAX77620_FPS_SRC_DEF)) { + addr = MAX77620_REG_FPS_GPIO1 + pin - 1; + ret = max77620_get_default_fps( + mpci, addr, + &fps_config->active_fps_src); + if (ret < 0) + return ret; + break; + } + + if (param == MAX77620_ACTIVE_FPS_SOURCE) + fps_config->active_fps_src = param_val; + else if (param == MAX77620_ACTIVE_FPS_POWER_ON_SLOTS) + fps_config->active_power_up_slots = param_val; + else + fps_config->active_power_down_slots = param_val; + + ret = max77620_set_fps_param(mpci, pin, param); + if (ret < 0) + return ret; + break; + + case MAX77620_SUSPEND_FPS_SOURCE: + case MAX77620_SUSPEND_FPS_POWER_ON_SLOTS: + case MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS: + if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3)) + return -EINVAL; + + fps_config = &mpci->fps_config[pin]; + + if ((param == MAX77620_SUSPEND_FPS_SOURCE) && + (param_val == MAX77620_FPS_SRC_DEF)) { + addr = MAX77620_REG_FPS_GPIO1 + pin - 1; + ret = max77620_get_default_fps( + mpci, addr, + &fps_config->suspend_fps_src); + if (ret < 0) + return ret; + break; + } + + if (param == MAX77620_SUSPEND_FPS_SOURCE) + fps_config->suspend_fps_src = param_val; + else if (param == MAX77620_SUSPEND_FPS_POWER_ON_SLOTS) + fps_config->suspend_power_up_slots = param_val; + else + fps_config->suspend_power_down_slots = + param_val; + break; + + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + pu_val = (param == PIN_CONFIG_BIAS_PULL_UP) ? + BIT(pin) : 0; + pd_val = (param == PIN_CONFIG_BIAS_PULL_DOWN) ? 
+ BIT(pin) : 0; + + ret = regmap_update_bits(mpci->rmap, + MAX77620_REG_PUE_GPIO, + BIT(pin), pu_val); + if (ret < 0) { + dev_err(dev, "PUE_GPIO update failed: %d\n", + ret); + return ret; + } + + ret = regmap_update_bits(mpci->rmap, + MAX77620_REG_PDE_GPIO, + BIT(pin), pd_val); + if (ret < 0) { + dev_err(dev, "PDE_GPIO update failed: %d\n", + ret); + return ret; + } + break; + + default: + dev_err(dev, "Properties not supported\n"); + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops max77620_pinconf_ops = { + .pin_config_get = max77620_pinconf_get, + .pin_config_set = max77620_pinconf_set, +}; + +static struct pinctrl_desc max77620_pinctrl_desc = { + .pctlops = &max77620_pinctrl_ops, + .pmxops = &max77620_pinmux_ops, + .confops = &max77620_pinconf_ops, +}; + +static int max77620_pinctrl_probe(struct platform_device *pdev) +{ + struct max77620_chip *max77620 = dev_get_drvdata(pdev->dev.parent); + struct max77620_pctrl_info *mpci; + int i; + + mpci = devm_kzalloc(&pdev->dev, sizeof(*mpci), GFP_KERNEL); + if (!mpci) + return -ENOMEM; + + mpci->dev = &pdev->dev; + mpci->dev->of_node = pdev->dev.parent->of_node; + mpci->rmap = max77620->rmap; + + mpci->pins = max77620_pins_desc; + mpci->num_pins = ARRAY_SIZE(max77620_pins_desc); + mpci->functions = max77620_pin_function; + mpci->num_functions = ARRAY_SIZE(max77620_pin_function); + mpci->pin_groups = max77620_pingroups; + mpci->num_pin_groups = ARRAY_SIZE(max77620_pingroups); + platform_set_drvdata(pdev, mpci); + + max77620_pinctrl_desc.name = dev_name(&pdev->dev); + max77620_pinctrl_desc.pins = max77620_pins_desc; + max77620_pinctrl_desc.npins = ARRAY_SIZE(max77620_pins_desc); + max77620_pinctrl_desc.num_custom_params = + ARRAY_SIZE(max77620_cfg_params); + max77620_pinctrl_desc.custom_params = max77620_cfg_params; + + for (i = 0; i < MAX77620_PIN_NUM; ++i) { + mpci->fps_config[i].active_fps_src = -1; + mpci->fps_config[i].active_power_up_slots = -1; + mpci->fps_config[i].active_power_down_slots = -1; + mpci->fps_config[i].suspend_fps_src = -1; + mpci->fps_config[i].suspend_power_up_slots = -1; + mpci->fps_config[i].suspend_power_down_slots = -1; + } + + mpci->pctl = devm_pinctrl_register(&pdev->dev, &max77620_pinctrl_desc, + mpci); + if (IS_ERR(mpci->pctl)) { + dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); + return PTR_ERR(mpci->pctl); + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int max77620_suspend_fps_param[] = { + MAX77620_SUSPEND_FPS_SOURCE, + MAX77620_SUSPEND_FPS_POWER_ON_SLOTS, + MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS, +}; + +static int max77620_active_fps_param[] = { + MAX77620_ACTIVE_FPS_SOURCE, + MAX77620_ACTIVE_FPS_POWER_ON_SLOTS, + MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS, +}; + +static int max77620_pinctrl_suspend(struct device *dev) +{ + struct max77620_pctrl_info *mpci = dev_get_drvdata(dev); + int pin, p; + + for (pin = 0; pin < MAX77620_PIN_NUM; ++pin) { + if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3)) + continue; + for (p = 0; p < 3; ++p) + max77620_set_fps_param( + mpci, pin, max77620_suspend_fps_param[p]); + } + + return 0; +}; + +static int max77620_pinctrl_resume(struct device *dev) +{ + struct max77620_pctrl_info *mpci = dev_get_drvdata(dev); + int pin, p; + + for (pin = 0; pin < MAX77620_PIN_NUM; ++pin) { + if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3)) + continue; + for (p = 0; p < 3; ++p) + max77620_set_fps_param( + mpci, pin, max77620_active_fps_param[p]); + } + + return 0; +} +#endif + +static const struct dev_pm_ops max77620_pinctrl_pm_ops = { + 
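/* swap between the suspend and active FPS settings across system sleep */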
SET_SYSTEM_SLEEP_PM_OPS( + max77620_pinctrl_suspend, max77620_pinctrl_resume) +}; + +static const struct platform_device_id max77620_pinctrl_devtype[] = { + { .name = "max77620-pinctrl", }, + { .name = "max20024-pinctrl", }, + {}, +}; +MODULE_DEVICE_TABLE(platform, max77620_pinctrl_devtype); + +static struct platform_driver max77620_pinctrl_driver = { + .driver = { + .name = "max77620-pinctrl", + .pm = &max77620_pinctrl_pm_ops, + }, + .probe = max77620_pinctrl_probe, + .id_table = max77620_pinctrl_devtype, +}; + +module_platform_driver(max77620_pinctrl_driver); + +MODULE_DESCRIPTION("MAX77620/MAX20024 pin control driver"); +MODULE_AUTHOR("Chaitanya Bandi<bandik@nvidia.com>"); +MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>"); +MODULE_ALIAS("platform:max77620-pinctrl"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c new file mode 100644 index 000000000000..917a7d2535d7 --- /dev/null +++ b/drivers/pinctrl/pinctrl-oxnas.c @@ -0,0 +1,846 @@ +/* + * Oxford Semiconductor OXNAS SoC Family pinctrl driver + * + * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com> + * + * Based on pinctrl-pic32.c + * Joshua Henderson, <joshua.henderson@microchip.com> + * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ +#include <linux/gpio/driver.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> + +#include "pinctrl-utils.h" + +#define PINS_PER_BANK 32 + +#define GPIO_BANK_START(bank) ((bank) * PINS_PER_BANK) + +/* Regmap Offsets */ +#define PINMUX_PRIMARY_SEL0 0x0c +#define PINMUX_SECONDARY_SEL0 0x14 +#define PINMUX_TERTIARY_SEL0 0x8c +#define PINMUX_PRIMARY_SEL1 0x10 +#define PINMUX_SECONDARY_SEL1 0x18 +#define PINMUX_TERTIARY_SEL1 0x90 +#define PINMUX_PULLUP_CTRL0 0xac +#define PINMUX_PULLUP_CTRL1 0xb0 + +/* GPIO Registers */ +#define INPUT_VALUE 0x00 +#define OUTPUT_EN 0x04 +#define IRQ_PENDING 0x0c +#define OUTPUT_SET 0x14 +#define OUTPUT_CLEAR 0x18 +#define OUTPUT_EN_SET 0x1c +#define OUTPUT_EN_CLEAR 0x20 +#define RE_IRQ_ENABLE 0x28 +#define FE_IRQ_ENABLE 0x2c + +struct oxnas_function { + const char *name; + const char * const *groups; + unsigned int ngroups; +}; + +struct oxnas_pin_group { + const char *name; + unsigned int pin; + unsigned int bank; + struct oxnas_desc_function *functions; +}; + +struct oxnas_desc_function { + const char *name; + unsigned int fct; +}; + +struct oxnas_gpio_bank { + void __iomem *reg_base; + struct gpio_chip gpio_chip; + struct irq_chip irq_chip; + unsigned int id; +}; + +struct oxnas_pinctrl { + struct regmap *regmap; + struct device *dev; + struct pinctrl_dev *pctldev; + const struct pinctrl_pin_desc *pins; + unsigned int npins; + const struct oxnas_function *functions; + unsigned int nfunctions; + 
const struct oxnas_pin_group *groups; + unsigned int ngroups; + struct oxnas_gpio_bank *gpio_banks; + unsigned int nbanks; +}; + +static const struct pinctrl_pin_desc oxnas_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), + PINCTRL_PIN(14, "gpio14"), + PINCTRL_PIN(15, "gpio15"), + PINCTRL_PIN(16, "gpio16"), + PINCTRL_PIN(17, "gpio17"), + PINCTRL_PIN(18, "gpio18"), + PINCTRL_PIN(19, "gpio19"), + PINCTRL_PIN(20, "gpio20"), + PINCTRL_PIN(21, "gpio21"), + PINCTRL_PIN(22, "gpio22"), + PINCTRL_PIN(23, "gpio23"), + PINCTRL_PIN(24, "gpio24"), + PINCTRL_PIN(25, "gpio25"), + PINCTRL_PIN(26, "gpio26"), + PINCTRL_PIN(27, "gpio27"), + PINCTRL_PIN(28, "gpio28"), + PINCTRL_PIN(29, "gpio29"), + PINCTRL_PIN(30, "gpio30"), + PINCTRL_PIN(31, "gpio31"), + PINCTRL_PIN(32, "gpio32"), + PINCTRL_PIN(33, "gpio33"), + PINCTRL_PIN(34, "gpio34"), +}; + +static const char * const oxnas_fct0_group[] = { + "gpio0", "gpio1", "gpio2", "gpio3", + "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", "gpio15", + "gpio16", "gpio17", "gpio18", "gpio19", + "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24", "gpio25", "gpio26", "gpio27", + "gpio28", "gpio29", "gpio30", "gpio31", + "gpio32", "gpio33", "gpio34" +}; + +static const char * const oxnas_fct3_group[] = { + "gpio0", "gpio1", "gpio2", "gpio3", + "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", + "gpio20", + "gpio22", "gpio23", "gpio24", "gpio25", + "gpio26", "gpio27", "gpio28", "gpio29", + "gpio30", "gpio31", "gpio32", "gpio33", + "gpio34" +}; + +#define FUNCTION(_name, _gr) \ + { \ + .name = #_name, \ + .groups = oxnas_##_gr##_group, \ + .ngroups = ARRAY_SIZE(oxnas_##_gr##_group), \ + } + +static const struct oxnas_function oxnas_functions[] = { + FUNCTION(gpio, fct0), + FUNCTION(fct3, fct3), +}; + +#define OXNAS_PINCTRL_GROUP(_pin, _name, ...) 
\ + { \ + .name = #_name, \ + .pin = _pin, \ + .bank = _pin / PINS_PER_BANK, \ + .functions = (struct oxnas_desc_function[]){ \ + __VA_ARGS__, { } }, \ + } + +#define OXNAS_PINCTRL_FUNCTION(_name, _fct) \ + { \ + .name = #_name, \ + .fct = _fct, \ + } + +static const struct oxnas_pin_group oxnas_groups[] = { + OXNAS_PINCTRL_GROUP(0, gpio0, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(1, gpio1, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(2, gpio2, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(3, gpio3, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(4, gpio4, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(5, gpio5, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(6, gpio6, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(7, gpio7, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(8, gpio8, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(9, gpio9, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(10, gpio10, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(11, gpio11, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(12, gpio12, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(13, gpio13, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(14, gpio14, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(15, gpio15, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(16, gpio16, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(17, gpio17, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(18, gpio18, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(19, gpio19, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(20, gpio20, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(21, gpio21, + OXNAS_PINCTRL_FUNCTION(gpio, 0)), + OXNAS_PINCTRL_GROUP(22, gpio22, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(23, gpio23, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(24, gpio24, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(25, gpio25, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(26, gpio26, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(27, gpio27, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(28, gpio28, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(29, gpio29, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(30, gpio30, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(31, gpio31, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(32, gpio32, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(33, gpio33, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + OXNAS_PINCTRL_FUNCTION(fct3, 3)), + OXNAS_PINCTRL_GROUP(34, gpio34, + OXNAS_PINCTRL_FUNCTION(gpio, 0), + 
OXNAS_PINCTRL_FUNCTION(fct3, 3)), +}; + +static inline struct oxnas_gpio_bank *pctl_to_bank(struct oxnas_pinctrl *pctl, + unsigned int pin) +{ + return &pctl->gpio_banks[pin / PINS_PER_BANK]; +} + +static int oxnas_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->ngroups; +} + +static const char *oxnas_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->groups[group].name; +} + +static int oxnas_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int group, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + *pins = &pctl->groups[group].pin; + *num_pins = 1; + + return 0; +} + +static const struct pinctrl_ops oxnas_pinctrl_ops = { + .get_groups_count = oxnas_pinctrl_get_groups_count, + .get_group_name = oxnas_pinctrl_get_group_name, + .get_group_pins = oxnas_pinctrl_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int oxnas_pinmux_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->nfunctions; +} + +static const char * +oxnas_pinmux_get_function_name(struct pinctrl_dev *pctldev, unsigned int func) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->functions[func].name; +} + +static int oxnas_pinmux_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int func, + const char * const **groups, + unsigned int * const num_groups) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctl->functions[func].groups; + *num_groups = pctl->functions[func].ngroups; + + return 0; +} + +static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev, + unsigned int func, unsigned int group) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + const struct oxnas_pin_group *pg = &pctl->groups[group]; + const struct oxnas_function *pf = &pctl->functions[func]; + const char *fname = pf->name; + struct oxnas_desc_function *functions = pg->functions; + u32 mask = BIT(pg->pin); + + while (functions->name) { + if (!strcmp(functions->name, fname)) { + dev_dbg(pctl->dev, + "setting function %s bank %d pin %d fct %d mask %x\n", + fname, pg->bank, pg->pin, + functions->fct, mask); + + regmap_write_bits(pctl->regmap, + (pg->bank ? + PINMUX_PRIMARY_SEL1 : + PINMUX_PRIMARY_SEL0), + mask, + (functions->fct == 1 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + (pg->bank ? + PINMUX_SECONDARY_SEL1 : + PINMUX_SECONDARY_SEL0), + mask, + (functions->fct == 2 ? + mask : 0)); + regmap_write_bits(pctl->regmap, + (pg->bank ? + PINMUX_TERTIARY_SEL1 : + PINMUX_TERTIARY_SEL0), + mask, + (functions->fct == 3 ? 
+ mask : 0)); + + return 0; + } + + functions++; + } + + dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func); + + return -EINVAL; +} + +static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc); + u32 mask = BIT(offset - bank->gpio_chip.base); + + dev_dbg(pctl->dev, "requesting gpio %d in bank %d (id %d) with mask 0x%x\n", + offset, bank->gpio_chip.base, bank->id, mask); + + regmap_write_bits(pctl->regmap, + (bank->id ? + PINMUX_PRIMARY_SEL1 : + PINMUX_PRIMARY_SEL0), + mask, 0); + regmap_write_bits(pctl->regmap, + (bank->id ? + PINMUX_SECONDARY_SEL1 : + PINMUX_SECONDARY_SEL0), + mask, 0); + regmap_write_bits(pctl->regmap, + (bank->id ? + PINMUX_TERTIARY_SEL1 : + PINMUX_TERTIARY_SEL0), + mask, 0); + + return 0; +} + +static int oxnas_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(offset); + + return !(readl_relaxed(bank->reg_base + OUTPUT_EN) & mask); +} + +static int oxnas_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(offset); + + writel_relaxed(mask, bank->reg_base + OUTPUT_EN_CLEAR); + + return 0; +} + +static int oxnas_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(offset); + + return (readl_relaxed(bank->reg_base + INPUT_VALUE) & mask) != 0; +} + +static void oxnas_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(offset); + + if (value) + writel_relaxed(mask, bank->reg_base + OUTPUT_SET); + else + writel_relaxed(mask, bank->reg_base + OUTPUT_CLEAR); +} + +static int oxnas_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(offset); + + oxnas_gpio_set(chip, offset, value); + writel_relaxed(mask, bank->reg_base + OUTPUT_EN_SET); + + return 0; +} + +static int oxnas_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset, bool input) +{ + struct gpio_chip *chip = range->gc; + + if (input) + oxnas_gpio_direction_input(chip, offset); + else + oxnas_gpio_direction_output(chip, offset, 0); + + return 0; +} + +static const struct pinmux_ops oxnas_pinmux_ops = { + .get_functions_count = oxnas_pinmux_get_functions_count, + .get_function_name = oxnas_pinmux_get_function_name, + .get_function_groups = oxnas_pinmux_get_function_groups, + .set_mux = oxnas_pinmux_enable, + .gpio_request_enable = oxnas_gpio_request_enable, + .gpio_set_direction = oxnas_gpio_set_direction, +}; + +static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); + unsigned int param = pinconf_to_config_param(*config); + u32 mask = BIT(pin - bank->gpio_chip.base); + int ret; + u32 arg; + + switch (param) { + case PIN_CONFIG_BIAS_PULL_UP: + ret = regmap_read(pctl->regmap, + (bank->id ? 
+ PINMUX_PULLUP_CTRL1 : + PINMUX_PULLUP_CTRL0), + &arg); + if (ret) + return ret; + + arg = !!(arg & mask); + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin); + unsigned int param; + u32 arg; + unsigned int i; + u32 offset = pin - bank->gpio_chip.base; + u32 mask = BIT(offset); + + dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n", + pin, bank->gpio_chip.base, mask); + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_PULL_UP: + dev_dbg(pctl->dev, " pullup\n"); + regmap_write_bits(pctl->regmap, + (bank->id ? + PINMUX_PULLUP_CTRL1 : + PINMUX_PULLUP_CTRL0), + mask, mask); + break; + default: + dev_err(pctl->dev, "Property %u not supported\n", + param); + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops oxnas_pinconf_ops = { + .pin_config_get = oxnas_pinconf_get, + .pin_config_set = oxnas_pinconf_set, + .is_generic = true, +}; + +static struct pinctrl_desc oxnas_pinctrl_desc = { + .name = "oxnas-pinctrl", + .pctlops = &oxnas_pinctrl_ops, + .pmxops = &oxnas_pinmux_ops, + .confops = &oxnas_pinconf_ops, + .owner = THIS_MODULE, +}; + +static void oxnas_gpio_irq_ack(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + u32 mask = BIT(data->hwirq); + + writel(mask, bank->reg_base + IRQ_PENDING); +} + +static void oxnas_gpio_irq_mask(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + unsigned int type = irqd_get_trigger_type(data); + u32 mask = BIT(data->hwirq); + + if (type & IRQ_TYPE_EDGE_RISING) + writel(readl(bank->reg_base + RE_IRQ_ENABLE) & ~mask, + bank->reg_base + RE_IRQ_ENABLE); + + if (type & IRQ_TYPE_EDGE_FALLING) + writel(readl(bank->reg_base + FE_IRQ_ENABLE) & ~mask, + bank->reg_base + FE_IRQ_ENABLE); +} + +static void oxnas_gpio_irq_unmask(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct oxnas_gpio_bank *bank = gpiochip_get_data(chip); + unsigned int type = irqd_get_trigger_type(data); + u32 mask = BIT(data->hwirq); + + if (type & IRQ_TYPE_EDGE_RISING) + writel(readl(bank->reg_base + RE_IRQ_ENABLE) | mask, + bank->reg_base + RE_IRQ_ENABLE); + + if (type & IRQ_TYPE_EDGE_FALLING) + writel(readl(bank->reg_base + FE_IRQ_ENABLE) | mask, + bank->reg_base + FE_IRQ_ENABLE); +} + +static unsigned int oxnas_gpio_irq_startup(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + + oxnas_gpio_direction_input(chip, data->hwirq); + oxnas_gpio_irq_unmask(data); + + return 0; +} + +static int oxnas_gpio_irq_set_type(struct irq_data *data, unsigned int type) +{ + if ((type & (IRQ_TYPE_EDGE_RISING|IRQ_TYPE_EDGE_FALLING)) == 0) + return -EINVAL; + + irq_set_handler_locked(data, handle_edge_irq); + + return 0; +} + +static void oxnas_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct oxnas_gpio_bank *bank = gpiochip_get_data(gc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long stat; + 
unsigned int pin; + + chained_irq_enter(chip, desc); + + stat = readl(bank->reg_base + IRQ_PENDING); + + for_each_set_bit(pin, &stat, BITS_PER_LONG) + generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin)); + + chained_irq_exit(chip, desc); +} + +#define GPIO_BANK(_bank) \ + { \ + .gpio_chip = { \ + .label = "GPIO" #_bank, \ + .request = gpiochip_generic_request, \ + .free = gpiochip_generic_free, \ + .get_direction = oxnas_gpio_get_direction, \ + .direction_input = oxnas_gpio_direction_input, \ + .direction_output = oxnas_gpio_direction_output, \ + .get = oxnas_gpio_get, \ + .set = oxnas_gpio_set, \ + .ngpio = PINS_PER_BANK, \ + .base = GPIO_BANK_START(_bank), \ + .owner = THIS_MODULE, \ + .can_sleep = 0, \ + }, \ + .irq_chip = { \ + .name = "GPIO" #_bank, \ + .irq_startup = oxnas_gpio_irq_startup, \ + .irq_ack = oxnas_gpio_irq_ack, \ + .irq_mask = oxnas_gpio_irq_mask, \ + .irq_unmask = oxnas_gpio_irq_unmask, \ + .irq_set_type = oxnas_gpio_irq_set_type, \ + }, \ + } + +static struct oxnas_gpio_bank oxnas_gpio_banks[] = { + GPIO_BANK(0), + GPIO_BANK(1), +}; + +static int oxnas_pinctrl_probe(struct platform_device *pdev) +{ + struct oxnas_pinctrl *pctl; + + pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); + if (!pctl) + return -ENOMEM; + pctl->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, pctl); + + pctl->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "oxsemi,sys-ctrl"); + if (IS_ERR(pctl->regmap)) { + dev_err(&pdev->dev, "failed to get sys ctrl regmap\n"); + return -ENODEV; + } + + pctl->pins = oxnas_pins; + pctl->npins = ARRAY_SIZE(oxnas_pins); + pctl->functions = oxnas_functions; + pctl->nfunctions = ARRAY_SIZE(oxnas_functions); + pctl->groups = oxnas_groups; + pctl->ngroups = ARRAY_SIZE(oxnas_groups); + pctl->gpio_banks = oxnas_gpio_banks; + pctl->nbanks = ARRAY_SIZE(oxnas_gpio_banks); + + oxnas_pinctrl_desc.pins = pctl->pins; + oxnas_pinctrl_desc.npins = pctl->npins; + + pctl->pctldev = pinctrl_register(&oxnas_pinctrl_desc, + &pdev->dev, pctl); + if (IS_ERR(pctl->pctldev)) { + dev_err(&pdev->dev, "Failed to register pinctrl device\n"); + return PTR_ERR(pctl->pctldev); + } + + return 0; +} + +static int oxnas_gpio_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct of_phandle_args pinspec; + struct oxnas_gpio_bank *bank; + unsigned int id, ngpios; + int irq, ret; + struct resource *res; + + if (of_parse_phandle_with_fixed_args(np, "gpio-ranges", + 3, 0, &pinspec)) { + dev_err(&pdev->dev, "gpio-ranges property not found\n"); + return -EINVAL; + } + + id = pinspec.args[1] / PINS_PER_BANK; + ngpios = pinspec.args[2]; + + if (id >= ARRAY_SIZE(oxnas_gpio_banks)) { + dev_err(&pdev->dev, "invalid gpio-ranges base arg\n"); + return -EINVAL; + } + + if (ngpios > PINS_PER_BANK) { + dev_err(&pdev->dev, "invalid gpio-ranges count arg\n"); + return -EINVAL; + } + + bank = &oxnas_gpio_banks[id]; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bank->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(bank->reg_base)) + return PTR_ERR(bank->reg_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "irq get failed\n"); + return irq; + } + + bank->id = id; + bank->gpio_chip.parent = &pdev->dev; + bank->gpio_chip.of_node = np; + bank->gpio_chip.ngpio = ngpios; + ret = gpiochip_add_data(&bank->gpio_chip, bank); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n", + id, ret); + return ret; + } + + ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip, + 
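/* first_irq = */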
0, handle_level_irq, IRQ_TYPE_NONE); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n", + id, ret); + gpiochip_remove(&bank->gpio_chip); + return ret; + } + + gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip, + irq, oxnas_gpio_irq_handler); + + return 0; +} + +static const struct of_device_id oxnas_pinctrl_of_match[] = { + { .compatible = "oxsemi,ox810se-pinctrl", }, + { }, +}; + +static struct platform_driver oxnas_pinctrl_driver = { + .driver = { + .name = "oxnas-pinctrl", + .of_match_table = oxnas_pinctrl_of_match, + .suppress_bind_attrs = true, + }, + .probe = oxnas_pinctrl_probe, +}; + +static const struct of_device_id oxnas_gpio_of_match[] = { + { .compatible = "oxsemi,ox810se-gpio", }, + { }, +}; + +static struct platform_driver oxnas_gpio_driver = { + .driver = { + .name = "oxnas-gpio", + .of_match_table = oxnas_gpio_of_match, + .suppress_bind_attrs = true, + }, + .probe = oxnas_gpio_probe, +}; + +static int __init oxnas_gpio_register(void) +{ + return platform_driver_register(&oxnas_gpio_driver); +} +arch_initcall(oxnas_gpio_register); + +static int __init oxnas_pinctrl_register(void) +{ + return platform_driver_register(&oxnas_pinctrl_driver); +} +arch_initcall(oxnas_pinctrl_register); diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index a91026e8cd7c..44902c63f507 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -360,7 +360,7 @@ static struct regmap_config rockchip_regmap_config = { .reg_stride = 4, }; -static const inline struct rockchip_pin_group *pinctrl_name_to_group( +static inline const struct rockchip_pin_group *pinctrl_name_to_group( const struct rockchip_pinctrl *info, const char *name) { @@ -2007,7 +2007,7 @@ static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d) irq_gc_mask_clr_bit(d); } -void rockchip_irq_gc_mask_set_bit(struct irq_data *d) +static void rockchip_irq_gc_mask_set_bit(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct rockchip_pin_bank *bank = gc->private; diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index d0ba968af5bb..0de1c67dfb94 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -844,7 +844,7 @@ static int st_pctl_get_group_pins(struct pinctrl_dev *pctldev, return 0; } -static const inline struct st_pctl_group *st_pctl_find_group_by_name( +static inline const struct st_pctl_group *st_pctl_find_group_by_name( const struct st_pinctrl *info, const char *name) { int i; diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c index d1af908a7060..9cc80a500880 100644 --- a/drivers/pinctrl/pinctrl-u300.c +++ b/drivers/pinctrl/pinctrl-u300.c @@ -670,7 +670,7 @@ struct u300_pmx { * u300_pmx_registers - the array of registers read/written for each pinmux * shunt setting */ -const u32 u300_pmx_registers[] = { +static const u32 u300_pmx_registers[] = { U300_SYSCON_PMC1LR, U300_SYSCON_PMC1HR, U300_SYSCON_PMC2R, diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index b9375544dff0..dd85ad1807f5 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -1616,50 +1616,74 @@ struct pinctrl_xway_soc { /* xway xr9 series (DEPRECATED: Use XWAY xRX100/xRX200 Family) */ static struct pinctrl_xway_soc xr9_pinctrl = { - XR9_MAX_PIN, xway_mfp, - xway_grps, ARRAY_SIZE(xway_grps), - xrx_funcs, ARRAY_SIZE(xrx_funcs), - xway_exin_pin_map, 6 + .pin_count = XR9_MAX_PIN, + .mfp = xway_mfp, + 
.grps = xway_grps, + .num_grps = ARRAY_SIZE(xway_grps), + .funcs = xrx_funcs, + .num_funcs = ARRAY_SIZE(xrx_funcs), + .exin = xway_exin_pin_map, + .num_exin = 6 }; /* XWAY AMAZON Family */ static struct pinctrl_xway_soc ase_pinctrl = { - ASE_MAX_PIN, ase_mfp, - ase_grps, ARRAY_SIZE(ase_grps), - ase_funcs, ARRAY_SIZE(ase_funcs), - ase_exin_pin_map, 3 + .pin_count = ASE_MAX_PIN, + .mfp = ase_mfp, + .grps = ase_grps, + .num_grps = ARRAY_SIZE(ase_grps), + .funcs = ase_funcs, + .num_funcs = ARRAY_SIZE(ase_funcs), + .exin = ase_exin_pin_map, + .num_exin = 3 }; /* XWAY DANUBE Family */ static struct pinctrl_xway_soc danube_pinctrl = { - DANUBE_MAX_PIN, danube_mfp, - danube_grps, ARRAY_SIZE(danube_grps), - danube_funcs, ARRAY_SIZE(danube_funcs), - danube_exin_pin_map, 3 + .pin_count = DANUBE_MAX_PIN, + .mfp = danube_mfp, + .grps = danube_grps, + .num_grps = ARRAY_SIZE(danube_grps), + .funcs = danube_funcs, + .num_funcs = ARRAY_SIZE(danube_funcs), + .exin = danube_exin_pin_map, + .num_exin = 3 }; /* XWAY xRX100 Family */ static struct pinctrl_xway_soc xrx100_pinctrl = { - XRX100_MAX_PIN, xrx100_mfp, - xrx100_grps, ARRAY_SIZE(xrx100_grps), - xrx100_funcs, ARRAY_SIZE(xrx100_funcs), - xrx100_exin_pin_map, 6 + .pin_count = XRX100_MAX_PIN, + .mfp = xrx100_mfp, + .grps = xrx100_grps, + .num_grps = ARRAY_SIZE(xrx100_grps), + .funcs = xrx100_funcs, + .num_funcs = ARRAY_SIZE(xrx100_funcs), + .exin = xrx100_exin_pin_map, + .num_exin = 6 }; /* XWAY xRX200 Family */ static struct pinctrl_xway_soc xrx200_pinctrl = { - XRX200_MAX_PIN, xrx200_mfp, - xrx200_grps, ARRAY_SIZE(xrx200_grps), - xrx200_funcs, ARRAY_SIZE(xrx200_funcs), - xrx200_exin_pin_map, 6 + .pin_count = XRX200_MAX_PIN, + .mfp = xrx200_mfp, + .grps = xrx200_grps, + .num_grps = ARRAY_SIZE(xrx200_grps), + .funcs = xrx200_funcs, + .num_funcs = ARRAY_SIZE(xrx200_funcs), + .exin = xrx200_exin_pin_map, + .num_exin = 6 }; /* XWAY xRX300 Family */ static struct pinctrl_xway_soc xrx300_pinctrl = { - XRX300_MAX_PIN, xrx300_mfp, - xrx300_grps, ARRAY_SIZE(xrx300_grps), - xrx300_funcs, ARRAY_SIZE(xrx300_funcs), - xrx300_exin_pin_map, 5 + .pin_count = XRX300_MAX_PIN, + .mfp = xrx300_mfp, + .grps = xrx300_grps, + .num_grps = ARRAY_SIZE(xrx300_grps), + .funcs = xrx300_funcs, + .num_funcs = ARRAY_SIZE(xrx300_funcs), + .exin = xrx300_exin_pin_map, + .num_exin = 5 }; static struct pinctrl_gpio_range xway_gpio_range = { diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c index 8fdc60c5aeaf..7afdbede6823 100644 --- a/drivers/pinctrl/pinctrl-zynq.c +++ b/drivers/pinctrl/pinctrl-zynq.c @@ -20,7 +20,7 @@ */ #include <linux/io.h> #include <linux/mfd/syscon.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinctrl.h> @@ -1210,7 +1210,6 @@ static const struct of_device_id zynq_pinctrl_of_match[] = { { .compatible = "xlnx,pinctrl-zynq" }, { } }; -MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match); static struct platform_driver zynq_pinctrl_driver = { .driver = { @@ -1225,13 +1224,3 @@ static int __init zynq_pinctrl_init(void) return platform_driver_register(&zynq_pinctrl_driver); } arch_initcall(zynq_pinctrl_init); - -static void __exit zynq_pinctrl_exit(void) -{ - platform_driver_unregister(&zynq_pinctrl_driver); -} -module_exit(zynq_pinctrl_exit); - -MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>"); -MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 
c223a9ef1fe1..ece702881946 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -256,7 +256,7 @@ int pinmux_request_gpio(struct pinctrl_dev *pctldev, /* Conjure some name stating what chip and pin this is taken by */ owner = kasprintf(GFP_KERNEL, "%s:%d", range->name, gpio); if (!owner) - return -EINVAL; + return -ENOMEM; ret = pin_request(pctldev, pin, owner, range); if (ret < 0) @@ -606,23 +606,17 @@ static int pinmux_pins_show(struct seq_file *s, void *what) if (pmxops->strict) { if (desc->mux_owner) seq_printf(s, "pin %d (%s): device %s%s", - pin, - desc->name ? desc->name : "unnamed", - desc->mux_owner, + pin, desc->name, desc->mux_owner, is_hog ? " (HOG)" : ""); else if (desc->gpio_owner) seq_printf(s, "pin %d (%s): GPIO %s", - pin, - desc->name ? desc->name : "unnamed", - desc->gpio_owner); + pin, desc->name, desc->gpio_owner); else seq_printf(s, "pin %d (%s): UNCLAIMED", - pin, - desc->name ? desc->name : "unnamed"); + pin, desc->name); } else { /* For non-strict controllers */ - seq_printf(s, "pin %d (%s): %s %s%s", pin, - desc->name ? desc->name : "unnamed", + seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name, desc->mux_owner ? desc->mux_owner : "(MUX UNCLAIMED)", desc->gpio_owner ? desc->gpio_owner diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 67bc70dcda64..93ef268d5ccd 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -55,6 +55,14 @@ config PINCTRL_MSM8960 This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm TLMM block found in the Qualcomm 8960 platform. +config PINCTRL_MDM9615 + tristate "Qualcomm 9615 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm 9615 platform. + config PINCTRL_MSM8X74 tristate "Qualcomm 8x74 pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index c964a2c4b90a..8319e11cecb5 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o +obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c new file mode 100644 index 000000000000..2b8f4521692c --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2014, Sony Mobile Communications AB. + * Copyright (c) 2016 BayLibre, SAS. + * Author : Neil Armstrong <narmstrong@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include "pinctrl-msm.h" + +static const struct pinctrl_pin_desc mdm9615_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); 
+DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); + +#define FUNCTION(fname) \ + [MSM_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + MSM_MUX_gpio, \ + MSM_MUX_##f1, \ + MSM_MUX_##f2, \ + MSM_MUX_##f3, \ + MSM_MUX_##f4, \ + MSM_MUX_##f5, \ + MSM_MUX_##f6, \ + MSM_MUX_##f7, \ + MSM_MUX_##f8, \ + MSM_MUX_##f9, \ + MSM_MUX_##f10, \ + MSM_MUX_##f11 \ + }, \ + .nfuncs = 12, \ + .ctl_reg = 0x1000 + 0x10 * id, \ + .io_reg = 0x1004 + 0x10 * id, \ + .intr_cfg_reg = 0x1008 + 0x10 * id, \ + .intr_status_reg = 0x100c + 0x10 * id, \ + .intr_target_reg = 0x400 + 0x4 * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_ack_high = 1, \ + .intr_target_bit = 0, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 3, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 1, \ + } + +enum mdm9615_functions { + MSM_MUX_gpio, + MSM_MUX_gsbi2_i2c, + MSM_MUX_gsbi3, + MSM_MUX_gsbi4, + MSM_MUX_gsbi5_i2c, + MSM_MUX_gsbi5_uart, + MSM_MUX_sdc2, + MSM_MUX_ebi2_lcdc, + MSM_MUX_ps_hold, + MSM_MUX_prim_audio, + MSM_MUX_sec_audio, + MSM_MUX_cdc_mclk, + MSM_MUX_NA, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", 
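The PINGROUP() macro above encodes the MDM9615 TLMM register layout directly: each GPIO n owns a 16-byte block starting at 0x1000 (config at +0x0, I/O at +0x4, interrupt config/status at +0x8/+0xc), plus a per-pin word in a separate interrupt-target array at 0x400. The same arithmetic, spelled out as hypothetical helpers:

	#include <linux/types.h>

	static inline u32 mdm9615_ctl_reg(unsigned int n)
	{
		return 0x1000 + 0x10 * n;	/* mux at bit 2, pull/drive/OE */
	}

	static inline u32 mdm9615_intr_target_reg(unsigned int n)
	{
		return 0x400 + 0x4 * n;
	}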
"gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87" +}; + +static const char * const gsbi2_i2c_groups[] = { + "gpio4", "gpio5" +}; + +static const char * const gsbi3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11" +}; + +static const char * const gsbi4_groups[] = { + "gpio12", "gpio13", "gpio14", "gpio15" +}; + +static const char * const gsbi5_i2c_groups[] = { + "gpio16", "gpio17" +}; + +static const char * const gsbi5_uart_groups[] = { + "gpio18", "gpio19" +}; + +static const char * const sdc2_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", +}; + +static const char * const ebi2_lcdc_groups[] = { + "gpio21", "gpio22", "gpio24", +}; + +static const char * const ps_hold_groups[] = { + "gpio83", +}; + +static const char * const prim_audio_groups[] = { + "gpio20", "gpio21", "gpio22", "gpio23", +}; + +static const char * const sec_audio_groups[] = { + "gpio25", "gpio26", "gpio27", "gpio28", +}; + +static const char * const cdc_mclk_groups[] = { + "gpio24", +}; + +static const struct msm_function mdm9615_functions[] = { + FUNCTION(gpio), + FUNCTION(gsbi2_i2c), + FUNCTION(gsbi3), + FUNCTION(gsbi4), + FUNCTION(gsbi5_i2c), + FUNCTION(gsbi5_uart), + FUNCTION(sdc2), + FUNCTION(ebi2_lcdc), + FUNCTION(ps_hold), + FUNCTION(prim_audio), + FUNCTION(sec_audio), + FUNCTION(cdc_mclk), +}; + +static const struct msm_pingroup mdm9615_groups[] = { + PINGROUP(0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(4, gsbi2_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(5, gsbi2_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(6, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(8, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(9, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(10, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(11, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(12, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(13, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(14, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(15, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(16, gsbi5_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(17, gsbi5_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(18, gsbi5_uart, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(19, gsbi5_uart, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(20, prim_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(21, prim_audio, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(22, prim_audio, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(23, prim_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA, 
NA), + PINGROUP(24, cdc_mclk, NA, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(25, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(26, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(27, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(28, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(29, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(30, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(33, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(44, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(45, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(47, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(49, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(54, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(55, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(56, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(57, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + 
PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(83, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +}; + +#define NUM_GPIO_PINGROUPS 88 + +static const struct msm_pinctrl_soc_data mdm9615_pinctrl = { + .pins = mdm9615_pins, + .npins = ARRAY_SIZE(mdm9615_pins), + .functions = mdm9615_functions, + .nfunctions = ARRAY_SIZE(mdm9615_functions), + .groups = mdm9615_groups, + .ngroups = ARRAY_SIZE(mdm9615_groups), + .ngpios = NUM_GPIO_PINGROUPS, +}; + +static int mdm9615_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &mdm9615_pinctrl); +} + +static const struct of_device_id mdm9615_pinctrl_of_match[] = { + { .compatible = "qcom,mdm9615-pinctrl", }, + { }, +}; + +static struct platform_driver mdm9615_pinctrl_driver = { + .driver = { + .name = "mdm9615-pinctrl", + .of_match_table = mdm9615_pinctrl_of_match, + }, + .probe = mdm9615_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init mdm9615_pinctrl_init(void) +{ + return platform_driver_register(&mdm9615_pinctrl_driver); +} +arch_initcall(mdm9615_pinctrl_init); + +static void __exit mdm9615_pinctrl_exit(void) +{ + platform_driver_unregister(&mdm9615_pinctrl_driver); +} +module_exit(mdm9615_pinctrl_exit); + +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_DESCRIPTION("Qualcomm MDM9615 pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, mdm9615_pinctrl_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 1a44e1d03390..51c42d746883 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -29,6 +29,7 @@ #include <linux/spinlock.h> #include <linux/reboot.h> #include <linux/pm.h> +#include <linux/log2.h> #include "../core.h" #include "../pinconf.h" @@ -138,10 +139,11 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct msm_pingroup *g; unsigned long flags; - u32 val; + u32 val, mask; int i; g = &pctrl->soc->groups[group]; + mask = GENMASK(g->mux_bit + order_base_2(g->nfuncs) - 1, g->mux_bit); for (i = 0; i < g->nfuncs; i++) { if (g->funcs[i] == function) @@ -154,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); - val &= ~(0x7 << g->mux_bit); + val &= mask; val |= i << g->mux_bit; writel(val, pctrl->regs + g->ctl_reg); diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c index 3e8f7ac2ac8a..5591d093bf78 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8660.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c @@ -506,6 +506,8 @@ enum msm8660_functions { MSM_MUX_usb_fs2_oe_n, MSM_MUX_vfe, MSM_MUX_vsens_alarm, + MSM_MUX_ebi2cs, + MSM_MUX_ebi2, MSM_MUX__, }; @@ -696,6 +698,36 @@ static const char * const vfe_groups[] = { static const char * const vsens_alarm_groups[] = { "gpio127" }; +static const char * const ebi2cs_groups[] = { + "gpio39", /* CS1A */ + "gpio40", /* CS2A */ + "gpio123", /* CS1B */ + "gpio124", /* CS2B */ + "gpio131", /* CS5 */ + "gpio132", /* CS4 */ + "gpio133", /* CS3 */ + "gpio134", /* CS0 */ +}; +static const char * const ebi2_groups[] = { + /* ADDR9 & ADDR8 */ + "gpio37", "gpio38", + /* ADDR7 - ADDR 
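In the pinctrl-msm.c hunk above, the hardcoded 3-bit function-select mask is replaced by one sized from the group's function count. Worked through with the MDM9615 values (mux_bit = 2, nfuncs = 12): order_base_2(12) = 4, so mask = GENMASK(5, 2) = 0x3c. One caveat worth flagging: clearing the old selection in a read-modify-write takes val &= ~mask; as printed, val &= mask instead zeroes every bit outside the field, so that line deserves a second look against mainline. The mask computation on its own:

	#include <linux/bitops.h>
	#include <linux/log2.h>
	#include <linux/types.h>

	static u32 func_sel_mask(unsigned int mux_bit, unsigned int nfuncs)
	{
		/* order_base_2() rounds up: 4 bits cover up to 16 functions */
		return GENMASK(mux_bit + order_base_2(nfuncs) - 1, mux_bit);
	}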
0 */ + "gpio123", "gpio124", "gpio125", "gpio126", + "gpio127", "gpio128", "gpio129", "gpio130", + /* (muxed address+data) AD15 - AD0 */ + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", + "gpio140", "gpio141", "gpio142", "gpio143", "gpio144", + "gpio145", "gpio146", "gpio147", "gpio148", "gpio149", + "gpio150", + "gpio151", /* OE output enable */ + "gpio152", /* clock */ + "gpio153", /* ADV */ + "gpio154", /* WAIT (input) */ + "gpio155", /* UB Upper Byte Enable */ + "gpio156", /* LB Lower Byte Enable */ + "gpio157", /* WE Write Enable */ + "gpio158", /* busy */ +}; static const struct msm_function msm8660_functions[] = { FUNCTION(gpio), @@ -749,6 +781,8 @@ static const struct msm_function msm8660_functions[] = { FUNCTION(usb_fs2_oe_n), FUNCTION(vfe), FUNCTION(vsens_alarm), + FUNCTION(ebi2cs), /* for EBI2 chip selects */ + FUNCTION(ebi2), /* for general EBI2 pins */ }; static const struct msm_pingroup msm8660_groups[] = { @@ -789,10 +823,10 @@ static const struct msm_pingroup msm8660_groups[] = { PINGROUP(34, gsbi1, _, _, _, _, _, _), PINGROUP(35, gsbi1, _, _, _, _, _, _), PINGROUP(36, gsbi1, _, _, _, _, _, _), - PINGROUP(37, gsbi2, _, _, _, _, _, _), - PINGROUP(38, gsbi2, _, _, _, _, _, _), - PINGROUP(39, gsbi2, _, mdp_vsync, _, _, _, _), - PINGROUP(40, gsbi2, _, _, _, _, _, _), + PINGROUP(37, gsbi2, ebi2, _, _, _, _, _), + PINGROUP(38, gsbi2, ebi2, _, _, _, _, _), + PINGROUP(39, gsbi2, ebi2cs, mdp_vsync, _, _, _, _), + PINGROUP(40, gsbi2, ebi2cs, _, _, _, _, _), PINGROUP(41, gsbi3, mdp_vsync, _, _, _, _, _), PINGROUP(42, gsbi3, vfe, _, _, _, _, _), PINGROUP(43, gsbi3, _, _, _, _, _, _), @@ -875,42 +909,42 @@ static const struct msm_pingroup msm8660_groups[] = { PINGROUP(120, i2s, _, _, _, _, _, _), PINGROUP(121, i2s, _, _, _, _, _, _), PINGROUP(122, i2s, gp_clk_1b, _, _, _, _, _), - PINGROUP(123, _, gsbi2_spi_cs1_n, _, _, _, _, _), - PINGROUP(124, _, gsbi2_spi_cs2_n, _, _, _, _, _), - PINGROUP(125, _, gsbi2_spi_cs3_n, _, _, _, _, _), - PINGROUP(126, _, _, _, _, _, _, _), - PINGROUP(127, _, vsens_alarm, _, _, _, _, _), - PINGROUP(128, _, _, _, _, _, _, _), - PINGROUP(129, _, _, _, _, _, _, _), - PINGROUP(130, _, _, _, _, _, _, _), - PINGROUP(131, _, _, _, _, _, _, _), - PINGROUP(132, _, _, _, _, _, _, _), - PINGROUP(133, _, _, _, _, _, _, _), - PINGROUP(134, _, _, _, _, _, _, _), - PINGROUP(135, _, _, _, _, _, _, _), - PINGROUP(136, _, _, _, _, _, _, _), - PINGROUP(137, _, _, _, _, _, _, _), - PINGROUP(138, _, _, _, _, _, _, _), - PINGROUP(139, _, _, _, _, _, _, _), - PINGROUP(140, _, _, _, _, _, _, _), - PINGROUP(141, _, _, _, _, _, _, _), - PINGROUP(142, _, _, _, _, _, _, _), - PINGROUP(143, _, sdc2, _, _, _, _, _), - PINGROUP(144, _, sdc2, _, _, _, _, _), - PINGROUP(145, _, sdc2, _, _, _, _, _), - PINGROUP(146, _, sdc2, _, _, _, _, _), - PINGROUP(147, _, sdc2, _, _, _, _, _), - PINGROUP(148, _, sdc2, _, _, _, _, _), - PINGROUP(149, _, sdc2, _, _, _, _, _), - PINGROUP(150, _, sdc2, _, _, _, _, _), - PINGROUP(151, _, sdc2, _, _, _, _, _), - PINGROUP(152, _, sdc2, _, _, _, _, _), - PINGROUP(153, _, _, _, _, _, _, _), - PINGROUP(154, _, _, _, _, _, _, _), - PINGROUP(155, _, _, _, _, _, _, _), - PINGROUP(156, _, _, _, _, _, _, _), - PINGROUP(157, _, _, _, _, _, _, _), - PINGROUP(158, _, _, _, _, _, _, _), + PINGROUP(123, ebi2, gsbi2_spi_cs1_n, ebi2cs, _, _, _, _), + PINGROUP(124, ebi2, gsbi2_spi_cs2_n, ebi2cs, _, _, _, _), + PINGROUP(125, ebi2, gsbi2_spi_cs3_n, _, _, _, _, _), + PINGROUP(126, ebi2, _, _, _, _, _, _), + PINGROUP(127, ebi2, vsens_alarm, _, _, _, _, _), + PINGROUP(128, 
ebi2, _, _, _, _, _, _), + PINGROUP(129, ebi2, _, _, _, _, _, _), + PINGROUP(130, ebi2, _, _, _, _, _, _), + PINGROUP(131, ebi2cs, _, _, _, _, _, _), + PINGROUP(132, ebi2cs, _, _, _, _, _, _), + PINGROUP(133, ebi2cs, _, _, _, _, _, _), + PINGROUP(134, ebi2cs, _, _, _, _, _, _), + PINGROUP(135, ebi2, _, _, _, _, _, _), + PINGROUP(136, ebi2, _, _, _, _, _, _), + PINGROUP(137, ebi2, _, _, _, _, _, _), + PINGROUP(138, ebi2, _, _, _, _, _, _), + PINGROUP(139, ebi2, _, _, _, _, _, _), + PINGROUP(140, ebi2, _, _, _, _, _, _), + PINGROUP(141, ebi2, _, _, _, _, _, _), + PINGROUP(142, ebi2, _, _, _, _, _, _), + PINGROUP(143, ebi2, sdc2, _, _, _, _, _), + PINGROUP(144, ebi2, sdc2, _, _, _, _, _), + PINGROUP(145, ebi2, sdc2, _, _, _, _, _), + PINGROUP(146, ebi2, sdc2, _, _, _, _, _), + PINGROUP(147, ebi2, sdc2, _, _, _, _, _), + PINGROUP(148, ebi2, sdc2, _, _, _, _, _), + PINGROUP(149, ebi2, sdc2, _, _, _, _, _), + PINGROUP(150, ebi2, sdc2, _, _, _, _, _), + PINGROUP(151, ebi2, sdc2, _, _, _, _, _), + PINGROUP(152, ebi2, sdc2, _, _, _, _, _), + PINGROUP(153, ebi2, _, _, _, _, _, _), + PINGROUP(154, ebi2, _, _, _, _, _, _), + PINGROUP(155, ebi2, _, _, _, _, _, _), + PINGROUP(156, ebi2, _, _, _, _, _, _), + PINGROUP(157, ebi2, _, _, _, _, _, _), + PINGROUP(158, ebi2, _, _, _, _, _, _), PINGROUP(159, sdc1, _, _, _, _, _, _), PINGROUP(160, sdc1, _, _, _, _, _, _), PINGROUP(161, sdc1, _, _, _, _, _, _), diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c index 46fe6ad5f97e..9eb63d3403d4 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c @@ -172,6 +172,8 @@ static const struct pinctrl_pin_desc msm8x74_pins[] = { PINCTRL_PIN(149, "SDC2_CLK"), PINCTRL_PIN(150, "SDC2_CMD"), PINCTRL_PIN(151, "SDC2_DATA"), + PINCTRL_PIN(152, "HSIC_STROBE"), + PINCTRL_PIN(153, "HSIC_DATA"), }; #define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin } @@ -328,6 +330,8 @@ static const unsigned int sdc1_data_pins[] = { 148 }; static const unsigned int sdc2_clk_pins[] = { 149 }; static const unsigned int sdc2_cmd_pins[] = { 150 }; static const unsigned int sdc2_data_pins[] = { 151 }; +static const unsigned int hsic_strobe_pins[] = { 152 }; +static const unsigned int hsic_data_pins[] = { 153 }; #define FUNCTION(fname) \ [MSM_MUX_##fname] = { \ @@ -399,6 +403,37 @@ static const unsigned int sdc2_data_pins[] = { 151 }; .intr_detection_width = -1, \ } +#define HSIC_PINGROUP(pg_name, ctl) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = ARRAY_SIZE(pg_name##_pins), \ + .funcs = (int[]){ \ + MSM_MUX_gpio, \ + MSM_MUX_hsic_ctl, \ + }, \ + .nfuncs = 2, \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = 25, \ + .pull_bit = -1, \ + .drv_bit = -1, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + /* * TODO: Add the rest of the possible functions and fill out * the pingroup table below. 
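The HSIC_PINGROUP() macro above describes pads that expose only a function-select field: every other register attribute is set to -1. Assuming (as the SDC_PINGROUP precedent in the same file suggests) that the shared code treats a negative bit position as "field absent", a guard would look like this sketch with an invented name:

	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_get_field(u32 reg_val, int bit, unsigned int width)
	{
		if (bit < 0)
			return -ENOTSUPP;	/* e.g. no pull/drive/OE on HSIC pads */

		return (reg_val >> bit) & GENMASK(width - 1, 0);
	}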
@@ -509,6 +544,7 @@ enum msm8x74_functions { MSM_MUX_fm, MSM_MUX_wlan, MSM_MUX_slimbus, + MSM_MUX_hsic_ctl, MSM_MUX_NA, }; @@ -534,7 +570,8 @@ static const char * const gpio_groups[] = { "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", - "gpio141", "gpio142", "gpio143", "gpio144", "gpio145" + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "hsic_data", + "hsic_strobe", }; static const char * const blsp_uart1_groups[] = { @@ -754,6 +791,7 @@ static const char * const wlan_groups[] = { }; static const char * const slimbus_groups[] = { "gpio70", "gpio71" }; +static const char * const hsic_ctl_groups[] = { "hsic_strobe", "hsic_data" }; static const struct msm_function msm8x74_functions[] = { FUNCTION(gpio), @@ -861,6 +899,7 @@ static const struct msm_function msm8x74_functions[] = { FUNCTION(fm), FUNCTION(wlan), FUNCTION(slimbus), + FUNCTION(hsic_ctl), }; static const struct msm_pingroup msm8x74_groups[] = { @@ -1016,6 +1055,8 @@ static const struct msm_pingroup msm8x74_groups[] = { SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6), SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3), SDC_PINGROUP(sdc2_data, 0x2048, 9, 0), + HSIC_PINGROUP(hsic_strobe, 0x2050), + HSIC_PINGROUP(hsic_data, 0x2054), }; #define NUM_GPIO_PINGROUPS 146 diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 9191727aff5e..0d1392fc32dd 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -744,6 +744,7 @@ static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl, static const struct of_device_id pm8xxx_mpp_of_match[] = { { .compatible = "qcom,pm8018-mpp" }, { .compatible = "qcom,pm8038-mpp" }, + { .compatible = "qcom,pm8058-mpp" }, { .compatible = "qcom,pm8917-mpp" }, { .compatible = "qcom,pm8821-mpp" }, { .compatible = "qcom,pm8921-mpp" }, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c index fb71fc3e5aa0..3000df80709f 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c @@ -998,6 +998,7 @@ static struct platform_driver exynos5440_pinctrl_driver = { .driver = { .name = "exynos5440-pinctrl", .of_match_table = exynos5440_pinctrl_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index ed0b70881e19..513fe6b23248 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1274,6 +1274,7 @@ static struct platform_driver samsung_pinctrl_driver = { .driver = { .name = "samsung-pinctrl", .of_match_table = samsung_pinctrl_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index 9b9cee06ec59..a3b82041b6a2 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -598,15 +598,6 @@ static int sh_pfc_probe(struct platform_device *pdev) return 0; } -static int sh_pfc_remove(struct platform_device *pdev) -{ -#ifdef CONFIG_PINCTRL_SH_PFC_GPIO - sh_pfc_unregister_gpiochip(platform_get_drvdata(pdev)); -#endif - - return 0; -} - static const struct platform_device_id sh_pfc_id_table[] = { #ifdef CONFIG_PINCTRL_PFC_SH7203 { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info }, @@ -650,7 +641,6 @@ static const struct platform_device_id sh_pfc_id_table[] = { static struct 
platform_driver sh_pfc_driver = { .probe = sh_pfc_probe, - .remove = sh_pfc_remove, .id_table = sh_pfc_id_table, .driver = { .name = DRV_NAME, diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h index dc1b2adb24c5..0bbdea5849f4 100644 --- a/drivers/pinctrl/sh-pfc/core.h +++ b/drivers/pinctrl/sh-pfc/core.h @@ -10,50 +10,16 @@ #ifndef __SH_PFC_CORE_H__ #define __SH_PFC_CORE_H__ -#include <linux/compiler.h> -#include <linux/spinlock.h> #include <linux/types.h> #include "sh_pfc.h" -struct sh_pfc_window { - phys_addr_t phys; - void __iomem *virt; - unsigned long size; -}; - -struct sh_pfc_chip; -struct sh_pfc_pinctrl; - struct sh_pfc_pin_range { u16 start; u16 end; }; -struct sh_pfc { - struct device *dev; - const struct sh_pfc_soc_info *info; - spinlock_t lock; - - unsigned int num_windows; - struct sh_pfc_window *windows; - unsigned int num_irqs; - unsigned int *irqs; - - struct sh_pfc_pin_range *ranges; - unsigned int nr_ranges; - - unsigned int nr_gpio_pins; - - struct sh_pfc_chip *gpio; -#ifdef CONFIG_SUPERH - struct sh_pfc_chip *func; -#endif - -}; - int sh_pfc_register_gpiochip(struct sh_pfc *pfc); -int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc); int sh_pfc_register_pinctrl(struct sh_pfc *pfc); @@ -67,28 +33,4 @@ void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin); int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type); -extern const struct sh_pfc_soc_info emev2_pinmux_info; -extern const struct sh_pfc_soc_info r8a73a4_pinmux_info; -extern const struct sh_pfc_soc_info r8a7740_pinmux_info; -extern const struct sh_pfc_soc_info r8a7778_pinmux_info; -extern const struct sh_pfc_soc_info r8a7779_pinmux_info; -extern const struct sh_pfc_soc_info r8a7790_pinmux_info; -extern const struct sh_pfc_soc_info r8a7791_pinmux_info; -extern const struct sh_pfc_soc_info r8a7793_pinmux_info; -extern const struct sh_pfc_soc_info r8a7794_pinmux_info; -extern const struct sh_pfc_soc_info r8a7795_pinmux_info; -extern const struct sh_pfc_soc_info sh7203_pinmux_info; -extern const struct sh_pfc_soc_info sh7264_pinmux_info; -extern const struct sh_pfc_soc_info sh7269_pinmux_info; -extern const struct sh_pfc_soc_info sh73a0_pinmux_info; -extern const struct sh_pfc_soc_info sh7720_pinmux_info; -extern const struct sh_pfc_soc_info sh7722_pinmux_info; -extern const struct sh_pfc_soc_info sh7723_pinmux_info; -extern const struct sh_pfc_soc_info sh7724_pinmux_info; -extern const struct sh_pfc_soc_info sh7734_pinmux_info; -extern const struct sh_pfc_soc_info sh7757_pinmux_info; -extern const struct sh_pfc_soc_info sh7785_pinmux_info; -extern const struct sh_pfc_soc_info sh7786_pinmux_info; -extern const struct sh_pfc_soc_info shx3_pinmux_info; - #endif /* __SH_PFC_CORE_H__ */ diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c index 97dff6a09ff0..6b5422766f13 100644 --- a/drivers/pinctrl/sh-pfc/gpio.c +++ b/drivers/pinctrl/sh-pfc/gpio.c @@ -318,7 +318,7 @@ sh_pfc_add_gpiochip(struct sh_pfc *pfc, int(*setup)(struct sh_pfc_chip *), if (ret < 0) return ERR_PTR(ret); - ret = gpiochip_add_data(&chip->gpio_chip, chip); + ret = devm_gpiochip_add_data(pfc->dev, &chip->gpio_chip, chip); if (unlikely(ret < 0)) return ERR_PTR(ret); @@ -399,18 +399,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc) chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL); if (IS_ERR(chip)) return PTR_ERR(chip); - - pfc->func = chip; #endif /* CONFIG_SUPERH */ return 0; } - -int 
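The sh-pfc hunks here convert gpiochip registration to the managed API, which is why sh_pfc_remove() and sh_pfc_unregister_gpiochip() can be deleted outright: devm_gpiochip_add_data() arranges for gpiochip_remove() to run automatically when the device is detached. (The samsung hunks earlier take the complementary route for drivers that must never be unbound, setting suppress_bind_attrs to hide the sysfs bind/unbind files.) The devm idiom in isolation:

	#include <linux/gpio/driver.h>

	static int example_register_chip(struct device *dev,
					 struct gpio_chip *gc, void *data)
	{
		/* Unregistration is queued on dev's devres list. */
		return devm_gpiochip_add_data(dev, gc, data);
	}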
sh_pfc_unregister_gpiochip(struct sh_pfc *pfc) -{ - gpiochip_remove(&pfc->gpio->gpio_chip); -#ifdef CONFIG_SUPERH - gpiochip_remove(&pfc->func->gpio_chip); -#endif - return 0; -} diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c index d9d9228b15fa..ff5655dee67e 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c @@ -21,7 +21,6 @@ #include <linux/kernel.h> #include <linux/pinctrl/pinconf-generic.h> -#include "core.h" #include "sh_pfc.h" #define CPU_ALL_PORT(fn, pfx, sfx) \ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c index 7f7c8a6e76e8..35f436bcb849 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c @@ -22,7 +22,6 @@ #include <linux/kernel.h> #include <linux/pinctrl/pinconf-generic.h> -#include "core.h" #include "sh_pfc.h" #define CPU_ALL_PORT(fn, pfx, sfx) \ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index 411d0887ba19..18ef7042b3d1 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -23,7 +23,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/pinctrl/pinconf-generic.h> -#include "core.h" + #include "sh_pfc.h" #define PORT_GP_PUP_1(bank, pin, fn, sfx) \ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index eed8daa464cc..b769c05480da 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c @@ -24,7 +24,6 @@ #include <linux/io.h> #include <linux/kernel.h> -#include "core.h" #include "sh_pfc.h" /* @@ -4696,47 +4695,6 @@ static const char * const vin3_groups[] = { "vin3_clk", }; -#define IOCTRL6 0x8c - -static int r8a7790_get_io_voltage(struct sh_pfc *pfc, unsigned int pin) -{ - u32 data, mask; - - if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin)) - return -EINVAL; - - data = ioread32(pfc->windows->virt + IOCTRL6), - /* Bits in IOCTRL6 are numbered in opposite order to pins */ - mask = 0x80000000 >> (pin & 0x1f); - - return (data & mask) ? 
3300 : 1800; -} - -static int r8a7790_set_io_voltage(struct sh_pfc *pfc, unsigned int pin, u16 mV) -{ - u32 data, mask; - - if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin)) - return -EINVAL; - - if (mV != 1800 && mV != 3300) - return -EINVAL; - - data = ioread32(pfc->windows->virt + IOCTRL6); - /* Bits in IOCTRL6 are numbered in opposite order to pins */ - mask = 0x80000000 >> (pin & 0x1f); - - if (mV == 3300) - data |= mask; - else - data &= ~mask; - - iowrite32(~data, pfc->windows->virt); /* unlock reg */ - iowrite32(data, pfc->windows->virt + IOCTRL6); - - return 0; -} - static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(audio_clk), SH_PFC_FUNCTION(avb), @@ -5736,14 +5694,23 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { { }, }; -static const struct sh_pfc_soc_operations pinmux_ops = { - .get_io_voltage = r8a7790_get_io_voltage, - .set_io_voltage = r8a7790_set_io_voltage, +static int r8a7790_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl) +{ + if (pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31)) + return -EINVAL; + + *pocctrl = 0xe606008c; + + return 31 - (pin & 0x1f); +} + +static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = { + .pin_to_pocctrl = r8a7790_pin_to_pocctrl, }; const struct sh_pfc_soc_info r8a7790_pinmux_info = { .name = "r8a77900_pfc", - .ops = &pinmux_ops, + .ops = &r8a7790_pinmux_ops, .unlock_reg = 0xe6060000, /* PMMR */ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 01abbd5b4e49..0c1a60c9a844 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> -#include "core.h" #include "sh_pfc.h" #define CPU_ALL_PORT(fn, sfx) \ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 44632b1a5c97..b74cdd310d83 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c @@ -17,8 +17,12 @@ PORT_GP_CFG_16(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_15(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ - PORT_GP_CFG_16(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ - PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ + PORT_GP_CFG_12(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_CFG_1(3, 12, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ + PORT_GP_CFG_1(3, 13, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ + PORT_GP_CFG_1(3, 14, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ + PORT_GP_CFG_1(3, 15, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ + PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \ PORT_GP_CFG_26(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_32(6, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_4(7, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH) @@ -552,6 +556,9 @@ static const u16 pinmux_data[] = { PINMUX_SINGLE(AVS2), PINMUX_SINGLE(HDMI0_CEC), PINMUX_SINGLE(HDMI1_CEC), + PINMUX_SINGLE(I2C_SEL_0_1), + PINMUX_SINGLE(I2C_SEL_3_1), + PINMUX_SINGLE(I2C_SEL_5_1), PINMUX_SINGLE(MSIOF0_RXD), PINMUX_SINGLE(MSIOF0_SCK), PINMUX_SINGLE(MSIOF0_TXD), @@ -1401,11 +1408,6 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1), PINMUX_IPSR_GPSR(IP17_7_4, TPU0TO3), - - /* I2C */ - 
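The r8a7790 conversion above replaces the get/set_io_voltage callbacks with pin_to_pocctrl(), which merely reports where the power-source control lives: the IOCTRL6 address (0xe606008c) and a bit number, here 31 - (pin & 0x1f) because IOCTRL6 numbers its bits opposite to the pins. The actual read-modify-write then moves into common code (see the pinctrl.c hunks further down). The bit reversal, spelled out:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* BIT(31 - n) reproduces the old "0x80000000 >> n" mask exactly. */
	static u32 ioctrl6_mask(unsigned int pin_in_bank)	/* 0..31 */
	{
		return BIT(31 - pin_in_bank);
	}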
PINMUX_IPSR_NOGP(0, I2C_SEL_0_1), - PINMUX_IPSR_NOGP(0, I2C_SEL_3_1), - PINMUX_IPSR_NOGP(0, I2C_SEL_5_1), }; static const struct sh_pfc_pin pinmux_pins[] = { @@ -1654,6 +1656,221 @@ static const unsigned int canfd1_data_mux[] = { CANFD1_TX_MARK, CANFD1_RX_MARK, }; +/* - DRIF0 --------------------------------------------------------------- */ +static const unsigned int drif0_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; +static const unsigned int drif0_ctrl_a_mux[] = { + RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK, +}; +static const unsigned int drif0_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int drif0_data0_a_mux[] = { + RIF0_D0_A_MARK, +}; +static const unsigned int drif0_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 7), +}; +static const unsigned int drif0_data1_a_mux[] = { + RIF0_D1_A_MARK, +}; +static const unsigned int drif0_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4), +}; +static const unsigned int drif0_ctrl_b_mux[] = { + RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK, +}; +static const unsigned int drif0_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 1), +}; +static const unsigned int drif0_data0_b_mux[] = { + RIF0_D0_B_MARK, +}; +static const unsigned int drif0_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 2), +}; +static const unsigned int drif0_data1_b_mux[] = { + RIF0_D1_B_MARK, +}; +static const unsigned int drif0_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15), +}; +static const unsigned int drif0_ctrl_c_mux[] = { + RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK, +}; +static const unsigned int drif0_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 13), +}; +static const unsigned int drif0_data0_c_mux[] = { + RIF0_D0_C_MARK, +}; +static const unsigned int drif0_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 14), +}; +static const unsigned int drif0_data1_c_mux[] = { + RIF0_D1_C_MARK, +}; +/* - DRIF1 --------------------------------------------------------------- */ +static const unsigned int drif1_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; +static const unsigned int drif1_ctrl_a_mux[] = { + RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK, +}; +static const unsigned int drif1_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; +static const unsigned int drif1_data0_a_mux[] = { + RIF1_D0_A_MARK, +}; +static const unsigned int drif1_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; +static const unsigned int drif1_data1_a_mux[] = { + RIF1_D1_A_MARK, +}; +static const unsigned int drif1_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3), +}; +static const unsigned int drif1_ctrl_b_mux[] = { + RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK, +}; +static const unsigned int drif1_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 7), +}; +static const unsigned int drif1_data0_b_mux[] = { + RIF1_D0_B_MARK, +}; +static const unsigned int drif1_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 8), +}; +static const unsigned int drif1_data1_b_mux[] = { + RIF1_D1_B_MARK, +}; +static const unsigned int drif1_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11), +}; +static const unsigned int drif1_ctrl_c_mux[] = { + RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK, +}; +static const unsigned int drif1_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 6), +}; +static const unsigned int drif1_data0_c_mux[] = { + RIF1_D0_C_MARK, +}; +static const unsigned int drif1_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 10), +}; +static const unsigned int 
drif1_data1_c_mux[] = { + RIF1_D1_C_MARK, +}; +/* - DRIF2 --------------------------------------------------------------- */ +static const unsigned int drif2_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; +static const unsigned int drif2_ctrl_a_mux[] = { + RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK, +}; +static const unsigned int drif2_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 7), +}; +static const unsigned int drif2_data0_a_mux[] = { + RIF2_D0_A_MARK, +}; +static const unsigned int drif2_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 10), +}; +static const unsigned int drif2_data1_a_mux[] = { + RIF2_D1_A_MARK, +}; +static const unsigned int drif2_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27), +}; +static const unsigned int drif2_ctrl_b_mux[] = { + RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK, +}; +static const unsigned int drif2_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 30), +}; +static const unsigned int drif2_data0_b_mux[] = { + RIF2_D0_B_MARK, +}; +static const unsigned int drif2_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 31), +}; +static const unsigned int drif2_data1_b_mux[] = { + RIF2_D1_B_MARK, +}; +/* - DRIF3 --------------------------------------------------------------- */ +static const unsigned int drif3_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; +static const unsigned int drif3_ctrl_a_mux[] = { + RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK, +}; +static const unsigned int drif3_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; +static const unsigned int drif3_data0_a_mux[] = { + RIF3_D0_A_MARK, +}; +static const unsigned int drif3_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; +static const unsigned int drif3_data1_a_mux[] = { + RIF3_D1_A_MARK, +}; +static const unsigned int drif3_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25), +}; +static const unsigned int drif3_ctrl_b_mux[] = { + RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK, +}; +static const unsigned int drif3_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 28), +}; +static const unsigned int drif3_data0_b_mux[] = { + RIF3_D0_B_MARK, +}; +static const unsigned int drif3_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 29), +}; +static const unsigned int drif3_data1_b_mux[] = { + RIF3_D1_B_MARK, +}; + /* - HSCIF0 ----------------------------------------------------------------- */ static const unsigned int hscif0_data_pins[] = { /* RX, TX */ @@ -3346,6 +3563,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(canfd0_data_a), SH_PFC_PIN_GROUP(canfd0_data_b), SH_PFC_PIN_GROUP(canfd1_data), + SH_PFC_PIN_GROUP(drif0_ctrl_a), + SH_PFC_PIN_GROUP(drif0_data0_a), + SH_PFC_PIN_GROUP(drif0_data1_a), + SH_PFC_PIN_GROUP(drif0_ctrl_b), + SH_PFC_PIN_GROUP(drif0_data0_b), + SH_PFC_PIN_GROUP(drif0_data1_b), + SH_PFC_PIN_GROUP(drif0_ctrl_c), + SH_PFC_PIN_GROUP(drif0_data0_c), + SH_PFC_PIN_GROUP(drif0_data1_c), + SH_PFC_PIN_GROUP(drif1_ctrl_a), + SH_PFC_PIN_GROUP(drif1_data0_a), + SH_PFC_PIN_GROUP(drif1_data1_a), + SH_PFC_PIN_GROUP(drif1_ctrl_b), + SH_PFC_PIN_GROUP(drif1_data0_b), + SH_PFC_PIN_GROUP(drif1_data1_b), + SH_PFC_PIN_GROUP(drif1_ctrl_c), + SH_PFC_PIN_GROUP(drif1_data0_c), + SH_PFC_PIN_GROUP(drif1_data1_c), + SH_PFC_PIN_GROUP(drif2_ctrl_a), + SH_PFC_PIN_GROUP(drif2_data0_a), + SH_PFC_PIN_GROUP(drif2_data1_a), + SH_PFC_PIN_GROUP(drif2_ctrl_b), + SH_PFC_PIN_GROUP(drif2_data0_b), + SH_PFC_PIN_GROUP(drif2_data1_b), + SH_PFC_PIN_GROUP(drif3_ctrl_a), + SH_PFC_PIN_GROUP(drif3_data0_a), + 
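Each DRIF entry added to the group list below relies on the suffix convention: SH_PFC_PIN_GROUP(n) token-pastes the n##_pins and n##_mux arrays together, expanding roughly to (per the macro in sh_pfc.h):

	/* SH_PFC_PIN_GROUP(drif0_ctrl_a)  =>  */
	{
		.name = "drif0_ctrl_a",
		.pins = drif0_ctrl_a_pins,
		.mux = drif0_ctrl_a_mux,
		.nr_pins = ARRAY_SIZE(drif0_ctrl_a_pins),
	}

which is why each DRIF0..DRIF3 group comes as a matching *_pins/*_mux pair.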
SH_PFC_PIN_GROUP(drif3_data1_a), + SH_PFC_PIN_GROUP(drif3_ctrl_b), + SH_PFC_PIN_GROUP(drif3_data0_b), + SH_PFC_PIN_GROUP(drif3_data1_b), SH_PFC_PIN_GROUP(hscif0_data), SH_PFC_PIN_GROUP(hscif0_clk), SH_PFC_PIN_GROUP(hscif0_ctrl), @@ -3629,6 +3876,48 @@ static const char * const canfd1_groups[] = { "canfd1_data", }; +static const char * const drif0_groups[] = { + "drif0_ctrl_a", + "drif0_data0_a", + "drif0_data1_a", + "drif0_ctrl_b", + "drif0_data0_b", + "drif0_data1_b", + "drif0_ctrl_c", + "drif0_data0_c", + "drif0_data1_c", +}; + +static const char * const drif1_groups[] = { + "drif1_ctrl_a", + "drif1_data0_a", + "drif1_data1_a", + "drif1_ctrl_b", + "drif1_data0_b", + "drif1_data1_b", + "drif1_ctrl_c", + "drif1_data0_c", + "drif1_data1_c", +}; + +static const char * const drif2_groups[] = { + "drif2_ctrl_a", + "drif2_data0_a", + "drif2_data1_a", + "drif2_ctrl_b", + "drif2_data0_b", + "drif2_data1_b", +}; + +static const char * const drif3_groups[] = { + "drif3_ctrl_a", + "drif3_data0_a", + "drif3_data1_a", + "drif3_ctrl_b", + "drif3_data0_b", + "drif3_data1_b", +}; + static const char * const hscif0_groups[] = { "hscif0_data", "hscif0_clk", @@ -3972,6 +4261,10 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(can_clk), SH_PFC_FUNCTION(canfd0), SH_PFC_FUNCTION(canfd1), + SH_PFC_FUNCTION(drif0), + SH_PFC_FUNCTION(drif1), + SH_PFC_FUNCTION(drif2), + SH_PFC_FUNCTION(drif3), SH_PFC_FUNCTION(hscif0), SH_PFC_FUNCTION(hscif1), SH_PFC_FUNCTION(hscif2), @@ -4765,8 +5058,28 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { }, }; +static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl) +{ + int bit = -EINVAL; + + *pocctrl = 0xe6060380; + + if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11)) + bit = pin & 0x1f; + + if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17)) + bit = (pin & 0x1f) + 12; + + return bit; +} + +static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = { + .pin_to_pocctrl = r8a7795_pin_to_pocctrl, +}; + const struct sh_pfc_soc_info r8a7795_pinmux_info = { .name = "r8a77950_pfc", + .ops = &r8a7795_pinmux_ops, .unlock_reg = 0xe6060000, /* PMMR */ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c index 0555a1fe076e..6d8c31caefc1 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c @@ -1625,7 +1625,6 @@ static const struct pinmux_func pinmux_func_gpios[] = { GPIO_FN(VBIOS_CS), /* PTW (mobule: LBSC, EVC, SCIF) */ - GPIO_FN(A16), GPIO_FN(A15), GPIO_FN(A14), GPIO_FN(A13), diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index fdb445d68b9a..e208ee04a9f4 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -632,19 +632,21 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin, } case PIN_CONFIG_POWER_SOURCE: { - int ret; + u32 pocctrl, val; + int bit; - if (!pfc->info->ops || !pfc->info->ops->get_io_voltage) + if (!pfc->info->ops || !pfc->info->ops->pin_to_pocctrl) return -ENOTSUPP; + bit = pfc->info->ops->pin_to_pocctrl(pfc, _pin, &pocctrl); + if (WARN(bit < 0, "invalid pin %#x", _pin)) + return bit; + spin_lock_irqsave(&pfc->lock, flags); - ret = pfc->info->ops->get_io_voltage(pfc, _pin); + val = sh_pfc_read_reg(pfc, pocctrl, 32); spin_unlock_irqrestore(&pfc->lock, flags); - if (ret < 0) - return ret; - - *config = ret; + *config = (val & BIT(bit)) ? 
3300 : 1800; break; } @@ -696,20 +698,29 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin, } case PIN_CONFIG_POWER_SOURCE: { - unsigned int arg = - pinconf_to_config_argument(configs[i]); - int ret; + unsigned int mV = pinconf_to_config_argument(configs[i]); + u32 pocctrl, val; + int bit; - if (!pfc->info->ops || !pfc->info->ops->set_io_voltage) + if (!pfc->info->ops || !pfc->info->ops->pin_to_pocctrl) return -ENOTSUPP; + bit = pfc->info->ops->pin_to_pocctrl(pfc, _pin, &pocctrl); + if (WARN(bit < 0, "invalid pin %#x", _pin)) + return bit; + + if (mV != 1800 && mV != 3300) + return -EINVAL; + spin_lock_irqsave(&pfc->lock, flags); - ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg); + val = sh_pfc_read_reg(pfc, pocctrl, 32); + if (mV == 3300) + val |= BIT(bit); + else + val &= ~BIT(bit); + sh_pfc_write_reg(pfc, pocctrl, 32, val); spin_unlock_irqrestore(&pfc->lock, flags); - if (ret) - return ret; - break; } @@ -803,8 +814,5 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc) pmx->pctl_desc.npins = pfc->info->nr_pins; pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx); - if (IS_ERR(pmx->pctl)) - return PTR_ERR(pmx->pctl); - - return 0; + return PTR_ERR_OR_ZERO(pmx->pctl); } diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 656ea32f776c..5e966c09434d 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -13,6 +13,7 @@ #include <linux/bug.h> #include <linux/pinctrl/pinconf-generic.h> +#include <linux/spinlock.h> #include <linux/stringify.h> enum { @@ -182,16 +183,38 @@ struct pinmux_range { u16 force; }; -struct sh_pfc; +struct sh_pfc_window { + phys_addr_t phys; + void __iomem *virt; + unsigned long size; +}; + +struct sh_pfc_pin_range; + +struct sh_pfc { + struct device *dev; + const struct sh_pfc_soc_info *info; + spinlock_t lock; + + unsigned int num_windows; + struct sh_pfc_window *windows; + unsigned int num_irqs; + unsigned int *irqs; + + struct sh_pfc_pin_range *ranges; + unsigned int nr_ranges; + + unsigned int nr_gpio_pins; + + struct sh_pfc_chip *gpio; +}; struct sh_pfc_soc_operations { int (*init)(struct sh_pfc *pfc); unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin); void (*set_bias)(struct sh_pfc *pfc, unsigned int pin, unsigned int bias); - int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin); - int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin, - u16 voltage_mV); + int (*pin_to_pocctrl)(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl); }; struct sh_pfc_soc_info { @@ -227,6 +250,30 @@ struct sh_pfc_soc_info { u32 unlock_reg; }; +extern const struct sh_pfc_soc_info emev2_pinmux_info; +extern const struct sh_pfc_soc_info r8a73a4_pinmux_info; +extern const struct sh_pfc_soc_info r8a7740_pinmux_info; +extern const struct sh_pfc_soc_info r8a7778_pinmux_info; +extern const struct sh_pfc_soc_info r8a7779_pinmux_info; +extern const struct sh_pfc_soc_info r8a7790_pinmux_info; +extern const struct sh_pfc_soc_info r8a7791_pinmux_info; +extern const struct sh_pfc_soc_info r8a7793_pinmux_info; +extern const struct sh_pfc_soc_info r8a7794_pinmux_info; +extern const struct sh_pfc_soc_info r8a7795_pinmux_info; +extern const struct sh_pfc_soc_info sh7203_pinmux_info; +extern const struct sh_pfc_soc_info sh7264_pinmux_info; +extern const struct sh_pfc_soc_info sh7269_pinmux_info; +extern const struct sh_pfc_soc_info sh73a0_pinmux_info; +extern const struct sh_pfc_soc_info sh7720_pinmux_info; +extern const struct sh_pfc_soc_info sh7722_pinmux_info; +extern 
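The new tail of sh_pfc_register_pinctrl() above uses the stock helper from <linux/err.h>: PTR_ERR_OR_ZERO(p) evaluates to PTR_ERR(p) when IS_ERR(p) and to 0 otherwise, i.e. it is exactly the removed four lines folded into one expression:

	/* open-coded equivalent of "return PTR_ERR_OR_ZERO(pmx->pctl);" */
	if (IS_ERR(pmx->pctl))
		return PTR_ERR(pmx->pctl);
	return 0;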
const struct sh_pfc_soc_info sh7723_pinmux_info; +extern const struct sh_pfc_soc_info sh7724_pinmux_info; +extern const struct sh_pfc_soc_info sh7734_pinmux_info; +extern const struct sh_pfc_soc_info sh7757_pinmux_info; +extern const struct sh_pfc_soc_info sh7785_pinmux_info; +extern const struct sh_pfc_soc_info sh7786_pinmux_info; +extern const struct sh_pfc_soc_info shx3_pinmux_info; + /* ----------------------------------------------------------------------------- * Helper macros to create pin and port lists */ diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 168c0f5d4079..19952f73fa8c 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -5424,8 +5424,10 @@ static int atlas7_pinmux_probe(struct platform_device *pdev) if (ret) return ret; pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res); - if (IS_ERR(pmx->sys2pci_base)) + if (IS_ERR(pmx->sys2pci_base)) { + of_node_put(sys2pci_np); return -ENOMEM; + } pmx->dev = &pdev->dev; diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig index 0f28841b2332..4c40dae384d1 100644 --- a/drivers/pinctrl/stm32/Kconfig +++ b/drivers/pinctrl/stm32/Kconfig @@ -13,4 +13,10 @@ config PINCTRL_STM32F429 default MACH_STM32F429 select PINCTRL_STM32 +config PINCTRL_STM32F746 + bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746 + depends on OF + default MACH_STM32F746 + select PINCTRL_STM32 + endif diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile index fc17d4238845..4a1ee748441f 100644 --- a/drivers/pinctrl/stm32/Makefile +++ b/drivers/pinctrl/stm32/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o # SoC Drivers obj-$(CONFIG_PINCTRL_STM32F429) += pinctrl-stm32f429.o +obj-$(CONFIG_PINCTRL_STM32F746) += pinctrl-stm32f746.o diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index ae9fab82a1b9..4ae596bf19b5 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -638,8 +638,8 @@ static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank, return (val >> (offset * 2)); } -static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank, - unsigned int offset) +static bool stm32_pconf_get(struct stm32_gpio_bank *bank, + unsigned int offset, bool dir) { unsigned long flags; u32 val; @@ -647,23 +647,12 @@ static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank, clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); - val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset)); - - spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); - - return val; -} - -static bool stm32_pconf_output_get(struct stm32_gpio_bank *bank, - unsigned int offset) -{ - unsigned long flags; - u32 val; - - clk_enable(bank->clk); - spin_lock_irqsave(&bank->lock, flags); - val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) & BIT(offset)); + if (dir) + val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & + BIT(offset)); + else + val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) & + BIT(offset)); spin_unlock_irqrestore(&bank->lock, flags); clk_disable(bank->clk); @@ -772,7 +761,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, switch (mode) { /* input */ case 0: - val = stm32_pconf_input_get(bank, offset); + val = stm32_pconf_get(bank, offset, true); seq_printf(s, "- %s - %s", val ? 
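The atlas7 hunk above plugs a device-node refcount leak: the of_find_*() lookups return their node with an elevated refcount, so the new early-error path must drop it just as the success path eventually does. (Returning -ENOMEM for a devm_ioremap_resource() failure is a pre-existing wart; PTR_ERR() on the returned pointer would be more precise.) A sketch of the rule, with an invented helper:

	#include <linux/io.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static void __iomem *example_map(struct platform_device *pdev,
					 struct device_node *np,
					 struct resource *res)
	{
		void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

		of_node_put(np);	/* drop the ref on success and failure */
		return base;		/* may be an ERR_PTR */
	}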
"high" : "low", biasing[bias]); @@ -782,7 +771,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, case 1: drive = stm32_pconf_get_driving(bank, offset); speed = stm32_pconf_get_speed(bank, offset); - val = stm32_pconf_output_get(bank, offset); + val = stm32_pconf_get(bank, offset, false); seq_printf(s, "- %s - %s - %s - %s %s", val ? "high" : "low", drive ? "open drain" : "push pull", diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f746.c b/drivers/pinctrl/stm32/pinctrl-stm32f746.c new file mode 100644 index 000000000000..c0b4462ce97e --- /dev/null +++ b/drivers/pinctrl/stm32/pinctrl-stm32f746.c @@ -0,0 +1,1681 @@ +/* + * Copyright (C) Maxime Coquelin 2015 + * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> + * License terms: GNU General Public License (GPL), version 2 + */ +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "pinctrl-stm32.h" + +static const struct stm32_desc_pin stm32f746_pins[] = { + STM32_PIN( + PINCTRL_PIN(0, "PA0"), + STM32_FUNCTION(0, "GPIOA0"), + STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"), + STM32_FUNCTION(3, "TIM5_CH1"), + STM32_FUNCTION(4, "TIM8_ETR"), + STM32_FUNCTION(8, "USART2_CTS"), + STM32_FUNCTION(9, "UART4_TX"), + STM32_FUNCTION(11, "SAI2_SD_B"), + STM32_FUNCTION(12, "ETH_MII_CRS"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(1, "PA1"), + STM32_FUNCTION(0, "GPIOA1"), + STM32_FUNCTION(2, "TIM2_CH2"), + STM32_FUNCTION(3, "TIM5_CH2"), + STM32_FUNCTION(8, "USART2_RTS"), + STM32_FUNCTION(9, "UART4_RX"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO3"), + STM32_FUNCTION(11, "SAI2_MCLK_B"), + STM32_FUNCTION(12, "ETH_MII_RX_CLK ETH_RMII_REF_CLK"), + STM32_FUNCTION(15, "LCD_R2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(2, "PA2"), + STM32_FUNCTION(0, "GPIOA2"), + STM32_FUNCTION(2, "TIM2_CH3"), + STM32_FUNCTION(3, "TIM5_CH3"), + STM32_FUNCTION(4, "TIM9_CH1"), + STM32_FUNCTION(8, "USART2_TX"), + STM32_FUNCTION(9, "SAI2_SCK_B"), + STM32_FUNCTION(12, "ETH_MDIO"), + STM32_FUNCTION(15, "LCD_R1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(3, "PA3"), + STM32_FUNCTION(0, "GPIOA3"), + STM32_FUNCTION(2, "TIM2_CH4"), + STM32_FUNCTION(3, "TIM5_CH4"), + STM32_FUNCTION(4, "TIM9_CH2"), + STM32_FUNCTION(8, "USART2_RX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D0"), + STM32_FUNCTION(12, "ETH_MII_COL"), + STM32_FUNCTION(15, "LCD_B5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(4, "PA4"), + STM32_FUNCTION(0, "GPIOA4"), + STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"), + STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"), + STM32_FUNCTION(8, "USART2_CK"), + STM32_FUNCTION(13, "OTG_HS_SOF"), + STM32_FUNCTION(14, "DCMI_HSYNC"), + STM32_FUNCTION(15, "LCD_VSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(5, "PA5"), + STM32_FUNCTION(0, "GPIOA5"), + STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"), + STM32_FUNCTION(4, "TIM8_CH1N"), + STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"), + STM32_FUNCTION(11, "OTG_HS_ULPI_CK"), + STM32_FUNCTION(15, "LCD_R4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(6, "PA6"), + STM32_FUNCTION(0, "GPIOA6"), + STM32_FUNCTION(2, "TIM1_BKIN"), + STM32_FUNCTION(3, "TIM3_CH1"), + STM32_FUNCTION(4, "TIM8_BKIN"), + STM32_FUNCTION(6, "SPI1_MISO"), + STM32_FUNCTION(10, "TIM13_CH1"), + STM32_FUNCTION(14, "DCMI_PIXCLK"), + STM32_FUNCTION(15, 
"LCD_G2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(7, "PA7"), + STM32_FUNCTION(0, "GPIOA7"), + STM32_FUNCTION(2, "TIM1_CH1N"), + STM32_FUNCTION(3, "TIM3_CH2"), + STM32_FUNCTION(4, "TIM8_CH1N"), + STM32_FUNCTION(6, "SPI1_MOSI I2S1_SD"), + STM32_FUNCTION(10, "TIM14_CH1"), + STM32_FUNCTION(12, "ETH_MII_RX_DV ETH_RMII_CRS_DV"), + STM32_FUNCTION(13, "FMC_SDNWE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(8, "PA8"), + STM32_FUNCTION(0, "GPIOA8"), + STM32_FUNCTION(1, "MCO1"), + STM32_FUNCTION(2, "TIM1_CH1"), + STM32_FUNCTION(4, "TIM8_BKIN2"), + STM32_FUNCTION(5, "I2C3_SCL"), + STM32_FUNCTION(8, "USART1_CK"), + STM32_FUNCTION(11, "OTG_FS_SOF"), + STM32_FUNCTION(15, "LCD_R6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(9, "PA9"), + STM32_FUNCTION(0, "GPIOA9"), + STM32_FUNCTION(2, "TIM1_CH2"), + STM32_FUNCTION(5, "I2C3_SMBA"), + STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"), + STM32_FUNCTION(8, "USART1_TX"), + STM32_FUNCTION(14, "DCMI_D0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(10, "PA10"), + STM32_FUNCTION(0, "GPIOA10"), + STM32_FUNCTION(2, "TIM1_CH3"), + STM32_FUNCTION(8, "USART1_RX"), + STM32_FUNCTION(11, "OTG_FS_ID"), + STM32_FUNCTION(14, "DCMI_D1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(11, "PA11"), + STM32_FUNCTION(0, "GPIOA11"), + STM32_FUNCTION(2, "TIM1_CH4"), + STM32_FUNCTION(8, "USART1_CTS"), + STM32_FUNCTION(10, "CAN1_RX"), + STM32_FUNCTION(11, "OTG_FS_DM"), + STM32_FUNCTION(15, "LCD_R4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(12, "PA12"), + STM32_FUNCTION(0, "GPIOA12"), + STM32_FUNCTION(2, "TIM1_ETR"), + STM32_FUNCTION(8, "USART1_RTS"), + STM32_FUNCTION(9, "SAI2_FS_B"), + STM32_FUNCTION(10, "CAN1_TX"), + STM32_FUNCTION(11, "OTG_FS_DP"), + STM32_FUNCTION(15, "LCD_R5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(13, "PA13"), + STM32_FUNCTION(0, "GPIOA13"), + STM32_FUNCTION(1, "JTMS SWDIO"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(14, "PA14"), + STM32_FUNCTION(0, "GPIOA14"), + STM32_FUNCTION(1, "JTCK SWCLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(15, "PA15"), + STM32_FUNCTION(0, "GPIOA15"), + STM32_FUNCTION(1, "JTDI"), + STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"), + STM32_FUNCTION(5, "HDMI_CEC"), + STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"), + STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"), + STM32_FUNCTION(9, "UART4_RTS"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(16, "PB0"), + STM32_FUNCTION(0, "GPIOB0"), + STM32_FUNCTION(2, "TIM1_CH2N"), + STM32_FUNCTION(3, "TIM3_CH3"), + STM32_FUNCTION(4, "TIM8_CH2N"), + STM32_FUNCTION(9, "UART4_CTS"), + STM32_FUNCTION(10, "LCD_R3"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D1"), + STM32_FUNCTION(12, "ETH_MII_RXD2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(17, "PB1"), + STM32_FUNCTION(0, "GPIOB1"), + STM32_FUNCTION(2, "TIM1_CH3N"), + STM32_FUNCTION(3, "TIM3_CH4"), + STM32_FUNCTION(4, "TIM8_CH3N"), + STM32_FUNCTION(10, "LCD_R6"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D2"), + STM32_FUNCTION(12, "ETH_MII_RXD3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + 
), + STM32_PIN( + PINCTRL_PIN(18, "PB2"), + STM32_FUNCTION(0, "GPIOB2"), + STM32_FUNCTION(7, "SAI1_SD_A"), + STM32_FUNCTION(8, "SPI3_MOSI I2S3_SD"), + STM32_FUNCTION(10, "QUADSPI_CLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(19, "PB3"), + STM32_FUNCTION(0, "GPIOB3"), + STM32_FUNCTION(1, "JTDO TRACESWO"), + STM32_FUNCTION(2, "TIM2_CH2"), + STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"), + STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(20, "PB4"), + STM32_FUNCTION(0, "GPIOB4"), + STM32_FUNCTION(1, "NJTRST"), + STM32_FUNCTION(3, "TIM3_CH1"), + STM32_FUNCTION(6, "SPI1_MISO"), + STM32_FUNCTION(7, "SPI3_MISO"), + STM32_FUNCTION(8, "SPI2_NSS I2S2_WS"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(21, "PB5"), + STM32_FUNCTION(0, "GPIOB5"), + STM32_FUNCTION(3, "TIM3_CH2"), + STM32_FUNCTION(5, "I2C1_SMBA"), + STM32_FUNCTION(6, "SPI1_MOSI I2S1_SD"), + STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"), + STM32_FUNCTION(10, "CAN2_RX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D7"), + STM32_FUNCTION(12, "ETH_PPS_OUT"), + STM32_FUNCTION(13, "FMC_SDCKE1"), + STM32_FUNCTION(14, "DCMI_D10"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(22, "PB6"), + STM32_FUNCTION(0, "GPIOB6"), + STM32_FUNCTION(3, "TIM4_CH1"), + STM32_FUNCTION(4, "HDMI_CEC"), + STM32_FUNCTION(5, "I2C1_SCL"), + STM32_FUNCTION(8, "USART1_TX"), + STM32_FUNCTION(10, "CAN2_TX"), + STM32_FUNCTION(11, "QUADSPI_BK1_NCS"), + STM32_FUNCTION(13, "FMC_SDNE1"), + STM32_FUNCTION(14, "DCMI_D5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(23, "PB7"), + STM32_FUNCTION(0, "GPIOB7"), + STM32_FUNCTION(3, "TIM4_CH2"), + STM32_FUNCTION(5, "I2C1_SDA"), + STM32_FUNCTION(8, "USART1_RX"), + STM32_FUNCTION(13, "FMC_NL"), + STM32_FUNCTION(14, "DCMI_VSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(24, "PB8"), + STM32_FUNCTION(0, "GPIOB8"), + STM32_FUNCTION(3, "TIM4_CH3"), + STM32_FUNCTION(4, "TIM10_CH1"), + STM32_FUNCTION(5, "I2C1_SCL"), + STM32_FUNCTION(10, "CAN1_RX"), + STM32_FUNCTION(12, "ETH_MII_TXD3"), + STM32_FUNCTION(13, "SDMMC1_D4"), + STM32_FUNCTION(14, "DCMI_D6"), + STM32_FUNCTION(15, "LCD_B6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(25, "PB9"), + STM32_FUNCTION(0, "GPIOB9"), + STM32_FUNCTION(3, "TIM4_CH4"), + STM32_FUNCTION(4, "TIM11_CH1"), + STM32_FUNCTION(5, "I2C1_SDA"), + STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"), + STM32_FUNCTION(10, "CAN1_TX"), + STM32_FUNCTION(13, "SDMMC1_D5"), + STM32_FUNCTION(14, "DCMI_D7"), + STM32_FUNCTION(15, "LCD_B7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(26, "PB10"), + STM32_FUNCTION(0, "GPIOB10"), + STM32_FUNCTION(2, "TIM2_CH3"), + STM32_FUNCTION(5, "I2C2_SCL"), + STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"), + STM32_FUNCTION(8, "USART3_TX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D3"), + STM32_FUNCTION(12, "ETH_MII_RX_ER"), + STM32_FUNCTION(15, "LCD_G4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(27, "PB11"), + STM32_FUNCTION(0, "GPIOB11"), + STM32_FUNCTION(2, "TIM2_CH4"), + STM32_FUNCTION(5, "I2C2_SDA"), + STM32_FUNCTION(8, "USART3_RX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D4"), + STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"), + 
STM32_FUNCTION(15, "LCD_G5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(28, "PB12"), + STM32_FUNCTION(0, "GPIOB12"), + STM32_FUNCTION(2, "TIM1_BKIN"), + STM32_FUNCTION(5, "I2C2_SMBA"), + STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"), + STM32_FUNCTION(8, "USART3_CK"), + STM32_FUNCTION(10, "CAN2_RX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D5"), + STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"), + STM32_FUNCTION(13, "OTG_HS_ID"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(29, "PB13"), + STM32_FUNCTION(0, "GPIOB13"), + STM32_FUNCTION(2, "TIM1_CH1N"), + STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"), + STM32_FUNCTION(8, "USART3_CTS"), + STM32_FUNCTION(10, "CAN2_TX"), + STM32_FUNCTION(11, "OTG_HS_ULPI_D6"), + STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(30, "PB14"), + STM32_FUNCTION(0, "GPIOB14"), + STM32_FUNCTION(2, "TIM1_CH2N"), + STM32_FUNCTION(4, "TIM8_CH2N"), + STM32_FUNCTION(6, "SPI2_MISO"), + STM32_FUNCTION(8, "USART3_RTS"), + STM32_FUNCTION(10, "TIM12_CH1"), + STM32_FUNCTION(13, "OTG_HS_DM"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(31, "PB15"), + STM32_FUNCTION(0, "GPIOB15"), + STM32_FUNCTION(1, "RTC_REFIN"), + STM32_FUNCTION(2, "TIM1_CH3N"), + STM32_FUNCTION(4, "TIM8_CH3N"), + STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"), + STM32_FUNCTION(10, "TIM12_CH2"), + STM32_FUNCTION(13, "OTG_HS_DP"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(32, "PC0"), + STM32_FUNCTION(0, "GPIOC0"), + STM32_FUNCTION(9, "SAI2_FS_B"), + STM32_FUNCTION(11, "OTG_HS_ULPI_STP"), + STM32_FUNCTION(13, "FMC_SDNWE"), + STM32_FUNCTION(15, "LCD_R5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(33, "PC1"), + STM32_FUNCTION(0, "GPIOC1"), + STM32_FUNCTION(1, "TRACED0"), + STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"), + STM32_FUNCTION(7, "SAI1_SD_A"), + STM32_FUNCTION(12, "ETH_MDC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(34, "PC2"), + STM32_FUNCTION(0, "GPIOC2"), + STM32_FUNCTION(6, "SPI2_MISO"), + STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"), + STM32_FUNCTION(12, "ETH_MII_TXD2"), + STM32_FUNCTION(13, "FMC_SDNE0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(35, "PC3"), + STM32_FUNCTION(0, "GPIOC3"), + STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"), + STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"), + STM32_FUNCTION(12, "ETH_MII_TX_CLK"), + STM32_FUNCTION(13, "FMC_SDCKE0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(36, "PC4"), + STM32_FUNCTION(0, "GPIOC4"), + STM32_FUNCTION(6, "I2S1_MCK"), + STM32_FUNCTION(9, "SPDIFRX_IN2"), + STM32_FUNCTION(12, "ETH_MII_RXD0 ETH_RMII_RXD0"), + STM32_FUNCTION(13, "FMC_SDNE0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(37, "PC5"), + STM32_FUNCTION(0, "GPIOC5"), + STM32_FUNCTION(9, "SPDIFRX_IN3"), + STM32_FUNCTION(12, "ETH_MII_RXD1 ETH_RMII_RXD1"), + STM32_FUNCTION(13, "FMC_SDCKE0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(38, "PC6"), + STM32_FUNCTION(0, "GPIOC6"), + STM32_FUNCTION(3, "TIM3_CH1"), + STM32_FUNCTION(4, "TIM8_CH1"), + STM32_FUNCTION(6, "I2S2_MCK"), + STM32_FUNCTION(9, "USART6_TX"), + 
STM32_FUNCTION(13, "SDMMC1_D6"), + STM32_FUNCTION(14, "DCMI_D0"), + STM32_FUNCTION(15, "LCD_HSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(39, "PC7"), + STM32_FUNCTION(0, "GPIOC7"), + STM32_FUNCTION(3, "TIM3_CH2"), + STM32_FUNCTION(4, "TIM8_CH2"), + STM32_FUNCTION(7, "I2S3_MCK"), + STM32_FUNCTION(9, "USART6_RX"), + STM32_FUNCTION(13, "SDMMC1_D7"), + STM32_FUNCTION(14, "DCMI_D1"), + STM32_FUNCTION(15, "LCD_G6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(40, "PC8"), + STM32_FUNCTION(0, "GPIOC8"), + STM32_FUNCTION(1, "TRACED1"), + STM32_FUNCTION(3, "TIM3_CH3"), + STM32_FUNCTION(4, "TIM8_CH3"), + STM32_FUNCTION(8, "UART5_RTS"), + STM32_FUNCTION(9, "USART6_CK"), + STM32_FUNCTION(13, "SDMMC1_D0"), + STM32_FUNCTION(14, "DCMI_D2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(41, "PC9"), + STM32_FUNCTION(0, "GPIOC9"), + STM32_FUNCTION(1, "MCO2"), + STM32_FUNCTION(3, "TIM3_CH4"), + STM32_FUNCTION(4, "TIM8_CH4"), + STM32_FUNCTION(5, "I2C3_SDA"), + STM32_FUNCTION(6, "I2S_CKIN"), + STM32_FUNCTION(8, "UART5_CTS"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO0"), + STM32_FUNCTION(13, "SDMMC1_D1"), + STM32_FUNCTION(14, "DCMI_D3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(42, "PC10"), + STM32_FUNCTION(0, "GPIOC10"), + STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"), + STM32_FUNCTION(8, "USART3_TX"), + STM32_FUNCTION(9, "UART4_TX"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO1"), + STM32_FUNCTION(13, "SDMMC1_D2"), + STM32_FUNCTION(14, "DCMI_D8"), + STM32_FUNCTION(15, "LCD_R2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(43, "PC11"), + STM32_FUNCTION(0, "GPIOC11"), + STM32_FUNCTION(7, "SPI3_MISO"), + STM32_FUNCTION(8, "USART3_RX"), + STM32_FUNCTION(9, "UART4_RX"), + STM32_FUNCTION(10, "QUADSPI_BK2_NCS"), + STM32_FUNCTION(13, "SDMMC1_D3"), + STM32_FUNCTION(14, "DCMI_D4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(44, "PC12"), + STM32_FUNCTION(0, "GPIOC12"), + STM32_FUNCTION(1, "TRACED3"), + STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"), + STM32_FUNCTION(8, "USART3_CK"), + STM32_FUNCTION(9, "UART5_TX"), + STM32_FUNCTION(13, "SDMMC1_CK"), + STM32_FUNCTION(14, "DCMI_D9"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(45, "PC13"), + STM32_FUNCTION(0, "GPIOC13"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(46, "PC14"), + STM32_FUNCTION(0, "GPIOC14"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(47, "PC15"), + STM32_FUNCTION(0, "GPIOC15"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(48, "PD0"), + STM32_FUNCTION(0, "GPIOD0"), + STM32_FUNCTION(10, "CAN1_RX"), + STM32_FUNCTION(13, "FMC_D2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(49, "PD1"), + STM32_FUNCTION(0, "GPIOD1"), + STM32_FUNCTION(10, "CAN1_TX"), + STM32_FUNCTION(13, "FMC_D3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(50, "PD2"), + STM32_FUNCTION(0, "GPIOD2"), + STM32_FUNCTION(1, "TRACED2"), + STM32_FUNCTION(3, "TIM3_ETR"), + STM32_FUNCTION(9, "UART5_RX"), + STM32_FUNCTION(13, "SDMMC1_CMD"), + STM32_FUNCTION(14, "DCMI_D11"), + STM32_FUNCTION(16, 
"EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(51, "PD3"), + STM32_FUNCTION(0, "GPIOD3"), + STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"), + STM32_FUNCTION(8, "USART2_CTS"), + STM32_FUNCTION(13, "FMC_CLK"), + STM32_FUNCTION(14, "DCMI_D5"), + STM32_FUNCTION(15, "LCD_G7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(52, "PD4"), + STM32_FUNCTION(0, "GPIOD4"), + STM32_FUNCTION(8, "USART2_RTS"), + STM32_FUNCTION(13, "FMC_NOE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(53, "PD5"), + STM32_FUNCTION(0, "GPIOD5"), + STM32_FUNCTION(8, "USART2_TX"), + STM32_FUNCTION(13, "FMC_NWE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(54, "PD6"), + STM32_FUNCTION(0, "GPIOD6"), + STM32_FUNCTION(6, "SPI3_MOSI I2S3_SD"), + STM32_FUNCTION(7, "SAI1_SD_A"), + STM32_FUNCTION(8, "USART2_RX"), + STM32_FUNCTION(13, "FMC_NWAIT"), + STM32_FUNCTION(14, "DCMI_D10"), + STM32_FUNCTION(15, "LCD_B2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(55, "PD7"), + STM32_FUNCTION(0, "GPIOD7"), + STM32_FUNCTION(8, "USART2_CK"), + STM32_FUNCTION(9, "SPDIFRX_IN0"), + STM32_FUNCTION(13, "FMC_NE1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(56, "PD8"), + STM32_FUNCTION(0, "GPIOD8"), + STM32_FUNCTION(8, "USART3_TX"), + STM32_FUNCTION(9, "SPDIFRX_IN1"), + STM32_FUNCTION(13, "FMC_D13"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(57, "PD9"), + STM32_FUNCTION(0, "GPIOD9"), + STM32_FUNCTION(8, "USART3_RX"), + STM32_FUNCTION(13, "FMC_D14"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(58, "PD10"), + STM32_FUNCTION(0, "GPIOD10"), + STM32_FUNCTION(8, "USART3_CK"), + STM32_FUNCTION(13, "FMC_D15"), + STM32_FUNCTION(15, "LCD_B3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(59, "PD11"), + STM32_FUNCTION(0, "GPIOD11"), + STM32_FUNCTION(5, "I2C4_SMBA"), + STM32_FUNCTION(8, "USART3_CTS"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO0"), + STM32_FUNCTION(11, "SAI2_SD_A"), + STM32_FUNCTION(13, "FMC_A16 FMC_CLE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(60, "PD12"), + STM32_FUNCTION(0, "GPIOD12"), + STM32_FUNCTION(3, "TIM4_CH1"), + STM32_FUNCTION(4, "LPTIM1_IN1"), + STM32_FUNCTION(5, "I2C4_SCL"), + STM32_FUNCTION(8, "USART3_RTS"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO1"), + STM32_FUNCTION(11, "SAI2_FS_A"), + STM32_FUNCTION(13, "FMC_A17 FMC_ALE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(61, "PD13"), + STM32_FUNCTION(0, "GPIOD13"), + STM32_FUNCTION(3, "TIM4_CH2"), + STM32_FUNCTION(4, "LPTIM1_OUT"), + STM32_FUNCTION(5, "I2C4_SDA"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO3"), + STM32_FUNCTION(11, "SAI2_SCK_A"), + STM32_FUNCTION(13, "FMC_A18"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(62, "PD14"), + STM32_FUNCTION(0, "GPIOD14"), + STM32_FUNCTION(3, "TIM4_CH3"), + STM32_FUNCTION(9, "UART8_CTS"), + STM32_FUNCTION(13, "FMC_D0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(63, "PD15"), + STM32_FUNCTION(0, "GPIOD15"), + STM32_FUNCTION(3, "TIM4_CH4"), + STM32_FUNCTION(9, "UART8_RTS"), + STM32_FUNCTION(13, "FMC_D1"), + 
STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(64, "PE0"), + STM32_FUNCTION(0, "GPIOE0"), + STM32_FUNCTION(3, "TIM4_ETR"), + STM32_FUNCTION(4, "LPTIM1_ETR"), + STM32_FUNCTION(9, "UART8_RX"), + STM32_FUNCTION(11, "SAI2_MCLK_A"), + STM32_FUNCTION(13, "FMC_NBL0"), + STM32_FUNCTION(14, "DCMI_D2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(65, "PE1"), + STM32_FUNCTION(0, "GPIOE1"), + STM32_FUNCTION(4, "LPTIM1_IN2"), + STM32_FUNCTION(9, "UART8_TX"), + STM32_FUNCTION(13, "FMC_NBL1"), + STM32_FUNCTION(14, "DCMI_D3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(66, "PE2"), + STM32_FUNCTION(0, "GPIOE2"), + STM32_FUNCTION(1, "TRACECLK"), + STM32_FUNCTION(6, "SPI4_SCK"), + STM32_FUNCTION(7, "SAI1_MCLK_A"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO2"), + STM32_FUNCTION(12, "ETH_MII_TXD3"), + STM32_FUNCTION(13, "FMC_A23"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(67, "PE3"), + STM32_FUNCTION(0, "GPIOE3"), + STM32_FUNCTION(1, "TRACED0"), + STM32_FUNCTION(7, "SAI1_SD_B"), + STM32_FUNCTION(13, "FMC_A19"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(68, "PE4"), + STM32_FUNCTION(0, "GPIOE4"), + STM32_FUNCTION(1, "TRACED1"), + STM32_FUNCTION(6, "SPI4_NSS"), + STM32_FUNCTION(7, "SAI1_FS_A"), + STM32_FUNCTION(13, "FMC_A20"), + STM32_FUNCTION(14, "DCMI_D4"), + STM32_FUNCTION(15, "LCD_B0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(69, "PE5"), + STM32_FUNCTION(0, "GPIOE5"), + STM32_FUNCTION(1, "TRACED2"), + STM32_FUNCTION(4, "TIM9_CH1"), + STM32_FUNCTION(6, "SPI4_MISO"), + STM32_FUNCTION(7, "SAI1_SCK_A"), + STM32_FUNCTION(13, "FMC_A21"), + STM32_FUNCTION(14, "DCMI_D6"), + STM32_FUNCTION(15, "LCD_G0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(70, "PE6"), + STM32_FUNCTION(0, "GPIOE6"), + STM32_FUNCTION(1, "TRACED3"), + STM32_FUNCTION(2, "TIM1_BKIN2"), + STM32_FUNCTION(4, "TIM9_CH2"), + STM32_FUNCTION(6, "SPI4_MOSI"), + STM32_FUNCTION(7, "SAI1_SD_A"), + STM32_FUNCTION(11, "SAI2_MCLK_B"), + STM32_FUNCTION(13, "FMC_A22"), + STM32_FUNCTION(14, "DCMI_D7"), + STM32_FUNCTION(15, "LCD_G1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(71, "PE7"), + STM32_FUNCTION(0, "GPIOE7"), + STM32_FUNCTION(2, "TIM1_ETR"), + STM32_FUNCTION(9, "UART7_RX"), + STM32_FUNCTION(11, "QUADSPI_BK2_IO0"), + STM32_FUNCTION(13, "FMC_D4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(72, "PE8"), + STM32_FUNCTION(0, "GPIOE8"), + STM32_FUNCTION(2, "TIM1_CH1N"), + STM32_FUNCTION(9, "UART7_TX"), + STM32_FUNCTION(11, "QUADSPI_BK2_IO1"), + STM32_FUNCTION(13, "FMC_D5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(73, "PE9"), + STM32_FUNCTION(0, "GPIOE9"), + STM32_FUNCTION(2, "TIM1_CH1"), + STM32_FUNCTION(9, "UART7_RTS"), + STM32_FUNCTION(11, "QUADSPI_BK2_IO2"), + STM32_FUNCTION(13, "FMC_D6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(74, "PE10"), + STM32_FUNCTION(0, "GPIOE10"), + STM32_FUNCTION(2, "TIM1_CH2N"), + STM32_FUNCTION(9, "UART7_CTS"), + STM32_FUNCTION(11, "QUADSPI_BK2_IO3"), + STM32_FUNCTION(13, "FMC_D7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), 
+ STM32_PIN( + PINCTRL_PIN(75, "PE11"), + STM32_FUNCTION(0, "GPIOE11"), + STM32_FUNCTION(2, "TIM1_CH2"), + STM32_FUNCTION(6, "SPI4_NSS"), + STM32_FUNCTION(11, "SAI2_SD_B"), + STM32_FUNCTION(13, "FMC_D8"), + STM32_FUNCTION(15, "LCD_G3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(76, "PE12"), + STM32_FUNCTION(0, "GPIOE12"), + STM32_FUNCTION(2, "TIM1_CH3N"), + STM32_FUNCTION(6, "SPI4_SCK"), + STM32_FUNCTION(11, "SAI2_SCK_B"), + STM32_FUNCTION(13, "FMC_D9"), + STM32_FUNCTION(15, "LCD_B4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(77, "PE13"), + STM32_FUNCTION(0, "GPIOE13"), + STM32_FUNCTION(2, "TIM1_CH3"), + STM32_FUNCTION(6, "SPI4_MISO"), + STM32_FUNCTION(11, "SAI2_FS_B"), + STM32_FUNCTION(13, "FMC_D10"), + STM32_FUNCTION(15, "LCD_DE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(78, "PE14"), + STM32_FUNCTION(0, "GPIOE14"), + STM32_FUNCTION(2, "TIM1_CH4"), + STM32_FUNCTION(6, "SPI4_MOSI"), + STM32_FUNCTION(11, "SAI2_MCLK_B"), + STM32_FUNCTION(13, "FMC_D11"), + STM32_FUNCTION(15, "LCD_CLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(79, "PE15"), + STM32_FUNCTION(0, "GPIOE15"), + STM32_FUNCTION(2, "TIM1_BKIN"), + STM32_FUNCTION(13, "FMC_D12"), + STM32_FUNCTION(15, "LCD_R7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(80, "PF0"), + STM32_FUNCTION(0, "GPIOF0"), + STM32_FUNCTION(5, "I2C2_SDA"), + STM32_FUNCTION(13, "FMC_A0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(81, "PF1"), + STM32_FUNCTION(0, "GPIOF1"), + STM32_FUNCTION(5, "I2C2_SCL"), + STM32_FUNCTION(13, "FMC_A1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(82, "PF2"), + STM32_FUNCTION(0, "GPIOF2"), + STM32_FUNCTION(5, "I2C2_SMBA"), + STM32_FUNCTION(13, "FMC_A2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(83, "PF3"), + STM32_FUNCTION(0, "GPIOF3"), + STM32_FUNCTION(13, "FMC_A3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(84, "PF4"), + STM32_FUNCTION(0, "GPIOF4"), + STM32_FUNCTION(13, "FMC_A4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(85, "PF5"), + STM32_FUNCTION(0, "GPIOF5"), + STM32_FUNCTION(13, "FMC_A5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(86, "PF6"), + STM32_FUNCTION(0, "GPIOF6"), + STM32_FUNCTION(4, "TIM10_CH1"), + STM32_FUNCTION(6, "SPI5_NSS"), + STM32_FUNCTION(7, "SAI1_SD_B"), + STM32_FUNCTION(9, "UART7_RX"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(87, "PF7"), + STM32_FUNCTION(0, "GPIOF7"), + STM32_FUNCTION(4, "TIM11_CH1"), + STM32_FUNCTION(6, "SPI5_SCK"), + STM32_FUNCTION(7, "SAI1_MCLK_B"), + STM32_FUNCTION(9, "UART7_TX"), + STM32_FUNCTION(10, "QUADSPI_BK1_IO2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(88, "PF8"), + STM32_FUNCTION(0, "GPIOF8"), + STM32_FUNCTION(6, "SPI5_MISO"), + STM32_FUNCTION(7, "SAI1_SCK_B"), + STM32_FUNCTION(9, "UART7_RTS"), + STM32_FUNCTION(10, "TIM13_CH1"), + STM32_FUNCTION(11, "QUADSPI_BK1_IO0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, 
"ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(89, "PF9"), + STM32_FUNCTION(0, "GPIOF9"), + STM32_FUNCTION(6, "SPI5_MOSI"), + STM32_FUNCTION(7, "SAI1_FS_B"), + STM32_FUNCTION(9, "UART7_CTS"), + STM32_FUNCTION(10, "TIM14_CH1"), + STM32_FUNCTION(11, "QUADSPI_BK1_IO1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(90, "PF10"), + STM32_FUNCTION(0, "GPIOF10"), + STM32_FUNCTION(14, "DCMI_D11"), + STM32_FUNCTION(15, "LCD_DE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(91, "PF11"), + STM32_FUNCTION(0, "GPIOF11"), + STM32_FUNCTION(6, "SPI5_MOSI"), + STM32_FUNCTION(11, "SAI2_SD_B"), + STM32_FUNCTION(13, "FMC_SDNRAS"), + STM32_FUNCTION(14, "DCMI_D12"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(92, "PF12"), + STM32_FUNCTION(0, "GPIOF12"), + STM32_FUNCTION(13, "FMC_A6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(93, "PF13"), + STM32_FUNCTION(0, "GPIOF13"), + STM32_FUNCTION(5, "I2C4_SMBA"), + STM32_FUNCTION(13, "FMC_A7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(94, "PF14"), + STM32_FUNCTION(0, "GPIOF14"), + STM32_FUNCTION(5, "I2C4_SCL"), + STM32_FUNCTION(13, "FMC_A8"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(95, "PF15"), + STM32_FUNCTION(0, "GPIOF15"), + STM32_FUNCTION(5, "I2C4_SDA"), + STM32_FUNCTION(13, "FMC_A9"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(96, "PG0"), + STM32_FUNCTION(0, "GPIOG0"), + STM32_FUNCTION(13, "FMC_A10"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(97, "PG1"), + STM32_FUNCTION(0, "GPIOG1"), + STM32_FUNCTION(13, "FMC_A11"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(98, "PG2"), + STM32_FUNCTION(0, "GPIOG2"), + STM32_FUNCTION(13, "FMC_A12"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(99, "PG3"), + STM32_FUNCTION(0, "GPIOG3"), + STM32_FUNCTION(13, "FMC_A13"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(100, "PG4"), + STM32_FUNCTION(0, "GPIOG4"), + STM32_FUNCTION(13, "FMC_A14 FMC_BA0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(101, "PG5"), + STM32_FUNCTION(0, "GPIOG5"), + STM32_FUNCTION(13, "FMC_A15 FMC_BA1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(102, "PG6"), + STM32_FUNCTION(0, "GPIOG6"), + STM32_FUNCTION(14, "DCMI_D12"), + STM32_FUNCTION(15, "LCD_R7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(103, "PG7"), + STM32_FUNCTION(0, "GPIOG7"), + STM32_FUNCTION(9, "USART6_CK"), + STM32_FUNCTION(13, "FMC_INT"), + STM32_FUNCTION(14, "DCMI_D13"), + STM32_FUNCTION(15, "LCD_CLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(104, "PG8"), + STM32_FUNCTION(0, "GPIOG8"), + STM32_FUNCTION(6, "SPI6_NSS"), + STM32_FUNCTION(8, "SPDIFRX_IN2"), + STM32_FUNCTION(9, "USART6_RTS"), + STM32_FUNCTION(12, "ETH_PPS_OUT"), + STM32_FUNCTION(13, "FMC_SDCLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(105, "PG9"), + STM32_FUNCTION(0, "GPIOG9"), + 
STM32_FUNCTION(8, "SPDIFRX_IN3"), + STM32_FUNCTION(9, "USART6_RX"), + STM32_FUNCTION(10, "QUADSPI_BK2_IO2"), + STM32_FUNCTION(11, "SAI2_FS_B"), + STM32_FUNCTION(13, "FMC_NE2 FMC_NCE"), + STM32_FUNCTION(14, "DCMI_VSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(106, "PG10"), + STM32_FUNCTION(0, "GPIOG10"), + STM32_FUNCTION(10, "LCD_G3"), + STM32_FUNCTION(11, "SAI2_SD_B"), + STM32_FUNCTION(13, "FMC_NE3"), + STM32_FUNCTION(14, "DCMI_D2"), + STM32_FUNCTION(15, "LCD_B2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(107, "PG11"), + STM32_FUNCTION(0, "GPIOG11"), + STM32_FUNCTION(8, "SPDIFRX_IN0"), + STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"), + STM32_FUNCTION(14, "DCMI_D3"), + STM32_FUNCTION(15, "LCD_B3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(108, "PG12"), + STM32_FUNCTION(0, "GPIOG12"), + STM32_FUNCTION(4, "LPTIM1_IN1"), + STM32_FUNCTION(6, "SPI6_MISO"), + STM32_FUNCTION(8, "SPDIFRX_IN1"), + STM32_FUNCTION(9, "USART6_RTS"), + STM32_FUNCTION(10, "LCD_B4"), + STM32_FUNCTION(13, "FMC_NE4"), + STM32_FUNCTION(15, "LCD_B1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(109, "PG13"), + STM32_FUNCTION(0, "GPIOG13"), + STM32_FUNCTION(1, "TRACED0"), + STM32_FUNCTION(4, "LPTIM1_OUT"), + STM32_FUNCTION(6, "SPI6_SCK"), + STM32_FUNCTION(9, "USART6_CTS"), + STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"), + STM32_FUNCTION(13, "FMC_A24"), + STM32_FUNCTION(15, "LCD_R0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(110, "PG14"), + STM32_FUNCTION(0, "GPIOG14"), + STM32_FUNCTION(1, "TRACED1"), + STM32_FUNCTION(4, "LPTIM1_ETR"), + STM32_FUNCTION(6, "SPI6_MOSI"), + STM32_FUNCTION(9, "USART6_TX"), + STM32_FUNCTION(10, "QUADSPI_BK2_IO3"), + STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"), + STM32_FUNCTION(13, "FMC_A25"), + STM32_FUNCTION(15, "LCD_B0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(111, "PG15"), + STM32_FUNCTION(0, "GPIOG15"), + STM32_FUNCTION(9, "USART6_CTS"), + STM32_FUNCTION(13, "FMC_SDNCAS"), + STM32_FUNCTION(14, "DCMI_D13"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(112, "PH0"), + STM32_FUNCTION(0, "GPIOH0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(113, "PH1"), + STM32_FUNCTION(0, "GPIOH1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(114, "PH2"), + STM32_FUNCTION(0, "GPIOH2"), + STM32_FUNCTION(4, "LPTIM1_IN2"), + STM32_FUNCTION(10, "QUADSPI_BK2_IO0"), + STM32_FUNCTION(11, "SAI2_SCK_B"), + STM32_FUNCTION(12, "ETH_MII_CRS"), + STM32_FUNCTION(13, "FMC_SDCKE0"), + STM32_FUNCTION(15, "LCD_R0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(115, "PH3"), + STM32_FUNCTION(0, "GPIOH3"), + STM32_FUNCTION(10, "QUADSPI_BK2_IO1"), + STM32_FUNCTION(11, "SAI2_MCLK_B"), + STM32_FUNCTION(12, "ETH_MII_COL"), + STM32_FUNCTION(13, "FMC_SDNE0"), + STM32_FUNCTION(15, "LCD_R1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(116, "PH4"), + STM32_FUNCTION(0, "GPIOH4"), + STM32_FUNCTION(5, "I2C2_SCL"), + STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + 
PINCTRL_PIN(117, "PH5"), + STM32_FUNCTION(0, "GPIOH5"), + STM32_FUNCTION(5, "I2C2_SDA"), + STM32_FUNCTION(6, "SPI5_NSS"), + STM32_FUNCTION(13, "FMC_SDNWE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(118, "PH6"), + STM32_FUNCTION(0, "GPIOH6"), + STM32_FUNCTION(5, "I2C2_SMBA"), + STM32_FUNCTION(6, "SPI5_SCK"), + STM32_FUNCTION(10, "TIM12_CH1"), + STM32_FUNCTION(12, "ETH_MII_RXD2"), + STM32_FUNCTION(13, "FMC_SDNE1"), + STM32_FUNCTION(14, "DCMI_D8"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(119, "PH7"), + STM32_FUNCTION(0, "GPIOH7"), + STM32_FUNCTION(5, "I2C3_SCL"), + STM32_FUNCTION(6, "SPI5_MISO"), + STM32_FUNCTION(12, "ETH_MII_RXD3"), + STM32_FUNCTION(13, "FMC_SDCKE1"), + STM32_FUNCTION(14, "DCMI_D9"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(120, "PH8"), + STM32_FUNCTION(0, "GPIOH8"), + STM32_FUNCTION(5, "I2C3_SDA"), + STM32_FUNCTION(13, "FMC_D16"), + STM32_FUNCTION(14, "DCMI_HSYNC"), + STM32_FUNCTION(15, "LCD_R2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(121, "PH9"), + STM32_FUNCTION(0, "GPIOH9"), + STM32_FUNCTION(5, "I2C3_SMBA"), + STM32_FUNCTION(10, "TIM12_CH2"), + STM32_FUNCTION(13, "FMC_D17"), + STM32_FUNCTION(14, "DCMI_D0"), + STM32_FUNCTION(15, "LCD_R3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(122, "PH10"), + STM32_FUNCTION(0, "GPIOH10"), + STM32_FUNCTION(3, "TIM5_CH1"), + STM32_FUNCTION(5, "I2C4_SMBA"), + STM32_FUNCTION(13, "FMC_D18"), + STM32_FUNCTION(14, "DCMI_D1"), + STM32_FUNCTION(15, "LCD_R4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(123, "PH11"), + STM32_FUNCTION(0, "GPIOH11"), + STM32_FUNCTION(3, "TIM5_CH2"), + STM32_FUNCTION(5, "I2C4_SCL"), + STM32_FUNCTION(13, "FMC_D19"), + STM32_FUNCTION(14, "DCMI_D2"), + STM32_FUNCTION(15, "LCD_R5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(124, "PH12"), + STM32_FUNCTION(0, "GPIOH12"), + STM32_FUNCTION(3, "TIM5_CH3"), + STM32_FUNCTION(5, "I2C4_SDA"), + STM32_FUNCTION(13, "FMC_D20"), + STM32_FUNCTION(14, "DCMI_D3"), + STM32_FUNCTION(15, "LCD_R6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(125, "PH13"), + STM32_FUNCTION(0, "GPIOH13"), + STM32_FUNCTION(4, "TIM8_CH1N"), + STM32_FUNCTION(10, "CAN1_TX"), + STM32_FUNCTION(13, "FMC_D21"), + STM32_FUNCTION(15, "LCD_G2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(126, "PH14"), + STM32_FUNCTION(0, "GPIOH14"), + STM32_FUNCTION(4, "TIM8_CH2N"), + STM32_FUNCTION(13, "FMC_D22"), + STM32_FUNCTION(14, "DCMI_D4"), + STM32_FUNCTION(15, "LCD_G3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(127, "PH15"), + STM32_FUNCTION(0, "GPIOH15"), + STM32_FUNCTION(4, "TIM8_CH3N"), + STM32_FUNCTION(13, "FMC_D23"), + STM32_FUNCTION(14, "DCMI_D11"), + STM32_FUNCTION(15, "LCD_G4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(128, "PI0"), + STM32_FUNCTION(0, "GPIOI0"), + STM32_FUNCTION(3, "TIM5_CH4"), + STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"), + STM32_FUNCTION(13, "FMC_D24"), + STM32_FUNCTION(14, "DCMI_D13"), + STM32_FUNCTION(15, "LCD_G5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + 
PINCTRL_PIN(129, "PI1"), + STM32_FUNCTION(0, "GPIOI1"), + STM32_FUNCTION(4, "TIM8_BKIN2"), + STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"), + STM32_FUNCTION(13, "FMC_D25"), + STM32_FUNCTION(14, "DCMI_D8"), + STM32_FUNCTION(15, "LCD_G6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(130, "PI2"), + STM32_FUNCTION(0, "GPIOI2"), + STM32_FUNCTION(4, "TIM8_CH4"), + STM32_FUNCTION(6, "SPI2_MISO"), + STM32_FUNCTION(13, "FMC_D26"), + STM32_FUNCTION(14, "DCMI_D9"), + STM32_FUNCTION(15, "LCD_G7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(131, "PI3"), + STM32_FUNCTION(0, "GPIOI3"), + STM32_FUNCTION(4, "TIM8_ETR"), + STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"), + STM32_FUNCTION(13, "FMC_D27"), + STM32_FUNCTION(14, "DCMI_D10"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(132, "PI4"), + STM32_FUNCTION(0, "GPIOI4"), + STM32_FUNCTION(4, "TIM8_BKIN"), + STM32_FUNCTION(11, "SAI2_MCLK_A"), + STM32_FUNCTION(13, "FMC_NBL2"), + STM32_FUNCTION(14, "DCMI_D5"), + STM32_FUNCTION(15, "LCD_B4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(133, "PI5"), + STM32_FUNCTION(0, "GPIOI5"), + STM32_FUNCTION(4, "TIM8_CH1"), + STM32_FUNCTION(11, "SAI2_SCK_A"), + STM32_FUNCTION(13, "FMC_NBL3"), + STM32_FUNCTION(14, "DCMI_VSYNC"), + STM32_FUNCTION(15, "LCD_B5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(134, "PI6"), + STM32_FUNCTION(0, "GPIOI6"), + STM32_FUNCTION(4, "TIM8_CH2"), + STM32_FUNCTION(11, "SAI2_SD_A"), + STM32_FUNCTION(13, "FMC_D28"), + STM32_FUNCTION(14, "DCMI_D6"), + STM32_FUNCTION(15, "LCD_B6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(135, "PI7"), + STM32_FUNCTION(0, "GPIOI7"), + STM32_FUNCTION(4, "TIM8_CH3"), + STM32_FUNCTION(11, "SAI2_FS_A"), + STM32_FUNCTION(13, "FMC_D29"), + STM32_FUNCTION(14, "DCMI_D7"), + STM32_FUNCTION(15, "LCD_B7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(136, "PI8"), + STM32_FUNCTION(0, "GPIOI8"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(137, "PI9"), + STM32_FUNCTION(0, "GPIOI9"), + STM32_FUNCTION(10, "CAN1_RX"), + STM32_FUNCTION(13, "FMC_D30"), + STM32_FUNCTION(15, "LCD_VSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(138, "PI10"), + STM32_FUNCTION(0, "GPIOI10"), + STM32_FUNCTION(12, "ETH_MII_RX_ER"), + STM32_FUNCTION(13, "FMC_D31"), + STM32_FUNCTION(15, "LCD_HSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(139, "PI11"), + STM32_FUNCTION(0, "GPIOI11"), + STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(140, "PI12"), + STM32_FUNCTION(0, "GPIOI12"), + STM32_FUNCTION(15, "LCD_HSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(141, "PI13"), + STM32_FUNCTION(0, "GPIOI13"), + STM32_FUNCTION(15, "LCD_VSYNC"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(142, "PI14"), + STM32_FUNCTION(0, "GPIOI14"), + STM32_FUNCTION(15, "LCD_CLK"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(143, "PI15"), + STM32_FUNCTION(0, "GPIOI15"), + 
STM32_FUNCTION(15, "LCD_R0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(144, "PJ0"), + STM32_FUNCTION(0, "GPIOJ0"), + STM32_FUNCTION(15, "LCD_R1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(145, "PJ1"), + STM32_FUNCTION(0, "GPIOJ1"), + STM32_FUNCTION(15, "LCD_R2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(146, "PJ2"), + STM32_FUNCTION(0, "GPIOJ2"), + STM32_FUNCTION(15, "LCD_R3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(147, "PJ3"), + STM32_FUNCTION(0, "GPIOJ3"), + STM32_FUNCTION(15, "LCD_R4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(148, "PJ4"), + STM32_FUNCTION(0, "GPIOJ4"), + STM32_FUNCTION(15, "LCD_R5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(149, "PJ5"), + STM32_FUNCTION(0, "GPIOJ5"), + STM32_FUNCTION(15, "LCD_R6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(150, "PJ6"), + STM32_FUNCTION(0, "GPIOJ6"), + STM32_FUNCTION(15, "LCD_R7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(151, "PJ7"), + STM32_FUNCTION(0, "GPIOJ7"), + STM32_FUNCTION(15, "LCD_G0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(152, "PJ8"), + STM32_FUNCTION(0, "GPIOJ8"), + STM32_FUNCTION(15, "LCD_G1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(153, "PJ9"), + STM32_FUNCTION(0, "GPIOJ9"), + STM32_FUNCTION(15, "LCD_G2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(154, "PJ10"), + STM32_FUNCTION(0, "GPIOJ10"), + STM32_FUNCTION(15, "LCD_G3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(155, "PJ11"), + STM32_FUNCTION(0, "GPIOJ11"), + STM32_FUNCTION(15, "LCD_G4"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(156, "PJ12"), + STM32_FUNCTION(0, "GPIOJ12"), + STM32_FUNCTION(15, "LCD_B0"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(157, "PJ13"), + STM32_FUNCTION(0, "GPIOJ13"), + STM32_FUNCTION(15, "LCD_B1"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(158, "PJ14"), + STM32_FUNCTION(0, "GPIOJ14"), + STM32_FUNCTION(15, "LCD_B2"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(159, "PJ15"), + STM32_FUNCTION(0, "GPIOJ15"), + STM32_FUNCTION(15, "LCD_B3"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(160, "PK0"), + STM32_FUNCTION(0, "GPIOK0"), + STM32_FUNCTION(15, "LCD_G5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(161, "PK1"), + STM32_FUNCTION(0, "GPIOK1"), + STM32_FUNCTION(15, "LCD_G6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(162, "PK2"), + STM32_FUNCTION(0, "GPIOK2"), + STM32_FUNCTION(15, "LCD_G7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(163, "PK3"), + STM32_FUNCTION(0, "GPIOK3"), + STM32_FUNCTION(15, "LCD_B4"), + STM32_FUNCTION(16, "EVENTOUT"), + 
STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(164, "PK4"), + STM32_FUNCTION(0, "GPIOK4"), + STM32_FUNCTION(15, "LCD_B5"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(165, "PK5"), + STM32_FUNCTION(0, "GPIOK5"), + STM32_FUNCTION(15, "LCD_B6"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(166, "PK6"), + STM32_FUNCTION(0, "GPIOK6"), + STM32_FUNCTION(15, "LCD_B7"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), + STM32_PIN( + PINCTRL_PIN(167, "PK7"), + STM32_FUNCTION(0, "GPIOK7"), + STM32_FUNCTION(15, "LCD_DE"), + STM32_FUNCTION(16, "EVENTOUT"), + STM32_FUNCTION(17, "ANALOG") + ), +}; + +static struct stm32_pinctrl_match_data stm32f746_match_data = { + .pins = stm32f746_pins, + .npins = ARRAY_SIZE(stm32f746_pins), +}; + +static const struct of_device_id stm32f746_pctrl_match[] = { + { + .compatible = "st,stm32f746-pinctrl", + .data = &stm32f746_match_data, + }, + { } +}; + +static struct platform_driver stm32f746_pinctrl_driver = { + .probe = stm32_pctl_probe, + .driver = { + .name = "stm32f746-pinctrl", + .of_match_table = stm32f746_pctrl_match, + }, +}; +builtin_platform_driver(stm32f746_pinctrl_driver); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c index 55083d278bb1..ce483b03a263 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c @@ -180,17 +180,17 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQS */ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17), SUNXI_FUNCTION(0x0, "gpio_in"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c index 8b381d69df86..3040abe6f73a 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c @@ -140,17 +140,17 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQS */ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c index 11760bbe9d51..26a2ad3b651f 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c +++ 
b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c @@ -219,17 +219,17 @@ static const struct sunxi_desc_pin sun8i_h3_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQS */ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0), diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 6e82b290cb4f..277622b4b6fb 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -632,11 +632,11 @@ static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx) u32 val; for (i = 0; i < pmx->soc->ngroups; ++i) { - if (pmx->soc->groups[i].parked_reg >= 0) { - g = &pmx->soc->groups[i]; - val = pmx_readl(pmx, g->parked_bank, g->parked_reg); + g = &pmx->soc->groups[i]; + if (g->parked_bit >= 0) { + val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(1 << g->parked_bit); - pmx_writel(pmx, val, g->parked_bank, g->parked_reg); + pmx_writel(pmx, val, g->mux_bank, g->mux_reg); } } } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h index d2ced17382b5..33b17cb1471e 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.h +++ b/drivers/pinctrl/tegra/pinctrl-tegra.h @@ -93,9 +93,7 @@ struct tegra_function { * @tri_reg: Tri-state register offset. * @tri_bank: Tri-state register bank. * @tri_bit: Tri-state register bit. - * @parked_reg: Parked register offset. -1 if unsupported. - * @parked_bank: Parked register bank. 0 if unsupported. - * @parked_bit: Parked register bit. 0 if unsupported. + * @parked_bit: Parked register bit. -1 if unsupported. * @einput_bit: Enable-input register bit. * @odrain_bit: Open-drain register bit. * @lock_bit: Lock register bit. 
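The Tegra change above folds the parked-bit bookkeeping into the existing mux register: instead of carrying separate parked_reg/parked_bank fields per pin group, a group either has a parked bit inside its own mux register (parked_bit >= 0) or opts out with the sentinel parked_bit = -1, which is exactly what the per-SoC table macros below switch to when ".parked_reg = -1" becomes ".parked_bit = -1". A minimal standalone C sketch of that read-modify-write pattern follows; it is illustrative only, with a toy flat register array standing in for the driver's banked pmx_readl()/pmx_writel() accessors, and all names in it are hypothetical, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    /* simplified stand-in for struct tegra_pingroup */
    struct pingroup {
            int16_t mux_reg;    /* mux register offset, in bytes */
            int8_t  parked_bit; /* bit within the mux register, -1 if unsupported */
    };

    static uint32_t regs[4];    /* toy register file, indexed by mux_reg / 4 */

    static void clear_parked_bits(const struct pingroup *g, int ngroups)
    {
            int i;

            for (i = 0; i < ngroups; i++, g++) {
                    if (g->parked_bit < 0)
                            continue;       /* group has no parked bit */
                    /* read-modify-write of the mux register, as in the diff */
                    regs[g->mux_reg / 4] &= ~(1u << g->parked_bit);
            }
    }

    int main(void)
    {
            static const struct pingroup groups[] = {
                    { .mux_reg = 0, .parked_bit = 5 },  /* parked in bit 5 */
                    { .mux_reg = 4, .parked_bit = -1 }, /* parking unsupported */
            };

            regs[0] = (1u << 5) | 0x3;  /* group 0 starts parked */
            regs[1] = 1u << 5;          /* must stay untouched */
            clear_parked_bits(groups, 2);
            printf("reg0=%#x reg1=%#x\n", regs[0], regs[1]); /* reg0=0x3 reg1=0x20 */
            return 0;
    }

The -1 sentinel mirrors how the driver already encodes absent registers (drv_reg, tri_reg), so dropping parked_reg/parked_bank shrinks struct tegra_pingroup without changing which groups get un-parked.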
@@ -138,12 +136,10 @@ struct tegra_pingroup { s16 pupd_reg; s16 tri_reg; s16 drv_reg; - s16 parked_reg; u32 mux_bank:2; u32 pupd_bank:2; u32 tri_bank:2; u32 drv_bank:2; - u32 parked_bank:2; s32 mux_bit:6; s32 pupd_bit:6; s32 tri_bit:6; diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c index 4851d169f4c7..952132ce5ea0 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra114.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c @@ -1578,7 +1578,7 @@ static struct tegra_function tegra114_functions[] = { .lock_bit = 7, \ .ioreset_bit = PINGROUP_BIT_##ior(8), \ .rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \ - .parked_reg = -1, \ + .parked_bit = -1, \ .drv_reg = -1, \ } @@ -1599,7 +1599,7 @@ static struct tegra_function tegra114_functions[] = { .rcv_sel_bit = -1, \ .drv_reg = DRV_PINGROUP_REG(r), \ .drv_bank = 0, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .hsm_bit = hsm_b, \ .schmitt_bit = schmitt_b, \ .lpmd_bit = lpmd_b, \ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c index a0ce723a9482..bca239e3ae50 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra124.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c @@ -1747,7 +1747,7 @@ static struct tegra_function tegra124_functions[] = { .lock_bit = 7, \ .ioreset_bit = PINGROUP_BIT_##ior(8), \ .rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \ - .parked_reg = -1, \ + .parked_bit = -1, \ .drv_reg = -1, \ } @@ -1768,7 +1768,7 @@ static struct tegra_function tegra124_functions[] = { .rcv_sel_bit = -1, \ .drv_reg = DRV_PINGROUP_REG(r), \ .drv_bank = 0, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .hsm_bit = hsm_b, \ .schmitt_bit = schmitt_b, \ .lpmd_bit = lpmd_b, \ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c index 09bad6980ad1..ad62451a5a9b 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra20.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c @@ -1994,7 +1994,7 @@ static struct tegra_function tegra20_functions[] = { .tri_reg = ((tri_r) - TRISTATE_REG_A), \ .tri_bank = 0, \ .tri_bit = tri_b, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .einput_bit = -1, \ .odrain_bit = -1, \ .lock_bit = -1, \ @@ -2014,7 +2014,7 @@ static struct tegra_function tegra20_functions[] = { .pupd_bank = 2, \ .pupd_bit = pupd_b, \ .drv_reg = -1, \ - .parked_reg = -1, \ + .parked_bit = -1, \ } /* Pin groups for drive strength registers (configurable version) */ @@ -2030,7 +2030,7 @@ static struct tegra_function tegra20_functions[] = { .tri_reg = -1, \ .drv_reg = ((r) - PINGROUP_REG_A), \ .drv_bank = 3, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .hsm_bit = hsm_b, \ .schmitt_bit = schmitt_b, \ .lpmd_bit = lpmd_b, \ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c index 2d856af389ef..2b70e93da9db 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra210.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c @@ -1310,8 +1310,6 @@ static struct tegra_function tegra210_functions[] = { .lock_bit = 7, \ .ioreset_bit = -1, \ .rcv_sel_bit = PINGROUP_BIT_##e_io_hv(10), \ - .parked_reg = PINGROUP_REG(r), \ - .parked_bank = 1, \ .parked_bit = 5, \ .hsm_bit = PINGROUP_BIT_##hsm(9), \ .schmitt_bit = 12, \ @@ -1345,7 +1343,7 @@ static struct tegra_function tegra210_functions[] = { .rcv_sel_bit = -1, \ .drv_reg = DRV_PINGROUP_REG(r), \ .drv_bank = 0, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .hsm_bit = -1, \ .schmitt_bit = -1, \ .lpmd_bit = -1, \ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c 
b/drivers/pinctrl/tegra/pinctrl-tegra30.c index fb7817fea2d9..474ac6daf513 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra30.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c @@ -2139,7 +2139,7 @@ static struct tegra_function tegra30_functions[] = { .lock_bit = 7, \ .ioreset_bit = PINGROUP_BIT_##ior(8), \ .rcv_sel_bit = -1, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .drv_reg = -1, \ } @@ -2160,7 +2160,7 @@ static struct tegra_function tegra30_functions[] = { .rcv_sel_bit = -1, \ .drv_reg = DRV_PINGROUP_REG(r), \ .drv_bank = 0, \ - .parked_reg = -1, \ + .parked_bit = -1, \ .hsm_bit = hsm_b, \ .schmitt_bit = schmitt_b, \ .lpmd_bit = lpmd_b, \ diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig index 0b40ded5738f..e077a9ec23d9 100644 --- a/drivers/pinctrl/uniphier/Kconfig +++ b/drivers/pinctrl/uniphier/Kconfig @@ -10,26 +10,34 @@ if PINCTRL_UNIPHIER config PINCTRL_UNIPHIER_LD4 tristate "UniPhier PH1-LD4 SoC pinctrl driver" - default y + default ARM config PINCTRL_UNIPHIER_PRO4 tristate "UniPhier PH1-Pro4 SoC pinctrl driver" - default y + default ARM config PINCTRL_UNIPHIER_SLD8 tristate "UniPhier PH1-sLD8 SoC pinctrl driver" - default y + default ARM config PINCTRL_UNIPHIER_PRO5 tristate "UniPhier PH1-Pro5 SoC pinctrl driver" - default y + default ARM config PINCTRL_UNIPHIER_PXS2 tristate "UniPhier ProXstream2 SoC pinctrl driver" - default y + default ARM config PINCTRL_UNIPHIER_LD6B tristate "UniPhier PH1-LD6b SoC pinctrl driver" - default y + default ARM + +config PINCTRL_UNIPHIER_LD11 + tristate "UniPhier PH1-LD11 SoC pinctrl driver" + default ARM64 + +config PINCTRL_UNIPHIER_LD20 + tristate "UniPhier PH1-LD20 SoC pinctrl driver" + default ARM64 endif diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile index 3b8f9ee0bb6f..9f4bc8aa6f68 100644 --- a/drivers/pinctrl/uniphier/Makefile +++ b/drivers/pinctrl/uniphier/Makefile @@ -6,3 +6,5 @@ obj-$(CONFIG_PINCTRL_UNIPHIER_SLD8) += pinctrl-uniphier-sld8.o obj-$(CONFIG_PINCTRL_UNIPHIER_PRO5) += pinctrl-uniphier-pro5.o obj-$(CONFIG_PINCTRL_UNIPHIER_PXS2) += pinctrl-uniphier-pxs2.o obj-$(CONFIG_PINCTRL_UNIPHIER_LD6B) += pinctrl-uniphier-ld6b.o +obj-$(CONFIG_PINCTRL_UNIPHIER_LD11) += pinctrl-uniphier-ld11.o +obj-$(CONFIG_PINCTRL_UNIPHIER_LD20) += pinctrl-uniphier-ld20.o diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c index 967400971d45..9b2ee717bccc 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c @@ -14,6 +14,7 @@ #include <linux/export.h> #include <linux/mfd/syscon.h> +#include <linux/of.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> @@ -26,8 +27,10 @@ #include "pinctrl-uniphier.h" struct uniphier_pinctrl_priv { + struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct regmap *regmap; + unsigned int regbase; struct uniphier_pinctrl_socdata *socdata; }; @@ -63,16 +66,22 @@ static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev, static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { - const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset]; - const char *pull_dir, *drv_str; + const struct pin_desc *desc = pin_desc_get(pctldev, offset); + const char *pull_dir, *drv_type; - switch (uniphier_pin_get_pull_dir(pin->drv_data)) { + switch (uniphier_pin_get_pull_dir(desc->drv_data)) { case UNIPHIER_PIN_PULL_UP: 
pull_dir = "UP"; break; case UNIPHIER_PIN_PULL_DOWN: pull_dir = "DOWN"; break; + case UNIPHIER_PIN_PULL_UP_FIXED: + pull_dir = "UP(FIXED)"; + break; + case UNIPHIER_PIN_PULL_DOWN_FIXED: + pull_dir = "DOWN(FIXED)"; + break; case UNIPHIER_PIN_PULL_NONE: pull_dir = "NONE"; break; @@ -80,30 +89,33 @@ static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev, BUG(); } - switch (uniphier_pin_get_drv_str(pin->drv_data)) { - case UNIPHIER_PIN_DRV_4_8: - drv_str = "4/8(mA)"; + switch (uniphier_pin_get_drv_type(desc->drv_data)) { + case UNIPHIER_PIN_DRV_1BIT: + drv_type = "4/8(mA)"; + break; + case UNIPHIER_PIN_DRV_2BIT: + drv_type = "8/12/16/20(mA)"; break; - case UNIPHIER_PIN_DRV_8_12_16_20: - drv_str = "8/12/16/20(mA)"; + case UNIPHIER_PIN_DRV_3BIT: + drv_type = "4/5/7/9/11/12/14/16(mA)"; break; - case UNIPHIER_PIN_DRV_FIXED_4: - drv_str = "4(mA)"; + case UNIPHIER_PIN_DRV_FIXED4: + drv_type = "4(mA)"; break; - case UNIPHIER_PIN_DRV_FIXED_5: - drv_str = "5(mA)"; + case UNIPHIER_PIN_DRV_FIXED5: + drv_type = "5(mA)"; break; - case UNIPHIER_PIN_DRV_FIXED_8: - drv_str = "8(mA)"; + case UNIPHIER_PIN_DRV_FIXED8: + drv_type = "8(mA)"; break; case UNIPHIER_PIN_DRV_NONE: - drv_str = "NONE"; + drv_type = "NONE"; break; default: BUG(); } - seq_printf(s, " PULL_DIR=%s DRV_STR=%s", pull_dir, drv_str); + seq_printf(s, " PULL_DIR=%s DRV_TYPE=%s", pull_dir, drv_type); } #endif @@ -119,12 +131,12 @@ static const struct pinctrl_ops uniphier_pctlops = { }; static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin, + const struct pin_desc *desc, enum pin_config_param param) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); enum uniphier_pin_pull_dir pull_dir = - uniphier_pin_get_pull_dir(pin->drv_data); + uniphier_pin_get_pull_dir(desc->drv_data); unsigned int pupdctrl, reg, shift, val; unsigned int expected = 1; int ret; @@ -154,12 +166,12 @@ static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev, BUG(); } - pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data); + pupdctrl = uniphier_pin_get_pupdctrl(desc->drv_data); reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4; shift = pupdctrl % 32; - ret = regmap_read(priv->regmap, reg, &val); + ret = regmap_read(priv->regmap, priv->regbase + reg, &val); if (ret) return ret; @@ -169,34 +181,42 @@ static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev, } static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin, + const struct pin_desc *desc, u16 *strength) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); - enum uniphier_pin_drv_str drv_str = - uniphier_pin_get_drv_str(pin->drv_data); - const unsigned int strength_4_8[] = {4, 8}; - const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20}; + enum uniphier_pin_drv_type type = + uniphier_pin_get_drv_type(desc->drv_data); + const unsigned int strength_1bit[] = {4, 8}; + const unsigned int strength_2bit[] = {8, 12, 16, 20}; + const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16}; const unsigned int *supported_strength; unsigned int drvctrl, reg, shift, mask, width, val; int ret; - switch (drv_str) { - case UNIPHIER_PIN_DRV_4_8: - supported_strength = strength_4_8; + switch (type) { + case UNIPHIER_PIN_DRV_1BIT: + supported_strength = strength_1bit; + reg = UNIPHIER_PINCTRL_DRVCTRL_BASE; width = 1; break; - case UNIPHIER_PIN_DRV_8_12_16_20: - supported_strength = strength_8_12_16_20; + case UNIPHIER_PIN_DRV_2BIT: + supported_strength = 
strength_2bit; + reg = UNIPHIER_PINCTRL_DRV2CTRL_BASE; width = 2; break; - case UNIPHIER_PIN_DRV_FIXED_4: + case UNIPHIER_PIN_DRV_3BIT: + supported_strength = strength_3bit; + reg = UNIPHIER_PINCTRL_DRV3CTRL_BASE; + width = 4; + break; + case UNIPHIER_PIN_DRV_FIXED4: *strength = 4; return 0; - case UNIPHIER_PIN_DRV_FIXED_5: + case UNIPHIER_PIN_DRV_FIXED5: *strength = 5; return 0; - case UNIPHIER_PIN_DRV_FIXED_8: + case UNIPHIER_PIN_DRV_FIXED8: *strength = 8; return 0; default: @@ -204,17 +224,14 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev, return -EINVAL; } - drvctrl = uniphier_pin_get_drvctrl(pin->drv_data); + drvctrl = uniphier_pin_get_drvctrl(desc->drv_data); drvctrl *= width; - reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE : - UNIPHIER_PINCTRL_DRVCTRL_BASE; - reg += drvctrl / 32 * 4; shift = drvctrl % 32; mask = (1U << width) - 1; - ret = regmap_read(priv->regmap, reg, &val); + ret = regmap_read(priv->regmap, priv->regbase + reg, &val); if (ret) return ret; @@ -224,10 +241,10 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev, } static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin) + const struct pin_desc *desc) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); - unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data); + unsigned int iectrl = uniphier_pin_get_iectrl(desc->drv_data); unsigned int val; int ret; @@ -235,7 +252,8 @@ static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev, /* This pin is always input-enabled. */ return 0; - ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val); + ret = regmap_read(priv->regmap, + priv->regbase + UNIPHIER_PINCTRL_IECTRL, &val); if (ret) return ret; @@ -246,7 +264,7 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs) { - const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin]; + const struct pin_desc *desc = pin_desc_get(pctldev, pin); enum pin_config_param param = pinconf_to_config_param(*configs); bool has_arg = false; u16 arg; @@ -256,14 +274,14 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_DISABLE: case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: - ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param); + ret = uniphier_conf_pin_bias_get(pctldev, desc, param); break; case PIN_CONFIG_DRIVE_STRENGTH: - ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg); + ret = uniphier_conf_pin_drive_get(pctldev, desc, &arg); has_arg = true; break; case PIN_CONFIG_INPUT_ENABLE: - ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc); + ret = uniphier_conf_pin_input_enable_get(pctldev, desc); break; default: /* unsupported parameter */ @@ -278,13 +296,12 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev, } static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin, - enum pin_config_param param, - u16 arg) + const struct pin_desc *desc, + enum pin_config_param param, u16 arg) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); enum uniphier_pin_pull_dir pull_dir = - uniphier_pin_get_pull_dir(pin->drv_data); + uniphier_pin_get_pull_dir(desc->drv_data); unsigned int pupdctrl, reg, shift; unsigned int val = 1; @@ -295,8 +312,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED || pull_dir == 
UNIPHIER_PIN_PULL_DOWN_FIXED) { dev_err(pctldev->dev, - "can not disable pull register for pin %u (%s)\n", - pin->number, pin->name); + "can not disable pull register for pin %s\n", + desc->name); return -EINVAL; } val = 0; @@ -306,8 +323,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, return 0; if (pull_dir != UNIPHIER_PIN_PULL_UP) { dev_err(pctldev->dev, - "pull-up is unsupported for pin %u (%s)\n", - pin->number, pin->name); + "pull-up is unsupported for pin %s\n", + desc->name); return -EINVAL; } if (arg == 0) { @@ -320,8 +337,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, return 0; if (pull_dir != UNIPHIER_PIN_PULL_DOWN) { dev_err(pctldev->dev, - "pull-down is unsupported for pin %u (%s)\n", - pin->number, pin->name); + "pull-down is unsupported for pin %s\n", + desc->name); return -EINVAL; } if (arg == 0) { @@ -332,8 +349,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: if (pull_dir == UNIPHIER_PIN_PULL_NONE) { dev_err(pctldev->dev, - "pull-up/down is unsupported for pin %u (%s)\n", - pin->number, pin->name); + "pull-up/down is unsupported for pin %s\n", + desc->name); return -EINVAL; } @@ -344,39 +361,48 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev, BUG(); } - pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data); + pupdctrl = uniphier_pin_get_pupdctrl(desc->drv_data); reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4; shift = pupdctrl % 32; - return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift); + return regmap_update_bits(priv->regmap, priv->regbase + reg, + 1 << shift, val << shift); } static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin, + const struct pin_desc *desc, u16 strength) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); - enum uniphier_pin_drv_str drv_str = - uniphier_pin_get_drv_str(pin->drv_data); - const unsigned int strength_4_8[] = {4, 8, -1}; - const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1}; + enum uniphier_pin_drv_type type = + uniphier_pin_get_drv_type(desc->drv_data); + const unsigned int strength_1bit[] = {4, 8, -1}; + const unsigned int strength_2bit[] = {8, 12, 16, 20, -1}; + const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16, -1}; const unsigned int *supported_strength; unsigned int drvctrl, reg, shift, mask, width, val; - switch (drv_str) { - case UNIPHIER_PIN_DRV_4_8: - supported_strength = strength_4_8; + switch (type) { + case UNIPHIER_PIN_DRV_1BIT: + supported_strength = strength_1bit; + reg = UNIPHIER_PINCTRL_DRVCTRL_BASE; width = 1; break; - case UNIPHIER_PIN_DRV_8_12_16_20: - supported_strength = strength_8_12_16_20; + case UNIPHIER_PIN_DRV_2BIT: + supported_strength = strength_2bit; + reg = UNIPHIER_PINCTRL_DRV2CTRL_BASE; width = 2; break; + case UNIPHIER_PIN_DRV_3BIT: + supported_strength = strength_3bit; + reg = UNIPHIER_PINCTRL_DRV3CTRL_BASE; + width = 4; + break; default: dev_err(pctldev->dev, - "cannot change drive strength for pin %u (%s)\n", - pin->number, pin->name); + "cannot change drive strength for pin %s\n", + desc->name); return -EINVAL; } @@ -387,49 +413,48 @@ static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev, if (val == 0) { dev_err(pctldev->dev, - "unsupported drive strength %u mA for pin %u (%s)\n", - strength, pin->number, pin->name); + "unsupported drive strength %u mA for pin %s\n", + strength, desc->name); return -EINVAL; } val--; - drvctrl = 
uniphier_pin_get_drvctrl(pin->drv_data); + drvctrl = uniphier_pin_get_drvctrl(desc->drv_data); drvctrl *= width; - reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE : - UNIPHIER_PINCTRL_DRVCTRL_BASE; - reg += drvctrl / 32 * 4; shift = drvctrl % 32; mask = (1U << width) - 1; - return regmap_update_bits(priv->regmap, reg, + return regmap_update_bits(priv->regmap, priv->regbase + reg, mask << shift, val << shift); } static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev, - const struct pinctrl_pin_desc *pin, + const struct pin_desc *desc, u16 enable) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); - unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data); + unsigned int iectrl = uniphier_pin_get_iectrl(desc->drv_data); + unsigned int reg, mask; - if (enable == 0) { - /* - * Multiple pins share one input enable, so per-pin disabling - * is impossible. - */ - dev_err(pctldev->dev, "unable to disable input\n"); + /* + * Multiple pins share one input enable, per-pin disabling is + * impossible. + */ + if (!(priv->socdata->caps & UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL) && + !enable) return -EINVAL; - } + /* UNIPHIER_PIN_IECTRL_NONE means the pin is always input-enabled */ if (iectrl == UNIPHIER_PIN_IECTRL_NONE) - /* This pin is always input-enabled. nothing to do. */ - return 0; + return enable ? 0 : -EINVAL; + + reg = priv->regbase + UNIPHIER_PINCTRL_IECTRL + iectrl / 32 * 4; + mask = BIT(iectrl % 32); - return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL, - BIT(iectrl), BIT(iectrl)); + return regmap_update_bits(priv->regmap, reg, mask, enable ? mask : 0); } static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev, @@ -437,7 +462,7 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev, unsigned long *configs, unsigned num_configs) { - const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin]; + const struct pin_desc *desc = pin_desc_get(pctldev, pin); int i, ret; for (i = 0; i < num_configs; i++) { @@ -450,16 +475,15 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: - ret = uniphier_conf_pin_bias_set(pctldev, pin_desc, + ret = uniphier_conf_pin_bias_set(pctldev, desc, param, arg); break; case PIN_CONFIG_DRIVE_STRENGTH: - ret = uniphier_conf_pin_drive_set(pctldev, pin_desc, - arg); + ret = uniphier_conf_pin_drive_set(pctldev, desc, arg); break; case PIN_CONFIG_INPUT_ENABLE: - ret = uniphier_conf_pin_input_enable(pctldev, - pin_desc, arg); + ret = uniphier_conf_pin_input_enable(pctldev, desc, + arg); break; default: dev_err(pctldev->dev, @@ -531,20 +555,42 @@ static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev, } static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin, - unsigned muxval) + int muxval) { struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev); - unsigned mux_bits = priv->socdata->mux_bits; - unsigned reg_stride = priv->socdata->reg_stride; - unsigned reg, reg_end, shift, mask; + unsigned int mux_bits, reg_stride, reg, reg_end, shift, mask; + bool load_pinctrl; int ret; /* some pins need input-enabling */ ret = uniphier_conf_pin_input_enable(pctldev, - &pctldev->desc->pins[pin], 1); + pin_desc_get(pctldev, pin), 1); if (ret) return ret; + if (muxval < 0) + return 0; /* dedicated pin; nothing to do for pin-mux */ + + if (priv->socdata->caps & UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE) { + /* + * Mode reg_offset 
bit_position + * Normal 4 * n shift+3:shift + * Debug 4 * n shift+7:shift+4 + */ + mux_bits = 4; + reg_stride = 8; + load_pinctrl = true; + } else { + /* + * Mode reg_offset bit_position + * Normal 8 * n shift+3:shift + * Debug 8 * n + 4 shift+3:shift + */ + mux_bits = 8; + reg_stride = 4; + load_pinctrl = false; + } + reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride; reg_end = reg + reg_stride; shift = pin * mux_bits % 32; @@ -555,16 +601,17 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin, * stored in the offset+4. */ for (; reg < reg_end; reg += 4) { - ret = regmap_update_bits(priv->regmap, reg, + ret = regmap_update_bits(priv->regmap, priv->regbase + reg, mask << shift, muxval << shift); if (ret) return ret; muxval >>= mux_bits; } - if (priv->socdata->load_pinctrl) { + if (load_pinctrl) { ret = regmap_write(priv->regmap, - UNIPHIER_PINCTRL_LOAD_PINMUX, 1); + priv->regbase + UNIPHIER_PINCTRL_LOAD_PINMUX, + 1); if (ret) return ret; } @@ -633,19 +680,16 @@ static const struct pinmux_ops uniphier_pmxops = { }; int uniphier_pinctrl_probe(struct platform_device *pdev, - struct pinctrl_desc *desc, struct uniphier_pinctrl_socdata *socdata) { struct device *dev = &pdev->dev; struct uniphier_pinctrl_priv *priv; + struct device_node *parent; if (!socdata || - !socdata->groups || - !socdata->groups_count || - !socdata->functions || - !socdata->functions_count || - !socdata->mux_bits || - !socdata->reg_stride) { + !socdata->pins || !socdata->npins || + !socdata->groups || !socdata->groups_count || + !socdata->functions || !socdata->functions_count) { dev_err(dev, "pinctrl socdata lacks necessary members\n"); return -EINVAL; } @@ -654,18 +698,36 @@ int uniphier_pinctrl_probe(struct platform_device *pdev, if (!priv) return -ENOMEM; - priv->regmap = syscon_node_to_regmap(dev->of_node); + if (of_device_is_compatible(dev->of_node, "socionext,ph1-ld4-pinctrl") || + of_device_is_compatible(dev->of_node, "socionext,ph1-pro4-pinctrl") || + of_device_is_compatible(dev->of_node, "socionext,ph1-sld8-pinctrl") || + of_device_is_compatible(dev->of_node, "socionext,ph1-pro5-pinctrl") || + of_device_is_compatible(dev->of_node, "socionext,proxstream2-pinctrl") || + of_device_is_compatible(dev->of_node, "socionext,ph1-ld6b-pinctrl")) { + /* old binding */ + priv->regmap = syscon_node_to_regmap(dev->of_node); + } else { + priv->regbase = 0x1000; + parent = of_get_parent(dev->of_node); + priv->regmap = syscon_node_to_regmap(parent); + of_node_put(parent); + } + if (IS_ERR(priv->regmap)) { dev_err(dev, "failed to get regmap\n"); return PTR_ERR(priv->regmap); } priv->socdata = socdata; - desc->pctlops = &uniphier_pctlops; - desc->pmxops = &uniphier_pmxops; - desc->confops = &uniphier_confops; - - priv->pctldev = devm_pinctrl_register(dev, desc, priv); + priv->pctldesc.name = dev->driver->name; + priv->pctldesc.pins = socdata->pins; + priv->pctldesc.npins = socdata->npins; + priv->pctldesc.pctlops = &uniphier_pctlops; + priv->pctldesc.pmxops = &uniphier_pmxops; + priv->pctldesc.confops = &uniphier_confops; + priv->pctldesc.owner = dev->driver->owner; + + priv->pctldev = devm_pinctrl_register(dev, &priv->pctldesc, priv); if (IS_ERR(priv->pctldev)) { dev_err(dev, "failed to register UniPhier pinctrl driver\n"); return PTR_ERR(priv->pctldev); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c new file mode 100644 index 000000000000..77a0236ee781 --- /dev/null +++ 
b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c @@ -0,0 +1,952 @@ +/* + * Copyright (C) 2016 Socionext Inc. + * Author: Masahiro Yamada <yamada.masahiro@socionext.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/platform_device.h> + +#include "pinctrl-uniphier.h" + +static const struct pinctrl_pin_desc uniphier_ld11_pins[] = { + UNIPHIER_PINCTRL_PIN(0, "XECS1", 0, + 0, UNIPHIER_PIN_DRV_1BIT, + 0, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(1, "ERXW", UNIPHIER_PIN_IECTRL_NONE, + 1, UNIPHIER_PIN_DRV_1BIT, + 1, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(2, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, + 2, UNIPHIER_PIN_DRV_1BIT, + 2, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(3, "XNFWP", 3, + 3, UNIPHIER_PIN_DRV_1BIT, + 3, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(4, "XNFCE0", 4, + 4, UNIPHIER_PIN_DRV_1BIT, + 4, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(5, "NFRYBY0", 5, + 5, UNIPHIER_PIN_DRV_1BIT, + 5, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(6, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, + 6, UNIPHIER_PIN_DRV_1BIT, + 6, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(7, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, + 7, UNIPHIER_PIN_DRV_1BIT, + 7, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(8, "NFALE", UNIPHIER_PIN_IECTRL_NONE, + 8, UNIPHIER_PIN_DRV_1BIT, + 8, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(9, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, + 9, UNIPHIER_PIN_DRV_1BIT, + 9, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(10, "NFD0", 10, + 10, UNIPHIER_PIN_DRV_1BIT, + 10, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(11, "NFD1", 11, + 11, UNIPHIER_PIN_DRV_1BIT, + 11, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(12, "NFD2", 12, + 12, UNIPHIER_PIN_DRV_1BIT, + 12, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(13, "NFD3", 13, + 13, UNIPHIER_PIN_DRV_1BIT, + 13, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(14, "NFD4", 14, + 14, UNIPHIER_PIN_DRV_1BIT, + 14, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(15, "NFD5", 15, + 15, UNIPHIER_PIN_DRV_1BIT, + 15, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(16, "NFD6", 16, + 16, UNIPHIER_PIN_DRV_1BIT, + 16, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(17, "NFD7", 17, + 17, UNIPHIER_PIN_DRV_1BIT, + 17, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(18, "XERST", 18, + 0, UNIPHIER_PIN_DRV_2BIT, + 18, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(19, "MMCCLK", 19, + 1, UNIPHIER_PIN_DRV_2BIT, + 19, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(20, "MMCCMD", 20, + 2, UNIPHIER_PIN_DRV_2BIT, + 20, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(21, "MMCDS", 21, + 3, UNIPHIER_PIN_DRV_2BIT, + 21, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(22, "MMCDAT0", 22, + 4, UNIPHIER_PIN_DRV_2BIT, + 22, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(23, "MMCDAT1", 23, + 5, UNIPHIER_PIN_DRV_2BIT, + 23, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(24, "MMCDAT2", 24, + 6, UNIPHIER_PIN_DRV_2BIT, + 24, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(25, "MMCDAT3", 25, + 7, UNIPHIER_PIN_DRV_2BIT, + 25, 
UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(26, "MMCDAT4", 26, + 8, UNIPHIER_PIN_DRV_2BIT, + 26, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(27, "MMCDAT5", 27, + 9, UNIPHIER_PIN_DRV_2BIT, + 27, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(28, "MMCDAT6", 28, + 10, UNIPHIER_PIN_DRV_2BIT, + 28, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(29, "MMCDAT7", 29, + 11, UNIPHIER_PIN_DRV_2BIT, + 29, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(46, "USB0VBUS", 46, + 46, UNIPHIER_PIN_DRV_1BIT, + 46, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(47, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, + 47, UNIPHIER_PIN_DRV_1BIT, + 47, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(48, "USB1VBUS", 48, + 48, UNIPHIER_PIN_DRV_1BIT, + 48, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(49, "USB1OD", 49, + 49, UNIPHIER_PIN_DRV_1BIT, + 49, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(50, "USB2VBUS", 50, + 50, UNIPHIER_PIN_DRV_1BIT, + 50, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(51, "USB2OD", 51, + 51, UNIPHIER_PIN_DRV_1BIT, + 51, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(54, "TXD0", 54, + 54, UNIPHIER_PIN_DRV_1BIT, + 54, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(55, "RXD0", 55, + 55, UNIPHIER_PIN_DRV_1BIT, + 55, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(56, "SPISYNC0", 56, + 56, UNIPHIER_PIN_DRV_1BIT, + 56, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(57, "SPISCLK0", 57, + 57, UNIPHIER_PIN_DRV_1BIT, + 57, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(58, "SPITXD0", 58, + 58, UNIPHIER_PIN_DRV_1BIT, + 58, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(59, "SPIRXD0", 59, + 59, UNIPHIER_PIN_DRV_1BIT, + 59, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(60, "AGCI", 60, + 60, UNIPHIER_PIN_DRV_1BIT, + 60, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(61, "DMDSDA0", 61, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(62, "DMDSCL0", 62, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(63, "SDA0", 63, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(64, "SCL0", 64, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(65, "SDA1", 65, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(66, "SCL1", 66, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(67, "HIN", 67, + -1, UNIPHIER_PIN_DRV_FIXED5, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(68, "VIN", 68, + -1, UNIPHIER_PIN_DRV_FIXED5, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(69, "PCA00", 69, + 69, UNIPHIER_PIN_DRV_1BIT, + 69, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(70, "PCA01", 70, + 70, UNIPHIER_PIN_DRV_1BIT, + 70, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(71, "PCA02", 71, + 71, UNIPHIER_PIN_DRV_1BIT, + 71, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(72, "PCA03", 72, + 72, UNIPHIER_PIN_DRV_1BIT, + 72, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(73, "PCA04", 73, + 73, UNIPHIER_PIN_DRV_1BIT, + 73, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(74, "PCA05", 74, + 74, UNIPHIER_PIN_DRV_1BIT, + 74, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(75, "PCA06", 75, + 75, UNIPHIER_PIN_DRV_1BIT, + 75, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(76, "PCA07", 76, + 76, UNIPHIER_PIN_DRV_1BIT, + 76, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(77, "PCA08", 77, + 77, UNIPHIER_PIN_DRV_1BIT, + 77, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(78, "PCA09", 78, + 78, UNIPHIER_PIN_DRV_1BIT, + 78, 
UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(79, "PCA10", 79, + 79, UNIPHIER_PIN_DRV_1BIT, + 79, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(80, "PCA11", 80, + 80, UNIPHIER_PIN_DRV_1BIT, + 80, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(81, "PCA12", 81, + 81, UNIPHIER_PIN_DRV_1BIT, + 81, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(82, "PCA13", 82, + 82, UNIPHIER_PIN_DRV_1BIT, + 82, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(83, "PCA14", 83, + 83, UNIPHIER_PIN_DRV_1BIT, + 83, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(84, "PC0READY", 84, + 84, UNIPHIER_PIN_DRV_1BIT, + 84, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(85, "PC0CD1", 85, + 85, UNIPHIER_PIN_DRV_1BIT, + 85, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(86, "PC0CD2", 86, + 86, UNIPHIER_PIN_DRV_1BIT, + 86, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(87, "PC0WAIT", 87, + 87, UNIPHIER_PIN_DRV_1BIT, + 87, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(88, "PC0RESET", 88, + 88, UNIPHIER_PIN_DRV_1BIT, + 88, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(89, "PC0CE1", 89, + 89, UNIPHIER_PIN_DRV_1BIT, + 89, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(90, "PC0WE", 90, + 90, UNIPHIER_PIN_DRV_1BIT, + 90, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(91, "PC0OE", 91, + 91, UNIPHIER_PIN_DRV_1BIT, + 91, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(92, "PC0IOWR", 92, + 92, UNIPHIER_PIN_DRV_1BIT, + 92, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(93, "PC0IORD", 93, + 93, UNIPHIER_PIN_DRV_1BIT, + 93, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(94, "PCD00", 94, + 94, UNIPHIER_PIN_DRV_1BIT, + 94, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(95, "PCD01", 95, + 95, UNIPHIER_PIN_DRV_1BIT, + 95, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(96, "PCD02", 96, + 96, UNIPHIER_PIN_DRV_1BIT, + 96, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(97, "PCD03", 97, + 97, UNIPHIER_PIN_DRV_1BIT, + 97, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(98, "PCD04", 98, + 98, UNIPHIER_PIN_DRV_1BIT, + 98, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(99, "PCD05", 99, + 99, UNIPHIER_PIN_DRV_1BIT, + 99, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(100, "PCD06", 100, + 100, UNIPHIER_PIN_DRV_1BIT, + 100, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(101, "PCD07", 101, + 101, UNIPHIER_PIN_DRV_1BIT, + 101, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(102, "HS0BCLKIN", 102, + 102, UNIPHIER_PIN_DRV_1BIT, + 102, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(103, "HS0SYNCIN", 103, + 103, UNIPHIER_PIN_DRV_1BIT, + 103, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(104, "HS0VALIN", 104, + 104, UNIPHIER_PIN_DRV_1BIT, + 104, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(105, "HS0DIN0", 105, + 105, UNIPHIER_PIN_DRV_1BIT, + 105, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(106, "HS0DIN1", 106, + 106, UNIPHIER_PIN_DRV_1BIT, + 106, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(107, "HS0DIN2", 107, + 107, UNIPHIER_PIN_DRV_1BIT, + 107, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(108, "HS0DIN3", 108, + 108, UNIPHIER_PIN_DRV_1BIT, + 108, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(109, "HS0DIN4", 109, + 109, UNIPHIER_PIN_DRV_1BIT, + 109, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(110, "HS0DIN5", 110, + 110, UNIPHIER_PIN_DRV_1BIT, + 110, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(111, "HS0DIN6", 111, + 111, UNIPHIER_PIN_DRV_1BIT, + 111, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(112, "HS0DIN7", 112, + 112, UNIPHIER_PIN_DRV_1BIT, + 112, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(113, 
"HS0BCLKOUT", 113, + 113, UNIPHIER_PIN_DRV_1BIT, + 113, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(114, "HS0SYNCOUT", 114, + 114, UNIPHIER_PIN_DRV_1BIT, + 114, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(115, "HS0VALOUT", 115, + 115, UNIPHIER_PIN_DRV_1BIT, + 115, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(116, "HS0DOUT0", 116, + 116, UNIPHIER_PIN_DRV_1BIT, + 116, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(117, "HS0DOUT1", 117, + 117, UNIPHIER_PIN_DRV_1BIT, + 117, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(118, "HS0DOUT2", 118, + 118, UNIPHIER_PIN_DRV_1BIT, + 118, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(119, "HS0DOUT3", 119, + 119, UNIPHIER_PIN_DRV_1BIT, + 119, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(120, "HS0DOUT4", 120, + 120, UNIPHIER_PIN_DRV_1BIT, + 120, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(121, "HS0DOUT5", 121, + 121, UNIPHIER_PIN_DRV_1BIT, + 121, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(122, "HS0DOUT6", 122, + 122, UNIPHIER_PIN_DRV_1BIT, + 122, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(123, "HS0DOUT7", 123, + 123, UNIPHIER_PIN_DRV_1BIT, + 123, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(124, "HS1BCLKIN", 124, + 124, UNIPHIER_PIN_DRV_1BIT, + 124, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(125, "HS1SYNCIN", 125, + 125, UNIPHIER_PIN_DRV_1BIT, + 125, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(126, "HS1VALIN", 126, + 126, UNIPHIER_PIN_DRV_1BIT, + 126, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(127, "HS1DIN0", 127, + 127, UNIPHIER_PIN_DRV_1BIT, + 127, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(128, "HS1DIN1", 128, + 128, UNIPHIER_PIN_DRV_1BIT, + 128, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(129, "HS1DIN2", 129, + 129, UNIPHIER_PIN_DRV_1BIT, + 129, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(130, "HS1DIN3", 130, + 130, UNIPHIER_PIN_DRV_1BIT, + 130, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(131, "HS1DIN4", 131, + 131, UNIPHIER_PIN_DRV_1BIT, + 131, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(132, "HS1DIN5", 132, + 132, UNIPHIER_PIN_DRV_1BIT, + 132, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(133, "HS1DIN6", 133, + 133, UNIPHIER_PIN_DRV_1BIT, + 133, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(134, "HS1DIN7", 134, + 134, UNIPHIER_PIN_DRV_1BIT, + 134, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(135, "AO1IEC", 135, + 135, UNIPHIER_PIN_DRV_1BIT, + 135, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(136, "AO1ARC", 136, + 136, UNIPHIER_PIN_DRV_1BIT, + 136, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(137, "AO1DACCK", 137, + 137, UNIPHIER_PIN_DRV_1BIT, + 137, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(138, "AO1BCK", 138, + 138, UNIPHIER_PIN_DRV_1BIT, + 138, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(139, "AO1LRCK", 139, + 139, UNIPHIER_PIN_DRV_1BIT, + 139, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, + 140, UNIPHIER_PIN_DRV_1BIT, + 140, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(141, "TCON0", 141, + 141, UNIPHIER_PIN_DRV_1BIT, + 141, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(142, "TCON1", 142, + 142, UNIPHIER_PIN_DRV_1BIT, + 142, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(143, "TCON2", 143, + 143, UNIPHIER_PIN_DRV_1BIT, + 143, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(144, "TCON3", 144, + 144, UNIPHIER_PIN_DRV_1BIT, + 144, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(145, "TCON4", 145, + 145, UNIPHIER_PIN_DRV_1BIT, + 145, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(146, "TCON5", 146, + 146, UNIPHIER_PIN_DRV_1BIT, + 
146, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, + 147, UNIPHIER_PIN_DRV_1BIT, + 147, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(148, "LR_GOUT", 148, + 148, UNIPHIER_PIN_DRV_1BIT, + 148, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(149, "XIRQ0", 149, + 149, UNIPHIER_PIN_DRV_1BIT, + 149, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(150, "XIRQ1", 150, + 150, UNIPHIER_PIN_DRV_1BIT, + 150, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(151, "XIRQ2", 151, + 151, UNIPHIER_PIN_DRV_1BIT, + 151, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(152, "XIRQ3", 152, + 152, UNIPHIER_PIN_DRV_1BIT, + 152, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(153, "XIRQ4", 153, + 153, UNIPHIER_PIN_DRV_1BIT, + 153, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(154, "XIRQ5", 154, + 154, UNIPHIER_PIN_DRV_1BIT, + 154, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(155, "XIRQ6", 155, + 155, UNIPHIER_PIN_DRV_1BIT, + 155, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(156, "XIRQ7", 156, + 156, UNIPHIER_PIN_DRV_1BIT, + 156, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(157, "XIRQ8", 157, + 157, UNIPHIER_PIN_DRV_1BIT, + 157, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(158, "AGCBS", 158, + 158, UNIPHIER_PIN_DRV_1BIT, + 158, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(159, "XIRQ21", 159, + 159, UNIPHIER_PIN_DRV_1BIT, + 159, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(160, "XIRQ22", 160, + 160, UNIPHIER_PIN_DRV_1BIT, + 160, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(161, "XIRQ23", 161, + 161, UNIPHIER_PIN_DRV_1BIT, + 161, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(162, "CH2CLK", 162, + 162, UNIPHIER_PIN_DRV_1BIT, + 162, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(163, "CH2PSYNC", 163, + 163, UNIPHIER_PIN_DRV_1BIT, + 163, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(164, "CH2VAL", 164, + 164, UNIPHIER_PIN_DRV_1BIT, + 164, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(165, "CH2DATA", 165, + 165, UNIPHIER_PIN_DRV_1BIT, + 165, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(166, "CK25O", 166, + 166, UNIPHIER_PIN_DRV_1BIT, + 166, UNIPHIER_PIN_PULL_DOWN), +}; + +static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25}; +static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29}; +static const int emmc_dat8_muxvals[] = {0, 0, 0, 0}; +static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17}; +static const int ether_rmii_muxvals[] = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}; +static const unsigned i2c0_pins[] = {63, 64}; +static const int i2c0_muxvals[] = {0, 0}; +static const unsigned i2c1_pins[] = {65, 66}; +static const int i2c1_muxvals[] = {0, 0}; +static const unsigned i2c3_pins[] = {67, 68}; +static const int i2c3_muxvals[] = {1, 1}; +static const unsigned i2c4_pins[] = {61, 62}; +static const int i2c4_muxvals[] = {1, 1}; +static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17}; +static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2}; +static const unsigned system_bus_cs1_pins[] = {0}; +static const int system_bus_cs1_muxvals[] = {0}; +static const unsigned uart0_pins[] = {54, 55}; +static const int uart0_muxvals[] = {0, 0}; +static const unsigned uart1_pins[] = {58, 59}; +static const int uart1_muxvals[] = {1, 1}; 
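The *_pins[]/*_muxvals[] pairs above feed uniphier_pmx_set_one_mux() in the shared core shown earlier in this diff. As a rough illustration of where a given mux value ends up, here is a minimal user-space sketch of the same offset/shift arithmetic for the common register layout (mux_bits = 8, reg_stride = 4 — LD11 does not set UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE, per its socdata below). PINMUX_BASE is only a stand-in for UNIPHIER_PINCTRL_PINMUX_BASE, whose real value lives in pinctrl-uniphier.h; this is a sketch, not the driver:

#include <stdio.h>

/* Stand-in for UNIPHIER_PINCTRL_PINMUX_BASE (real value is in the
 * driver header); the arithmetic, not the base, is the point here. */
#define PINMUX_BASE	0x1000

static void show_mux_write(unsigned int pin, int muxval)
{
	unsigned int mux_bits = 8, reg_stride = 4; /* non-DBGMUX_SEPARATE */
	unsigned int reg, reg_end, shift, mask;

	if (muxval < 0) {
		/* negative muxval means a dedicated pin in these tables */
		printf("pin %u: dedicated pin, nothing to program\n", pin);
		return;
	}

	reg = PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
	reg_end = reg + reg_stride;
	shift = pin * mux_bits % 32;
	mask = (1U << mux_bits) - 1;

	for (; reg < reg_end; reg += 4) {
		printf("pin %u: reg 0x%x bits [%u:%u] <- 0x%x\n",
		       pin, reg, shift + mux_bits - 1, shift,
		       (unsigned int)muxval & mask);
		muxval >>= mux_bits;
	}
}

int main(void)
{
	/* uart1 from the tables above: pins {58, 59}, muxvals {1, 1} */
	show_mux_write(58, 1);
	show_mux_write(59, 1);
	return 0;
}

With eight mux bits per pin, each 32-bit register covers four pins, which is why pins 58 and 59 land in the same register (0x1038 here) at different shifts.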
+static const unsigned uart2_pins[] = {90, 91}; +static const int uart2_muxvals[] = {1, 1}; +static const unsigned uart3_pins[] = {94, 95}; +static const int uart3_muxvals[] = {1, 1}; +static const unsigned usb0_pins[] = {46, 47}; +static const int usb0_muxvals[] = {0, 0}; +static const unsigned usb1_pins[] = {48, 49}; +static const int usb1_muxvals[] = {0, 0}; +static const unsigned usb2_pins[] = {50, 51}; +static const int usb2_muxvals[] = {0, 0}; +static const unsigned port_range_pins[] = { + 159, 160, 161, 162, 163, 164, 165, 166, /* PORT0x */ + 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */ + 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */ + 16, 17, 18, -1, -1, -1, -1, -1, /* PORT3x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */ + -1, -1, -1, 46, 47, 48, 49, 50, /* PORT5x */ + 51, -1, -1, 54, 55, 56, 57, 58, /* PORT6x */ + 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */ + 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */ + 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */ + 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ + 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */ + 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ + 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ + 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ + 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ + 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ + 139, 140, 141, 142, -1, -1, -1, -1, /* PORT22x */ + 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ + 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */ +}; +static const int port_range_muxvals[] = { + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ + 15, 15, 15, -1, -1, -1, -1, -1, /* PORT3x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */ + -1, -1, -1, 15, 15, 15, 15, 15, /* PORT5x */ + 15, -1, -1, 15, 15, 15, 15, 15, /* PORT6x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */ + 15, 15, 15, 15, -1, -1, -1, -1, /* PORT22x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */ +}; +static const unsigned xirq_pins[] = { + 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */ + 157, 143, 144, 145, 85, 146, 158, 84, /* XIRQ8-15 */ + 141, 142, 148, 50, 51, 159, 160, 161, /* XIRQ16-23 */ +}; +static const int xirq_muxvals[] = { + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ + 14, 14, 14, 14, 13, 14, 14, 13, /* XIRQ8-15 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ +}; +static const unsigned xirq_alternatives_pins[] = { + 94, 95, 96, 97, 98, 99, 100, 101, /* XIRQ0-7 */ + 102, 103, 104, 105, 106, 107, /* XIRQ8-11,13,14 */ + 108, 109, 
110, 111, 112, 113, 114, 115, /* XIRQ16-23 */ + 9, 10, 11, 12, 13, 14, 15, 16, /* XIRQ4-11 */ + 17, 0, 1, 2, 3, 4, 5, 6, 7, 8, /* XIRQ13,14,16-23 */ + 139, 140, 135, 147, /* XIRQ17,18,21,22 */ +}; +static const int xirq_alternatives_muxvals[] = { + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ + 14, 14, 14, 14, 14, 14, /* XIRQ8-11,13,14 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ4-11 */ + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ13,14,16-23 */ + 14, 14, 14, 14, /* XIRQ17,18,21,22 */ +}; + +static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = { + UNIPHIER_PINCTRL_GROUP(emmc), + UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_rmii), + UNIPHIER_PINCTRL_GROUP(i2c0), + UNIPHIER_PINCTRL_GROUP(i2c1), + UNIPHIER_PINCTRL_GROUP(i2c3), + UNIPHIER_PINCTRL_GROUP(i2c4), + UNIPHIER_PINCTRL_GROUP(nand), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(uart0), + UNIPHIER_PINCTRL_GROUP(uart1), + UNIPHIER_PINCTRL_GROUP(uart2), + UNIPHIER_PINCTRL_GROUP(uart3), + UNIPHIER_PINCTRL_GROUP(usb0), + UNIPHIER_PINCTRL_GROUP(usb1), + UNIPHIER_PINCTRL_GROUP(usb2), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives), + UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43), + UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44), + UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45), + UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46), + UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47), + UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48), + UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51), + UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52), + UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53), + UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54), + UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55), + UNIPHIER_PINCTRL_GROUP_SINGLE(port70, 
port_range, 56), + UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57), + UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58), + UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59), + UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60), + UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61), + UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62), + UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63), + UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64), + UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65), + UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66), + UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67), + UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68), + UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69), + UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70), + UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71), + UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72), + UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73), + UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74), + UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75), + UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76), + UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77), + UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78), + UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79), + UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80), + UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81), + UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82), + UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83), + UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84), + UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85), + UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86), + UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87), + UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96), + UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97), + UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98), + UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99), + UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100), + UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101), + UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102), + UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103), + UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104), + UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105), + UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106), + UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107), + UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108), + UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109), + UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110), + UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111), + UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112), + UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113), + UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114), + UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115), + UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116), + UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117), + UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118), + UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119), + UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144), + UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145), + UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146), + UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147), + UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148), + UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 
149), + UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150), + UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151), + UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160), + UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161), + UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162), + UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163), + UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164), + UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165), + UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166), + UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167), + UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168), + UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169), + UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170), + UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171), + UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172), + UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173), + UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174), + UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175), + UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176), + UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177), + UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178), + UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179), + UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184), + UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185), + UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186), + UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187), + UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188), + UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189), + UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190), + UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191), + UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192), + UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193), + UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194), + UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195), + UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196), + UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197), + UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198), + UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0b, xirq_alternatives, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1b, 
xirq_alternatives, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2b, xirq_alternatives, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5b, xirq_alternatives, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6b, xirq_alternatives, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7b, xirq_alternatives, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8b, xirq_alternatives, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9b, xirq_alternatives, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10b, xirq_alternatives, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11b, xirq_alternatives, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13b, xirq_alternatives, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21b, xirq_alternatives, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22b, xirq_alternatives, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23b, xirq_alternatives, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4c, xirq_alternatives, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5c, xirq_alternatives, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6c, xirq_alternatives, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7c, xirq_alternatives, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8c, xirq_alternatives, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9c, xirq_alternatives, 27), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10c, xirq_alternatives, 28), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11c, xirq_alternatives, 29), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13c, xirq_alternatives, 30), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14c, xirq_alternatives, 31), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16c, xirq_alternatives, 32), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 33), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 34), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19c, xirq_alternatives, 35), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20c, xirq_alternatives, 36), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21c, xirq_alternatives, 37), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22c, xirq_alternatives, 38), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23c, xirq_alternatives, 39), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17d, xirq_alternatives, 40), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18d, xirq_alternatives, 41), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21d, xirq_alternatives, 42), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22d, xirq_alternatives, 43), +}; + +static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; +static const char * const i2c0_groups[] = {"i2c0"}; +static const char * const i2c1_groups[] = {"i2c1"}; +static const char * const i2c3_groups[] = {"i2c3"}; +static const char * const i2c4_groups[] = {"i2c4"}; +static const char * const nand_groups[] = {"nand"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs1"}; +static const char * const uart0_groups[] = {"uart0"}; +static const char * const uart1_groups[] = {"uart1"}; +static const char * const uart2_groups[] = {"uart2"}; +static const char * const uart3_groups[] = {"uart3"}; +static const char * const usb0_groups[] = {"usb0"}; +static const char * const usb1_groups[] = {"usb1"}; +static 
const char * const usb2_groups[] = {"usb2"}; +static const char * const port_groups[] = { + "port00", "port01", "port02", "port03", + "port04", "port05", "port06", "port07", + "port10", "port11", "port12", "port13", + "port14", "port15", "port16", "port17", + "port20", "port21", "port22", "port23", + "port24", "port25", "port26", "port27", + "port30", "port31", "port32", + /* port33-52 missing */ "port53", + "port54", "port55", "port56", "port57", + "port60", /* port61-62 missing*/ "port63", + "port64", "port65", "port66", "port67", + "port70", "port71", "port72", "port73", + "port74", "port75", "port76", "port77", + "port80", "port81", "port82", "port83", + "port84", "port85", "port86", "port87", + "port90", "port91", "port92", "port93", + "port94", "port95", "port96", "port97", + "port100", "port101", "port102", "port103", + "port104", "port105", "port106", "port107", + /* port110-117 missing */ + "port120", "port121", "port122", "port123", + "port124", "port125", "port126", "port127", + "port130", "port131", "port132", "port133", + "port134", "port135", "port136", "port137", + "port140", "port141", "port142", "port143", + "port144", "port145", "port146", "port147", + /* port150-177 missing */ + "port180", "port181", "port182", "port183", + "port184", "port185", "port186", "port187", + /* port190-197 missing */ + "port200", "port201", "port202", "port203", + "port204", "port205", "port206", "port207", + "port210", "port211", "port212", "port213", + "port214", "port215", "port216", "port217", + "port220", "port221", "port222", "port223", + /* port224-227 missing */ + "port230", "port231", "port232", "port233", + "port234", "port235", "port236", "port237", + "port240", "port241", "port242", "port243", + "port244", "port245", "port246", "port247", +}; +static const char * const xirq_groups[] = { + "xirq0", "xirq1", "xirq2", "xirq3", + "xirq4", "xirq5", "xirq6", "xirq7", + "xirq8", "xirq9", "xirq10", "xirq11", + "xirq12", "xirq13", "xirq14", "xirq15", + "xirq16", "xirq17", "xirq18", "xirq19", + "xirq20", "xirq21", "xirq22", "xirq23", + "xirq0b", "xirq1b", "xirq2b", "xirq3b", + "xirq4b", "xirq5b", "xirq6b", "xirq7b", + "xirq8b", "xirq9b", "xirq10b", "xirq11b", + /* none */ "xirq13b", "xirq14b", /* none */ + "xirq16b", "xirq17b", "xirq18b", "xirq19b", + "xirq20b", "xirq21b", "xirq22b", "xirq23b", + "xirq4c", "xirq5c", "xirq6c", "xirq7c", + "xirq8c", "xirq9c", "xirq10c", "xirq11c", + /* none */ "xirq13c", "xirq14c", /* none */ + "xirq16c", "xirq17c", "xirq18c", "xirq19c", + "xirq20c", "xirq21c", "xirq22c", "xirq23c", + "xirq17d", "xirq18d", "xirq21d", "xirq22d", +}; + +static const struct uniphier_pinmux_function uniphier_ld11_functions[] = { + UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), + UNIPHIER_PINMUX_FUNCTION(i2c0), + UNIPHIER_PINMUX_FUNCTION(i2c1), + UNIPHIER_PINMUX_FUNCTION(i2c3), + UNIPHIER_PINMUX_FUNCTION(i2c4), + UNIPHIER_PINMUX_FUNCTION(nand), + UNIPHIER_PINMUX_FUNCTION(system_bus), + UNIPHIER_PINMUX_FUNCTION(uart0), + UNIPHIER_PINMUX_FUNCTION(uart1), + UNIPHIER_PINMUX_FUNCTION(uart2), + UNIPHIER_PINMUX_FUNCTION(uart3), + UNIPHIER_PINMUX_FUNCTION(usb0), + UNIPHIER_PINMUX_FUNCTION(usb1), + UNIPHIER_PINMUX_FUNCTION(usb2), + UNIPHIER_PINMUX_FUNCTION(port), + UNIPHIER_PINMUX_FUNCTION(xirq), +}; + +static struct uniphier_pinctrl_socdata uniphier_ld11_pindata = { + .pins = uniphier_ld11_pins, + .npins = ARRAY_SIZE(uniphier_ld11_pins), + .groups = uniphier_ld11_groups, + .groups_count = ARRAY_SIZE(uniphier_ld11_groups), + .functions = uniphier_ld11_functions, + 
.functions_count = ARRAY_SIZE(uniphier_ld11_functions), + .caps = UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL, +}; + +static int uniphier_ld11_pinctrl_probe(struct platform_device *pdev) +{ + return uniphier_pinctrl_probe(pdev, &uniphier_ld11_pindata); +} + +static const struct of_device_id uniphier_ld11_pinctrl_match[] = { + { .compatible = "socionext,uniphier-ld11-pinctrl" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_ld11_pinctrl_match); + +static struct platform_driver uniphier_ld11_pinctrl_driver = { + .probe = uniphier_ld11_pinctrl_probe, + .driver = { + .name = "uniphier-ld11-pinctrl", + .of_match_table = uniphier_ld11_pinctrl_match, + }, +}; +module_platform_driver(uniphier_ld11_pinctrl_driver); + +MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); +MODULE_DESCRIPTION("UniPhier PH1-LD11 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c new file mode 100644 index 000000000000..aa8bd9794683 --- /dev/null +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c @@ -0,0 +1,1050 @@ +/* + * Copyright (C) 2016 Socionext Inc. + * Author: Masahiro Yamada <yamada.masahiro@socionext.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/platform_device.h> + +#include "pinctrl-uniphier.h" + +static const struct pinctrl_pin_desc uniphier_ld20_pins[] = { + UNIPHIER_PINCTRL_PIN(0, "XECS1", 0, + 0, UNIPHIER_PIN_DRV_3BIT, + 0, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(1, "ERXW", 1, + 1, UNIPHIER_PIN_DRV_3BIT, + 1, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(2, "XERWE1", 2, + 2, UNIPHIER_PIN_DRV_3BIT, + 2, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(3, "XNFWP", 3, + 3, UNIPHIER_PIN_DRV_3BIT, + 3, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(4, "XNFCE0", 4, + 4, UNIPHIER_PIN_DRV_3BIT, + 4, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(5, "NFRYBY0", 5, + 5, UNIPHIER_PIN_DRV_3BIT, + 5, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(6, "XNFRE", 6, + 6, UNIPHIER_PIN_DRV_3BIT, + 6, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(7, "XNFWE", 7, + 7, UNIPHIER_PIN_DRV_3BIT, + 7, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(8, "NFALE", 8, + 8, UNIPHIER_PIN_DRV_3BIT, + 8, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(9, "NFCLE", 9, + 9, UNIPHIER_PIN_DRV_3BIT, + 9, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(10, "NFD0", 10, + 10, UNIPHIER_PIN_DRV_3BIT, + 10, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(11, "NFD1", 11, + 11, UNIPHIER_PIN_DRV_3BIT, + 11, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(12, "NFD2", 12, + 12, UNIPHIER_PIN_DRV_3BIT, + 12, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(13, "NFD3", 13, + 13, UNIPHIER_PIN_DRV_3BIT, + 13, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(14, "NFD4", 14, + 14, UNIPHIER_PIN_DRV_3BIT, + 14, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(15, "NFD5", 15, + 15, UNIPHIER_PIN_DRV_3BIT, + 15, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(16, "NFD6", 16, + 16, 
UNIPHIER_PIN_DRV_3BIT, + 16, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(17, "NFD7", 17, + 17, UNIPHIER_PIN_DRV_3BIT, + 17, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(18, "XERST", 18, + 0, UNIPHIER_PIN_DRV_2BIT, + 18, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(19, "MMCCLK", 19, + 1, UNIPHIER_PIN_DRV_2BIT, + 19, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(20, "MMCCMD", 20, + 2, UNIPHIER_PIN_DRV_2BIT, + 20, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(21, "MMCDS", 21, + 3, UNIPHIER_PIN_DRV_2BIT, + 21, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(22, "MMCDAT0", 22, + 4, UNIPHIER_PIN_DRV_2BIT, + 22, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(23, "MMCDAT1", 23, + 5, UNIPHIER_PIN_DRV_2BIT, + 23, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(24, "MMCDAT2", 24, + 6, UNIPHIER_PIN_DRV_2BIT, + 24, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(25, "MMCDAT3", 25, + 7, UNIPHIER_PIN_DRV_2BIT, + 25, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(26, "MMCDAT4", 26, + 8, UNIPHIER_PIN_DRV_2BIT, + 26, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(27, "MMCDAT5", 27, + 9, UNIPHIER_PIN_DRV_2BIT, + 27, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(28, "MMCDAT6", 28, + 10, UNIPHIER_PIN_DRV_2BIT, + 28, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(29, "MMCDAT7", 29, + 11, UNIPHIER_PIN_DRV_2BIT, + 29, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(30, "MDC", 30, + 18, UNIPHIER_PIN_DRV_3BIT, + 30, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(31, "MDIO", 31, + 19, UNIPHIER_PIN_DRV_3BIT, + 31, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(32, "MDIO_INTL", 32, + 20, UNIPHIER_PIN_DRV_3BIT, + 32, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(33, "PHYRSTL", 33, + 21, UNIPHIER_PIN_DRV_3BIT, + 33, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(34, "RGMII_RXCLK", 34, + 22, UNIPHIER_PIN_DRV_3BIT, + 34, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(35, "RGMII_RXD0", 35, + 23, UNIPHIER_PIN_DRV_3BIT, + 35, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(36, "RGMII_RXD1", 36, + 24, UNIPHIER_PIN_DRV_3BIT, + 36, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(37, "RGMII_RXD2", 37, + 25, UNIPHIER_PIN_DRV_3BIT, + 37, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(38, "RGMII_RXD3", 38, + 26, UNIPHIER_PIN_DRV_3BIT, + 38, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(39, "RGMII_RXCTL", 39, + 27, UNIPHIER_PIN_DRV_3BIT, + 39, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(40, "RGMII_TXCLK", 40, + 28, UNIPHIER_PIN_DRV_3BIT, + 40, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(41, "RGMII_TXD0", 41, + 29, UNIPHIER_PIN_DRV_3BIT, + 41, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(42, "RGMII_TXD1", 42, + 30, UNIPHIER_PIN_DRV_3BIT, + 42, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(43, "RGMII_TXD2", 43, + 31, UNIPHIER_PIN_DRV_3BIT, + 43, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(44, "RGMII_TXD3", 44, + 32, UNIPHIER_PIN_DRV_3BIT, + 44, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(45, "RGMII_TXCTL", 45, + 33, UNIPHIER_PIN_DRV_3BIT, + 45, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(46, "USB0VBUS", 46, + 34, UNIPHIER_PIN_DRV_3BIT, + 46, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(47, "USB0OD", 47, + 35, UNIPHIER_PIN_DRV_3BIT, + 47, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(48, "USB1VBUS", 48, + 36, UNIPHIER_PIN_DRV_3BIT, + 48, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(49, "USB1OD", 49, + 37, UNIPHIER_PIN_DRV_3BIT, + 49, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(50, "USB2VBUS", 50, + 38, UNIPHIER_PIN_DRV_3BIT, + 50, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(51, "USB2OD", 51, + 39, 
UNIPHIER_PIN_DRV_3BIT, + 51, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(52, "USB3VBUS", 52, + 40, UNIPHIER_PIN_DRV_3BIT, + 52, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(53, "USB3OD", 53, + 41, UNIPHIER_PIN_DRV_3BIT, + 53, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(54, "TXD0", 54, + 42, UNIPHIER_PIN_DRV_3BIT, + 54, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(55, "RXD0", 55, + 43, UNIPHIER_PIN_DRV_3BIT, + 55, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(56, "SPISYNC0", 56, + 44, UNIPHIER_PIN_DRV_3BIT, + 56, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(57, "SPISCLK0", 57, + 45, UNIPHIER_PIN_DRV_3BIT, + 57, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(58, "SPITXD0", 58, + 46, UNIPHIER_PIN_DRV_3BIT, + 58, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(59, "SPIRXD0", 59, + 47, UNIPHIER_PIN_DRV_3BIT, + 59, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(60, "AGCI", 60, + 48, UNIPHIER_PIN_DRV_3BIT, + 60, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(61, "DMDSDA0", 61, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(62, "DMDSCL0", 62, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(63, "SDA0", 63, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(64, "SCL0", 64, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(65, "SDA1", 65, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(66, "SCL1", 66, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(67, "HIN", 67, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(68, "VIN", 68, + -1, UNIPHIER_PIN_DRV_FIXED4, + -1, UNIPHIER_PIN_PULL_NONE), + UNIPHIER_PINCTRL_PIN(69, "PCA00", 69, + 49, UNIPHIER_PIN_DRV_3BIT, + 69, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(70, "PCA01", 70, + 50, UNIPHIER_PIN_DRV_3BIT, + 70, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(71, "PCA02", 71, + 51, UNIPHIER_PIN_DRV_3BIT, + 71, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(72, "PCA03", 72, + 52, UNIPHIER_PIN_DRV_3BIT, + 72, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(73, "PCA04", 73, + 53, UNIPHIER_PIN_DRV_3BIT, + 73, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(74, "PCA05", 74, + 54, UNIPHIER_PIN_DRV_3BIT, + 74, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(75, "PCA06", 75, + 55, UNIPHIER_PIN_DRV_3BIT, + 75, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(76, "PCA07", 76, + 56, UNIPHIER_PIN_DRV_3BIT, + 76, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(77, "PCA08", 77, + 57, UNIPHIER_PIN_DRV_3BIT, + 77, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(78, "PCA09", 78, + 58, UNIPHIER_PIN_DRV_3BIT, + 78, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(79, "PCA10", 79, + 59, UNIPHIER_PIN_DRV_3BIT, + 79, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(80, "PCA11", 80, + 60, UNIPHIER_PIN_DRV_3BIT, + 80, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(81, "PCA12", 81, + 61, UNIPHIER_PIN_DRV_3BIT, + 81, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(82, "PCA13", 82, + 62, UNIPHIER_PIN_DRV_3BIT, + 82, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(83, "PCA14", 83, + 63, UNIPHIER_PIN_DRV_3BIT, + 83, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(84, "PC0READY", 84, + 0, UNIPHIER_PIN_DRV_1BIT, + 84, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(85, "PC0CD1", 85, + 1, UNIPHIER_PIN_DRV_1BIT, + 85, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(86, "PC0CD2", 86, + 2, UNIPHIER_PIN_DRV_1BIT, + 86, 
UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(87, "PC0WAIT", 87, + 3, UNIPHIER_PIN_DRV_1BIT, + 87, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(88, "PC0RESET", 88, + 4, UNIPHIER_PIN_DRV_1BIT, + 88, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(89, "PC0CE1", 89, + 5, UNIPHIER_PIN_DRV_1BIT, + 89, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(90, "PC0WE", 90, + 6, UNIPHIER_PIN_DRV_1BIT, + 90, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(91, "PC0OE", 91, + 7, UNIPHIER_PIN_DRV_1BIT, + 91, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(92, "PC0IOWR", 92, + 8, UNIPHIER_PIN_DRV_1BIT, + 92, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(93, "PC0IORD", 93, + 9, UNIPHIER_PIN_DRV_1BIT, + 93, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(94, "PCD00", 94, + 10, UNIPHIER_PIN_DRV_1BIT, + 94, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(95, "PCD01", 95, + 11, UNIPHIER_PIN_DRV_1BIT, + 95, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(96, "PCD02", 96, + 12, UNIPHIER_PIN_DRV_1BIT, + 96, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(97, "PCD03", 97, + 13, UNIPHIER_PIN_DRV_1BIT, + 97, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(98, "PCD04", 98, + 14, UNIPHIER_PIN_DRV_1BIT, + 98, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(99, "PCD05", 99, + 15, UNIPHIER_PIN_DRV_1BIT, + 99, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(100, "PCD06", 100, + 16, UNIPHIER_PIN_DRV_1BIT, + 100, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(101, "PCD07", 101, + 17, UNIPHIER_PIN_DRV_1BIT, + 101, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(102, "HS0BCLKIN", 102, + 18, UNIPHIER_PIN_DRV_1BIT, + 102, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(103, "HS0SYNCIN", 103, + 19, UNIPHIER_PIN_DRV_1BIT, + 103, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(104, "HS0VALIN", 104, + 20, UNIPHIER_PIN_DRV_1BIT, + 104, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(105, "HS0DIN0", 105, + 21, UNIPHIER_PIN_DRV_1BIT, + 105, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(106, "HS0DIN1", 106, + 22, UNIPHIER_PIN_DRV_1BIT, + 106, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(107, "HS0DIN2", 107, + 23, UNIPHIER_PIN_DRV_1BIT, + 107, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(108, "HS0DIN3", 108, + 24, UNIPHIER_PIN_DRV_1BIT, + 108, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(109, "HS0DIN4", 109, + 25, UNIPHIER_PIN_DRV_1BIT, + 109, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(110, "HS0DIN5", 110, + 26, UNIPHIER_PIN_DRV_1BIT, + 110, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(111, "HS0DIN6", 111, + 27, UNIPHIER_PIN_DRV_1BIT, + 111, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(112, "HS0DIN7", 112, + 28, UNIPHIER_PIN_DRV_1BIT, + 112, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(113, "HS0BCLKOUT", 113, + 64, UNIPHIER_PIN_DRV_3BIT, + 113, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(114, "HS0SYNCOUT", 114, + 65, UNIPHIER_PIN_DRV_3BIT, + 114, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(115, "HS0VALOUT", 115, + 66, UNIPHIER_PIN_DRV_3BIT, + 115, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(116, "HS0DOUT0", 116, + 67, UNIPHIER_PIN_DRV_3BIT, + 116, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(117, "HS0DOUT1", 117, + 68, UNIPHIER_PIN_DRV_3BIT, + 117, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(118, "HS0DOUT2", 118, + 69, UNIPHIER_PIN_DRV_3BIT, + 118, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(119, "HS0DOUT3", 119, + 70, UNIPHIER_PIN_DRV_3BIT, + 119, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(120, "HS0DOUT4", 120, + 71, UNIPHIER_PIN_DRV_3BIT, + 120, UNIPHIER_PIN_PULL_DOWN), + 
UNIPHIER_PINCTRL_PIN(121, "HS0DOUT5", 121, + 72, UNIPHIER_PIN_DRV_3BIT, + 121, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(122, "HS0DOUT6", 122, + 73, UNIPHIER_PIN_DRV_3BIT, + 122, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(123, "HS0DOUT7", 123, + 74, UNIPHIER_PIN_DRV_3BIT, + 123, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(124, "HS1BCLKIN", 124, + 75, UNIPHIER_PIN_DRV_3BIT, + 124, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(125, "HS1SYNCIN", 125, + 76, UNIPHIER_PIN_DRV_3BIT, + 125, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(126, "HS1VALIN", 126, + 77, UNIPHIER_PIN_DRV_3BIT, + 126, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(127, "HS1DIN0", 127, + 78, UNIPHIER_PIN_DRV_3BIT, + 127, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(128, "HS1DIN1", 128, + 79, UNIPHIER_PIN_DRV_3BIT, + 128, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(129, "HS1DIN2", 129, + 80, UNIPHIER_PIN_DRV_3BIT, + 129, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(130, "HS1DIN3", 130, + 81, UNIPHIER_PIN_DRV_3BIT, + 130, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(131, "HS1DIN4", 131, + 82, UNIPHIER_PIN_DRV_3BIT, + 131, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(132, "HS1DIN5", 132, + 83, UNIPHIER_PIN_DRV_3BIT, + 132, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(133, "HS1DIN6", 133, + 84, UNIPHIER_PIN_DRV_3BIT, + 133, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(134, "HS1DIN7", 134, + 85, UNIPHIER_PIN_DRV_3BIT, + 134, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(135, "AO1IEC", 135, + 86, UNIPHIER_PIN_DRV_3BIT, + 135, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(136, "AO1ARC", 136, + 87, UNIPHIER_PIN_DRV_3BIT, + 136, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(137, "AO1DACCK", 137, + 88, UNIPHIER_PIN_DRV_3BIT, + 137, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(138, "AO1BCK", 138, + 89, UNIPHIER_PIN_DRV_3BIT, + 138, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(139, "AO1LRCK", 139, + 90, UNIPHIER_PIN_DRV_3BIT, + 139, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, + 91, UNIPHIER_PIN_DRV_3BIT, + 140, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141, + 92, UNIPHIER_PIN_DRV_3BIT, + 141, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142, + 93, UNIPHIER_PIN_DRV_3BIT, + 142, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(143, "HTPDN0", 143, + 94, UNIPHIER_PIN_DRV_3BIT, + 143, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(144, "LOCKN0", 144, + 95, UNIPHIER_PIN_DRV_3BIT, + 144, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(145, "HTPDN1", 145, + 96, UNIPHIER_PIN_DRV_3BIT, + 145, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(146, "LOCKN1", 146, + 97, UNIPHIER_PIN_DRV_3BIT, + 146, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, + 98, UNIPHIER_PIN_DRV_3BIT, + 147, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(148, "LR_GOUT", 148, + 99, UNIPHIER_PIN_DRV_3BIT, + 148, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(149, "XIRQ0", 149, + 100, UNIPHIER_PIN_DRV_3BIT, + 149, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(150, "XIRQ1", 150, + 101, UNIPHIER_PIN_DRV_3BIT, + 150, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(151, "XIRQ2", 151, + 102, UNIPHIER_PIN_DRV_3BIT, + 151, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(152, "XIRQ3", 152, + 103, UNIPHIER_PIN_DRV_3BIT, + 152, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(153, "XIRQ4", 153, + 104, UNIPHIER_PIN_DRV_3BIT, + 153, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(154, "XIRQ5", 154, + 105, UNIPHIER_PIN_DRV_3BIT, + 154, UNIPHIER_PIN_PULL_DOWN), 
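Each UNIPHIER_PINCTRL_PIN() entry in the table above bundles a pin number and name with three per-pin capability fields: an input-enable control index, a drive-strength control index plus a register-width class (UNIPHIER_PIN_DRV_1BIT/2BIT/3BIT, or FIXED4/FIXED5 for pins whose strength is hard-wired), and a pull-control index plus a default bias, with -1 meaning the pin has no such control. A minimal sketch of how one such descriptor could be modeled — the struct and field names here are illustrative assumptions, not the driver's actual layout, which packs these fields into the pinctrl core's per-pin data instead:

/* Illustrative model of one pin descriptor; names are invented. */
enum sketch_drv_type { DRV_1BIT, DRV_2BIT, DRV_3BIT, DRV_FIXED4, DRV_FIXED5 };
enum sketch_pull { PULL_NONE, PULL_UP, PULL_DOWN };

struct sketch_uniphier_pin {
	unsigned number;		/* pin number, e.g. 30 */
	const char *name;		/* pin name, e.g. "MDC" */
	int iectrl;			/* input-enable control index, -1 if none */
	int drvctrl;			/* drive-strength register index, -1 if fixed */
	enum sketch_drv_type drv_type;	/* width/encoding of the drive field */
	int pupdctrl;			/* pull-up/down register index, -1 if none */
	enum sketch_pull pull;		/* default bias applied by the SoC */
};

/* e.g. pin 30 ("MDC") above: 3-bit drive field at index 18,
 * pull control at index 30, defaulting to pull-down. */
static const struct sketch_uniphier_pin mdc_example = {
	.number = 30, .name = "MDC", .iectrl = 30,
	.drvctrl = 18, .drv_type = DRV_3BIT,
	.pupdctrl = 30, .pull = PULL_DOWN,
};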
+ UNIPHIER_PINCTRL_PIN(155, "XIRQ6", 155, + 106, UNIPHIER_PIN_DRV_3BIT, + 155, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(156, "XIRQ7", 156, + 107, UNIPHIER_PIN_DRV_3BIT, + 156, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(157, "XIRQ8", 157, + 108, UNIPHIER_PIN_DRV_3BIT, + 157, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(158, "XIRQ9", 158, + 109, UNIPHIER_PIN_DRV_3BIT, + 158, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(159, "XIRQ10", 159, + 110, UNIPHIER_PIN_DRV_3BIT, + 159, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(160, "XIRQ11", 160, + 111, UNIPHIER_PIN_DRV_3BIT, + 160, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(161, "XIRQ13", 161, + 112, UNIPHIER_PIN_DRV_3BIT, + 161, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(162, "XIRQ14", 162, + 113, UNIPHIER_PIN_DRV_3BIT, + 162, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(163, "XIRQ16", 163, + 114, UNIPHIER_PIN_DRV_3BIT, + 163, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(164, "XIRQ17", 164, + 115, UNIPHIER_PIN_DRV_3BIT, + 164, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(165, "XIRQ18", 165, + 116, UNIPHIER_PIN_DRV_3BIT, + 165, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(166, "XIRQ19", 166, + 117, UNIPHIER_PIN_DRV_3BIT, + 166, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(167, "XIRQ20", 167, + 118, UNIPHIER_PIN_DRV_3BIT, + 167, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(168, "PORT00", 168, + 119, UNIPHIER_PIN_DRV_3BIT, + 168, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(169, "PORT01", 169, + 120, UNIPHIER_PIN_DRV_3BIT, + 169, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(170, "PORT02", 170, + 121, UNIPHIER_PIN_DRV_3BIT, + 170, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(171, "PORT03", 171, + 122, UNIPHIER_PIN_DRV_3BIT, + 171, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(172, "PORT04", 172, + 123, UNIPHIER_PIN_DRV_3BIT, + 172, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(173, "CK27FO", 173, + 124, UNIPHIER_PIN_DRV_3BIT, + 173, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(174, "PHSYNCO", 174, + 125, UNIPHIER_PIN_DRV_3BIT, + 174, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(175, "PVSYNCO", 175, + 126, UNIPHIER_PIN_DRV_3BIT, + 175, UNIPHIER_PIN_PULL_DOWN), +}; + +static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25}; +static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29}; +static const int emmc_dat8_muxvals[] = {0, 0, 0, 0}; +static const unsigned ether_rgmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45}; +static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; +static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, + 41, 42, 45}; +static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; +static const unsigned i2c0_pins[] = {63, 64}; +static const int i2c0_muxvals[] = {0, 0}; +static const unsigned i2c1_pins[] = {65, 66}; +static const int i2c1_muxvals[] = {0, 0}; +static const unsigned i2c3_pins[] = {67, 68}; +static const int i2c3_muxvals[] = {1, 1}; +static const unsigned i2c4_pins[] = {61, 62}; +static const int i2c4_muxvals[] = {1, 1}; +static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned sd_pins[] = {10, 11, 12, 13, 14, 15, 16, 17}; +static const int sd_muxvals[] = {3, 3, 3, 3, 3, 3, 3, 3}; /* No SDVOLC */ +static const unsigned 
system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17}; +static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2}; +static const unsigned system_bus_cs1_pins[] = {0}; +static const int system_bus_cs1_muxvals[] = {0}; +static const unsigned uart0_pins[] = {54, 55}; +static const int uart0_muxvals[] = {0, 0}; +static const unsigned uart1_pins[] = {58, 59}; +static const int uart1_muxvals[] = {1, 1}; +static const unsigned uart2_pins[] = {90, 91}; +static const int uart2_muxvals[] = {1, 1}; +static const unsigned uart3_pins[] = {94, 95}; +static const int uart3_muxvals[] = {1, 1}; +static const unsigned usb0_pins[] = {46, 47}; +static const int usb0_muxvals[] = {0, 0}; +static const unsigned usb1_pins[] = {48, 49}; +static const int usb1_muxvals[] = {0, 0}; +static const unsigned usb2_pins[] = {50, 51}; +static const int usb2_muxvals[] = {0, 0}; +static const unsigned usb3_pins[] = {52, 53}; +static const int usb3_muxvals[] = {0, 0}; +static const unsigned port_range_pins[] = { + 168, 169, 170, 171, 172, 173, 174, 175, /* PORT0x */ + 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */ + 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */ + 16, 17, 18, 30, 31, 32, 33, 34, /* PORT3x */ + 35, 36, 37, 38, 39, 40, 41, 42, /* PORT4x */ + 43, 44, 45, 46, 47, 48, 49, 50, /* PORT5x */ + 51, 52, 53, 54, 55, 56, 57, 58, /* PORT6x */ + 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */ + 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */ + 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */ + 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ + 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */ + 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ + 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ + 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ + 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ + 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ + 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */ + 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ + 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */ + 163, 164, 165, 166, 167, /* PORT25x */ +}; +static const int port_range_muxvals[] = { + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ + -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* 
PORT24x */ + 15, 15, 15, 15, 15, /* PORT25x */ +}; +static const unsigned xirq_pins[] = { + 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */ + 157, 158, 159, 160, 85, 161, 162, 84, /* XIRQ8-15 */ + 163, 164, 165, 166, 167, 146, 52, 53, /* XIRQ16-23 */ +}; +static const int xirq_muxvals[] = { + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ + 14, 14, 14, 14, 13, 14, 14, 13, /* XIRQ8-15 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ +}; +static const unsigned xirq_alternatives_pins[] = { + 94, 95, 96, 97, 98, 99, 100, 101, /* XIRQ0-7 */ + 102, 103, 104, 105, 106, 107, /* XIRQ8-11,13,14 */ + 108, 109, 110, 111, 112, 147, 141, 142, /* XIRQ16-23 */ +}; +static const int xirq_alternatives_muxvals[] = { + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ + 14, 14, 14, 14, 14, 14, /* XIRQ8-11,13,14 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ +}; + +static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = { + UNIPHIER_PINCTRL_GROUP(emmc), + UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_rgmii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), + UNIPHIER_PINCTRL_GROUP(i2c0), + UNIPHIER_PINCTRL_GROUP(i2c1), + UNIPHIER_PINCTRL_GROUP(i2c3), + UNIPHIER_PINCTRL_GROUP(i2c4), + UNIPHIER_PINCTRL_GROUP(nand), + UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(uart0), + UNIPHIER_PINCTRL_GROUP(uart1), + UNIPHIER_PINCTRL_GROUP(uart2), + UNIPHIER_PINCTRL_GROUP(uart3), + UNIPHIER_PINCTRL_GROUP(usb0), + UNIPHIER_PINCTRL_GROUP(usb1), + UNIPHIER_PINCTRL_GROUP(usb2), + UNIPHIER_PINCTRL_GROUP(usb3), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives), + UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27), + UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28), + UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29), + UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30), + 
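The pin tables above come in parallel pairs: for a group, pins[i] is routed to the selected function by programming mux value muxvals[i], and a value of -1 marks either a hole in the range or a dedicated pin that needs no mux write (the xirq and xirq_alternatives pairs, for instance, give most external IRQ lines two candidate pins, surfaced below as the plain and "b" groups). A hedged sketch of how a core might walk such arrays when activating a group — sketch_set_mux() and set_pin_mux() are invented stand-ins, not the driver's real accessors:

/* Stand-in for the real register accessor that writes a mux value. */
static void set_pin_mux(unsigned pin, int muxval)
{
	(void)pin;
	(void)muxval;	/* real code would write muxval into the pin's mux field */
}

/* Sketch: apply a group given parallel pins[]/muxvals[] arrays. */
static int sketch_set_mux(const unsigned *pins, const int *muxvals,
			  unsigned npins)
{
	unsigned i;

	for (i = 0; i < npins; i++) {
		if (muxvals[i] < 0)
			continue;	/* -1: hole or pin with no mux setting */
		set_pin_mux(pins[i], muxvals[i]);
	}
	return 0;
}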
UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31), + UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32), + UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33), + UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34), + UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35), + UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36), + UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37), + UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38), + UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39), + UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40), + UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41), + UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42), + UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43), + UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44), + UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45), + UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46), + UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47), + UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48), + UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49), + UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50), + UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51), + UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52), + UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53), + UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54), + UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55), + UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56), + UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57), + UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58), + UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59), + UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60), + UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61), + UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62), + UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63), + UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64), + UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65), + UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66), + UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67), + UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68), + UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69), + UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70), + UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71), + UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72), + UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73), + UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74), + UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75), + UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76), + UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77), + UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78), + UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79), + UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80), + UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81), + UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82), + UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83), + UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84), + UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85), + UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86), + UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87), + UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96), + UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97), + UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98), + UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99), + UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100), + 
UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101), + UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102), + UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103), + UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104), + UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105), + UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106), + UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107), + UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108), + UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109), + UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110), + UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111), + UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112), + UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113), + UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114), + UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115), + UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116), + UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117), + UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118), + UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119), + UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144), + UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145), + UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146), + UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147), + UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148), + UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149), + UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150), + UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151), + UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160), + UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161), + UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162), + UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163), + UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164), + UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165), + UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166), + UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167), + UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168), + UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169), + UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170), + UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171), + UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172), + UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173), + UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174), + UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175), + UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176), + UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177), + UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178), + UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179), + UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180), + UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181), + UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182), + UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183), + UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184), + UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185), + UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186), + UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187), + UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188), + UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189), + UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190), + UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191), + UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192), + 
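The portNx single-pin groups follow a fixed positional mapping into port_range: index = N * 8 + x, with unpopulated decades (port11x, port15x-17x, port19x) left as -1 holes rather than renumbered, which is why port120 sits at index 96 and port254 at index 204. A small sketch of that mapping, under the assumption that the convention is purely positional:

/* Sketch: map a "portNx" name to its index in port_range_pins[],
 * assuming the N * 8 + x convention visible in the tables above. */
static int sketch_port_index(unsigned decade, unsigned bit)
{
	if (bit >= 8)
		return -1;
	return decade * 8 + bit;	/* e.g. port254 -> 25 * 8 + 4 = 204 */
}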
UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193), + UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194), + UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195), + UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196), + UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197), + UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198), + UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199), + UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200), + UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201), + UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202), + UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203), + UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0b, xirq_alternatives, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1b, xirq_alternatives, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2b, xirq_alternatives, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5b, xirq_alternatives, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6b, xirq_alternatives, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7b, xirq_alternatives, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8b, xirq_alternatives, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9b, xirq_alternatives, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10b, xirq_alternatives, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11b, xirq_alternatives, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13b, xirq_alternatives, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21b, xirq_alternatives, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22b, xirq_alternatives, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23b, xirq_alternatives, 21), +}; + +static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_rgmii_groups[] = {"ether_rgmii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; +static const char * const 
i2c0_groups[] = {"i2c0"}; +static const char * const i2c1_groups[] = {"i2c1"}; +static const char * const i2c3_groups[] = {"i2c3"}; +static const char * const i2c4_groups[] = {"i2c4"}; +static const char * const nand_groups[] = {"nand"}; +static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs1"}; +static const char * const uart0_groups[] = {"uart0"}; +static const char * const uart1_groups[] = {"uart1"}; +static const char * const uart2_groups[] = {"uart2"}; +static const char * const uart3_groups[] = {"uart3"}; +static const char * const usb0_groups[] = {"usb0"}; +static const char * const usb1_groups[] = {"usb1"}; +static const char * const usb2_groups[] = {"usb2"}; +static const char * const usb3_groups[] = {"usb3"}; +static const char * const port_groups[] = { + "port00", "port01", "port02", "port03", + "port04", "port05", "port06", "port07", + "port10", "port11", "port12", "port13", + "port14", "port15", "port16", "port17", + "port20", "port21", "port22", "port23", + "port24", "port25", "port26", "port27", + "port30", "port31", "port32", "port33", + "port34", "port35", "port36", "port37", + "port40", "port41", "port42", "port43", + "port44", "port45", "port46", "port47", + "port50", "port51", "port52", "port53", + "port54", "port55", "port56", "port57", + "port60", "port61", "port62", "port63", + "port64", "port65", "port66", "port67", + "port70", "port71", "port72", "port73", + "port74", "port75", "port76", "port77", + "port80", "port81", "port82", "port83", + "port84", "port85", "port86", "port87", + "port90", "port91", "port92", "port93", + "port94", "port95", "port96", "port97", + "port100", "port101", "port102", "port103", + "port104", "port105", "port106", "port107", + /* port110-117 missing */ + "port120", "port121", "port122", "port123", + "port124", "port125", "port126", "port127", + "port130", "port131", "port132", "port133", + "port134", "port135", "port136", "port137", + "port140", "port141", "port142", "port143", + "port144", "port145", "port146", "port147", + /* port150-177 missing */ + "port180", "port181", "port182", "port183", + "port184", "port185", "port186", "port187", + /* port190-197 missing */ + "port200", "port201", "port202", "port203", + "port204", "port205", "port206", "port207", + "port210", "port211", "port212", "port213", + "port214", "port215", "port216", "port217", + "port220", "port221", "port222", "port223", + "port224", "port225", "port226", "port227", + "port230", "port231", "port232", "port233", + "port234", "port235", "port236", "port237", + "port240", "port241", "port242", "port243", + "port244", "port245", "port246", "port247", + "port250", "port251", "port252", "port253", + "port254", +}; +static const char * const xirq_groups[] = { + "xirq0", "xirq1", "xirq2", "xirq3", + "xirq4", "xirq5", "xirq6", "xirq7", + "xirq8", "xirq9", "xirq10", "xirq11", + "xirq12", "xirq13", "xirq14", "xirq15", + "xirq16", "xirq17", "xirq18", "xirq19", + "xirq20", "xirq21", "xirq22", "xirq23", + "xirq0b", "xirq1b", "xirq2b", "xirq3b", + "xirq4b", "xirq5b", "xirq6b", "xirq7b", + "xirq8b", "xirq9b", "xirq10b", "xirq11b", + /* none */ "xirq13b", "xirq14b", /* none */ + "xirq16b", "xirq17b", "xirq18b", "xirq19b", + "xirq20b", "xirq21b", "xirq22b", "xirq23b", +}; + +static const struct uniphier_pinmux_function uniphier_ld20_functions[] = { + UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_rgmii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), + UNIPHIER_PINMUX_FUNCTION(i2c0), + 
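Each UNIPHIER_PINMUX_FUNCTION(x) entry in the function table pairs the function name with its x_groups[] string array purely by token pasting, so the *_groups tables above must match the function names exactly. A plausible shape for such a macro — a sketch assuming the name##_groups convention, not necessarily the driver's actual definition:

#define SKETCH_ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct sketch_pinmux_function {
	const char *name;
	const char * const *groups;
	unsigned num_groups;
};

/* SKETCH_PINMUX_FUNCTION(i2c0) would expand to
 * { "i2c0", i2c0_groups, SKETCH_ARRAY_SIZE(i2c0_groups) }. */
#define SKETCH_PINMUX_FUNCTION(func)				\
{								\
	.name = #func,						\
	.groups = func##_groups,				\
	.num_groups = SKETCH_ARRAY_SIZE(func##_groups),		\
}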
UNIPHIER_PINMUX_FUNCTION(i2c1), + UNIPHIER_PINMUX_FUNCTION(i2c3), + UNIPHIER_PINMUX_FUNCTION(i2c4), + UNIPHIER_PINMUX_FUNCTION(nand), + UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), + UNIPHIER_PINMUX_FUNCTION(uart0), + UNIPHIER_PINMUX_FUNCTION(uart1), + UNIPHIER_PINMUX_FUNCTION(uart2), + UNIPHIER_PINMUX_FUNCTION(uart3), + UNIPHIER_PINMUX_FUNCTION(usb0), + UNIPHIER_PINMUX_FUNCTION(usb1), + UNIPHIER_PINMUX_FUNCTION(usb2), + UNIPHIER_PINMUX_FUNCTION(usb3), + UNIPHIER_PINMUX_FUNCTION(port), + UNIPHIER_PINMUX_FUNCTION(xirq), +}; + +static struct uniphier_pinctrl_socdata uniphier_ld20_pindata = { + .pins = uniphier_ld20_pins, + .npins = ARRAY_SIZE(uniphier_ld20_pins), + .groups = uniphier_ld20_groups, + .groups_count = ARRAY_SIZE(uniphier_ld20_groups), + .functions = uniphier_ld20_functions, + .functions_count = ARRAY_SIZE(uniphier_ld20_functions), + .caps = UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL, +}; + +static int uniphier_ld20_pinctrl_probe(struct platform_device *pdev) +{ + return uniphier_pinctrl_probe(pdev, &uniphier_ld20_pindata); +} + +static const struct of_device_id uniphier_ld20_pinctrl_match[] = { + { .compatible = "socionext,uniphier-ld20-pinctrl" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_ld20_pinctrl_match); + +static struct platform_driver uniphier_ld20_pinctrl_driver = { + .probe = uniphier_ld20_pinctrl_probe, + .driver = { + .name = "uniphier-ld20-pinctrl", + .of_match_table = uniphier_ld20_pinctrl_match, + }, +}; +module_platform_driver(uniphier_ld20_pinctrl_driver); + +MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); +MODULE_DESCRIPTION("UniPhier PH1-LD20 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c index 4a0439c80aa0..3edfb6f9d6df 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c @@ -19,544 +19,592 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "ph1-ld4-pinctrl" - -static const struct pinctrl_pin_desc ph1_ld4_pins[] = { +static const struct pinctrl_pin_desc uniphier_ld4_pins[] = { UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_4_8, + 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_4_8, + 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_4_8, + 10, UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_4_8, + 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_4_8, + 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_4_8, + 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_4_8, + 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "EA8", 0, - 15, UNIPHIER_PIN_DRV_4_8, + 15, UNIPHIER_PIN_DRV_1BIT, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "EA9", 0, - 16, UNIPHIER_PIN_DRV_4_8, + 16, UNIPHIER_PIN_DRV_1BIT, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "EA10", 0, - 17, UNIPHIER_PIN_DRV_4_8, + 17, UNIPHIER_PIN_DRV_1BIT, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "EA11", 
0, - 18, UNIPHIER_PIN_DRV_4_8, + 18, UNIPHIER_PIN_DRV_1BIT, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "EA12", 0, - 19, UNIPHIER_PIN_DRV_4_8, + 19, UNIPHIER_PIN_DRV_1BIT, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(12, "EA13", 0, - 20, UNIPHIER_PIN_DRV_4_8, + 20, UNIPHIER_PIN_DRV_1BIT, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(13, "EA14", 0, - 21, UNIPHIER_PIN_DRV_4_8, + 21, UNIPHIER_PIN_DRV_1BIT, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "EA15", 0, - 22, UNIPHIER_PIN_DRV_4_8, + 22, UNIPHIER_PIN_DRV_1BIT, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE, - 23, UNIPHIER_PIN_DRV_4_8, + 23, UNIPHIER_PIN_DRV_1BIT, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, - 24, UNIPHIER_PIN_DRV_4_8, + 24, UNIPHIER_PIN_DRV_1BIT, 24, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, - 25, UNIPHIER_PIN_DRV_4_8, + 25, UNIPHIER_PIN_DRV_1BIT, 25, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE, - 27, UNIPHIER_PIN_DRV_4_8, + 27, UNIPHIER_PIN_DRV_1BIT, 27, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE, - 28, UNIPHIER_PIN_DRV_4_8, + 28, UNIPHIER_PIN_DRV_1BIT, 28, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE, - 29, UNIPHIER_PIN_DRV_4_8, + 29, UNIPHIER_PIN_DRV_1BIT, 29, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_8_12_16_20, + 0, UNIPHIER_PIN_DRV_2BIT, 146, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_8_12_16_20, + 1, UNIPHIER_PIN_DRV_2BIT, 147, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_8_12_16_20, + 2, UNIPHIER_PIN_DRV_2BIT, 148, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_8_12_16_20, + 3, UNIPHIER_PIN_DRV_2BIT, 149, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_8_12_16_20, + 4, UNIPHIER_PIN_DRV_2BIT, 150, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_8_12_16_20, + 5, UNIPHIER_PIN_DRV_2BIT, 151, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE, - 24, UNIPHIER_PIN_DRV_8_12_16_20, + 6, UNIPHIER_PIN_DRV_2BIT, 152, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE, - 28, UNIPHIER_PIN_DRV_8_12_16_20, + 7, UNIPHIER_PIN_DRV_2BIT, 153, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_8_12_16_20, + 8, UNIPHIER_PIN_DRV_2BIT, 154, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_8_12_16_20, + 9, UNIPHIER_PIN_DRV_2BIT, 155, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6, - 39, UNIPHIER_PIN_DRV_4_8, + 39, UNIPHIER_PIN_DRV_1BIT, 39, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6, - 40, UNIPHIER_PIN_DRV_4_8, + 40, UNIPHIER_PIN_DRV_1BIT, 40, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6, - 41, UNIPHIER_PIN_DRV_4_8, + 41, UNIPHIER_PIN_DRV_1BIT, 41, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6, - 42, 
UNIPHIER_PIN_DRV_4_8, + 42, UNIPHIER_PIN_DRV_1BIT, 42, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6, - 43, UNIPHIER_PIN_DRV_4_8, + 43, UNIPHIER_PIN_DRV_1BIT, 43, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6, - 44, UNIPHIER_PIN_DRV_4_8, + 44, UNIPHIER_PIN_DRV_1BIT, 44, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6, - 45, UNIPHIER_PIN_DRV_4_8, + 45, UNIPHIER_PIN_DRV_1BIT, 45, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6, - 46, UNIPHIER_PIN_DRV_4_8, + 46, UNIPHIER_PIN_DRV_1BIT, 46, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(40, "MDC", 6, - 47, UNIPHIER_PIN_DRV_4_8, + 47, UNIPHIER_PIN_DRV_1BIT, 47, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(41, "MDIO", 6, - 48, UNIPHIER_PIN_DRV_4_8, + 48, UNIPHIER_PIN_DRV_1BIT, 48, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6, - 49, UNIPHIER_PIN_DRV_4_8, + 49, UNIPHIER_PIN_DRV_1BIT, 49, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6, - 50, UNIPHIER_PIN_DRV_4_8, + 50, UNIPHIER_PIN_DRV_1BIT, 50, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, - 40, UNIPHIER_PIN_DRV_8_12_16_20, + 10, UNIPHIER_PIN_DRV_2BIT, 156, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, - 44, UNIPHIER_PIN_DRV_8_12_16_20, + 11, UNIPHIER_PIN_DRV_2BIT, 157, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, - 48, UNIPHIER_PIN_DRV_8_12_16_20, + 12, UNIPHIER_PIN_DRV_2BIT, 158, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, - 52, UNIPHIER_PIN_DRV_8_12_16_20, + 13, UNIPHIER_PIN_DRV_2BIT, 159, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, - 56, UNIPHIER_PIN_DRV_8_12_16_20, + 14, UNIPHIER_PIN_DRV_2BIT, 160, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, - 60, UNIPHIER_PIN_DRV_8_12_16_20, + 15, UNIPHIER_PIN_DRV_2BIT, 161, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE, - 51, UNIPHIER_PIN_DRV_4_8, + 51, UNIPHIER_PIN_DRV_1BIT, 51, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE, - 52, UNIPHIER_PIN_DRV_4_8, + 52, UNIPHIER_PIN_DRV_1BIT, 52, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, - 53, UNIPHIER_PIN_DRV_4_8, + 53, UNIPHIER_PIN_DRV_1BIT, 53, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0, - 54, UNIPHIER_PIN_DRV_4_8, + 54, UNIPHIER_PIN_DRV_1BIT, 54, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0, - 55, UNIPHIER_PIN_DRV_4_8, + 55, UNIPHIER_PIN_DRV_1BIT, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0, - 56, UNIPHIER_PIN_DRV_4_8, + 56, UNIPHIER_PIN_DRV_1BIT, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0, - 57, UNIPHIER_PIN_DRV_4_8, + 57, UNIPHIER_PIN_DRV_1BIT, 57, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0, - 58, UNIPHIER_PIN_DRV_4_8, + 58, UNIPHIER_PIN_DRV_1BIT, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "PCREG", 0, - 59, UNIPHIER_PIN_DRV_4_8, + 59, UNIPHIER_PIN_DRV_1BIT, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0, - 60, UNIPHIER_PIN_DRV_4_8, + 60, UNIPHIER_PIN_DRV_1BIT, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0, - 61, UNIPHIER_PIN_DRV_4_8, + 61, UNIPHIER_PIN_DRV_1BIT, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0, - 62, UNIPHIER_PIN_DRV_4_8, + 62, UNIPHIER_PIN_DRV_1BIT, 62, UNIPHIER_PIN_PULL_DOWN), 
UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0, - 63, UNIPHIER_PIN_DRV_4_8, + 63, UNIPHIER_PIN_DRV_1BIT, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0, - 64, UNIPHIER_PIN_DRV_4_8, + 64, UNIPHIER_PIN_DRV_1BIT, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0, - 65, UNIPHIER_PIN_DRV_4_8, + 65, UNIPHIER_PIN_DRV_1BIT, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0, - 66, UNIPHIER_PIN_DRV_4_8, + 66, UNIPHIER_PIN_DRV_1BIT, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "PCWE", 0, - 67, UNIPHIER_PIN_DRV_4_8, + 67, UNIPHIER_PIN_DRV_1BIT, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "PCOE", 0, - 68, UNIPHIER_PIN_DRV_4_8, + 68, UNIPHIER_PIN_DRV_1BIT, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0, - 69, UNIPHIER_PIN_DRV_4_8, + 69, UNIPHIER_PIN_DRV_1BIT, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0, - 70, UNIPHIER_PIN_DRV_4_8, + 70, UNIPHIER_PIN_DRV_1BIT, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0, - 71, UNIPHIER_PIN_DRV_4_8, + 71, UNIPHIER_PIN_DRV_1BIT, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0, - 72, UNIPHIER_PIN_DRV_4_8, + 72, UNIPHIER_PIN_DRV_1BIT, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0, - 73, UNIPHIER_PIN_DRV_4_8, + 73, UNIPHIER_PIN_DRV_1BIT, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0, - 74, UNIPHIER_PIN_DRV_4_8, + 74, UNIPHIER_PIN_DRV_1BIT, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0, - 75, UNIPHIER_PIN_DRV_4_8, + 75, UNIPHIER_PIN_DRV_1BIT, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0, - 76, UNIPHIER_PIN_DRV_4_8, + 76, UNIPHIER_PIN_DRV_1BIT, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0, - 77, UNIPHIER_PIN_DRV_4_8, + 77, UNIPHIER_PIN_DRV_1BIT, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0, - 78, UNIPHIER_PIN_DRV_4_8, + 78, UNIPHIER_PIN_DRV_1BIT, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0, - 79, UNIPHIER_PIN_DRV_4_8, + 79, UNIPHIER_PIN_DRV_1BIT, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0, - 80, UNIPHIER_PIN_DRV_4_8, + 80, UNIPHIER_PIN_DRV_1BIT, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0, - 81, UNIPHIER_PIN_DRV_4_8, + 81, UNIPHIER_PIN_DRV_1BIT, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0, - 82, UNIPHIER_PIN_DRV_4_8, + 82, UNIPHIER_PIN_DRV_1BIT, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0, - 83, UNIPHIER_PIN_DRV_4_8, + 83, UNIPHIER_PIN_DRV_1BIT, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0, - 84, UNIPHIER_PIN_DRV_4_8, + 84, UNIPHIER_PIN_DRV_1BIT, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0, - 85, UNIPHIER_PIN_DRV_4_8, + 85, UNIPHIER_PIN_DRV_1BIT, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0, - 86, UNIPHIER_PIN_DRV_4_8, + 86, UNIPHIER_PIN_DRV_1BIT, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0, - 87, UNIPHIER_PIN_DRV_4_8, + 87, UNIPHIER_PIN_DRV_1BIT, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0, - 88, UNIPHIER_PIN_DRV_4_8, + 88, UNIPHIER_PIN_DRV_1BIT, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0, - 89, UNIPHIER_PIN_DRV_4_8, + 89, UNIPHIER_PIN_DRV_1BIT, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0, - 90, UNIPHIER_PIN_DRV_4_8, + 90, UNIPHIER_PIN_DRV_1BIT, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0, - 91, 
UNIPHIER_PIN_DRV_4_8, + 91, UNIPHIER_PIN_DRV_1BIT, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0, - 92, UNIPHIER_PIN_DRV_4_8, + 92, UNIPHIER_PIN_DRV_1BIT, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0, - 93, UNIPHIER_PIN_DRV_4_8, + 93, UNIPHIER_PIN_DRV_1BIT, 93, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "AGCI", 3, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 162, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "AGCR", 4, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 163, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 164, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0, - 94, UNIPHIER_PIN_DRV_4_8, + 94, UNIPHIER_PIN_DRV_1BIT, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0, - 95, UNIPHIER_PIN_DRV_4_8, + 95, UNIPHIER_PIN_DRV_1BIT, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, - 96, UNIPHIER_PIN_DRV_4_8, + 96, UNIPHIER_PIN_DRV_1BIT, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE, - 97, UNIPHIER_PIN_DRV_4_8, + 97, UNIPHIER_PIN_DRV_1BIT, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE, - 98, UNIPHIER_PIN_DRV_4_8, + 98, UNIPHIER_PIN_DRV_1BIT, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, - 99, UNIPHIER_PIN_DRV_4_8, + 99, UNIPHIER_PIN_DRV_1BIT, 99, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, - 100, UNIPHIER_PIN_DRV_4_8, + 100, UNIPHIER_PIN_DRV_1BIT, 100, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, - 101, UNIPHIER_PIN_DRV_4_8, + 101, UNIPHIER_PIN_DRV_1BIT, 101, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(112, "HIN", 1, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(113, "VIN", 2, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE, - 102, UNIPHIER_PIN_DRV_4_8, + 102, UNIPHIER_PIN_DRV_1BIT, 102, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE, - 103, UNIPHIER_PIN_DRV_4_8, + 
103, UNIPHIER_PIN_DRV_1BIT, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE, - 104, UNIPHIER_PIN_DRV_4_8, + 104, UNIPHIER_PIN_DRV_1BIT, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE, - 105, UNIPHIER_PIN_DRV_4_8, + 105, UNIPHIER_PIN_DRV_1BIT, 105, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE, - 106, UNIPHIER_PIN_DRV_4_8, + 106, UNIPHIER_PIN_DRV_1BIT, 106, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE, - 107, UNIPHIER_PIN_DRV_4_8, + 107, UNIPHIER_PIN_DRV_1BIT, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(120, "TCON6", 0, - 108, UNIPHIER_PIN_DRV_4_8, + 108, UNIPHIER_PIN_DRV_1BIT, 108, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(121, "TCON7", 0, - 109, UNIPHIER_PIN_DRV_4_8, + 109, UNIPHIER_PIN_DRV_1BIT, 109, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "PWMA", 0, - 110, UNIPHIER_PIN_DRV_4_8, + 110, UNIPHIER_PIN_DRV_1BIT, 110, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0, - 111, UNIPHIER_PIN_DRV_4_8, + 111, UNIPHIER_PIN_DRV_1BIT, 111, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0, - 112, UNIPHIER_PIN_DRV_4_8, + 112, UNIPHIER_PIN_DRV_1BIT, 112, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0, - 113, UNIPHIER_PIN_DRV_4_8, + 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0, - 114, UNIPHIER_PIN_DRV_4_8, + 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0, - 115, UNIPHIER_PIN_DRV_4_8, + 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0, - 116, UNIPHIER_PIN_DRV_4_8, + 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0, - 117, UNIPHIER_PIN_DRV_4_8, + 117, UNIPHIER_PIN_DRV_1BIT, 117, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0, - 118, UNIPHIER_PIN_DRV_4_8, + 118, UNIPHIER_PIN_DRV_1BIT, 118, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0, - 119, UNIPHIER_PIN_DRV_4_8, + 119, UNIPHIER_PIN_DRV_1BIT, 119, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0, - 120, UNIPHIER_PIN_DRV_4_8, + 120, UNIPHIER_PIN_DRV_1BIT, 120, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0, - 121, UNIPHIER_PIN_DRV_4_8, + 121, UNIPHIER_PIN_DRV_1BIT, 121, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0, - 122, UNIPHIER_PIN_DRV_4_8, + 122, UNIPHIER_PIN_DRV_1BIT, 122, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(135, "PORT00", 0, - 123, UNIPHIER_PIN_DRV_4_8, + 123, UNIPHIER_PIN_DRV_1BIT, 123, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(136, "PORT01", 0, - 124, UNIPHIER_PIN_DRV_4_8, + 124, UNIPHIER_PIN_DRV_1BIT, 124, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(137, "PORT02", 0, - 125, UNIPHIER_PIN_DRV_4_8, + 125, UNIPHIER_PIN_DRV_1BIT, 125, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(138, "PORT03", 0, - 126, UNIPHIER_PIN_DRV_4_8, + 126, UNIPHIER_PIN_DRV_1BIT, 126, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(139, "PORT04", 0, - 127, UNIPHIER_PIN_DRV_4_8, + 127, UNIPHIER_PIN_DRV_1BIT, 127, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(140, "PORT05", 0, - 128, UNIPHIER_PIN_DRV_4_8, + 128, UNIPHIER_PIN_DRV_1BIT, 128, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(141, "PORT06", 0, - 129, UNIPHIER_PIN_DRV_4_8, + 129, UNIPHIER_PIN_DRV_1BIT, 129, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "PORT07", 0, - 130, UNIPHIER_PIN_DRV_4_8, + 130, 
UNIPHIER_PIN_DRV_1BIT, 130, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(143, "PORT10", 0, - 131, UNIPHIER_PIN_DRV_4_8, + 131, UNIPHIER_PIN_DRV_1BIT, 131, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(144, "PORT11", 0, - 132, UNIPHIER_PIN_DRV_4_8, + 132, UNIPHIER_PIN_DRV_1BIT, 132, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(145, "PORT12", 0, - 133, UNIPHIER_PIN_DRV_4_8, + 133, UNIPHIER_PIN_DRV_1BIT, 133, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(146, "PORT13", 0, - 134, UNIPHIER_PIN_DRV_4_8, + 134, UNIPHIER_PIN_DRV_1BIT, 134, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(147, "PORT14", 0, - 135, UNIPHIER_PIN_DRV_4_8, + 135, UNIPHIER_PIN_DRV_1BIT, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(148, "PORT15", 0, - 136, UNIPHIER_PIN_DRV_4_8, + 136, UNIPHIER_PIN_DRV_1BIT, 136, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(149, "PORT16", 0, - 137, UNIPHIER_PIN_DRV_4_8, + 137, UNIPHIER_PIN_DRV_1BIT, 137, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE, - 138, UNIPHIER_PIN_DRV_4_8, + 138, UNIPHIER_PIN_DRV_1BIT, 138, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(151, "PORT20", 0, - 139, UNIPHIER_PIN_DRV_4_8, + 139, UNIPHIER_PIN_DRV_1BIT, 139, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(152, "PORT21", 0, - 140, UNIPHIER_PIN_DRV_4_8, + 140, UNIPHIER_PIN_DRV_1BIT, 140, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(153, "PORT22", 0, - 141, UNIPHIER_PIN_DRV_4_8, + 141, UNIPHIER_PIN_DRV_1BIT, 141, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(154, "PORT23", 0, - 142, UNIPHIER_PIN_DRV_4_8, + 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE, - 143, UNIPHIER_PIN_DRV_4_8, + 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(156, "PORT25", 0, - 144, UNIPHIER_PIN_DRV_4_8, + 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(157, "PORT26", 0, - 145, UNIPHIER_PIN_DRV_4_8, + 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_UP), + /* dedicated pins */ + UNIPHIER_PINCTRL_PIN(165, "ED0", -1, + 0, UNIPHIER_PIN_DRV_1BIT, + 0, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(166, "ED1", -1, + 1, UNIPHIER_PIN_DRV_1BIT, + 1, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(167, "ED2", -1, + 2, UNIPHIER_PIN_DRV_1BIT, + 2, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(168, "ED3", -1, + 3, UNIPHIER_PIN_DRV_1BIT, + 3, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(169, "ED4", -1, + 4, UNIPHIER_PIN_DRV_1BIT, + 4, UNIPHIER_PIN_PULL_DOWN), + 
UNIPHIER_PINCTRL_PIN(170, "ED5", -1, + 5, UNIPHIER_PIN_DRV_1BIT, + 5, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(171, "ED6", -1, + 6, UNIPHIER_PIN_DRV_1BIT, + 6, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(172, "ED7", -1, + 7, UNIPHIER_PIN_DRV_1BIT, + 7, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(173, "ERXW", -1, + 26, UNIPHIER_PIN_DRV_1BIT, + 26, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(174, "XECS1", -1, + 30, UNIPHIER_PIN_DRV_1BIT, + 30, UNIPHIER_PIN_PULL_UP), }; static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27}; -static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1}; +static const int emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1}; static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31}; -static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const int emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const unsigned ether_mii_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 136, 137, 138, 139, 140, + 141, 142}; +static const int ether_mii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 4, 4, 4, 4, 4}; +static const unsigned ether_rmii_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43}; +static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned i2c0_pins[] = {102, 103}; -static const unsigned i2c0_muxvals[] = {0, 0}; +static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {104, 105}; -static const unsigned i2c1_muxvals[] = {0, 0}; +static const int i2c1_muxvals[] = {0, 0}; static const unsigned i2c2_pins[] = {108, 109}; -static const unsigned i2c2_muxvals[] = {2, 2}; +static const int i2c2_muxvals[] = {2, 2}; static const unsigned i2c3_pins[] = {108, 109}; -static const unsigned i2c3_muxvals[] = {3, 3}; +static const int i2c3_muxvals[] = {3, 3}; static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159, 160, 161, 162, 163, 164}; -static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned nand_cs1_pins[] = {22, 23}; -static const unsigned nand_cs1_muxvals[] = {0, 0}; +static const int nand_cs1_muxvals[] = {0, 0}; static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52}; -static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {16, 17, 18, 19, 20, 165, 166, 167, + 168, 169, 170, 171, 172, 173}; +static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, + -1, -1, -1}; +static const unsigned system_bus_cs0_pins[] = {155}; +static const int system_bus_cs0_muxvals[] = {1}; +static const unsigned system_bus_cs1_pins[] = {174}; +static const int system_bus_cs1_muxvals[] = {-1}; +static const unsigned system_bus_cs2_pins[] = {64}; +static const int system_bus_cs2_muxvals[] = {1}; +static const unsigned system_bus_cs3_pins[] = {156}; +static const int system_bus_cs3_muxvals[] = {1}; static const unsigned uart0_pins[] = {85, 88}; -static const unsigned uart0_muxvals[] = {1, 1}; +static const int uart0_muxvals[] = {1, 1}; static const unsigned uart1_pins[] = {155, 156}; -static const unsigned uart1_muxvals[] = {13, 13}; +static const int uart1_muxvals[] = {13, 13}; static const unsigned uart1b_pins[] = {69, 70}; -static const unsigned uart1b_muxvals[] = {23, 23}; +static const int uart1b_muxvals[] = {23, 23}; static const unsigned uart2_pins[] = {128, 129}; -static 
const unsigned uart2_muxvals[] = {13, 13}; +static const int uart2_muxvals[] = {13, 13}; static const unsigned uart3_pins[] = {110, 111}; -static const unsigned uart3_muxvals[] = {1, 1}; +static const int uart3_muxvals[] = {1, 1}; static const unsigned usb0_pins[] = {53, 54}; -static const unsigned usb0_muxvals[] = {0, 0}; +static const int usb0_muxvals[] = {0, 0}; static const unsigned usb1_pins[] = {55, 56}; -static const unsigned usb1_muxvals[] = {0, 0}; +static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {155, 156}; -static const unsigned usb2_muxvals[] = {4, 4}; +static const int usb2_muxvals[] = {4, 4}; static const unsigned usb2b_pins[] = {67, 68}; -static const unsigned usb2b_muxvals[] = {23, 23}; +static const int usb2b_muxvals[] = {23, 23}; static const unsigned port_range0_pins[] = { 135, 136, 137, 138, 139, 140, 141, 142, /* PORT0x */ 143, 144, 145, 146, 147, 148, 149, 150, /* PORT1x */ @@ -574,7 +622,7 @@ static const unsigned port_range0_pins[] = { 98, 99, 100, 6, 101, 114, 115, 116, /* PORT13x */ 103, 108, 21, 22, 23, 117, 118, 119, /* PORT14x */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 0, 0, 0, 0, 0, 0, 0, 0, /* PORT0x */ 0, 0, 0, 0, 0, 0, 0, 0, /* PORT1x */ 0, 0, 0, 0, 0, 0, 0, 15, /* PORT2x */ @@ -594,27 +642,29 @@ static const unsigned port_range0_muxvals[] = { static const unsigned port_range1_pins[] = { 7, /* PORT166 */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 15, /* PORT166 */ }; static const unsigned xirq_range0_pins[] = { 151, 123, 124, 125, 126, 127, 128, 129, /* XIRQ0-7 */ 130, 131, 132, 133, 62, /* XIRQ8-12 */ }; -static const unsigned xirq_range0_muxvals[] = { +static const int xirq_range0_muxvals[] = { 14, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */ 0, 0, 0, 0, 14, /* XIRQ8-12 */ }; static const unsigned xirq_range1_pins[] = { 134, 63, /* XIRQ14-15 */ }; -static const unsigned xirq_range1_muxvals[] = { +static const int xirq_range1_muxvals[] = { 0, 14, /* XIRQ14-15 */ }; -static const struct uniphier_pinctrl_group ph1_ld4_groups[] = { +static const struct uniphier_pinctrl_group uniphier_ld4_groups[] = { UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_mii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), @@ -622,6 +672,11 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = { UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs0), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(system_bus_cs2), + UNIPHIER_PINCTRL_GROUP(system_bus_cs3), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart1), UNIPHIER_PINCTRL_GROUP(uart1b), @@ -774,12 +829,19 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = { }; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_mii_groups[] = {"ether_mii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char * const i2c2_groups[] = {"i2c2"}; static const char * const i2c3_groups[] = {"i2c3"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = 
{"system_bus", + "system_bus_cs0", + "system_bus_cs1", + "system_bus_cs2", + "system_bus_cs3"}; static const char * const uart0_groups[] = {"uart0"}; static const char * const uart1_groups[] = {"uart1", "uart1b"}; static const char * const uart2_groups[] = {"uart2"}; @@ -828,14 +890,17 @@ static const char * const xirq_groups[] = { "xirq12", /* none*/ "xirq14", "xirq15", }; -static const struct uniphier_pinmux_function ph1_ld4_functions[] = { +static const struct uniphier_pinmux_function uniphier_ld4_functions[] = { UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_mii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), UNIPHIER_PINMUX_FUNCTION(i2c2), UNIPHIER_PINMUX_FUNCTION(i2c3), UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -847,43 +912,36 @@ static const struct uniphier_pinmux_function ph1_ld4_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata ph1_ld4_pindata = { - .groups = ph1_ld4_groups, - .groups_count = ARRAY_SIZE(ph1_ld4_groups), - .functions = ph1_ld4_functions, - .functions_count = ARRAY_SIZE(ph1_ld4_functions), - .mux_bits = 8, - .reg_stride = 4, - .load_pinctrl = false, -}; - -static struct pinctrl_desc ph1_ld4_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = ph1_ld4_pins, - .npins = ARRAY_SIZE(ph1_ld4_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_ld4_pindata = { + .pins = uniphier_ld4_pins, + .npins = ARRAY_SIZE(uniphier_ld4_pins), + .groups = uniphier_ld4_groups, + .groups_count = ARRAY_SIZE(uniphier_ld4_groups), + .functions = uniphier_ld4_functions, + .functions_count = ARRAY_SIZE(uniphier_ld4_functions), + .caps = 0, }; -static int ph1_ld4_pinctrl_probe(struct platform_device *pdev) +static int uniphier_ld4_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc, - &ph1_ld4_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_ld4_pindata); } -static const struct of_device_id ph1_ld4_pinctrl_match[] = { +static const struct of_device_id uniphier_ld4_pinctrl_match[] = { + { .compatible = "socionext,uniphier-ld4-pinctrl" }, { .compatible = "socionext,ph1-ld4-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_ld4_pinctrl_match); -static struct platform_driver ph1_ld4_pinctrl_driver = { - .probe = ph1_ld4_pinctrl_probe, +static struct platform_driver uniphier_ld4_pinctrl_driver = { + .probe = uniphier_ld4_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = ph1_ld4_pinctrl_match, + .name = "uniphier-ld4-pinctrl", + .of_match_table = uniphier_ld4_pinctrl_match, }, }; -module_platform_driver(ph1_ld4_pinctrl_driver); +module_platform_driver(uniphier_ld4_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c index 150d33928df2..708e5100cf34 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c @@ -19,713 +19,711 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "ph1-ld6b-pinctrl" - -static const struct pinctrl_pin_desc ph1_ld6b_pins[] = { +static const struct pinctrl_pin_desc 
uniphier_ld6b_pins[] = { UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_4_8, + 0, UNIPHIER_PIN_DRV_1BIT, 0, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE, - 1, UNIPHIER_PIN_DRV_4_8, + 1, UNIPHIER_PIN_DRV_1BIT, 1, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE, - 2, UNIPHIER_PIN_DRV_4_8, + 2, UNIPHIER_PIN_DRV_1BIT, 2, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE, - 3, UNIPHIER_PIN_DRV_4_8, + 3, UNIPHIER_PIN_DRV_1BIT, 3, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_4_8, + 4, UNIPHIER_PIN_DRV_1BIT, 4, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE, - 5, UNIPHIER_PIN_DRV_4_8, + 5, UNIPHIER_PIN_DRV_1BIT, 5, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE, - 6, UNIPHIER_PIN_DRV_4_8, + 6, UNIPHIER_PIN_DRV_1BIT, 6, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE, - 7, UNIPHIER_PIN_DRV_4_8, + 7, UNIPHIER_PIN_DRV_1BIT, 7, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_4_8, + 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_4_8, + 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_4_8, + 10, UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_4_8, + 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_4_8, + 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_4_8, + 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_4_8, + 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(24, 
"PCA09", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 24, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 25, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 26, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 27, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 28, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, - 30, UNIPHIER_PIN_DRV_4_8, + 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE, - 39, UNIPHIER_PIN_DRV_4_8, + 39, UNIPHIER_PIN_DRV_1BIT, 39, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE, - 40, UNIPHIER_PIN_DRV_4_8, + 40, UNIPHIER_PIN_DRV_1BIT, 40, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE, - 41, UNIPHIER_PIN_DRV_4_8, + 41, UNIPHIER_PIN_DRV_1BIT, 41, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE, - 42, UNIPHIER_PIN_DRV_4_8, + 42, UNIPHIER_PIN_DRV_1BIT, 42, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE, - 43, UNIPHIER_PIN_DRV_4_8, + 43, UNIPHIER_PIN_DRV_1BIT, 43, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE, - 44, UNIPHIER_PIN_DRV_4_8, + 44, UNIPHIER_PIN_DRV_1BIT, 44, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE, - 45, UNIPHIER_PIN_DRV_4_8, + 45, UNIPHIER_PIN_DRV_1BIT, 45, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE, - 46, UNIPHIER_PIN_DRV_4_8, + 46, UNIPHIER_PIN_DRV_1BIT, 46, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_8_12_16_20, + 0, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(48, "SDCMD", 
UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_8_12_16_20, + 1, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_8_12_16_20, + 2, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_8_12_16_20, + 3, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_8_12_16_20, + 4, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_8_12_16_20, + 5, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 53, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 54, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 57, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, 
"HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 93, UNIPHIER_PIN_PULL_DOWN), 
UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 99, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 100, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 102, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 105, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 106, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 108, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 109, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 110, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 111, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 112, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(113, "SBO0", 0, - 113, UNIPHIER_PIN_DRV_4_8, + 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(114, "SBI0", 0, - 114, UNIPHIER_PIN_DRV_4_8, + 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "TXD1", 0, - 115, UNIPHIER_PIN_DRV_4_8, + 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "RXD1", 0, - 116, UNIPHIER_PIN_DRV_4_8, + 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 117, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 118, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 119, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 120, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 121, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 122, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 123, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 124, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 125, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 126, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 127, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 128, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 129, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 130, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 131, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 132, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 133, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 134, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 136, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 137, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 138, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 139, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 
140, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 141, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE, - 142, UNIPHIER_PIN_DRV_4_8, + 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(143, "MDC", 0, - 143, UNIPHIER_PIN_DRV_4_8, + 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(144, "MDIO", 0, - 144, UNIPHIER_PIN_DRV_4_8, + 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0, - 145, UNIPHIER_PIN_DRV_4_8, + 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0, - 146, UNIPHIER_PIN_DRV_4_8, + 146, UNIPHIER_PIN_DRV_1BIT, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0, - 147, UNIPHIER_PIN_DRV_4_8, + 147, UNIPHIER_PIN_DRV_1BIT, 147, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0, - 148, UNIPHIER_PIN_DRV_4_8, + 148, UNIPHIER_PIN_DRV_1BIT, 148, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0, - 149, UNIPHIER_PIN_DRV_4_8, + 149, UNIPHIER_PIN_DRV_1BIT, 149, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0, - 150, UNIPHIER_PIN_DRV_4_8, + 150, UNIPHIER_PIN_DRV_1BIT, 150, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0, - 151, UNIPHIER_PIN_DRV_4_8, + 151, UNIPHIER_PIN_DRV_1BIT, 151, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0, - 152, UNIPHIER_PIN_DRV_4_8, + 152, UNIPHIER_PIN_DRV_1BIT, 152, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0, - 153, UNIPHIER_PIN_DRV_4_8, + 153, UNIPHIER_PIN_DRV_1BIT, 153, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0, - 154, UNIPHIER_PIN_DRV_4_8, + 154, UNIPHIER_PIN_DRV_1BIT, 154, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0, - 155, UNIPHIER_PIN_DRV_4_8, + 155, UNIPHIER_PIN_DRV_1BIT, 155, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0, - 156, UNIPHIER_PIN_DRV_4_8, + 156, UNIPHIER_PIN_DRV_1BIT, 156, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0, - 157, UNIPHIER_PIN_DRV_4_8, + 157, UNIPHIER_PIN_DRV_1BIT, 157, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0, - 158, UNIPHIER_PIN_DRV_4_8, + 158, UNIPHIER_PIN_DRV_1BIT, 158, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 159, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 160, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 161, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 162, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 163, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 164, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 165, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 166, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 167, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 168, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 169, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 170, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 171, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 172, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 173, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 174, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 175, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 176, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 177, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 178, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 179, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 180, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 181, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 182, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 183, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 184, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 185, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 186, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0, - 187, UNIPHIER_PIN_DRV_4_8, + 187, UNIPHIER_PIN_DRV_1BIT, 187, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(188, 
"A_D_HS0SYNCIN", 0, - 188, UNIPHIER_PIN_DRV_4_8, + 188, UNIPHIER_PIN_DRV_1BIT, 188, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0, - 189, UNIPHIER_PIN_DRV_4_8, + 189, UNIPHIER_PIN_DRV_1BIT, 189, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0, - 190, UNIPHIER_PIN_DRV_4_8, + 190, UNIPHIER_PIN_DRV_1BIT, 190, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0, - 191, UNIPHIER_PIN_DRV_4_8, + 191, UNIPHIER_PIN_DRV_1BIT, 191, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0, - 192, UNIPHIER_PIN_DRV_4_8, + 192, UNIPHIER_PIN_DRV_1BIT, 192, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0, - 193, UNIPHIER_PIN_DRV_4_8, + 193, UNIPHIER_PIN_DRV_1BIT, 193, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0, - 194, UNIPHIER_PIN_DRV_4_8, + 194, UNIPHIER_PIN_DRV_1BIT, 194, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0, - 195, UNIPHIER_PIN_DRV_4_8, + 195, UNIPHIER_PIN_DRV_1BIT, 195, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0, - 196, UNIPHIER_PIN_DRV_4_8, + 196, UNIPHIER_PIN_DRV_1BIT, 196, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0, - 197, UNIPHIER_PIN_DRV_4_8, + 197, UNIPHIER_PIN_DRV_1BIT, 197, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0, - 198, UNIPHIER_PIN_DRV_4_8, + 198, UNIPHIER_PIN_DRV_1BIT, 198, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE, - 199, UNIPHIER_PIN_DRV_4_8, + 199, UNIPHIER_PIN_DRV_1BIT, 199, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE, - 200, UNIPHIER_PIN_DRV_4_8, + 200, UNIPHIER_PIN_DRV_1BIT, 200, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE, - 201, UNIPHIER_PIN_DRV_4_8, + 201, UNIPHIER_PIN_DRV_1BIT, 201, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE, - 202, UNIPHIER_PIN_DRV_4_8, + 202, UNIPHIER_PIN_DRV_1BIT, 202, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 203, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 204, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 205, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 206, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0, - 207, UNIPHIER_PIN_DRV_4_8, + 207, UNIPHIER_PIN_DRV_1BIT, 207, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0, - 208, UNIPHIER_PIN_DRV_4_8, + 208, UNIPHIER_PIN_DRV_1BIT, 208, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0, - 209, UNIPHIER_PIN_DRV_4_8, + 209, UNIPHIER_PIN_DRV_1BIT, 209, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE, - 210, UNIPHIER_PIN_DRV_4_8, + 210, UNIPHIER_PIN_DRV_1BIT, 210, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE, - 211, UNIPHIER_PIN_DRV_4_8, + 211, UNIPHIER_PIN_DRV_1BIT, 211, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE, - 212, UNIPHIER_PIN_DRV_4_8, + 212, UNIPHIER_PIN_DRV_1BIT, 212, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 213, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 214, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE, - 215, UNIPHIER_PIN_DRV_4_8, + 215, UNIPHIER_PIN_DRV_1BIT, 215, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0, - 216, UNIPHIER_PIN_DRV_4_8, + 216, UNIPHIER_PIN_DRV_1BIT, 216, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0, - 217, UNIPHIER_PIN_DRV_4_8, + 217, UNIPHIER_PIN_DRV_1BIT, 217, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0, - 218, UNIPHIER_PIN_DRV_4_8, + 218, UNIPHIER_PIN_DRV_1BIT, 218, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0, - 219, UNIPHIER_PIN_DRV_4_8, + 219, UNIPHIER_PIN_DRV_1BIT, 219, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0, - 220, UNIPHIER_PIN_DRV_4_8, + 220, UNIPHIER_PIN_DRV_1BIT, 220, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0, - 221, UNIPHIER_PIN_DRV_4_8, + 221, UNIPHIER_PIN_DRV_1BIT, 221, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0, - 222, UNIPHIER_PIN_DRV_4_8, + 222, UNIPHIER_PIN_DRV_1BIT, 222, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0, - 223, UNIPHIER_PIN_DRV_4_8, + 223, UNIPHIER_PIN_DRV_1BIT, 223, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0, - 224, UNIPHIER_PIN_DRV_4_8, + 224, UNIPHIER_PIN_DRV_1BIT, 224, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0, - 225, UNIPHIER_PIN_DRV_4_8, + 225, UNIPHIER_PIN_DRV_1BIT, 225, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0, - 226, UNIPHIER_PIN_DRV_4_8, + 226, UNIPHIER_PIN_DRV_1BIT, 226, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0, - 227, UNIPHIER_PIN_DRV_4_8, + 227, UNIPHIER_PIN_DRV_1BIT, 227, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0, - 228, UNIPHIER_PIN_DRV_4_8, + 228, UNIPHIER_PIN_DRV_1BIT, 228, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0, - 229, UNIPHIER_PIN_DRV_4_8, + 229, UNIPHIER_PIN_DRV_1BIT, 229, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0, - 230, UNIPHIER_PIN_DRV_4_8, + 230, UNIPHIER_PIN_DRV_1BIT, 230, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0, - 231, UNIPHIER_PIN_DRV_4_8, + 231, UNIPHIER_PIN_DRV_1BIT, 231, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0, - 232, UNIPHIER_PIN_DRV_4_8, + 232, UNIPHIER_PIN_DRV_1BIT, 232, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0, - 233, UNIPHIER_PIN_DRV_4_8, + 233, UNIPHIER_PIN_DRV_1BIT, 233, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0, - 234, UNIPHIER_PIN_DRV_4_8, + 234, UNIPHIER_PIN_DRV_1BIT, 234, UNIPHIER_PIN_PULL_DOWN), }; @@ -737,52 +735,73 @@ static const unsigned adinter_pins[] = { 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, }; -static const unsigned adinter_muxvals[] = { +static const int adinter_muxvals[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; static 
const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42}; -static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; +static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46}; -static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const int emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const unsigned ether_rgmii_pins[] = {143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, + 157, 158}; +static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; +static const unsigned ether_rmii_pins[] = {143, 144, 145, 146, 147, 148, 149, + 150, 152, 154, 155, 158}; +static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}; static const unsigned i2c0_pins[] = {109, 110}; -static const unsigned i2c0_muxvals[] = {0, 0}; +static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {111, 112}; -static const unsigned i2c1_muxvals[] = {0, 0}; +static const int i2c1_muxvals[] = {0, 0}; static const unsigned i2c2_pins[] = {115, 116}; -static const unsigned i2c2_muxvals[] = {1, 1}; +static const int i2c2_muxvals[] = {1, 1}; static const unsigned i2c3_pins[] = {118, 119}; -static const unsigned i2c3_muxvals[] = {1, 1}; +static const int i2c3_muxvals[] = {1, 1}; static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41, 42, 43, 44, 45, 46}; -static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned nand_cs1_pins[] = {37, 38}; -static const unsigned nand_cs1_muxvals[] = {0, 0}; +static const int nand_cs1_muxvals[] = {0, 0}; static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55}; -static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13}; +static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0}; +static const unsigned system_bus_cs1_pins[] = {14}; +static const int system_bus_cs1_muxvals[] = {0}; +static const unsigned system_bus_cs2_pins[] = {37}; +static const int system_bus_cs2_muxvals[] = {6}; +static const unsigned system_bus_cs3_pins[] = {38}; +static const int system_bus_cs3_muxvals[] = {6}; +static const unsigned system_bus_cs4_pins[] = {115}; +static const int system_bus_cs4_muxvals[] = {6}; +static const unsigned system_bus_cs5_pins[] = {55}; +static const int system_bus_cs5_muxvals[] = {6}; static const unsigned uart0_pins[] = {135, 136}; -static const unsigned uart0_muxvals[] = {3, 3}; +static const int uart0_muxvals[] = {3, 3}; static const unsigned uart0b_pins[] = {11, 12}; -static const unsigned uart0b_muxvals[] = {2, 2}; +static const int uart0b_muxvals[] = {2, 2}; static const unsigned uart1_pins[] = {115, 116}; -static const unsigned uart1_muxvals[] = {0, 0}; +static const int uart1_muxvals[] = {0, 0}; static const unsigned uart1b_pins[] = {113, 114}; -static const unsigned uart1b_muxvals[] = {1, 1}; +static const int uart1b_muxvals[] = {1, 1}; static const unsigned uart2_pins[] = {113, 114}; -static const unsigned uart2_muxvals[] = {2, 2}; +static const int uart2_muxvals[] = {2, 2}; static const unsigned uart2b_pins[] = {86, 87}; -static const unsigned uart2b_muxvals[] = {1, 1}; +static const int uart2b_muxvals[] = {1, 1}; static const unsigned usb0_pins[] = {56, 57}; -static const 
unsigned usb0_muxvals[] = {0, 0}; +static const int usb0_muxvals[] = {0, 0}; static const unsigned usb1_pins[] = {58, 59}; -static const unsigned usb1_muxvals[] = {0, 0}; +static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {60, 61}; -static const unsigned usb2_muxvals[] = {0, 0}; +static const int usb2_muxvals[] = {0, 0}; static const unsigned usb3_pins[] = {62, 63}; -static const unsigned usb3_muxvals[] = {0, 0}; +static const int usb3_muxvals[] = {0, 0}; static const unsigned port_range0_pins[] = { 127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */ 135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */ @@ -796,7 +815,7 @@ static const unsigned port_range0_pins[] = { 61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */ 69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ @@ -828,7 +847,7 @@ static const unsigned port_range1_pins[] = { 218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */ 227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ @@ -852,16 +871,18 @@ static const unsigned xirq_pins[] = { 126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */ 74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */ }; -static const unsigned xirq_muxvals[] = { +static const int xirq_muxvals[] = { 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ }; -static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = { +static const struct uniphier_pinctrl_group uniphier_ld6b_groups[] = { UNIPHIER_PINCTRL_GROUP(adinter), UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_rgmii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), @@ -869,6 +890,12 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = { UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(system_bus_cs2), + UNIPHIER_PINCTRL_GROUP(system_bus_cs3), + UNIPHIER_PINCTRL_GROUP(system_bus_cs4), + UNIPHIER_PINCTRL_GROUP(system_bus_cs5), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart0b), UNIPHIER_PINCTRL_GROUP(uart1), @@ -1134,12 +1161,20 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = { static const char * const adinter_groups[] = {"adinter"}; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_rgmii_groups[] = {"ether_rgmii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char * const i2c2_groups[] = {"i2c2"}; static const char * const i2c3_groups[] = {"i2c3"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs1", + "system_bus_cs2", + "system_bus_cs3", + "system_bus_cs4", + 
"system_bus_cs5"}; static const char * const uart0_groups[] = {"uart0", "uart0b"}; static const char * const uart1_groups[] = {"uart1", "uart1b"}; static const char * const uart2_groups[] = {"uart2", "uart2b"}; @@ -1215,15 +1250,18 @@ static const char * const xirq_groups[] = { "xirq20", "xirq21", "xirq22", "xirq23", }; -static const struct uniphier_pinmux_function ph1_ld6b_functions[] = { +static const struct uniphier_pinmux_function uniphier_ld6b_functions[] = { UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */ UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_rgmii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), UNIPHIER_PINMUX_FUNCTION(i2c2), UNIPHIER_PINMUX_FUNCTION(i2c3), UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -1235,43 +1273,36 @@ static const struct uniphier_pinmux_function ph1_ld6b_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = { - .groups = ph1_ld6b_groups, - .groups_count = ARRAY_SIZE(ph1_ld6b_groups), - .functions = ph1_ld6b_functions, - .functions_count = ARRAY_SIZE(ph1_ld6b_functions), - .mux_bits = 8, - .reg_stride = 4, - .load_pinctrl = false, -}; - -static struct pinctrl_desc ph1_ld6b_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = ph1_ld6b_pins, - .npins = ARRAY_SIZE(ph1_ld6b_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_ld6b_pindata = { + .pins = uniphier_ld6b_pins, + .npins = ARRAY_SIZE(uniphier_ld6b_pins), + .groups = uniphier_ld6b_groups, + .groups_count = ARRAY_SIZE(uniphier_ld6b_groups), + .functions = uniphier_ld6b_functions, + .functions_count = ARRAY_SIZE(uniphier_ld6b_functions), + .caps = 0, }; -static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev) +static int uniphier_ld6b_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc, - &ph1_ld6b_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_ld6b_pindata); } -static const struct of_device_id ph1_ld6b_pinctrl_match[] = { +static const struct of_device_id uniphier_ld6b_pinctrl_match[] = { + { .compatible = "socionext,uniphier-ld6b-pinctrl" }, { .compatible = "socionext,ph1-ld6b-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_ld6b_pinctrl_match); -static struct platform_driver ph1_ld6b_pinctrl_driver = { - .probe = ph1_ld6b_pinctrl_probe, +static struct platform_driver uniphier_ld6b_pinctrl_driver = { + .probe = uniphier_ld6b_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = ph1_ld6b_pinctrl_match, + .name = "uniphier-ld6b-pinctrl", + .of_match_table = uniphier_ld6b_pinctrl_match, }, }; -module_platform_driver(ph1_ld6b_pinctrl_driver); +module_platform_driver(uniphier_ld6b_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c index b1f09e68f90e..c306e844f584 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c @@ -19,1039 +19,1072 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "ph1-pro4-pinctrl" - -static const struct pinctrl_pin_desc ph1_pro4_pins[] 
= { +static const struct pinctrl_pin_desc uniphier_pro4_pins[] = { UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_4_8, + 0, UNIPHIER_PIN_DRV_1BIT, 0, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE, - 1, UNIPHIER_PIN_DRV_4_8, + 1, UNIPHIER_PIN_DRV_1BIT, 1, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE, - 2, UNIPHIER_PIN_DRV_4_8, + 2, UNIPHIER_PIN_DRV_1BIT, 2, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE, - 3, UNIPHIER_PIN_DRV_4_8, + 3, UNIPHIER_PIN_DRV_1BIT, 3, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_4_8, + 4, UNIPHIER_PIN_DRV_1BIT, 4, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE, - 5, UNIPHIER_PIN_DRV_4_8, + 5, UNIPHIER_PIN_DRV_1BIT, 5, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE, - 6, UNIPHIER_PIN_DRV_4_8, + 6, UNIPHIER_PIN_DRV_1BIT, 6, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE, - 7, UNIPHIER_PIN_DRV_4_8, + 7, UNIPHIER_PIN_DRV_1BIT, 7, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_4_8, + 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_4_8, + 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_4_8, + 10, UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_4_8, + 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_4_8, + 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_4_8, + 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_4_8, + 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE, - 15, UNIPHIER_PIN_DRV_4_8, + 15, UNIPHIER_PIN_DRV_1BIT, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_4_8, + 16, UNIPHIER_PIN_DRV_1BIT, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE, - 17, UNIPHIER_PIN_DRV_4_8, + 17, UNIPHIER_PIN_DRV_1BIT, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE, - 18, UNIPHIER_PIN_DRV_4_8, + 18, UNIPHIER_PIN_DRV_1BIT, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE, - 19, UNIPHIER_PIN_DRV_4_8, + 19, UNIPHIER_PIN_DRV_1BIT, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_4_8, + 20, UNIPHIER_PIN_DRV_1BIT, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE, - 21, UNIPHIER_PIN_DRV_4_8, + 21, UNIPHIER_PIN_DRV_1BIT, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE, - 22, UNIPHIER_PIN_DRV_4_8, + 22, UNIPHIER_PIN_DRV_1BIT, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE, - 23, UNIPHIER_PIN_DRV_4_8, + 23, UNIPHIER_PIN_DRV_1BIT, 23, UNIPHIER_PIN_PULL_DOWN), 
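Throughout the patch the *_muxvals[] tables turn from unsigned into int, and the new system_bus tables earlier in the diff carry -1 entries for dedicated pins (the LD4 ED0-XECS1 additions, for example). Presumably -1 marks a pin that belongs to a group but has no mux register value to program. A sketch of a group-mux loop honoring that sentinel; the types and set_one_mux() helper are hypothetical:

        struct pin_group {
                const unsigned int *pins;
                const int *muxvals;
                unsigned int num_pins;
        };

        static void set_one_mux(unsigned int pin, int muxval);  /* hypothetical helper */

        static void group_set_mux(const struct pin_group *grp)
        {
                unsigned int i;

                for (i = 0; i < grp->num_pins; i++) {
                        if (grp->muxvals[i] < 0)
                                continue;       /* dedicated pin: nothing to program */
                        set_one_mux(grp->pins[i], grp->muxvals[i]);
                }
        }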
UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE, - 24, UNIPHIER_PIN_DRV_4_8, + 24, UNIPHIER_PIN_DRV_1BIT, 24, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE, - 25, UNIPHIER_PIN_DRV_4_8, + 25, UNIPHIER_PIN_DRV_1BIT, 25, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, - 26, UNIPHIER_PIN_DRV_4_8, + 26, UNIPHIER_PIN_DRV_1BIT, 26, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, - 27, UNIPHIER_PIN_DRV_4_8, + 27, UNIPHIER_PIN_DRV_1BIT, 27, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE, - 28, UNIPHIER_PIN_DRV_4_8, + 28, UNIPHIER_PIN_DRV_1BIT, 28, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE, - 29, UNIPHIER_PIN_DRV_4_8, + 29, UNIPHIER_PIN_DRV_1BIT, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE, - 30, UNIPHIER_PIN_DRV_4_8, + 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE, - 39, UNIPHIER_PIN_DRV_NONE, + -1, UNIPHIER_PIN_DRV_NONE, 39, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE, - 2, UNIPHIER_PIN_DRV_8_12_16_20, + 2, UNIPHIER_PIN_DRV_2BIT, 40, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE, - 3, UNIPHIER_PIN_DRV_8_12_16_20, + 3, UNIPHIER_PIN_DRV_2BIT, 41, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_8_12_16_20, + 4, UNIPHIER_PIN_DRV_2BIT, 42, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE, - 5, UNIPHIER_PIN_DRV_8_12_16_20, + 5, UNIPHIER_PIN_DRV_2BIT, 43, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE, - 6, UNIPHIER_PIN_DRV_8_12_16_20, + 6, UNIPHIER_PIN_DRV_2BIT, 44, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE, - 7, UNIPHIER_PIN_DRV_8_12_16_20, + 7, UNIPHIER_PIN_DRV_2BIT, 45, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_8_12_16_20, + 8, UNIPHIER_PIN_DRV_2BIT, 46, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_8_12_16_20, + 9, UNIPHIER_PIN_DRV_2BIT, 47, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE, - 
48, UNIPHIER_PIN_DRV_4_8, + 48, UNIPHIER_PIN_DRV_1BIT, 48, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, - 49, UNIPHIER_PIN_DRV_4_8, + 49, UNIPHIER_PIN_DRV_1BIT, 49, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, - 50, UNIPHIER_PIN_DRV_4_8, + 50, UNIPHIER_PIN_DRV_1BIT, 50, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_8_12_16_20, + 0, UNIPHIER_PIN_DRV_2BIT, 51, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, - 52, UNIPHIER_PIN_DRV_4_8, + 52, UNIPHIER_PIN_DRV_1BIT, 52, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, - 1, UNIPHIER_PIN_DRV_8_12_16_20, + 1, UNIPHIER_PIN_DRV_2BIT, 53, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE, - 54, UNIPHIER_PIN_DRV_4_8, + 54, UNIPHIER_PIN_DRV_1BIT, 54, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_NONE, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE, - 57, UNIPHIER_PIN_DRV_4_8, + 57, UNIPHIER_PIN_DRV_1BIT, 57, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE, - 58, UNIPHIER_PIN_DRV_4_8, + 58, UNIPHIER_PIN_DRV_1BIT, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE, - 59, UNIPHIER_PIN_DRV_4_8, + 59, UNIPHIER_PIN_DRV_1BIT, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 60, UNIPHIER_PIN_DRV_4_8, + 60, UNIPHIER_PIN_DRV_1BIT, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE, - 61, UNIPHIER_PIN_DRV_4_8, + 61, UNIPHIER_PIN_DRV_1BIT, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE, - 62, UNIPHIER_PIN_DRV_4_8, + 62, UNIPHIER_PIN_DRV_1BIT, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE, - 63, UNIPHIER_PIN_DRV_4_8, + 63, UNIPHIER_PIN_DRV_1BIT, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 64, UNIPHIER_PIN_DRV_4_8, + 64, UNIPHIER_PIN_DRV_1BIT, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE, - 65, UNIPHIER_PIN_DRV_4_8, + 65, UNIPHIER_PIN_DRV_1BIT, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE, - 66, UNIPHIER_PIN_DRV_4_8, + 66, 
UNIPHIER_PIN_DRV_1BIT, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE, - 67, UNIPHIER_PIN_DRV_4_8, + 67, UNIPHIER_PIN_DRV_1BIT, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 68, UNIPHIER_PIN_DRV_4_8, + 68, UNIPHIER_PIN_DRV_1BIT, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE, - 69, UNIPHIER_PIN_DRV_4_8, + 69, UNIPHIER_PIN_DRV_1BIT, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE, - 70, UNIPHIER_PIN_DRV_4_8, + 70, UNIPHIER_PIN_DRV_1BIT, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE, - 71, UNIPHIER_PIN_DRV_4_8, + 71, UNIPHIER_PIN_DRV_1BIT, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 72, UNIPHIER_PIN_DRV_4_8, + 72, UNIPHIER_PIN_DRV_1BIT, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE, - 73, UNIPHIER_PIN_DRV_4_8, + 73, UNIPHIER_PIN_DRV_1BIT, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE, - 74, UNIPHIER_PIN_DRV_4_8, + 74, UNIPHIER_PIN_DRV_1BIT, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE, - 75, UNIPHIER_PIN_DRV_4_8, + 75, UNIPHIER_PIN_DRV_1BIT, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 76, UNIPHIER_PIN_DRV_4_8, + 76, UNIPHIER_PIN_DRV_1BIT, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE, - 77, UNIPHIER_PIN_DRV_4_8, + 77, UNIPHIER_PIN_DRV_1BIT, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE, - 78, UNIPHIER_PIN_DRV_4_8, + 78, UNIPHIER_PIN_DRV_1BIT, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE, - 79, UNIPHIER_PIN_DRV_4_8, + 79, UNIPHIER_PIN_DRV_1BIT, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 80, UNIPHIER_PIN_DRV_4_8, + 80, UNIPHIER_PIN_DRV_1BIT, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE, - 81, UNIPHIER_PIN_DRV_4_8, + 81, UNIPHIER_PIN_DRV_1BIT, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE, - 82, UNIPHIER_PIN_DRV_4_8, + 82, UNIPHIER_PIN_DRV_1BIT, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE, - 83, UNIPHIER_PIN_DRV_4_8, + 83, UNIPHIER_PIN_DRV_1BIT, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE, - 84, UNIPHIER_PIN_DRV_4_8, + 84, UNIPHIER_PIN_DRV_1BIT, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE, - 85, UNIPHIER_PIN_DRV_4_8, + 85, UNIPHIER_PIN_DRV_1BIT, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE, - 86, UNIPHIER_PIN_DRV_4_8, + 86, UNIPHIER_PIN_DRV_1BIT, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE, - 87, UNIPHIER_PIN_DRV_4_8, + 87, UNIPHIER_PIN_DRV_1BIT, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE, - 88, UNIPHIER_PIN_DRV_4_8, + 88, UNIPHIER_PIN_DRV_1BIT, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE, - 89, UNIPHIER_PIN_DRV_4_8, + 89, UNIPHIER_PIN_DRV_1BIT, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE, - 90, UNIPHIER_PIN_DRV_4_8, + 90, 
UNIPHIER_PIN_DRV_1BIT, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE, - 91, UNIPHIER_PIN_DRV_4_8, + 91, UNIPHIER_PIN_DRV_1BIT, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE, - 92, UNIPHIER_PIN_DRV_4_8, + 92, UNIPHIER_PIN_DRV_1BIT, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE, - 93, UNIPHIER_PIN_DRV_4_8, + 93, UNIPHIER_PIN_DRV_1BIT, 93, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE, - 94, UNIPHIER_PIN_DRV_4_8, + 94, UNIPHIER_PIN_DRV_1BIT, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE, - 95, UNIPHIER_PIN_DRV_4_8, + 95, UNIPHIER_PIN_DRV_1BIT, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE, - 96, UNIPHIER_PIN_DRV_4_8, + 96, UNIPHIER_PIN_DRV_1BIT, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE, - 97, UNIPHIER_PIN_DRV_4_8, + 97, UNIPHIER_PIN_DRV_1BIT, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE, - 98, UNIPHIER_PIN_DRV_4_8, + 98, UNIPHIER_PIN_DRV_1BIT, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE, - 99, UNIPHIER_PIN_DRV_4_8, + 99, UNIPHIER_PIN_DRV_1BIT, 99, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE, - 100, UNIPHIER_PIN_DRV_4_8, + 100, UNIPHIER_PIN_DRV_1BIT, 100, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE, - 101, UNIPHIER_PIN_DRV_4_8, + 101, UNIPHIER_PIN_DRV_1BIT, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE, - 102, UNIPHIER_PIN_DRV_4_8, + 102, UNIPHIER_PIN_DRV_1BIT, 102, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE, - 103, UNIPHIER_PIN_DRV_4_8, + 103, UNIPHIER_PIN_DRV_1BIT, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE, - 104, UNIPHIER_PIN_DRV_4_8, + 104, UNIPHIER_PIN_DRV_1BIT, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE, - 105, UNIPHIER_PIN_DRV_4_8, + 105, UNIPHIER_PIN_DRV_1BIT, 105, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE, - 106, UNIPHIER_PIN_DRV_4_8, + 106, UNIPHIER_PIN_DRV_1BIT, 106, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE, - 107, UNIPHIER_PIN_DRV_4_8, + 107, UNIPHIER_PIN_DRV_1BIT, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE, - 108, UNIPHIER_PIN_DRV_4_8, + 108, UNIPHIER_PIN_DRV_1BIT, 108, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE, - 109, UNIPHIER_PIN_DRV_4_8, + 109, UNIPHIER_PIN_DRV_1BIT, 109, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE, - 110, UNIPHIER_PIN_DRV_4_8, + 110, UNIPHIER_PIN_DRV_1BIT, 110, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE, - 111, UNIPHIER_PIN_DRV_4_8, + 111, UNIPHIER_PIN_DRV_1BIT, 111, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE, - 112, UNIPHIER_PIN_DRV_4_8, + 112, UNIPHIER_PIN_DRV_1BIT, 112, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE, - 113, UNIPHIER_PIN_DRV_4_8, + 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE, - 114, 
UNIPHIER_PIN_DRV_4_8, + 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE, - 115, UNIPHIER_PIN_DRV_4_8, + 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE, - 116, UNIPHIER_PIN_DRV_4_8, + 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE, - 117, UNIPHIER_PIN_DRV_4_8, + 117, UNIPHIER_PIN_DRV_1BIT, 117, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE, - 118, UNIPHIER_PIN_DRV_4_8, + 118, UNIPHIER_PIN_DRV_1BIT, 118, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE, - 119, UNIPHIER_PIN_DRV_4_8, + 119, UNIPHIER_PIN_DRV_1BIT, 119, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE, - 120, UNIPHIER_PIN_DRV_4_8, + 120, UNIPHIER_PIN_DRV_1BIT, 120, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE, - 121, UNIPHIER_PIN_DRV_4_8, + 121, UNIPHIER_PIN_DRV_1BIT, 121, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE, - 122, UNIPHIER_PIN_DRV_4_8, + 122, UNIPHIER_PIN_DRV_1BIT, 122, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE, - 123, UNIPHIER_PIN_DRV_4_8, + 123, UNIPHIER_PIN_DRV_1BIT, 123, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE, - 124, UNIPHIER_PIN_DRV_4_8, + 124, UNIPHIER_PIN_DRV_1BIT, 124, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE, - 125, UNIPHIER_PIN_DRV_4_8, + 125, UNIPHIER_PIN_DRV_1BIT, 125, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE, - 126, UNIPHIER_PIN_DRV_4_8, + 126, UNIPHIER_PIN_DRV_1BIT, 126, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE, - 127, UNIPHIER_PIN_DRV_4_8, + 127, UNIPHIER_PIN_DRV_1BIT, 127, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE, - 128, UNIPHIER_PIN_DRV_4_8, + 128, UNIPHIER_PIN_DRV_1BIT, 128, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE, - 129, UNIPHIER_PIN_DRV_4_8, + 129, UNIPHIER_PIN_DRV_1BIT, 129, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE, - 130, UNIPHIER_PIN_DRV_4_8, + 130, UNIPHIER_PIN_DRV_1BIT, 130, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE, - 131, UNIPHIER_PIN_DRV_4_8, + 131, UNIPHIER_PIN_DRV_1BIT, 131, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE, - 132, UNIPHIER_PIN_DRV_4_8, + 132, UNIPHIER_PIN_DRV_1BIT, 132, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE, - 133, UNIPHIER_PIN_DRV_4_8, + 133, UNIPHIER_PIN_DRV_1BIT, 133, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE, - 134, UNIPHIER_PIN_DRV_4_8, + 134, UNIPHIER_PIN_DRV_1BIT, 134, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE, - 135, UNIPHIER_PIN_DRV_4_8, + 135, UNIPHIER_PIN_DRV_1BIT, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(144, "SCL1", 
UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_8_12_16_20, + 12, UNIPHIER_PIN_DRV_2BIT, 136, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_8_12_16_20, + 13, UNIPHIER_PIN_DRV_2BIT, 137, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_8_12_16_20, + 14, UNIPHIER_PIN_DRV_2BIT, 138, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE, - 15, UNIPHIER_PIN_DRV_8_12_16_20, + 15, UNIPHIER_PIN_DRV_2BIT, 139, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_8_12_16_20, + 11, UNIPHIER_PIN_DRV_2BIT, 141, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_8_12_16_20, + 10, UNIPHIER_PIN_DRV_2BIT, 140, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE, - 142, UNIPHIER_PIN_DRV_4_8, + 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE, - 143, UNIPHIER_PIN_DRV_4_8, + 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE, - 144, UNIPHIER_PIN_DRV_4_8, + 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE, - 145, UNIPHIER_PIN_DRV_4_8, + 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6, - 146, UNIPHIER_PIN_DRV_4_8, + 146, UNIPHIER_PIN_DRV_1BIT, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6, - 147, UNIPHIER_PIN_DRV_4_8, + 147, UNIPHIER_PIN_DRV_1BIT, 147, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6, - 148, UNIPHIER_PIN_DRV_4_8, + 148, UNIPHIER_PIN_DRV_1BIT, 148, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6, - 149, UNIPHIER_PIN_DRV_4_8, + 149, UNIPHIER_PIN_DRV_1BIT, 149, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6, - 150, UNIPHIER_PIN_DRV_4_8, + 150, UNIPHIER_PIN_DRV_1BIT, 150, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6, - 151, UNIPHIER_PIN_DRV_4_8, + 151, UNIPHIER_PIN_DRV_1BIT, 151, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE, - 152, UNIPHIER_PIN_DRV_4_8, + 152, UNIPHIER_PIN_DRV_1BIT, 152, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6, - 153, UNIPHIER_PIN_DRV_4_8, + 153, UNIPHIER_PIN_DRV_1BIT, 153, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6, - 154, UNIPHIER_PIN_DRV_4_8, + 
154, UNIPHIER_PIN_DRV_1BIT, 154, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6, - 155, UNIPHIER_PIN_DRV_4_8, + 155, UNIPHIER_PIN_DRV_1BIT, 155, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6, - 156, UNIPHIER_PIN_DRV_4_8, + 156, UNIPHIER_PIN_DRV_1BIT, 156, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6, - 157, UNIPHIER_PIN_DRV_4_8, + 157, UNIPHIER_PIN_DRV_1BIT, 157, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6, - 158, UNIPHIER_PIN_DRV_4_8, + 158, UNIPHIER_PIN_DRV_1BIT, 158, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6, - 159, UNIPHIER_PIN_DRV_4_8, + 159, UNIPHIER_PIN_DRV_1BIT, 159, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6, - 160, UNIPHIER_PIN_DRV_4_8, + 160, UNIPHIER_PIN_DRV_1BIT, 160, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6, - 161, UNIPHIER_PIN_DRV_4_8, + 161, UNIPHIER_PIN_DRV_1BIT, 161, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(176, "MDC", 6, - 162, UNIPHIER_PIN_DRV_4_8, + 162, UNIPHIER_PIN_DRV_1BIT, 162, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(177, "MDIO", 6, - 163, UNIPHIER_PIN_DRV_4_8, + 163, UNIPHIER_PIN_DRV_1BIT, 163, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6, - 164, UNIPHIER_PIN_DRV_4_8, + 164, UNIPHIER_PIN_DRV_1BIT, 164, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6, - 165, UNIPHIER_PIN_DRV_4_8, + 165, UNIPHIER_PIN_DRV_1BIT, 165, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, - 166, UNIPHIER_PIN_DRV_4_8, + 166, UNIPHIER_PIN_DRV_1BIT, 166, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, - 167, UNIPHIER_PIN_DRV_4_8, + 167, UNIPHIER_PIN_DRV_1BIT, 167, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, - 168, UNIPHIER_PIN_DRV_4_8, + 168, UNIPHIER_PIN_DRV_1BIT, 168, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, - 169, UNIPHIER_PIN_DRV_4_8, + 169, UNIPHIER_PIN_DRV_1BIT, 169, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, - 170, UNIPHIER_PIN_DRV_4_8, + 170, UNIPHIER_PIN_DRV_1BIT, 170, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, - 171, UNIPHIER_PIN_DRV_4_8, + 171, UNIPHIER_PIN_DRV_1BIT, 171, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE, - 172, UNIPHIER_PIN_DRV_4_8, + 172, UNIPHIER_PIN_DRV_1BIT, 172, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE, - 173, UNIPHIER_PIN_DRV_4_8, + 173, UNIPHIER_PIN_DRV_1BIT, 173, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE, - 174, UNIPHIER_PIN_DRV_4_8, + 174, UNIPHIER_PIN_DRV_1BIT, 174, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE, - 175, UNIPHIER_PIN_DRV_4_8, + 175, UNIPHIER_PIN_DRV_1BIT, 175, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE, - 176, UNIPHIER_PIN_DRV_4_8, + 176, UNIPHIER_PIN_DRV_1BIT, 176, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE, - 177, UNIPHIER_PIN_DRV_4_8, + 177, UNIPHIER_PIN_DRV_1BIT, 177, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE, - 178, UNIPHIER_PIN_DRV_4_8, + 178, UNIPHIER_PIN_DRV_1BIT, 178, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE, - 179, UNIPHIER_PIN_DRV_4_8, + 179, 
UNIPHIER_PIN_DRV_1BIT, 179, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE, - 180, UNIPHIER_PIN_DRV_4_8, + 180, UNIPHIER_PIN_DRV_1BIT, 180, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE, - 181, UNIPHIER_PIN_DRV_4_8, + 181, UNIPHIER_PIN_DRV_1BIT, 181, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE, - 182, UNIPHIER_PIN_DRV_4_8, + 182, UNIPHIER_PIN_DRV_1BIT, 182, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE, - 183, UNIPHIER_PIN_DRV_4_8, + 183, UNIPHIER_PIN_DRV_1BIT, 183, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE, - 184, UNIPHIER_PIN_DRV_4_8, + 184, UNIPHIER_PIN_DRV_1BIT, 184, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE, - 185, UNIPHIER_PIN_DRV_4_8, + 185, UNIPHIER_PIN_DRV_1BIT, 185, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE, - 186, UNIPHIER_PIN_DRV_4_8, + 186, UNIPHIER_PIN_DRV_1BIT, 186, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE, - 187, UNIPHIER_PIN_DRV_4_8, + 187, UNIPHIER_PIN_DRV_1BIT, 187, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE, - 188, UNIPHIER_PIN_DRV_4_8, + 188, UNIPHIER_PIN_DRV_1BIT, 188, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE, - 189, UNIPHIER_PIN_DRV_4_8, + 189, UNIPHIER_PIN_DRV_1BIT, 189, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE, - 190, UNIPHIER_PIN_DRV_4_8, + 190, UNIPHIER_PIN_DRV_1BIT, 190, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE, - 191, UNIPHIER_PIN_DRV_4_8, + 191, UNIPHIER_PIN_DRV_1BIT, 191, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE, - 192, UNIPHIER_PIN_DRV_NONE, + -1, UNIPHIER_PIN_DRV_NONE, 192, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE, - 193, UNIPHIER_PIN_DRV_4_8, + 193, UNIPHIER_PIN_DRV_1BIT, 193, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE, - 194, UNIPHIER_PIN_DRV_4_8, + 194, UNIPHIER_PIN_DRV_1BIT, 194, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE, - 195, UNIPHIER_PIN_DRV_4_8, + 195, UNIPHIER_PIN_DRV_1BIT, 195, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE, - 196, UNIPHIER_PIN_DRV_4_8, + 196, UNIPHIER_PIN_DRV_1BIT, 196, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE, - 197, UNIPHIER_PIN_DRV_4_8, + 197, UNIPHIER_PIN_DRV_1BIT, 197, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE, - 198, UNIPHIER_PIN_DRV_4_8, + 198, UNIPHIER_PIN_DRV_1BIT, 198, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE, - 199, UNIPHIER_PIN_DRV_4_8, + 199, UNIPHIER_PIN_DRV_1BIT, 199, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE, - 200, UNIPHIER_PIN_DRV_4_8, + 200, UNIPHIER_PIN_DRV_1BIT, 200, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE, - 201, UNIPHIER_PIN_DRV_4_8, + 201, UNIPHIER_PIN_DRV_1BIT, 201, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE, - 202, UNIPHIER_PIN_DRV_4_8, + 202, UNIPHIER_PIN_DRV_1BIT, 202, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(217, "CIN4", 
UNIPHIER_PIN_IECTRL_NONE, - 203, UNIPHIER_PIN_DRV_4_8, + 203, UNIPHIER_PIN_DRV_1BIT, 203, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE, - 204, UNIPHIER_PIN_DRV_4_8, + 204, UNIPHIER_PIN_DRV_1BIT, 204, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE, - 205, UNIPHIER_PIN_DRV_4_8, + 205, UNIPHIER_PIN_DRV_1BIT, 205, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE, - 206, UNIPHIER_PIN_DRV_4_8, + 206, UNIPHIER_PIN_DRV_1BIT, 206, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE, - 207, UNIPHIER_PIN_DRV_4_8, + 207, UNIPHIER_PIN_DRV_1BIT, 207, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE, - 208, UNIPHIER_PIN_DRV_4_8, + 208, UNIPHIER_PIN_DRV_1BIT, 208, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE, - 209, UNIPHIER_PIN_DRV_4_8, + 209, UNIPHIER_PIN_DRV_1BIT, 209, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE, - 210, UNIPHIER_PIN_DRV_4_8, + 210, UNIPHIER_PIN_DRV_1BIT, 210, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE, - 211, UNIPHIER_PIN_DRV_4_8, + 211, UNIPHIER_PIN_DRV_1BIT, 211, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE, - 212, UNIPHIER_PIN_DRV_4_8, + 212, UNIPHIER_PIN_DRV_1BIT, 212, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE, - 213, UNIPHIER_PIN_DRV_4_8, + 213, UNIPHIER_PIN_DRV_1BIT, 213, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE, - 214, UNIPHIER_PIN_DRV_4_8, + 214, UNIPHIER_PIN_DRV_1BIT, 214, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE, - 215, UNIPHIER_PIN_DRV_4_8, + 215, UNIPHIER_PIN_DRV_1BIT, 215, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE, - 216, UNIPHIER_PIN_DRV_4_8, + 216, UNIPHIER_PIN_DRV_1BIT, 216, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE, - 217, UNIPHIER_PIN_DRV_4_8, + 217, UNIPHIER_PIN_DRV_1BIT, 217, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE, - 218, UNIPHIER_PIN_DRV_4_8, + 218, UNIPHIER_PIN_DRV_1BIT, 218, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE, - 219, UNIPHIER_PIN_DRV_4_8, + 219, UNIPHIER_PIN_DRV_1BIT, 219, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE, - 220, UNIPHIER_PIN_DRV_4_8, + 220, UNIPHIER_PIN_DRV_1BIT, 220, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE, - 221, UNIPHIER_PIN_DRV_4_8, + 221, UNIPHIER_PIN_DRV_1BIT, 221, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE, - 222, UNIPHIER_PIN_DRV_4_8, + 222, UNIPHIER_PIN_DRV_1BIT, 222, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE, - 223, UNIPHIER_PIN_DRV_4_8, + 223, UNIPHIER_PIN_DRV_1BIT, 223, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE, - 224, UNIPHIER_PIN_DRV_4_8, + 224, UNIPHIER_PIN_DRV_1BIT, 224, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE, - 225, UNIPHIER_PIN_DRV_4_8, + 225, UNIPHIER_PIN_DRV_1BIT, 225, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE, - 226, UNIPHIER_PIN_DRV_4_8, + 226, 
UNIPHIER_PIN_DRV_1BIT, 226, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE, - 227, UNIPHIER_PIN_DRV_4_8, + 227, UNIPHIER_PIN_DRV_1BIT, 227, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE, - 228, UNIPHIER_PIN_DRV_4_8, + 228, UNIPHIER_PIN_DRV_1BIT, 228, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE, - 229, UNIPHIER_PIN_DRV_4_8, + 229, UNIPHIER_PIN_DRV_1BIT, 229, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE, - 230, UNIPHIER_PIN_DRV_4_8, + 230, UNIPHIER_PIN_DRV_1BIT, 230, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE, - 231, UNIPHIER_PIN_DRV_4_8, + 231, UNIPHIER_PIN_DRV_1BIT, 231, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE, - 232, UNIPHIER_PIN_DRV_4_8, + 232, UNIPHIER_PIN_DRV_1BIT, 232, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE, - 233, UNIPHIER_PIN_DRV_4_8, + 233, UNIPHIER_PIN_DRV_1BIT, 233, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE, - 234, UNIPHIER_PIN_DRV_4_8, + 234, UNIPHIER_PIN_DRV_1BIT, 234, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE, - 235, UNIPHIER_PIN_DRV_4_8, + 235, UNIPHIER_PIN_DRV_1BIT, 235, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE, - 236, UNIPHIER_PIN_DRV_4_8, + 236, UNIPHIER_PIN_DRV_1BIT, 236, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE, - 237, UNIPHIER_PIN_DRV_4_8, + 237, UNIPHIER_PIN_DRV_1BIT, 237, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE, - 238, UNIPHIER_PIN_DRV_4_8, + 238, UNIPHIER_PIN_DRV_1BIT, 238, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE, - 239, UNIPHIER_PIN_DRV_4_8, + 239, UNIPHIER_PIN_DRV_1BIT, 239, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE, - 240, UNIPHIER_PIN_DRV_4_8, + 240, UNIPHIER_PIN_DRV_1BIT, 240, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE, - 241, UNIPHIER_PIN_DRV_4_8, + 241, UNIPHIER_PIN_DRV_1BIT, 241, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE, - 242, UNIPHIER_PIN_DRV_4_8, + 242, UNIPHIER_PIN_DRV_1BIT, 242, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE, - 243, UNIPHIER_PIN_DRV_4_8, + 243, UNIPHIER_PIN_DRV_1BIT, 243, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE, - 244, UNIPHIER_PIN_DRV_4_8, + 244, UNIPHIER_PIN_DRV_1BIT, 244, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE, - 245, UNIPHIER_PIN_DRV_4_8, + 245, UNIPHIER_PIN_DRV_1BIT, 245, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE, - 246, UNIPHIER_PIN_DRV_4_8, + 246, UNIPHIER_PIN_DRV_1BIT, 246, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE, - 247, UNIPHIER_PIN_DRV_4_8, + 247, UNIPHIER_PIN_DRV_1BIT, 247, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE, - 248, UNIPHIER_PIN_DRV_4_8, + 248, UNIPHIER_PIN_DRV_1BIT, 248, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE, - 249, UNIPHIER_PIN_DRV_4_8, + 249, UNIPHIER_PIN_DRV_1BIT, 249, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(264, 
"VO1CR1", UNIPHIER_PIN_IECTRL_NONE, - 250, UNIPHIER_PIN_DRV_4_8, + 250, UNIPHIER_PIN_DRV_1BIT, 250, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE, - 251, UNIPHIER_PIN_DRV_4_8, + 251, UNIPHIER_PIN_DRV_1BIT, 251, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE, - 252, UNIPHIER_PIN_DRV_4_8, + 252, UNIPHIER_PIN_DRV_1BIT, 252, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE, - 253, UNIPHIER_PIN_DRV_4_8, + 253, UNIPHIER_PIN_DRV_1BIT, 253, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE, - 254, UNIPHIER_PIN_DRV_4_8, + 254, UNIPHIER_PIN_DRV_1BIT, 254, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE, - 255, UNIPHIER_PIN_DRV_4_8, + 255, UNIPHIER_PIN_DRV_1BIT, 255, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE, - 256, UNIPHIER_PIN_DRV_4_8, + 256, UNIPHIER_PIN_DRV_1BIT, 256, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE, - 257, UNIPHIER_PIN_DRV_4_8, + 257, UNIPHIER_PIN_DRV_1BIT, 257, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE, - 258, UNIPHIER_PIN_DRV_4_8, + 258, UNIPHIER_PIN_DRV_1BIT, 258, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE, - 259, UNIPHIER_PIN_DRV_4_8, + 259, UNIPHIER_PIN_DRV_1BIT, 259, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE, - 260, UNIPHIER_PIN_DRV_4_8, + 260, UNIPHIER_PIN_DRV_1BIT, 260, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE, - 261, UNIPHIER_PIN_DRV_4_8, + 261, UNIPHIER_PIN_DRV_1BIT, 261, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE, - 262, UNIPHIER_PIN_DRV_4_8, + 262, UNIPHIER_PIN_DRV_1BIT, 262, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE, - 263, UNIPHIER_PIN_DRV_4_8, + 263, UNIPHIER_PIN_DRV_1BIT, 263, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE, - 264, UNIPHIER_PIN_DRV_4_8, + 264, UNIPHIER_PIN_DRV_1BIT, 264, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE, - 265, UNIPHIER_PIN_DRV_4_8, + 265, UNIPHIER_PIN_DRV_1BIT, 265, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE, - 266, UNIPHIER_PIN_DRV_4_8, + 266, UNIPHIER_PIN_DRV_1BIT, 266, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE, - 267, UNIPHIER_PIN_DRV_4_8, + 267, UNIPHIER_PIN_DRV_1BIT, 267, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE, - 268, UNIPHIER_PIN_DRV_4_8, + 268, UNIPHIER_PIN_DRV_1BIT, 268, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE, - 269, UNIPHIER_PIN_DRV_4_8, + 269, UNIPHIER_PIN_DRV_1BIT, 269, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE, - 270, UNIPHIER_PIN_DRV_4_8, + 270, UNIPHIER_PIN_DRV_1BIT, 270, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE, - 271, UNIPHIER_PIN_DRV_4_8, + 271, UNIPHIER_PIN_DRV_1BIT, 271, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE, - 272, UNIPHIER_PIN_DRV_4_8, + 272, UNIPHIER_PIN_DRV_1BIT, 272, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE, - 273, UNIPHIER_PIN_DRV_4_8, + 273, 
UNIPHIER_PIN_DRV_1BIT, 273, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE, - 274, UNIPHIER_PIN_DRV_4_8, + 274, UNIPHIER_PIN_DRV_1BIT, 274, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE, - 275, UNIPHIER_PIN_DRV_4_8, + 275, UNIPHIER_PIN_DRV_1BIT, 275, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE, - 276, UNIPHIER_PIN_DRV_4_8, + 276, UNIPHIER_PIN_DRV_1BIT, 276, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE, - 277, UNIPHIER_PIN_DRV_4_8, + 277, UNIPHIER_PIN_DRV_1BIT, 277, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, - 278, UNIPHIER_PIN_DRV_4_8, + 278, UNIPHIER_PIN_DRV_1BIT, 278, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, - 279, UNIPHIER_PIN_DRV_4_8, + 279, UNIPHIER_PIN_DRV_1BIT, 279, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, - 280, UNIPHIER_PIN_DRV_4_8, + 280, UNIPHIER_PIN_DRV_1BIT, 280, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE, - 281, UNIPHIER_PIN_DRV_4_8, + 281, UNIPHIER_PIN_DRV_1BIT, 281, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE, - 282, UNIPHIER_PIN_DRV_4_8, + 282, UNIPHIER_PIN_DRV_1BIT, 282, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE, - 284, UNIPHIER_PIN_DRV_4_8, + 284, UNIPHIER_PIN_DRV_1BIT, 284, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE, - 285, UNIPHIER_PIN_DRV_4_8, + 285, UNIPHIER_PIN_DRV_1BIT, 285, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE, - 286, UNIPHIER_PIN_DRV_4_8, + 286, UNIPHIER_PIN_DRV_1BIT, 286, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE, - 287, UNIPHIER_PIN_DRV_4_8, + 287, UNIPHIER_PIN_DRV_1BIT, 287, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE, - 288, UNIPHIER_PIN_DRV_4_8, + 288, UNIPHIER_PIN_DRV_1BIT, 288, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE, - 289, UNIPHIER_PIN_DRV_4_8, + 289, UNIPHIER_PIN_DRV_1BIT, 289, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE, - 290, UNIPHIER_PIN_DRV_4_8, + 290, UNIPHIER_PIN_DRV_1BIT, 290, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE, - 291, UNIPHIER_PIN_DRV_4_8, + 291, UNIPHIER_PIN_DRV_1BIT, 291, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE, - 292, UNIPHIER_PIN_DRV_4_8, + 292, UNIPHIER_PIN_DRV_1BIT, 292, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE, - 293, UNIPHIER_PIN_DRV_4_8, + 293, UNIPHIER_PIN_DRV_1BIT, 293, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE, - 294, UNIPHIER_PIN_DRV_4_8, + 294, UNIPHIER_PIN_DRV_1BIT, 294, UNIPHIER_PIN_PULL_DOWN), 
UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE, - 295, UNIPHIER_PIN_DRV_4_8, + 295, UNIPHIER_PIN_DRV_1BIT, 295, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE, - 296, UNIPHIER_PIN_DRV_4_8, + 296, UNIPHIER_PIN_DRV_1BIT, 296, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE, - 297, UNIPHIER_PIN_DRV_4_8, + 297, UNIPHIER_PIN_DRV_1BIT, 297, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE, - 298, UNIPHIER_PIN_DRV_4_8, + 298, UNIPHIER_PIN_DRV_1BIT, 298, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE, - 299, UNIPHIER_PIN_DRV_4_8, + 299, UNIPHIER_PIN_DRV_1BIT, 299, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE, - 300, UNIPHIER_PIN_DRV_4_8, + 300, UNIPHIER_PIN_DRV_1BIT, 300, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE, - 301, UNIPHIER_PIN_DRV_4_8, + 301, UNIPHIER_PIN_DRV_1BIT, 301, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE, - 302, UNIPHIER_PIN_DRV_4_8, + 302, UNIPHIER_PIN_DRV_1BIT, 302, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE, - 303, UNIPHIER_PIN_DRV_4_8, + 303, UNIPHIER_PIN_DRV_1BIT, 303, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE, - 304, UNIPHIER_PIN_DRV_4_8, + 304, UNIPHIER_PIN_DRV_1BIT, 304, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE, - 305, UNIPHIER_PIN_DRV_4_8, + 305, UNIPHIER_PIN_DRV_1BIT, 305, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE, - 306, UNIPHIER_PIN_DRV_4_8, + 306, UNIPHIER_PIN_DRV_1BIT, 306, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE, - 307, UNIPHIER_PIN_DRV_4_8, + 307, UNIPHIER_PIN_DRV_1BIT, 307, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE, - 308, UNIPHIER_PIN_DRV_4_8, + 308, UNIPHIER_PIN_DRV_1BIT, 308, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE, - 309, UNIPHIER_PIN_DRV_4_8, + 309, UNIPHIER_PIN_DRV_1BIT, 309, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE, - 310, UNIPHIER_PIN_DRV_4_8, + 310, UNIPHIER_PIN_DRV_1BIT, 310, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE, - 311, UNIPHIER_PIN_DRV_4_8, + 311, UNIPHIER_PIN_DRV_1BIT, 311, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE, - 312, UNIPHIER_PIN_DRV_NONE, + -1, UNIPHIER_PIN_DRV_NONE, 312, UNIPHIER_PIN_PULL_DOWN), }; static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53}; -static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; +static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47}; -static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const int emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const unsigned ether_mii_pins[] = {160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179}; +static const int ether_mii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0}; +static const unsigned ether_rgmii_pins[] = {160, 161, 162, 163, 164, 165, 167, + 168, 169, 170, 171, 172, 176, 177, + 178, 179}; +static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; +static const 
unsigned ether_rmii_pins[] = {160, 161, 162, 165, 168, 169, 172, + 173, 176, 177, 178, 179}; +static const int ether_rmii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned ether_rmiib_pins[] = {161, 162, 165, 167, 168, 169, 172, + 173, 176, 177, 178, 179}; +static const int ether_rmiib_muxvals[] = {0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned i2c0_pins[] = {142, 143}; -static const unsigned i2c0_muxvals[] = {0, 0}; +static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {144, 145}; -static const unsigned i2c1_muxvals[] = {0, 0}; +static const int i2c1_muxvals[] = {0, 0}; static const unsigned i2c2_pins[] = {146, 147}; -static const unsigned i2c2_muxvals[] = {0, 0}; +static const int i2c2_muxvals[] = {0, 0}; static const unsigned i2c3_pins[] = {148, 149}; -static const unsigned i2c3_muxvals[] = {0, 0}; +static const int i2c3_muxvals[] = {0, 0}; static const unsigned i2c6_pins[] = {308, 309}; -static const unsigned i2c6_muxvals[] = {6, 6}; +static const int i2c6_muxvals[] = {6, 6}; static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54}; -static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned nand_cs1_pins[] = {131, 132}; -static const unsigned nand_cs1_muxvals[] = {1, 1}; +static const int nand_cs1_muxvals[] = {1, 1}; static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157, 158}; -static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326, 327}; -static const unsigned sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38}; +static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0}; +static const unsigned system_bus_cs0_pins[] = {318}; +static const int system_bus_cs0_muxvals[] = {5}; +static const unsigned system_bus_cs1_pins[] = {24}; +static const int system_bus_cs1_muxvals[] = {0}; +static const unsigned system_bus_cs2_pins[] = {315}; +static const int system_bus_cs2_muxvals[] = {5}; +static const unsigned system_bus_cs3_pins[] = {313}; +static const int system_bus_cs3_muxvals[] = {5}; +static const unsigned system_bus_cs4_pins[] = {305}; +static const int system_bus_cs4_muxvals[] = {5}; +static const unsigned system_bus_cs5_pins[] = {303}; +static const int system_bus_cs5_muxvals[] = {6}; +static const unsigned system_bus_cs6_pins[] = {307}; +static const int system_bus_cs6_muxvals[] = {6}; +static const unsigned system_bus_cs7_pins[] = {312}; +static const int system_bus_cs7_muxvals[] = {6}; static const unsigned uart0_pins[] = {127, 128}; -static const unsigned uart0_muxvals[] = {0, 0}; +static const int uart0_muxvals[] = {0, 0}; static const unsigned uart1_pins[] = {129, 130}; -static const unsigned uart1_muxvals[] = {0, 0}; +static const int uart1_muxvals[] = {0, 0}; static const unsigned uart2_pins[] = {131, 132}; -static const unsigned uart2_muxvals[] = {0, 0}; +static const int uart2_muxvals[] = {0, 0}; static const unsigned uart3_pins[] = {88, 89}; -static const unsigned uart3_muxvals[] = {2, 2}; +static const int uart3_muxvals[] = {2, 2}; static const unsigned usb0_pins[] = {180, 181}; -static const 
unsigned usb0_muxvals[] = {0, 0}; +static const int usb0_muxvals[] = {0, 0}; static const unsigned usb1_pins[] = {182, 183}; -static const unsigned usb1_muxvals[] = {0, 0}; +static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {184, 185}; -static const unsigned usb2_muxvals[] = {0, 0}; +static const int usb2_muxvals[] = {0, 0}; static const unsigned usb3_pins[] = {186, 187}; -static const unsigned usb3_muxvals[] = {0, 0}; +static const int usb3_muxvals[] = {0, 0}; static const unsigned port_range0_pins[] = { 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */ 308, 309, 310, 311, 312, 313, 314, 315, /* PORT1x */ @@ -1069,7 +1102,7 @@ static const unsigned port_range0_pins[] = { 76, 77, 78, 79, 80, 81, 82, 83, /* PORT13x */ 84, 85, 86, 87, 88, 89, 90, 91, /* PORT14x */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 7, 7, 7, 7, 7, 7, 7, 7, /* PORT0x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT1x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT2x */ @@ -1102,7 +1135,7 @@ static const unsigned port_range1_pins[] = { 251, 252, 261, 262, 263, 264, 273, 274, /* PORT29x */ 31, 32, 33, 34, 35, 36, 37, 38, /* PORT30x */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 7, 7, 7, /* PORT175-177 */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT18x */ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT19x */ @@ -1123,7 +1156,7 @@ static const unsigned xirq_pins[] = { 234, 186, 99, 100, 101, 102, 184, 301, /* XIRQ8-15 */ 302, 303, 304, 305, 306, /* XIRQ16-20 */ }; -static const unsigned xirq_muxvals[] = { +static const int xirq_muxvals[] = { 7, 7, 7, 7, 7, 7, 7, 7, /* XIRQ0-7 */ 7, 7, 7, 7, 7, 7, 2, 2, /* XIRQ8-15 */ 2, 2, 2, 2, 2, /* XIRQ16-20 */ @@ -1131,13 +1164,17 @@ static const unsigned xirq_muxvals[] = { static const unsigned xirq_alternatives_pins[] = { 184, 310, 316, }; -static const unsigned xirq_alternatives_muxvals[] = { +static const int xirq_alternatives_muxvals[] = { 2, 2, 2, }; -static const struct uniphier_pinctrl_group ph1_pro4_groups[] = { +static const struct uniphier_pinctrl_group uniphier_pro4_groups[] = { UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_mii), + UNIPHIER_PINCTRL_GROUP(ether_rgmii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), + UNIPHIER_PINCTRL_GROUP(ether_rmiib), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), @@ -1147,6 +1184,15 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = { UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), UNIPHIER_PINCTRL_GROUP(sd1), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs0), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(system_bus_cs2), + UNIPHIER_PINCTRL_GROUP(system_bus_cs3), + UNIPHIER_PINCTRL_GROUP(system_bus_cs4), + UNIPHIER_PINCTRL_GROUP(system_bus_cs5), + UNIPHIER_PINCTRL_GROUP(system_bus_cs6), + UNIPHIER_PINCTRL_GROUP(system_bus_cs7), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart1), UNIPHIER_PINCTRL_GROUP(uart2), @@ -1413,6 +1459,9 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = { }; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_mii_groups[] = {"ether_mii"}; +static const char * const ether_rgmii_groups[] = {"ether_rgmii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii", "ether_rmiib"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char *
const i2c2_groups[] = {"i2c2"}; @@ -1421,6 +1470,15 @@ static const char * const i2c6_groups[] = {"i2c6"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; static const char * const sd1_groups[] = {"sd1"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs0", + "system_bus_cs1", + "system_bus_cs2", + "system_bus_cs3", + "system_bus_cs4", + "system_bus_cs5", + "system_bus_cs6", + "system_bus_cs7"}; static const char * const uart0_groups[] = {"uart0"}; static const char * const uart1_groups[] = {"uart1"}; static const char * const uart2_groups[] = {"uart2"}; @@ -1499,8 +1557,11 @@ static const char * const xirq_groups[] = { "xirq14b", "xirq17b", "xirq18b", }; -static const struct uniphier_pinmux_function ph1_pro4_functions[] = { +static const struct uniphier_pinmux_function uniphier_pro4_functions[] = { UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_mii), + UNIPHIER_PINMUX_FUNCTION(ether_rgmii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), UNIPHIER_PINMUX_FUNCTION(i2c2), @@ -1509,6 +1570,7 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = { UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), UNIPHIER_PINMUX_FUNCTION(sd1), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -1521,43 +1583,36 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata ph1_pro4_pindata = { - .groups = ph1_pro4_groups, - .groups_count = ARRAY_SIZE(ph1_pro4_groups), - .functions = ph1_pro4_functions, - .functions_count = ARRAY_SIZE(ph1_pro4_functions), - .mux_bits = 4, - .reg_stride = 8, - .load_pinctrl = true, -}; - -static struct pinctrl_desc ph1_pro4_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = ph1_pro4_pins, - .npins = ARRAY_SIZE(ph1_pro4_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_pro4_pindata = { + .pins = uniphier_pro4_pins, + .npins = ARRAY_SIZE(uniphier_pro4_pins), + .groups = uniphier_pro4_groups, + .groups_count = ARRAY_SIZE(uniphier_pro4_groups), + .functions = uniphier_pro4_functions, + .functions_count = ARRAY_SIZE(uniphier_pro4_functions), + .caps = UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE, }; -static int ph1_pro4_pinctrl_probe(struct platform_device *pdev) +static int uniphier_pro4_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc, - &ph1_pro4_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_pro4_pindata); } -static const struct of_device_id ph1_pro4_pinctrl_match[] = { +static const struct of_device_id uniphier_pro4_pinctrl_match[] = { + { .compatible = "socionext,uniphier-pro4-pinctrl" }, { .compatible = "socionext,ph1-pro4-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_pro4_pinctrl_match); -static struct platform_driver ph1_pro4_pinctrl_driver = { - .probe = ph1_pro4_pinctrl_probe, +static struct platform_driver uniphier_pro4_pinctrl_driver = { + .probe = uniphier_pro4_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = ph1_pro4_pinctrl_match, + .name = "uniphier-pro4-pinctrl", + .of_match_table = uniphier_pro4_pinctrl_match, }, }; -module_platform_driver(ph1_pro4_pinctrl_driver); 
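[Editor's note, before the Pro5 file below: the Pro4 hunks above all follow one table-driven pattern — each function keeps a <grp>_pins[] array and a parallel <grp>_muxvals[] array, and UNIPHIER_PINCTRL_GROUP(grp) pairs the two by name. Here is a minimal, self-contained C sketch of that pattern. The struct layout and the macro expansion are illustrative guesses, not the driver's real definitions from pinctrl-uniphier.h; only the i2c0 values (pins 142/143, muxval 0) are copied from the hunks above. The patch also widens every *_muxvals[] table from unsigned to int; the diff does not say why, so the signed type below simply mirrors it (headroom for a -1 "no setting" sentinel is one plausible reason, but that is a guess).]

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical analogue of the driver's group descriptor; the real
     * struct uniphier_pinctrl_group may differ. */
    struct pin_group {
            const char *name;
            const unsigned *pins;
            const int *muxvals;
            size_t num_pins;
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Guessed expansion of UNIPHIER_PINCTRL_GROUP(grp): token-paste the
     * parallel <grp>_pins[] and <grp>_muxvals[] tables into one entry. */
    #define PIN_GROUP(grp)                                  \
            {                                               \
                    .name = #grp,                           \
                    .pins = grp##_pins,                     \
                    .muxvals = grp##_muxvals,               \
                    .num_pins = ARRAY_SIZE(grp##_pins),     \
            }

    /* Values copied from the i2c0 tables in the hunks above. */
    static const unsigned i2c0_pins[] = {142, 143};
    static const int i2c0_muxvals[] = {0, 0};

    static const struct pin_group groups[] = {
            PIN_GROUP(i2c0),
    };

    int main(void)
    {
            for (size_t g = 0; g < ARRAY_SIZE(groups); g++)
                    for (size_t i = 0; i < groups[g].num_pins; i++)
                            printf("%s: pin %u -> muxval %d\n",
                                   groups[g].name, groups[g].pins[i],
                                   groups[g].muxvals[i]);
            return 0;
    }

The appeal of this layout, visible throughout the patch, is that every table lives in rodata and each new group — the ether_* and system_bus_cs* additions above — costs exactly one array pair plus a one-line UNIPHIER_PINCTRL_GROUP() entry; it also makes cross-reference mistakes easy to spot, such as the ether_rmii_groups[] entries corrected earlier, which must name groups that actually exist in the group table.]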
+module_platform_driver(uniphier_pro4_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c index 3087f76752a6..55d4a12282a0 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c @@ -19,823 +19,840 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "ph1-pro5-pinctrl" - -static const struct pinctrl_pin_desc ph1_pro5_pins[] = { +static const struct pinctrl_pin_desc uniphier_pro5_pins[] = { UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0, - 0, UNIPHIER_PIN_DRV_4_8, + 0, UNIPHIER_PIN_DRV_1BIT, 0, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0, - 1, UNIPHIER_PIN_DRV_4_8, + 1, UNIPHIER_PIN_DRV_1BIT, 1, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0, - 2, UNIPHIER_PIN_DRV_4_8, + 2, UNIPHIER_PIN_DRV_1BIT, 2, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0, - 3, UNIPHIER_PIN_DRV_4_8, + 3, UNIPHIER_PIN_DRV_1BIT, 3, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_4_8, + 4, UNIPHIER_PIN_DRV_1BIT, 4, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE, - 5, UNIPHIER_PIN_DRV_4_8, + 5, UNIPHIER_PIN_DRV_1BIT, 5, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE, - 6, UNIPHIER_PIN_DRV_4_8, + 6, UNIPHIER_PIN_DRV_1BIT, 6, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE, - 7, UNIPHIER_PIN_DRV_4_8, + 7, UNIPHIER_PIN_DRV_1BIT, 7, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_4_8, + 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_4_8, + 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_4_8, + 10, UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_4_8, + 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_4_8, + 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_4_8, + 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_4_8, + 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE, - 15, UNIPHIER_PIN_DRV_4_8, + 15, UNIPHIER_PIN_DRV_1BIT, 15, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_4_8, + 16, UNIPHIER_PIN_DRV_1BIT, 16, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE, - 17, UNIPHIER_PIN_DRV_4_8, + 17, UNIPHIER_PIN_DRV_1BIT, 17, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE, - 18, UNIPHIER_PIN_DRV_4_8, + 18, UNIPHIER_PIN_DRV_1BIT, 18, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, - 19, UNIPHIER_PIN_DRV_4_8, + 19, UNIPHIER_PIN_DRV_1BIT, 19, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_4_8, + 20, UNIPHIER_PIN_DRV_1BIT, 20, 
UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE, - 21, UNIPHIER_PIN_DRV_4_8, + 21, UNIPHIER_PIN_DRV_1BIT, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, - 22, UNIPHIER_PIN_DRV_4_8, + 22, UNIPHIER_PIN_DRV_1BIT, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, - 23, UNIPHIER_PIN_DRV_4_8, + 23, UNIPHIER_PIN_DRV_1BIT, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, - 24, UNIPHIER_PIN_DRV_4_8, + 24, UNIPHIER_PIN_DRV_1BIT, 24, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE, - 25, UNIPHIER_PIN_DRV_4_8, + 25, UNIPHIER_PIN_DRV_1BIT, 25, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE, - 26, UNIPHIER_PIN_DRV_4_8, + 26, UNIPHIER_PIN_DRV_1BIT, 26, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE, - 27, UNIPHIER_PIN_DRV_4_8, + 27, UNIPHIER_PIN_DRV_1BIT, 27, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE, - 28, UNIPHIER_PIN_DRV_4_8, + 28, UNIPHIER_PIN_DRV_1BIT, 28, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE, - 29, UNIPHIER_PIN_DRV_4_8, + 29, UNIPHIER_PIN_DRV_1BIT, 29, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE, - 30, UNIPHIER_PIN_DRV_4_8, + 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE, - 39, UNIPHIER_PIN_DRV_4_8, + 39, UNIPHIER_PIN_DRV_1BIT, 39, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE, - 40, UNIPHIER_PIN_DRV_4_8, + 40, UNIPHIER_PIN_DRV_1BIT, 40, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE, - 41, UNIPHIER_PIN_DRV_4_8, + 41, UNIPHIER_PIN_DRV_1BIT, 41, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE, - 42, UNIPHIER_PIN_DRV_4_8, + 42, UNIPHIER_PIN_DRV_1BIT, 42, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE, - 43, UNIPHIER_PIN_DRV_4_8, + 43, UNIPHIER_PIN_DRV_1BIT, 43, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE, - 44, UNIPHIER_PIN_DRV_4_8, + 44, UNIPHIER_PIN_DRV_1BIT, 44, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", 
UNIPHIER_PIN_IECTRL_NONE, - 45, UNIPHIER_PIN_DRV_4_8, + 45, UNIPHIER_PIN_DRV_1BIT, 45, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE, - 46, UNIPHIER_PIN_DRV_4_8, + 46, UNIPHIER_PIN_DRV_1BIT, 46, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(47, "TXD0", 0, - 47, UNIPHIER_PIN_DRV_4_8, + 47, UNIPHIER_PIN_DRV_1BIT, 47, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(48, "RXD0", 0, - 48, UNIPHIER_PIN_DRV_4_8, + 48, UNIPHIER_PIN_DRV_1BIT, 48, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(49, "TXD1", 0, - 49, UNIPHIER_PIN_DRV_4_8, + 49, UNIPHIER_PIN_DRV_1BIT, 49, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(50, "RXD1", 0, - 50, UNIPHIER_PIN_DRV_4_8, + 50, UNIPHIER_PIN_DRV_1BIT, 50, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE, - 51, UNIPHIER_PIN_DRV_4_8, + 51, UNIPHIER_PIN_DRV_1BIT, 51, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE, - 52, UNIPHIER_PIN_DRV_4_8, + 52, UNIPHIER_PIN_DRV_1BIT, 52, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(53, "TXD3", 0, - 53, UNIPHIER_PIN_DRV_4_8, + 53, UNIPHIER_PIN_DRV_1BIT, 53, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(54, "RXD3", 0, - 54, UNIPHIER_PIN_DRV_4_8, + 54, UNIPHIER_PIN_DRV_1BIT, 54, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0, - 55, UNIPHIER_PIN_DRV_4_8, + 55, UNIPHIER_PIN_DRV_1BIT, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0, - 56, UNIPHIER_PIN_DRV_4_8, + 56, UNIPHIER_PIN_DRV_1BIT, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0, - 57, UNIPHIER_PIN_DRV_4_8, + 57, UNIPHIER_PIN_DRV_1BIT, 57, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0, - 58, UNIPHIER_PIN_DRV_4_8, + 58, UNIPHIER_PIN_DRV_1BIT, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0, - 59, UNIPHIER_PIN_DRV_4_8, + 59, UNIPHIER_PIN_DRV_1BIT, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0, - 60, UNIPHIER_PIN_DRV_4_8, + 60, UNIPHIER_PIN_DRV_1BIT, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0, - 61, UNIPHIER_PIN_DRV_4_8, + 61, UNIPHIER_PIN_DRV_1BIT, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0, - 62, UNIPHIER_PIN_DRV_4_8, + 62, UNIPHIER_PIN_DRV_1BIT, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0, - 63, UNIPHIER_PIN_DRV_4_8, + 63, UNIPHIER_PIN_DRV_1BIT, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0, - 64, UNIPHIER_PIN_DRV_4_8, + 64, UNIPHIER_PIN_DRV_1BIT, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "XSRST", 0, - 65, UNIPHIER_PIN_DRV_4_8, + 65, UNIPHIER_PIN_DRV_1BIT, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE, - 66, UNIPHIER_PIN_DRV_4_8, + 66, UNIPHIER_PIN_DRV_1BIT, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "PWMA", 0, - 67, UNIPHIER_PIN_DRV_4_8, + 67, UNIPHIER_PIN_DRV_1BIT, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0, - 68, UNIPHIER_PIN_DRV_4_8, + 68, UNIPHIER_PIN_DRV_1BIT, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0, - 69, UNIPHIER_PIN_DRV_4_8, + 69, UNIPHIER_PIN_DRV_1BIT, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0, - 70, UNIPHIER_PIN_DRV_4_8, + 70, UNIPHIER_PIN_DRV_1BIT, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0, - 71, UNIPHIER_PIN_DRV_4_8, + 71, UNIPHIER_PIN_DRV_1BIT, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0, - 72, UNIPHIER_PIN_DRV_4_8, + 72, UNIPHIER_PIN_DRV_1BIT, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0, - 73, 
UNIPHIER_PIN_DRV_4_8, + 73, UNIPHIER_PIN_DRV_1BIT, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0, - 74, UNIPHIER_PIN_DRV_4_8, + 74, UNIPHIER_PIN_DRV_1BIT, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0, - 75, UNIPHIER_PIN_DRV_4_8, + 75, UNIPHIER_PIN_DRV_1BIT, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0, - 76, UNIPHIER_PIN_DRV_4_8, + 76, UNIPHIER_PIN_DRV_1BIT, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0, - 77, UNIPHIER_PIN_DRV_4_8, + 77, UNIPHIER_PIN_DRV_1BIT, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0, - 78, UNIPHIER_PIN_DRV_4_8, + 78, UNIPHIER_PIN_DRV_1BIT, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0, - 82, UNIPHIER_PIN_DRV_4_8, + 82, UNIPHIER_PIN_DRV_1BIT, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0, - 83, UNIPHIER_PIN_DRV_4_8, + 83, UNIPHIER_PIN_DRV_1BIT, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0, - 84, UNIPHIER_PIN_DRV_4_8, + 84, UNIPHIER_PIN_DRV_1BIT, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0, - 87, UNIPHIER_PIN_DRV_4_8, + 87, UNIPHIER_PIN_DRV_1BIT, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0, - 88, UNIPHIER_PIN_DRV_4_8, + 88, UNIPHIER_PIN_DRV_1BIT, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "PORT00", 0, - 89, UNIPHIER_PIN_DRV_4_8, + 89, UNIPHIER_PIN_DRV_1BIT, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "PORT01", 0, - 90, UNIPHIER_PIN_DRV_4_8, + 90, UNIPHIER_PIN_DRV_1BIT, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "PORT02", 0, - 91, UNIPHIER_PIN_DRV_4_8, + 91, UNIPHIER_PIN_DRV_1BIT, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "PORT03", 0, - 92, UNIPHIER_PIN_DRV_4_8, + 92, UNIPHIER_PIN_DRV_1BIT, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "PORT04", 0, - 93, UNIPHIER_PIN_DRV_4_8, + 93, UNIPHIER_PIN_DRV_1BIT, 93, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "PORT05", 0, - 94, UNIPHIER_PIN_DRV_4_8, + 94, UNIPHIER_PIN_DRV_1BIT, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "PORT06", 0, - 95, UNIPHIER_PIN_DRV_4_8, + 95, UNIPHIER_PIN_DRV_1BIT, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "PORT07", 0, - 96, UNIPHIER_PIN_DRV_4_8, + 96, UNIPHIER_PIN_DRV_1BIT, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "PORT10", 0, - 97, UNIPHIER_PIN_DRV_4_8, + 97, UNIPHIER_PIN_DRV_1BIT, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "PORT11", 0, - 98, UNIPHIER_PIN_DRV_4_8, + 98, UNIPHIER_PIN_DRV_1BIT, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "PORT12", 0, - 99, UNIPHIER_PIN_DRV_4_8, + 99, UNIPHIER_PIN_DRV_1BIT, 99, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "PORT13", 0, - 100, UNIPHIER_PIN_DRV_4_8, + 100, UNIPHIER_PIN_DRV_1BIT, 100, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "PORT14", 0, - 101, UNIPHIER_PIN_DRV_4_8, + 101, UNIPHIER_PIN_DRV_1BIT, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "PORT15", 0, - 102, UNIPHIER_PIN_DRV_4_8, + 102, UNIPHIER_PIN_DRV_1BIT, 102, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(103, "PORT16", 0, - 103, UNIPHIER_PIN_DRV_4_8, + 103, UNIPHIER_PIN_DRV_1BIT, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(104, "PORT17", 0, - 104, UNIPHIER_PIN_DRV_4_8, + 104, UNIPHIER_PIN_DRV_1BIT, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0, - 105, UNIPHIER_PIN_DRV_4_8, + 105, UNIPHIER_PIN_DRV_1BIT, 105, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0, - 106, UNIPHIER_PIN_DRV_4_8, + 106, UNIPHIER_PIN_DRV_1BIT, 106, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0, - 107, UNIPHIER_PIN_DRV_4_8, + 107, UNIPHIER_PIN_DRV_1BIT, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0, - 108, UNIPHIER_PIN_DRV_4_8, + 108, UNIPHIER_PIN_DRV_1BIT, 108, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(109, "XPERST", 0, - 109, UNIPHIER_PIN_DRV_4_8, + 109, UNIPHIER_PIN_DRV_1BIT, 109, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0, - 110, UNIPHIER_PIN_DRV_4_8, + 110, UNIPHIER_PIN_DRV_1BIT, 110, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0, - 111, UNIPHIER_PIN_DRV_4_8, + 111, UNIPHIER_PIN_DRV_1BIT, 111, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 112, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 113, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 114, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 115, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 116, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 117, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 118, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 119, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 120, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 121, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 122, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 123, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 124, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 125, 
UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 126, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 127, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 128, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 129, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 130, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 131, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 132, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 133, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 134, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 136, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 137, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 138, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 139, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 140, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 141, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 142, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 143, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 144, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 145, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 147, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, 
UNIPHIER_PIN_DRV_FIXED8, 148, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 149, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 150, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 151, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 152, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 153, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 154, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 155, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 156, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 157, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 158, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 159, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 160, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 161, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 162, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 163, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 164, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 165, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 166, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 167, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 168, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 169, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 170, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, 
UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 171, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 172, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 173, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 174, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 175, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 176, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 177, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 178, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 179, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 180, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 181, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 182, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 183, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 184, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 185, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 186, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 187, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 188, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 189, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 190, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 191, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 192, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 193, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(194, "AO1D0", 
UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 194, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 195, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 196, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 197, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 198, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 199, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 200, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 201, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 202, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 203, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 204, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 205, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 206, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 207, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 208, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 209, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 210, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 211, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 212, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 213, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 214, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 215, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 216, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(217, 
"VI1C2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 217, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 218, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 219, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 220, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 221, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 222, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 223, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 224, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 225, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 226, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 227, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 228, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 229, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 230, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 231, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 232, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 233, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 234, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 235, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 236, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 237, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 238, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 239, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(240, 
"VO1D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 240, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 241, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 242, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 243, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 244, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 245, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 246, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 247, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 248, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 249, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, - 40, UNIPHIER_PIN_DRV_8_12_16_20, + 10, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, - 44, UNIPHIER_PIN_DRV_8_12_16_20, + 11, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, - 48, UNIPHIER_PIN_DRV_8_12_16_20, + 12, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, - 52, UNIPHIER_PIN_DRV_8_12_16_20, + 13, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, - 56, UNIPHIER_PIN_DRV_8_12_16_20, + 14, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, - 60, UNIPHIER_PIN_DRV_8_12_16_20, + 15, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), }; static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42}; -static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0}; +static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0}; static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46}; -static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0}; +static const int emmc_dat8_muxvals[] = {0, 0, 0, 0}; static const unsigned i2c0_pins[] = {112, 113}; -static const unsigned i2c0_muxvals[] = {0, 0}; +static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {114, 115}; -static const unsigned i2c1_muxvals[] = {0, 0}; +static const int i2c1_muxvals[] = {0, 0}; static const unsigned i2c2_pins[] = {116, 117}; -static const unsigned i2c2_muxvals[] = {0, 0}; +static const int i2c2_muxvals[] = {0, 0}; static const unsigned i2c3_pins[] = {118, 119}; -static const unsigned i2c3_muxvals[] = {0, 0}; +static const int i2c3_muxvals[] = {0, 0}; static const unsigned i2c5_pins[] = {87, 88}; -static const unsigned i2c5_muxvals[] = {2, 2}; +static const int i2c5_muxvals[] = {2, 2}; static const unsigned i2c5b_pins[] = 
{196, 197}; -static const unsigned i2c5b_muxvals[] = {2, 2}; +static const int i2c5b_muxvals[] = {2, 2}; static const unsigned i2c5c_pins[] = {215, 216}; -static const unsigned i2c5c_muxvals[] = {2, 2}; +static const int i2c5c_muxvals[] = {2, 2}; static const unsigned i2c6_pins[] = {101, 102}; -static const unsigned i2c6_muxvals[] = {2, 2}; +static const int i2c6_muxvals[] = {2, 2}; static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30, 31, 32, 33, 34, 35}; -static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned nand_cs1_pins[] = {26, 27}; -static const unsigned nand_cs1_muxvals[] = {0, 0}; +static const int nand_cs1_muxvals[] = {0, 0}; static const unsigned sd_pins[] = {250, 251, 252, 253, 254, 255, 256, 257, 258}; -static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17}; +static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0}; +static const unsigned system_bus_cs0_pins[] = {105}; +static const int system_bus_cs0_muxvals[] = {1}; +static const unsigned system_bus_cs1_pins[] = {18}; +static const int system_bus_cs1_muxvals[] = {0}; +static const unsigned system_bus_cs2_pins[] = {106}; +static const int system_bus_cs2_muxvals[] = {1}; +static const unsigned system_bus_cs3_pins[] = {100}; +static const int system_bus_cs3_muxvals[] = {1}; +static const unsigned system_bus_cs4_pins[] = {101}; +static const int system_bus_cs4_muxvals[] = {1}; +static const unsigned system_bus_cs5_pins[] = {102}; +static const int system_bus_cs5_muxvals[] = {1}; +static const unsigned system_bus_cs6_pins[] = {69}; +static const int system_bus_cs6_muxvals[] = {5}; +static const unsigned system_bus_cs7_pins[] = {70}; +static const int system_bus_cs7_muxvals[] = {5}; static const unsigned uart0_pins[] = {47, 48}; -static const unsigned uart0_muxvals[] = {0, 0}; +static const int uart0_muxvals[] = {0, 0}; static const unsigned uart0b_pins[] = {227, 228}; -static const unsigned uart0b_muxvals[] = {3, 3}; +static const int uart0b_muxvals[] = {3, 3}; static const unsigned uart1_pins[] = {49, 50}; -static const unsigned uart1_muxvals[] = {0, 0}; +static const int uart1_muxvals[] = {0, 0}; static const unsigned uart2_pins[] = {51, 52}; -static const unsigned uart2_muxvals[] = {0, 0}; +static const int uart2_muxvals[] = {0, 0}; static const unsigned uart3_pins[] = {53, 54}; -static const unsigned uart3_muxvals[] = {0, 0}; +static const int uart3_muxvals[] = {0, 0}; static const unsigned usb0_pins[] = {124, 125}; -static const unsigned usb0_muxvals[] = {0, 0}; +static const int usb0_muxvals[] = {0, 0}; static const unsigned usb1_pins[] = {126, 127}; -static const unsigned usb1_muxvals[] = {0, 0}; +static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {128, 129}; -static const unsigned usb2_muxvals[] = {0, 0}; +static const int usb2_muxvals[] = {0, 0}; static const unsigned port_range0_pins[] = { 89, 90, 91, 92, 93, 94, 95, 96, /* PORT0x */ 97, 98, 99, 100, 101, 102, 103, 104, /* PORT1x */ @@ -853,7 +870,7 @@ static const unsigned port_range0_pins[] = { 179, 180, 181, 182, 186, 187, 188, 189, /* PORT13x */ 4, 5, 6, 7, 8, 9, 10, 11, /* PORT14x */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 15, 15, 
15, 15, 15, 15, 15, 15, /* PORT0x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ @@ -886,7 +903,7 @@ static const unsigned port_range1_pins[] = { 105, 106, 18, 27, 36, 128, 132, 137, /* PORT29x */ 183, 184, 185, 84, 47, 48, 51, 52, /* PORT30x */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 15, 15, 15, /* PORT175-177 */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */ @@ -907,7 +924,7 @@ static const unsigned xirq_pins[] = { 76, 77, 78, 79, 80, 81, 82, 83, /* XIRQ8-15 */ 84, 85, 86, 87, 88, /* XIRQ16-20 */ }; -static const unsigned xirq_muxvals[] = { +static const int xirq_muxvals[] = { 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */ 14, 14, 14, 14, 14, /* XIRQ16-20 */ @@ -915,11 +932,11 @@ static const unsigned xirq_muxvals[] = { static const unsigned xirq_alternatives_pins[] = { 91, 92, 239, 144, 240, 156, 241, 106, 128, }; -static const unsigned xirq_alternatives_muxvals[] = { +static const int xirq_alternatives_muxvals[] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, }; -static const struct uniphier_pinctrl_group ph1_pro5_groups[] = { +static const struct uniphier_pinctrl_group uniphier_pro5_groups[] = { UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(emmc), @@ -933,6 +950,15 @@ static const struct uniphier_pinctrl_group ph1_pro5_groups[] = { UNIPHIER_PINCTRL_GROUP(i2c5c), UNIPHIER_PINCTRL_GROUP(i2c6), UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs0), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(system_bus_cs2), + UNIPHIER_PINCTRL_GROUP(system_bus_cs3), + UNIPHIER_PINCTRL_GROUP(system_bus_cs4), + UNIPHIER_PINCTRL_GROUP(system_bus_cs5), + UNIPHIER_PINCTRL_GROUP(system_bus_cs6), + UNIPHIER_PINCTRL_GROUP(system_bus_cs7), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart0b), UNIPHIER_PINCTRL_GROUP(uart1), @@ -1213,6 +1239,15 @@ static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"}; static const char * const i2c6_groups[] = {"i2c6"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs0", + "system_bus_cs1", + "system_bus_cs2", + "system_bus_cs3", + "system_bus_cs4", + "system_bus_cs5", + "system_bus_cs6", + "system_bus_cs7"}; static const char * const uart0_groups[] = {"uart0", "uart0b"}; static const char * const uart1_groups[] = {"uart1"}; static const char * const uart2_groups[] = {"uart2"}; @@ -1291,7 +1326,7 @@ static const char * const xirq_groups[] = { "xirq18b", "xirq18c", "xirq19b", "xirq20b", }; -static const struct uniphier_pinmux_function ph1_pro5_functions[] = { +static const struct uniphier_pinmux_function uniphier_pro5_functions[] = { UNIPHIER_PINMUX_FUNCTION(emmc), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), @@ -1301,6 +1336,7 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = { UNIPHIER_PINMUX_FUNCTION(i2c6), UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -1312,43 +1348,36 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata 
ph1_pro5_pindata = { - .groups = ph1_pro5_groups, - .groups_count = ARRAY_SIZE(ph1_pro5_groups), - .functions = ph1_pro5_functions, - .functions_count = ARRAY_SIZE(ph1_pro5_functions), - .mux_bits = 4, - .reg_stride = 8, - .load_pinctrl = true, -}; - -static struct pinctrl_desc ph1_pro5_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = ph1_pro5_pins, - .npins = ARRAY_SIZE(ph1_pro5_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_pro5_pindata = { + .pins = uniphier_pro5_pins, + .npins = ARRAY_SIZE(uniphier_pro5_pins), + .groups = uniphier_pro5_groups, + .groups_count = ARRAY_SIZE(uniphier_pro5_groups), + .functions = uniphier_pro5_functions, + .functions_count = ARRAY_SIZE(uniphier_pro5_functions), + .caps = UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE, }; -static int ph1_pro5_pinctrl_probe(struct platform_device *pdev) +static int uniphier_pro5_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc, - &ph1_pro5_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_pro5_pindata); } -static const struct of_device_id ph1_pro5_pinctrl_match[] = { +static const struct of_device_id uniphier_pro5_pinctrl_match[] = { + { .compatible = "socionext,uniphier-pro5-pinctrl" }, { .compatible = "socionext,ph1-pro5-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_pro5_pinctrl_match); -static struct platform_driver ph1_pro5_pinctrl_driver = { - .probe = ph1_pro5_pinctrl_probe, +static struct platform_driver uniphier_pro5_pinctrl_driver = { + .probe = uniphier_pro5_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = ph1_pro5_pinctrl_match, + .name = "uniphier-pro5-pinctrl", + .of_match_table = uniphier_pro5_pinctrl_match, }, }; -module_platform_driver(ph1_pro5_pinctrl_driver); +module_platform_driver(uniphier_pro5_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c index e868030ff31c..85ca5e2d8a9c 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c @@ -19,760 +19,776 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "proxstream2-pinctrl" - -static const struct pinctrl_pin_desc proxstream2_pins[] = { +static const struct pinctrl_pin_desc uniphier_pxs2_pins[] = { UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_4_8, + 0, UNIPHIER_PIN_DRV_1BIT, 0, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE, - 1, UNIPHIER_PIN_DRV_4_8, + 1, UNIPHIER_PIN_DRV_1BIT, 1, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE, - 2, UNIPHIER_PIN_DRV_4_8, + 2, UNIPHIER_PIN_DRV_1BIT, 2, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE, - 3, UNIPHIER_PIN_DRV_4_8, + 3, UNIPHIER_PIN_DRV_1BIT, 3, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_4_8, + 4, UNIPHIER_PIN_DRV_1BIT, 4, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE, - 5, UNIPHIER_PIN_DRV_4_8, + 5, UNIPHIER_PIN_DRV_1BIT, 5, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE, - 6, UNIPHIER_PIN_DRV_4_8, + 6, UNIPHIER_PIN_DRV_1BIT, 6, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE, - 
7, UNIPHIER_PIN_DRV_4_8, + 7, UNIPHIER_PIN_DRV_1BIT, 7, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_4_8, + 8, UNIPHIER_PIN_DRV_1BIT, 8, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, - 9, UNIPHIER_PIN_DRV_4_8, + 9, UNIPHIER_PIN_DRV_1BIT, 9, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE, - 10, UNIPHIER_PIN_DRV_4_8, + 10, UNIPHIER_PIN_DRV_1BIT, 10, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE, - 11, UNIPHIER_PIN_DRV_4_8, + 11, UNIPHIER_PIN_DRV_1BIT, 11, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_4_8, + 12, UNIPHIER_PIN_DRV_1BIT, 12, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE, - 13, UNIPHIER_PIN_DRV_4_8, + 13, UNIPHIER_PIN_DRV_1BIT, 13, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE, - 14, UNIPHIER_PIN_DRV_4_8, + 14, UNIPHIER_PIN_DRV_1BIT, 14, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 24, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 25, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 26, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 27, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 28, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, - 30, UNIPHIER_PIN_DRV_4_8, + 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_UP), 
UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE, - 39, UNIPHIER_PIN_DRV_4_8, + 39, UNIPHIER_PIN_DRV_1BIT, 39, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE, - 40, UNIPHIER_PIN_DRV_4_8, + 40, UNIPHIER_PIN_DRV_1BIT, 40, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE, - 41, UNIPHIER_PIN_DRV_4_8, + 41, UNIPHIER_PIN_DRV_1BIT, 41, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE, - 42, UNIPHIER_PIN_DRV_4_8, + 42, UNIPHIER_PIN_DRV_1BIT, 42, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE, - 43, UNIPHIER_PIN_DRV_4_8, + 43, UNIPHIER_PIN_DRV_1BIT, 43, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE, - 44, UNIPHIER_PIN_DRV_4_8, + 44, UNIPHIER_PIN_DRV_1BIT, 44, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE, - 45, UNIPHIER_PIN_DRV_4_8, + 45, UNIPHIER_PIN_DRV_1BIT, 45, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE, - 46, UNIPHIER_PIN_DRV_4_8, + 46, UNIPHIER_PIN_DRV_1BIT, 46, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_8_12_16_20, + 0, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_8_12_16_20, + 1, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_8_12_16_20, + 2, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_8_12_16_20, + 3, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_8_12_16_20, + 4, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_8_12_16_20, + 5, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_UP_FIXED), UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 53, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 54, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 57, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 77, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 93, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 95, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 96, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 97, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 98, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 99, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 100, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 102, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 105, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 106, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 108, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 109, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 110, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 111, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 112, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(113, "TXD2", 0, - 113, UNIPHIER_PIN_DRV_4_8, + 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(114, "RXD2", 0, - 114, UNIPHIER_PIN_DRV_4_8, + 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "TXD1", 0, - 115, UNIPHIER_PIN_DRV_4_8, + 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "RXD1", 0, - 116, UNIPHIER_PIN_DRV_4_8, + 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 117, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 118, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 119, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 120, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 121, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 122, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 123, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + 
-1, UNIPHIER_PIN_DRV_FIXED8, 124, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 125, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 126, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 127, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 128, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 129, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 130, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 131, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 132, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 133, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 134, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 135, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 136, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 137, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 138, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 139, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 140, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 141, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE, - 142, UNIPHIER_PIN_DRV_4_8, + 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(143, "MDC", 0, - 143, UNIPHIER_PIN_DRV_4_8, + 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(144, "MDIO", 0, - 144, UNIPHIER_PIN_DRV_4_8, + 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0, - 145, UNIPHIER_PIN_DRV_4_8, + 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0, - 146, UNIPHIER_PIN_DRV_4_8, + 146, UNIPHIER_PIN_DRV_1BIT, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0, - 147, UNIPHIER_PIN_DRV_4_8, + 147, UNIPHIER_PIN_DRV_1BIT, 147, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0, - 148, UNIPHIER_PIN_DRV_4_8, + 148, 
UNIPHIER_PIN_DRV_1BIT, 148, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0, - 149, UNIPHIER_PIN_DRV_4_8, + 149, UNIPHIER_PIN_DRV_1BIT, 149, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0, - 150, UNIPHIER_PIN_DRV_4_8, + 150, UNIPHIER_PIN_DRV_1BIT, 150, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0, - 151, UNIPHIER_PIN_DRV_4_8, + 151, UNIPHIER_PIN_DRV_1BIT, 151, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0, - 152, UNIPHIER_PIN_DRV_4_8, + 152, UNIPHIER_PIN_DRV_1BIT, 152, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0, - 153, UNIPHIER_PIN_DRV_4_8, + 153, UNIPHIER_PIN_DRV_1BIT, 153, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0, - 154, UNIPHIER_PIN_DRV_4_8, + 154, UNIPHIER_PIN_DRV_1BIT, 154, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0, - 155, UNIPHIER_PIN_DRV_4_8, + 155, UNIPHIER_PIN_DRV_1BIT, 155, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0, - 156, UNIPHIER_PIN_DRV_4_8, + 156, UNIPHIER_PIN_DRV_1BIT, 156, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0, - 157, UNIPHIER_PIN_DRV_4_8, + 157, UNIPHIER_PIN_DRV_1BIT, 157, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0, - 158, UNIPHIER_PIN_DRV_4_8, + 158, UNIPHIER_PIN_DRV_1BIT, 158, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 159, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 160, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 161, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 162, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 163, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 164, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 165, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 166, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 167, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 168, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 169, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 170, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 171, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 172, UNIPHIER_PIN_PULL_DOWN), 
UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 173, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 174, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 175, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 176, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 177, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 178, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 179, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 180, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 181, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 182, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 183, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 184, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 185, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 186, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0, - 187, UNIPHIER_PIN_DRV_4_8, + 187, UNIPHIER_PIN_DRV_1BIT, 187, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0, - 188, UNIPHIER_PIN_DRV_4_8, + 188, UNIPHIER_PIN_DRV_1BIT, 188, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0, - 189, UNIPHIER_PIN_DRV_4_8, + 189, UNIPHIER_PIN_DRV_1BIT, 189, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0, - 190, UNIPHIER_PIN_DRV_4_8, + 190, UNIPHIER_PIN_DRV_1BIT, 190, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0, - 191, UNIPHIER_PIN_DRV_4_8, + 191, UNIPHIER_PIN_DRV_1BIT, 191, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0, - 192, UNIPHIER_PIN_DRV_4_8, + 192, UNIPHIER_PIN_DRV_1BIT, 192, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0, - 193, UNIPHIER_PIN_DRV_4_8, + 193, UNIPHIER_PIN_DRV_1BIT, 193, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0, - 194, UNIPHIER_PIN_DRV_4_8, + 194, UNIPHIER_PIN_DRV_1BIT, 194, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(195, "PORT241", 0, - 195, UNIPHIER_PIN_DRV_4_8, + 195, UNIPHIER_PIN_DRV_1BIT, 195, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(196, "PORT242", 0, - 196, UNIPHIER_PIN_DRV_4_8, + 196, UNIPHIER_PIN_DRV_1BIT, 196, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(197, "PORT243", 0, - 197, UNIPHIER_PIN_DRV_4_8, 
+ 197, UNIPHIER_PIN_DRV_1BIT, 197, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0, - 198, UNIPHIER_PIN_DRV_4_8, + 198, UNIPHIER_PIN_DRV_1BIT, 198, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE, - 199, UNIPHIER_PIN_DRV_4_8, + 199, UNIPHIER_PIN_DRV_1BIT, 199, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE, - 200, UNIPHIER_PIN_DRV_4_8, + 200, UNIPHIER_PIN_DRV_1BIT, 200, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE, - 201, UNIPHIER_PIN_DRV_4_8, + 201, UNIPHIER_PIN_DRV_1BIT, 201, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE, - 202, UNIPHIER_PIN_DRV_4_8, + 202, UNIPHIER_PIN_DRV_1BIT, 202, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 203, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 204, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 205, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_8, + -1, UNIPHIER_PIN_DRV_FIXED8, 206, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0, - 207, UNIPHIER_PIN_DRV_4_8, + 207, UNIPHIER_PIN_DRV_1BIT, 207, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0, - 208, UNIPHIER_PIN_DRV_4_8, + 208, UNIPHIER_PIN_DRV_1BIT, 208, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0, - 209, UNIPHIER_PIN_DRV_4_8, + 209, UNIPHIER_PIN_DRV_1BIT, 209, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE, - 210, UNIPHIER_PIN_DRV_4_8, + 210, UNIPHIER_PIN_DRV_1BIT, 210, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE, - 211, UNIPHIER_PIN_DRV_4_8, + 211, UNIPHIER_PIN_DRV_1BIT, 211, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE, - 212, UNIPHIER_PIN_DRV_4_8, + 212, UNIPHIER_PIN_DRV_1BIT, 212, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 213, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 214, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE, - 215, UNIPHIER_PIN_DRV_4_8, + 215, UNIPHIER_PIN_DRV_1BIT, 215, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(216, "CK25O", 0, - 216, UNIPHIER_PIN_DRV_4_8, + 216, UNIPHIER_PIN_DRV_1BIT, 216, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(217, "TXD0", 0, - 217, UNIPHIER_PIN_DRV_4_8, + 217, UNIPHIER_PIN_DRV_1BIT, 217, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(218, "RXD0", 0, - 218, UNIPHIER_PIN_DRV_4_8, + 218, UNIPHIER_PIN_DRV_1BIT, 218, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(219, "TXD3", 0, - 219, UNIPHIER_PIN_DRV_4_8, + 219, UNIPHIER_PIN_DRV_1BIT, 219, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(220, "RXD3", 0, - 220, UNIPHIER_PIN_DRV_4_8, + 220, UNIPHIER_PIN_DRV_1BIT, 220, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(221, "PORT273", 0, - 221, UNIPHIER_PIN_DRV_4_8, + 221, UNIPHIER_PIN_DRV_1BIT, 221, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0, - 222, 
UNIPHIER_PIN_DRV_4_8, + 222, UNIPHIER_PIN_DRV_1BIT, 222, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(223, "PORT274", 0, - 223, UNIPHIER_PIN_DRV_4_8, + 223, UNIPHIER_PIN_DRV_1BIT, 223, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(224, "PORT275", 0, - 224, UNIPHIER_PIN_DRV_4_8, + 224, UNIPHIER_PIN_DRV_1BIT, 224, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(225, "PORT276", 0, - 225, UNIPHIER_PIN_DRV_4_8, + 225, UNIPHIER_PIN_DRV_1BIT, 225, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(226, "PORT277", 0, - 226, UNIPHIER_PIN_DRV_4_8, + 226, UNIPHIER_PIN_DRV_1BIT, 226, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(227, "PORT280", 0, - 227, UNIPHIER_PIN_DRV_4_8, + 227, UNIPHIER_PIN_DRV_1BIT, 227, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(228, "PORT281", 0, - 228, UNIPHIER_PIN_DRV_4_8, + 228, UNIPHIER_PIN_DRV_1BIT, 228, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(229, "PORT282", 0, - 229, UNIPHIER_PIN_DRV_4_8, + 229, UNIPHIER_PIN_DRV_1BIT, 229, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(230, "PORT283", 0, - 230, UNIPHIER_PIN_DRV_4_8, + 230, UNIPHIER_PIN_DRV_1BIT, 230, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(231, "PORT284", 0, - 231, UNIPHIER_PIN_DRV_4_8, + 231, UNIPHIER_PIN_DRV_1BIT, 231, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(232, "PORT285", 0, - 232, UNIPHIER_PIN_DRV_4_8, + 232, UNIPHIER_PIN_DRV_1BIT, 232, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0, - 233, UNIPHIER_PIN_DRV_4_8, + 233, UNIPHIER_PIN_DRV_1BIT, 233, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0, - 234, UNIPHIER_PIN_DRV_4_8, + 234, UNIPHIER_PIN_DRV_1BIT, 234, UNIPHIER_PIN_PULL_DOWN), }; static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42}; -static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9}; +static const int emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9}; static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46}; -static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9}; +static const int emmc_dat8_muxvals[] = {9, 9, 9, 9}; +static const unsigned ether_mii_pins[] = {143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, + 158, 159, 199, 200, 201, 202}; +static const int ether_mii_muxvals[] = {8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 12, 12, 12, 12}; +static const unsigned ether_rgmii_pins[] = {143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, + 157, 158}; +static const int ether_rgmii_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8}; +static const unsigned ether_rmii_pins[] = {143, 144, 145, 146, 147, 148, 149, + 150, 152, 154, 155, 158}; +static const int ether_rmii_muxvals[] = {8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9}; static const unsigned i2c0_pins[] = {109, 110}; -static const unsigned i2c0_muxvals[] = {8, 8}; +static const int i2c0_muxvals[] = {8, 8}; static const unsigned i2c1_pins[] = {111, 112}; -static const unsigned i2c1_muxvals[] = {8, 8}; +static const int i2c1_muxvals[] = {8, 8}; static const unsigned i2c2_pins[] = {171, 172}; -static const unsigned i2c2_muxvals[] = {8, 8}; +static const int i2c2_muxvals[] = {8, 8}; static const unsigned i2c3_pins[] = {159, 160}; -static const unsigned i2c3_muxvals[] = {8, 8}; +static const int i2c3_muxvals[] = {8, 8}; static const unsigned i2c5_pins[] = {183, 184}; -static const unsigned i2c5_muxvals[] = {11, 11}; +static const int i2c5_muxvals[] = {11, 11}; static const unsigned i2c6_pins[] = {185, 186}; -static const unsigned i2c6_muxvals[] = {11, 11}; +static const int i2c6_muxvals[] = {11, 11}; static const unsigned nand_pins[] = {30, 31, 
32, 33, 34, 35, 36, 39, 40, 41, 42, 43, 44, 45, 46}; -static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8}; +static const int nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}; static const unsigned nand_cs1_pins[] = {37, 38}; -static const unsigned nand_cs1_muxvals[] = {8, 8}; +static const int nand_cs1_muxvals[] = {8, 8}; static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55}; -static const unsigned sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8}; +static const int sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8}; +static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13}; +static const int system_bus_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8}; +static const unsigned system_bus_cs1_pins[] = {14}; +static const int system_bus_cs1_muxvals[] = {8}; static const unsigned uart0_pins[] = {217, 218}; -static const unsigned uart0_muxvals[] = {8, 8}; +static const int uart0_muxvals[] = {8, 8}; static const unsigned uart0b_pins[] = {179, 180}; -static const unsigned uart0b_muxvals[] = {10, 10}; +static const int uart0b_muxvals[] = {10, 10}; static const unsigned uart1_pins[] = {115, 116}; -static const unsigned uart1_muxvals[] = {8, 8}; +static const int uart1_muxvals[] = {8, 8}; static const unsigned uart2_pins[] = {113, 114}; -static const unsigned uart2_muxvals[] = {8, 8}; +static const int uart2_muxvals[] = {8, 8}; static const unsigned uart3_pins[] = {219, 220}; -static const unsigned uart3_muxvals[] = {8, 8}; +static const int uart3_muxvals[] = {8, 8}; static const unsigned uart3b_pins[] = {181, 182}; -static const unsigned uart3b_muxvals[] = {10, 10}; +static const int uart3b_muxvals[] = {10, 10}; static const unsigned usb0_pins[] = {56, 57}; -static const unsigned usb0_muxvals[] = {8, 8}; +static const int usb0_muxvals[] = {8, 8}; static const unsigned usb1_pins[] = {58, 59}; -static const unsigned usb1_muxvals[] = {8, 8}; +static const int usb1_muxvals[] = {8, 8}; static const unsigned usb2_pins[] = {60, 61}; -static const unsigned usb2_muxvals[] = {8, 8}; +static const int usb2_muxvals[] = {8, 8}; static const unsigned usb3_pins[] = {62, 63}; -static const unsigned usb3_muxvals[] = {8, 8}; +static const int usb3_muxvals[] = {8, 8}; static const unsigned port_range0_pins[] = { 127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */ 135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */ @@ -786,7 +802,7 @@ static const unsigned port_range0_pins[] = { 61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */ 69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ @@ -818,7 +834,7 @@ static const unsigned port_range1_pins[] = { 218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */ 227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ @@ -842,15 +858,18 @@ static const unsigned xirq_pins[] = { 126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */ 74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */ }; -static const unsigned xirq_muxvals[] = { +static const int xirq_muxvals[] = { 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */ 14, 
14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ }; -static const struct uniphier_pinctrl_group proxstream2_groups[] = { +static const struct uniphier_pinctrl_group uniphier_pxs2_groups[] = { UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_mii), + UNIPHIER_PINCTRL_GROUP(ether_rgmii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), @@ -860,6 +879,8 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = { UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart0b), UNIPHIER_PINCTRL_GROUP(uart1), @@ -1124,6 +1145,9 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = { }; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_mii_groups[] = {"ether_mii"}; +static const char * const ether_rgmii_groups[] = {"ether_rgmii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char * const i2c2_groups[] = {"i2c2"}; @@ -1132,6 +1156,8 @@ static const char * const i2c5_groups[] = {"i2c5"}; static const char * const i2c6_groups[] = {"i2c6"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs1"}; static const char * const uart0_groups[] = {"uart0", "uart0b"}; static const char * const uart1_groups[] = {"uart1"}; static const char * const uart2_groups[] = {"uart2"}; @@ -1208,8 +1234,11 @@ static const char * const xirq_groups[] = { "xirq20", "xirq21", "xirq22", "xirq23", }; -static const struct uniphier_pinmux_function proxstream2_functions[] = { +static const struct uniphier_pinmux_function uniphier_pxs2_functions[] = { UNIPHIER_PINMUX_FUNCTION(emmc), + UNIPHIER_PINMUX_FUNCTION(ether_mii), + UNIPHIER_PINMUX_FUNCTION(ether_rgmii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), UNIPHIER_PINMUX_FUNCTION(i2c2), @@ -1218,6 +1247,7 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = { UNIPHIER_PINMUX_FUNCTION(i2c6), UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -1230,43 +1260,36 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata proxstream2_pindata = { - .groups = proxstream2_groups, - .groups_count = ARRAY_SIZE(proxstream2_groups), - .functions = proxstream2_functions, - .functions_count = ARRAY_SIZE(proxstream2_functions), - .mux_bits = 8, - .reg_stride = 4, - .load_pinctrl = false, -}; - -static struct pinctrl_desc proxstream2_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = proxstream2_pins, - .npins = ARRAY_SIZE(proxstream2_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_pxs2_pindata = { + .pins = uniphier_pxs2_pins, + .npins = ARRAY_SIZE(uniphier_pxs2_pins), + .groups = uniphier_pxs2_groups, + .groups_count = ARRAY_SIZE(uniphier_pxs2_groups), + .functions = uniphier_pxs2_functions, + .functions_count = 
ARRAY_SIZE(uniphier_pxs2_functions), + .caps = 0, }; -static int proxstream2_pinctrl_probe(struct platform_device *pdev) +static int uniphier_pxs2_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc, - &proxstream2_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_pxs2_pindata); } -static const struct of_device_id proxstream2_pinctrl_match[] = { +static const struct of_device_id uniphier_pxs2_pinctrl_match[] = { + { .compatible = "socionext,uniphier-pxs2-pinctrl" }, { .compatible = "socionext,proxstream2-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_pxs2_pinctrl_match); -static struct platform_driver proxstream2_pinctrl_driver = { - .probe = proxstream2_pinctrl_probe, +static struct platform_driver uniphier_pxs2_pinctrl_driver = { + .probe = uniphier_pxs2_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = proxstream2_pinctrl_match, + .name = "uniphier-pxs2-pinctrl", + .of_match_table = uniphier_pxs2_pinctrl_match, }, }; -module_platform_driver(proxstream2_pinctrl_driver); +module_platform_driver(uniphier_pxs2_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c index ceb7a9899bde..da689d880f46 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c @@ -19,453 +19,518 @@ #include "pinctrl-uniphier.h" -#define DRIVER_NAME "ph1-sld8-pinctrl" - -static const struct pinctrl_pin_desc ph1_sld8_pins[] = { +static const struct pinctrl_pin_desc uniphier_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(0, "PCA00", 0, - 15, UNIPHIER_PIN_DRV_4_8, + 15, UNIPHIER_PIN_DRV_1BIT, 15, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(1, "PCA01", 0, - 16, UNIPHIER_PIN_DRV_4_8, + 16, UNIPHIER_PIN_DRV_1BIT, 16, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(2, "PCA02", 0, - 17, UNIPHIER_PIN_DRV_4_8, + 17, UNIPHIER_PIN_DRV_1BIT, 17, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(3, "PCA03", 0, - 18, UNIPHIER_PIN_DRV_4_8, + 18, UNIPHIER_PIN_DRV_1BIT, 18, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(4, "PCA04", 0, - 19, UNIPHIER_PIN_DRV_4_8, + 19, UNIPHIER_PIN_DRV_1BIT, 19, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(5, "PCA05", 0, - 20, UNIPHIER_PIN_DRV_4_8, + 20, UNIPHIER_PIN_DRV_1BIT, 20, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(6, "PCA06", 0, - 21, UNIPHIER_PIN_DRV_4_8, + 21, UNIPHIER_PIN_DRV_1BIT, 21, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(7, "PCA07", 0, - 22, UNIPHIER_PIN_DRV_4_8, + 22, UNIPHIER_PIN_DRV_1BIT, 22, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(8, "PCA08", 0, - 23, UNIPHIER_PIN_DRV_4_8, + 23, UNIPHIER_PIN_DRV_1BIT, 23, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(9, "PCA09", 0, - 24, UNIPHIER_PIN_DRV_4_8, + 24, UNIPHIER_PIN_DRV_1BIT, 24, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(10, "PCA10", 0, - 25, UNIPHIER_PIN_DRV_4_8, + 25, UNIPHIER_PIN_DRV_1BIT, 25, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(11, "PCA11", 0, - 26, UNIPHIER_PIN_DRV_4_8, + 26, UNIPHIER_PIN_DRV_1BIT, 26, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(12, "PCA12", 0, - 27, UNIPHIER_PIN_DRV_4_8, + 27, UNIPHIER_PIN_DRV_1BIT, 27, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(13, "PCA13", 0, - 28, UNIPHIER_PIN_DRV_4_8, + 28, UNIPHIER_PIN_DRV_1BIT, 28, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(14, "PCA14", 0, 
- 29, UNIPHIER_PIN_DRV_4_8, + 29, UNIPHIER_PIN_DRV_1BIT, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, - 30, UNIPHIER_PIN_DRV_4_8, + 30, UNIPHIER_PIN_DRV_1BIT, 30, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE, - 31, UNIPHIER_PIN_DRV_4_8, + 31, UNIPHIER_PIN_DRV_1BIT, 31, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_4_8, + 32, UNIPHIER_PIN_DRV_1BIT, 32, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE, - 33, UNIPHIER_PIN_DRV_4_8, + 33, UNIPHIER_PIN_DRV_1BIT, 33, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE, - 34, UNIPHIER_PIN_DRV_4_8, + 34, UNIPHIER_PIN_DRV_1BIT, 34, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE, - 35, UNIPHIER_PIN_DRV_4_8, + 35, UNIPHIER_PIN_DRV_1BIT, 35, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_4_8, + 36, UNIPHIER_PIN_DRV_1BIT, 36, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE, - 0, UNIPHIER_PIN_DRV_8_12_16_20, + 0, UNIPHIER_PIN_DRV_2BIT, 119, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE, - 4, UNIPHIER_PIN_DRV_8_12_16_20, + 1, UNIPHIER_PIN_DRV_2BIT, 120, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE, - 8, UNIPHIER_PIN_DRV_8_12_16_20, + 2, UNIPHIER_PIN_DRV_2BIT, 121, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE, - 12, UNIPHIER_PIN_DRV_8_12_16_20, + 3, UNIPHIER_PIN_DRV_2BIT, 122, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE, - 16, UNIPHIER_PIN_DRV_8_12_16_20, + 4, UNIPHIER_PIN_DRV_2BIT, 123, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE, - 20, UNIPHIER_PIN_DRV_8_12_16_20, + 5, UNIPHIER_PIN_DRV_2BIT, 124, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE, - 24, UNIPHIER_PIN_DRV_8_12_16_20, + 6, UNIPHIER_PIN_DRV_2BIT, 125, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE, - 28, UNIPHIER_PIN_DRV_8_12_16_20, + 7, UNIPHIER_PIN_DRV_2BIT, 126, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE, - 32, UNIPHIER_PIN_DRV_8_12_16_20, + 8, UNIPHIER_PIN_DRV_2BIT, 127, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, - 36, UNIPHIER_PIN_DRV_8_12_16_20, + 9, UNIPHIER_PIN_DRV_2BIT, 128, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8, - 40, UNIPHIER_PIN_DRV_8_12_16_20, + 10, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8, - 44, UNIPHIER_PIN_DRV_8_12_16_20, + 11, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8, - 48, UNIPHIER_PIN_DRV_8_12_16_20, + 12, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8, - 52, UNIPHIER_PIN_DRV_8_12_16_20, + 13, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8, - 56, UNIPHIER_PIN_DRV_8_12_16_20, + 14, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8, - 60, UNIPHIER_PIN_DRV_8_12_16_20, + 15, UNIPHIER_PIN_DRV_2BIT, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(38, "SDCD", 8, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 129, 
UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(39, "SDWP", 8, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 130, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 131, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0, - 37, UNIPHIER_PIN_DRV_4_8, + 37, UNIPHIER_PIN_DRV_1BIT, 37, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0, - 38, UNIPHIER_PIN_DRV_4_8, + 38, UNIPHIER_PIN_DRV_1BIT, 38, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0, - 39, UNIPHIER_PIN_DRV_4_8, + 39, UNIPHIER_PIN_DRV_1BIT, 39, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0, - 40, UNIPHIER_PIN_DRV_4_8, + 40, UNIPHIER_PIN_DRV_1BIT, 40, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0, - 41, UNIPHIER_PIN_DRV_4_8, + 41, UNIPHIER_PIN_DRV_1BIT, 41, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(46, "PCREG", 0, - 42, UNIPHIER_PIN_DRV_4_8, + 42, UNIPHIER_PIN_DRV_1BIT, 42, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0, - 43, UNIPHIER_PIN_DRV_4_8, + 43, UNIPHIER_PIN_DRV_1BIT, 43, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0, - 44, UNIPHIER_PIN_DRV_4_8, + 44, UNIPHIER_PIN_DRV_1BIT, 44, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0, - 45, UNIPHIER_PIN_DRV_4_8, + 45, UNIPHIER_PIN_DRV_1BIT, 45, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0, - 46, UNIPHIER_PIN_DRV_4_8, + 46, UNIPHIER_PIN_DRV_1BIT, 46, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0, - 47, UNIPHIER_PIN_DRV_4_8, + 47, UNIPHIER_PIN_DRV_1BIT, 47, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0, - 48, UNIPHIER_PIN_DRV_4_8, + 48, UNIPHIER_PIN_DRV_1BIT, 48, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0, - 49, UNIPHIER_PIN_DRV_4_8, + 49, UNIPHIER_PIN_DRV_1BIT, 49, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(54, "PCWE", 0, - 50, UNIPHIER_PIN_DRV_4_8, + 50, UNIPHIER_PIN_DRV_1BIT, 50, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(55, "PCOE", 0, - 51, UNIPHIER_PIN_DRV_4_8, + 51, UNIPHIER_PIN_DRV_1BIT, 51, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0, - 52, UNIPHIER_PIN_DRV_4_8, + 52, UNIPHIER_PIN_DRV_1BIT, 52, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0, - 53, UNIPHIER_PIN_DRV_4_8, + 53, UNIPHIER_PIN_DRV_1BIT, 53, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0, - 54, UNIPHIER_PIN_DRV_4_8, + 54, UNIPHIER_PIN_DRV_1BIT, 54, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0, - 55, UNIPHIER_PIN_DRV_4_8, + 55, UNIPHIER_PIN_DRV_1BIT, 55, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0, - 56, UNIPHIER_PIN_DRV_4_8, + 56, UNIPHIER_PIN_DRV_1BIT, 56, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0, - 57, UNIPHIER_PIN_DRV_4_8, + 57, UNIPHIER_PIN_DRV_1BIT, 57, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0, - 58, UNIPHIER_PIN_DRV_4_8, + 58, UNIPHIER_PIN_DRV_1BIT, 58, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0, - 59, UNIPHIER_PIN_DRV_4_8, + 59, UNIPHIER_PIN_DRV_1BIT, 59, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0, - 60, UNIPHIER_PIN_DRV_4_8, + 60, UNIPHIER_PIN_DRV_1BIT, 60, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0, - 61, UNIPHIER_PIN_DRV_4_8, + 61, UNIPHIER_PIN_DRV_1BIT, 61, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0, - 62, UNIPHIER_PIN_DRV_4_8, + 62, UNIPHIER_PIN_DRV_1BIT, 62, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0, - 
63, UNIPHIER_PIN_DRV_4_8, + 63, UNIPHIER_PIN_DRV_1BIT, 63, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0, - 64, UNIPHIER_PIN_DRV_4_8, + 64, UNIPHIER_PIN_DRV_1BIT, 64, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0, - 65, UNIPHIER_PIN_DRV_4_8, + 65, UNIPHIER_PIN_DRV_1BIT, 65, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0, - 66, UNIPHIER_PIN_DRV_4_8, + 66, UNIPHIER_PIN_DRV_1BIT, 66, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0, - 67, UNIPHIER_PIN_DRV_4_8, + 67, UNIPHIER_PIN_DRV_1BIT, 67, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0, - 68, UNIPHIER_PIN_DRV_4_8, + 68, UNIPHIER_PIN_DRV_1BIT, 68, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0, - 69, UNIPHIER_PIN_DRV_4_8, + 69, UNIPHIER_PIN_DRV_1BIT, 69, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0, - 70, UNIPHIER_PIN_DRV_4_8, + 70, UNIPHIER_PIN_DRV_1BIT, 70, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0, - 71, UNIPHIER_PIN_DRV_4_8, + 71, UNIPHIER_PIN_DRV_1BIT, 71, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0, - 72, UNIPHIER_PIN_DRV_4_8, + 72, UNIPHIER_PIN_DRV_1BIT, 72, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0, - 73, UNIPHIER_PIN_DRV_4_8, + 73, UNIPHIER_PIN_DRV_1BIT, 73, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0, - 74, UNIPHIER_PIN_DRV_4_8, + 74, UNIPHIER_PIN_DRV_1BIT, 74, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0, - 75, UNIPHIER_PIN_DRV_4_8, + 75, UNIPHIER_PIN_DRV_1BIT, 75, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0, - 76, UNIPHIER_PIN_DRV_4_8, + 76, UNIPHIER_PIN_DRV_1BIT, 76, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0, - 77, UNIPHIER_PIN_DRV_4_8, + 77, UNIPHIER_PIN_DRV_1BIT, 77, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0, - 78, UNIPHIER_PIN_DRV_4_8, + 78, UNIPHIER_PIN_DRV_1BIT, 78, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0, - 79, UNIPHIER_PIN_DRV_4_8, + 79, UNIPHIER_PIN_DRV_1BIT, 79, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0, - 80, UNIPHIER_PIN_DRV_4_8, + 80, UNIPHIER_PIN_DRV_1BIT, 80, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0, - 81, UNIPHIER_PIN_DRV_4_8, + 81, UNIPHIER_PIN_DRV_1BIT, 81, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0, - 82, UNIPHIER_PIN_DRV_4_8, + 82, UNIPHIER_PIN_DRV_1BIT, 82, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0, - 83, UNIPHIER_PIN_DRV_4_8, + 83, UNIPHIER_PIN_DRV_1BIT, 83, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0, - 84, UNIPHIER_PIN_DRV_4_8, + 84, UNIPHIER_PIN_DRV_1BIT, 84, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0, - 85, UNIPHIER_PIN_DRV_4_8, + 85, UNIPHIER_PIN_DRV_1BIT, 85, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0, - 86, UNIPHIER_PIN_DRV_4_8, + 86, UNIPHIER_PIN_DRV_1BIT, 86, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0, - 87, UNIPHIER_PIN_DRV_4_8, + 87, UNIPHIER_PIN_DRV_1BIT, 87, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(92, "AGCI", 3, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 132, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(93, "AGCR", 4, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 133, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, 134, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0, - 88, UNIPHIER_PIN_DRV_4_8, + 
88, UNIPHIER_PIN_DRV_1BIT, 88, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0, - 89, UNIPHIER_PIN_DRV_4_8, + 89, UNIPHIER_PIN_DRV_1BIT, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, - 90, UNIPHIER_PIN_DRV_4_8, + 90, UNIPHIER_PIN_DRV_1BIT, 90, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE, - 91, UNIPHIER_PIN_DRV_4_8, + 91, UNIPHIER_PIN_DRV_1BIT, 91, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE, - 92, UNIPHIER_PIN_DRV_4_8, + 92, UNIPHIER_PIN_DRV_1BIT, 92, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, - 93, UNIPHIER_PIN_DRV_4_8, + 93, UNIPHIER_PIN_DRV_1BIT, 93, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, - 94, UNIPHIER_PIN_DRV_4_8, + 94, UNIPHIER_PIN_DRV_1BIT, 94, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(102, "SDA0", 10, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(103, "SCL0", 10, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(104, "SDA1", 11, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(105, "SCL1", 11, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13, - -1, UNIPHIER_PIN_DRV_FIXED_4, + -1, UNIPHIER_PIN_DRV_FIXED4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, - 95, UNIPHIER_PIN_DRV_4_8, + 95, UNIPHIER_PIN_DRV_1BIT, 95, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, - 96, UNIPHIER_PIN_DRV_4_8, + 96, UNIPHIER_PIN_DRV_1BIT, 96, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(112, "SBO1", 0, - 97, UNIPHIER_PIN_DRV_4_8, + 97, UNIPHIER_PIN_DRV_1BIT, 97, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(113, "SBI1", 0, - 98, UNIPHIER_PIN_DRV_4_8, + 98, UNIPHIER_PIN_DRV_1BIT, 98, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(114, "TXD1", 0, - 99, UNIPHIER_PIN_DRV_4_8, + 99, UNIPHIER_PIN_DRV_1BIT, 99, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(115, "RXD1", 0, - 100, UNIPHIER_PIN_DRV_4_8, + 100, UNIPHIER_PIN_DRV_1BIT, 100, UNIPHIER_PIN_PULL_UP), UNIPHIER_PINCTRL_PIN(116, "HIN", 1, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(117, "VIN", 2, - -1, UNIPHIER_PIN_DRV_FIXED_5, + -1, UNIPHIER_PIN_DRV_FIXED5, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(118, "TCON0", 0, - 101, UNIPHIER_PIN_DRV_4_8, + 101, UNIPHIER_PIN_DRV_1BIT, 101, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(119, "TCON1", 0, - 102, UNIPHIER_PIN_DRV_4_8, + 102, UNIPHIER_PIN_DRV_1BIT, 102, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(120, "TCON2", 0, - 103, UNIPHIER_PIN_DRV_4_8, + 103, UNIPHIER_PIN_DRV_1BIT, 103, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(121, "TCON3", 0, - 104, UNIPHIER_PIN_DRV_4_8, + 104, UNIPHIER_PIN_DRV_1BIT, 104, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(122, "TCON4", 0, - 105, UNIPHIER_PIN_DRV_4_8, + 
105, UNIPHIER_PIN_DRV_1BIT, 105, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(123, "TCON5", 0, - 106, UNIPHIER_PIN_DRV_4_8, + 106, UNIPHIER_PIN_DRV_1BIT, 106, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(124, "TCON6", 0, - 107, UNIPHIER_PIN_DRV_4_8, + 107, UNIPHIER_PIN_DRV_1BIT, 107, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(125, "TCON7", 0, - 108, UNIPHIER_PIN_DRV_4_8, + 108, UNIPHIER_PIN_DRV_1BIT, 108, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(126, "TCON8", 0, - 109, UNIPHIER_PIN_DRV_4_8, + 109, UNIPHIER_PIN_DRV_1BIT, 109, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(127, "PWMA", 0, - 110, UNIPHIER_PIN_DRV_4_8, + 110, UNIPHIER_PIN_DRV_1BIT, 110, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0, - 111, UNIPHIER_PIN_DRV_4_8, + 111, UNIPHIER_PIN_DRV_1BIT, 111, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0, - 112, UNIPHIER_PIN_DRV_4_8, + 112, UNIPHIER_PIN_DRV_1BIT, 112, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0, - 113, UNIPHIER_PIN_DRV_4_8, + 113, UNIPHIER_PIN_DRV_1BIT, 113, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0, - 114, UNIPHIER_PIN_DRV_4_8, + 114, UNIPHIER_PIN_DRV_1BIT, 114, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0, - 115, UNIPHIER_PIN_DRV_4_8, + 115, UNIPHIER_PIN_DRV_1BIT, 115, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0, - 116, UNIPHIER_PIN_DRV_4_8, + 116, UNIPHIER_PIN_DRV_1BIT, 116, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0, - 117, UNIPHIER_PIN_DRV_4_8, + 117, UNIPHIER_PIN_DRV_1BIT, 117, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0, - 118, UNIPHIER_PIN_DRV_4_8, + 118, UNIPHIER_PIN_DRV_1BIT, 118, UNIPHIER_PIN_PULL_DOWN), + /* dedicated pins */ + UNIPHIER_PINCTRL_PIN(136, "ED0", -1, + 0, UNIPHIER_PIN_DRV_1BIT, + 0, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(137, "ED1", -1, + 1, UNIPHIER_PIN_DRV_1BIT, + 1, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(138, "ED2", -1, + 2, UNIPHIER_PIN_DRV_1BIT, + 2, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(139, "ED3", -1, + 3, UNIPHIER_PIN_DRV_1BIT, + 3, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(140, "ED4", -1, + 4, UNIPHIER_PIN_DRV_1BIT, + 4, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(141, "ED5", -1, + 5, UNIPHIER_PIN_DRV_1BIT, + 5, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(142, "ED6", -1, + 6, UNIPHIER_PIN_DRV_1BIT, + 6, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(143, "ED7", -1, + 7, UNIPHIER_PIN_DRV_1BIT, + 7, UNIPHIER_PIN_PULL_DOWN), + UNIPHIER_PINCTRL_PIN(144, "XERWE0", -1, + 8, UNIPHIER_PIN_DRV_1BIT, + 8, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(145, "XERWE1", -1, + 9, UNIPHIER_PIN_DRV_1BIT, + 9, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(146, "ERXW", -1, + 10, UNIPHIER_PIN_DRV_1BIT, + 10, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(147, "ES0", -1, + 11, UNIPHIER_PIN_DRV_1BIT, + 11, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(148, "ES1", -1, + 12, UNIPHIER_PIN_DRV_1BIT, + 12, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(149, "ES2", -1, + 13, UNIPHIER_PIN_DRV_1BIT, + 13, UNIPHIER_PIN_PULL_UP), + UNIPHIER_PINCTRL_PIN(150, "XECS1", -1, + 14, UNIPHIER_PIN_DRV_1BIT, + 14, UNIPHIER_PIN_PULL_DOWN), }; static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27}; -static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; +static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1}; static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31}; -static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1}; +static const int emmc_dat8_muxvals[] = {1, 1, 1, 1}; 
+static const unsigned ether_mii_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 14, + 61, 63, 64, 65, 66, 67, 68}; +static const int ether_mii_muxvals[] = {13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 27, 27, 27, 27, 27, 27, 27}; +static const unsigned ether_rmii_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13, + 14}; +static const int ether_rmii_muxvals[] = {13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13}; static const unsigned i2c0_pins[] = {102, 103}; -static const unsigned i2c0_muxvals[] = {0, 0}; +static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {104, 105}; -static const unsigned i2c1_muxvals[] = {0, 0}; +static const int i2c1_muxvals[] = {0, 0}; static const unsigned i2c2_pins[] = {108, 109}; -static const unsigned i2c2_muxvals[] = {2, 2}; +static const int i2c2_muxvals[] = {2, 2}; static const unsigned i2c3_pins[] = {108, 109}; -static const unsigned i2c3_muxvals[] = {3, 3}; +static const int i2c3_muxvals[] = {3, 3}; static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26, 27, 28, 29, 30, 31}; -static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0}; +static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned nand_cs1_pins[] = {22, 23}; -static const unsigned nand_cs1_muxvals[] = {0, 0}; +static const int nand_cs1_muxvals[] = {0, 0}; static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40}; -static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; +static const unsigned system_bus_pins[] = {136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149}; +static const int system_bus_muxvals[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1}; +static const unsigned system_bus_cs1_pins[] = {150}; +static const int system_bus_cs1_muxvals[] = {-1}; +static const unsigned system_bus_cs2_pins[] = {10}; +static const int system_bus_cs2_muxvals[] = {1}; +static const unsigned system_bus_cs3_pins[] = {11}; +static const int system_bus_cs3_muxvals[] = {1}; +static const unsigned system_bus_cs4_pins[] = {12}; +static const int system_bus_cs4_muxvals[] = {1}; +static const unsigned system_bus_cs5_pins[] = {13}; +static const int system_bus_cs5_muxvals[] = {1}; static const unsigned uart0_pins[] = {70, 71}; -static const unsigned uart0_muxvals[] = {3, 3}; +static const int uart0_muxvals[] = {3, 3}; static const unsigned uart1_pins[] = {114, 115}; -static const unsigned uart1_muxvals[] = {0, 0}; +static const int uart1_muxvals[] = {0, 0}; static const unsigned uart2_pins[] = {112, 113}; -static const unsigned uart2_muxvals[] = {1, 1}; +static const int uart2_muxvals[] = {1, 1}; static const unsigned uart3_pins[] = {110, 111}; -static const unsigned uart3_muxvals[] = {1, 1}; +static const int uart3_muxvals[] = {1, 1}; static const unsigned usb0_pins[] = {41, 42}; -static const unsigned usb0_muxvals[] = {0, 0}; +static const int usb0_muxvals[] = {0, 0}; static const unsigned usb1_pins[] = {43, 44}; -static const unsigned usb1_muxvals[] = {0, 0}; +static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {114, 115}; -static const unsigned usb2_muxvals[] = {1, 1}; +static const int usb2_muxvals[] = {1, 1}; static const unsigned port_range0_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, /* PORT0x */ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT1x */ @@ -481,7 +546,7 @@ static const unsigned port_range0_pins[] = { 48, 49, 46, 45, 123, 124, 125, 126, /* PORT11x */ 47, 127, 20, 
56, 22, /* PORT120-124 */ }; -static const unsigned port_range0_muxvals[] = { +static const int port_range0_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ @@ -499,39 +564,41 @@ static const unsigned port_range0_muxvals[] = { static const unsigned port_range1_pins[] = { 116, 117, /* PORT130-131 */ }; -static const unsigned port_range1_muxvals[] = { +static const int port_range1_muxvals[] = { 15, 15, /* PORT130-131 */ }; static const unsigned port_range2_pins[] = { 102, 103, 104, 105, 106, 107, 108, 109, /* PORT14x */ }; -static const unsigned port_range2_muxvals[] = { +static const int port_range2_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ }; static const unsigned port_range3_pins[] = { 23, /* PORT166 */ }; -static const unsigned port_range3_muxvals[] = { +static const int port_range3_muxvals[] = { 15, /* PORT166 */ }; static const unsigned xirq_range0_pins[] = { 128, 129, 130, 131, 132, 133, 134, 135, /* XIRQ0-7 */ 82, 87, 88, 50, 51, /* XIRQ8-12 */ }; -static const unsigned xirq_range0_muxvals[] = { +static const int xirq_range0_muxvals[] = { 0, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */ 14, 14, 14, 14, 14, /* XIRQ8-12 */ }; static const unsigned xirq_range1_pins[] = { 52, 58, /* XIRQ14-15 */ }; -static const unsigned xirq_range1_muxvals[] = { +static const int xirq_range1_muxvals[] = { 14, 14, /* XIRQ14-15 */ }; -static const struct uniphier_pinctrl_group ph1_sld8_groups[] = { +static const struct uniphier_pinctrl_group uniphier_sld8_groups[] = { UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), + UNIPHIER_PINCTRL_GROUP(ether_mii), + UNIPHIER_PINCTRL_GROUP(ether_rmii), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), @@ -539,6 +606,12 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = { UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(sd), + UNIPHIER_PINCTRL_GROUP(system_bus), + UNIPHIER_PINCTRL_GROUP(system_bus_cs1), + UNIPHIER_PINCTRL_GROUP(system_bus_cs2), + UNIPHIER_PINCTRL_GROUP(system_bus_cs3), + UNIPHIER_PINCTRL_GROUP(system_bus_cs4), + UNIPHIER_PINCTRL_GROUP(system_bus_cs5), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart1), UNIPHIER_PINCTRL_GROUP(uart2), @@ -682,12 +755,20 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = { }; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; +static const char * const ether_mii_groups[] = {"ether_mii"}; +static const char * const ether_rmii_groups[] = {"ether_rmii"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char * const i2c2_groups[] = {"i2c2"}; static const char * const i2c3_groups[] = {"i2c3"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const sd_groups[] = {"sd"}; +static const char * const system_bus_groups[] = {"system_bus", + "system_bus_cs1", + "system_bus_cs2", + "system_bus_cs3", + "system_bus_cs4", + "system_bus_cs5"}; static const char * const uart0_groups[] = {"uart0"}; static const char * const uart1_groups[] = {"uart1"}; static const char * const uart2_groups[] = {"uart2"}; @@ -736,14 +817,17 @@ static const char * const xirq_groups[] = { "xirq12", /* none*/ "xirq14", "xirq15", }; -static const struct uniphier_pinmux_function ph1_sld8_functions[] = { +static const struct uniphier_pinmux_function uniphier_sld8_functions[] = { UNIPHIER_PINMUX_FUNCTION(emmc), + 
UNIPHIER_PINMUX_FUNCTION(ether_mii), + UNIPHIER_PINMUX_FUNCTION(ether_rmii), UNIPHIER_PINMUX_FUNCTION(i2c0), UNIPHIER_PINMUX_FUNCTION(i2c1), UNIPHIER_PINMUX_FUNCTION(i2c2), UNIPHIER_PINMUX_FUNCTION(i2c3), UNIPHIER_PINMUX_FUNCTION(nand), UNIPHIER_PINMUX_FUNCTION(sd), + UNIPHIER_PINMUX_FUNCTION(system_bus), UNIPHIER_PINMUX_FUNCTION(uart0), UNIPHIER_PINMUX_FUNCTION(uart1), UNIPHIER_PINMUX_FUNCTION(uart2), @@ -755,43 +839,36 @@ static const struct uniphier_pinmux_function ph1_sld8_functions[] = { UNIPHIER_PINMUX_FUNCTION(xirq), }; -static struct uniphier_pinctrl_socdata ph1_sld8_pindata = { - .groups = ph1_sld8_groups, - .groups_count = ARRAY_SIZE(ph1_sld8_groups), - .functions = ph1_sld8_functions, - .functions_count = ARRAY_SIZE(ph1_sld8_functions), - .mux_bits = 8, - .reg_stride = 4, - .load_pinctrl = false, -}; - -static struct pinctrl_desc ph1_sld8_pinctrl_desc = { - .name = DRIVER_NAME, - .pins = ph1_sld8_pins, - .npins = ARRAY_SIZE(ph1_sld8_pins), - .owner = THIS_MODULE, +static struct uniphier_pinctrl_socdata uniphier_sld8_pindata = { + .pins = uniphier_sld8_pins, + .npins = ARRAY_SIZE(uniphier_sld8_pins), + .groups = uniphier_sld8_groups, + .groups_count = ARRAY_SIZE(uniphier_sld8_groups), + .functions = uniphier_sld8_functions, + .functions_count = ARRAY_SIZE(uniphier_sld8_functions), + .caps = 0, }; -static int ph1_sld8_pinctrl_probe(struct platform_device *pdev) +static int uniphier_sld8_pinctrl_probe(struct platform_device *pdev) { - return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc, - &ph1_sld8_pindata); + return uniphier_pinctrl_probe(pdev, &uniphier_sld8_pindata); } -static const struct of_device_id ph1_sld8_pinctrl_match[] = { +static const struct of_device_id uniphier_sld8_pinctrl_match[] = { + { .compatible = "socionext,uniphier-sld8-pinctrl" }, { .compatible = "socionext,ph1-sld8-pinctrl" }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match); +MODULE_DEVICE_TABLE(of, uniphier_sld8_pinctrl_match); -static struct platform_driver ph1_sld8_pinctrl_driver = { - .probe = ph1_sld8_pinctrl_probe, +static struct platform_driver uniphier_sld8_pinctrl_driver = { + .probe = uniphier_sld8_pinctrl_probe, .driver = { - .name = DRIVER_NAME, - .of_match_table = ph1_sld8_pinctrl_match, + .name = "uniphier-sld8-pinctrl", + .of_match_table = uniphier_sld8_pinctrl_match, }, }; -module_platform_driver(ph1_sld8_pinctrl_driver); +module_platform_driver(uniphier_sld8_pinctrl_driver); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h index a21154f4b453..923f36cb245d 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h @@ -15,14 +15,18 @@ #ifndef __PINCTRL_UNIPHIER_H__ #define __PINCTRL_UNIPHIER_H__ +#include <linux/bitops.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> +struct platform_device; + #define UNIPHIER_PINCTRL_PINMUX_BASE 0x0 #define UNIPHIER_PINCTRL_LOAD_PINMUX 0x700 #define UNIPHIER_PINCTRL_DRVCTRL_BASE 0x800 #define UNIPHIER_PINCTRL_DRV2CTRL_BASE 0x900 +#define UNIPHIER_PINCTRL_DRV3CTRL_BASE 0x980 #define UNIPHIER_PINCTRL_PUPDCTRL_BASE 0xa00 #define UNIPHIER_PINCTRL_IECTRL 0xd00 @@ -39,16 +43,16 @@ #define UNIPHIER_PIN_DRVCTRL_MASK ((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \ - 1) -/* supported drive strength (mA) */ -#define UNIPHIER_PIN_DRV_STR_SHIFT ((UNIPHIER_PIN_DRVCTRL_SHIFT) + \ +/* drive control type 
*/ +#define UNIPHIER_PIN_DRV_TYPE_SHIFT ((UNIPHIER_PIN_DRVCTRL_SHIFT) + \ (UNIPHIER_PIN_DRVCTRL_BITS)) -#define UNIPHIER_PIN_DRV_STR_BITS 3 -#define UNIPHIER_PIN_DRV_STR_MASK ((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \ +#define UNIPHIER_PIN_DRV_TYPE_BITS 3 +#define UNIPHIER_PIN_DRV_TYPE_MASK ((1UL << (UNIPHIER_PIN_DRV_TYPE_BITS)) \ - 1) /* pull-up / pull-down register number */ -#define UNIPHIER_PIN_PUPDCTRL_SHIFT ((UNIPHIER_PIN_DRV_STR_SHIFT) + \ - (UNIPHIER_PIN_DRV_STR_BITS)) +#define UNIPHIER_PIN_PUPDCTRL_SHIFT ((UNIPHIER_PIN_DRV_TYPE_SHIFT) + \ + (UNIPHIER_PIN_DRV_TYPE_BITS)) #define UNIPHIER_PIN_PUPDCTRL_BITS 9 #define UNIPHIER_PIN_PUPDCTRL_MASK ((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\ - 1) @@ -66,13 +70,14 @@ #define UNIPHIER_PIN_IECTRL_NONE (UNIPHIER_PIN_IECTRL_MASK) -/* selectable drive strength */ -enum uniphier_pin_drv_str { - UNIPHIER_PIN_DRV_4_8, /* 2 level control: 4/8 mA */ - UNIPHIER_PIN_DRV_8_12_16_20, /* 4 level control: 8/12/16/20 mA */ - UNIPHIER_PIN_DRV_FIXED_4, /* fixed to 4mA */ - UNIPHIER_PIN_DRV_FIXED_5, /* fixed to 5mA */ - UNIPHIER_PIN_DRV_FIXED_8, /* fixed to 8mA */ +/* drive control type */ +enum uniphier_pin_drv_type { + UNIPHIER_PIN_DRV_1BIT, /* 2 level control: 4/8 mA */ + UNIPHIER_PIN_DRV_2BIT, /* 4 level control: 8/12/16/20 mA */ + UNIPHIER_PIN_DRV_3BIT, /* 8 level control: 4/5/7/9/11/12/14/16 mA */ + UNIPHIER_PIN_DRV_FIXED4, /* fixed to 4mA */ + UNIPHIER_PIN_DRV_FIXED5, /* fixed to 5mA */ + UNIPHIER_PIN_DRV_FIXED8, /* fixed to 8mA */ UNIPHIER_PIN_DRV_NONE, /* no support (input only pin) */ }; @@ -89,17 +94,17 @@ enum uniphier_pin_pull_dir { (((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT)) #define UNIPHIER_PIN_DRVCTRL(x) \ (((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT)) -#define UNIPHIER_PIN_DRV_STR(x) \ - (((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT)) +#define UNIPHIER_PIN_DRV_TYPE(x) \ + (((x) & (UNIPHIER_PIN_DRV_TYPE_MASK)) << (UNIPHIER_PIN_DRV_TYPE_SHIFT)) #define UNIPHIER_PIN_PUPDCTRL(x) \ (((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT)) #define UNIPHIER_PIN_PULL_DIR(x) \ (((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT)) -#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\ +#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_type, pupdctrl, pull_dir)\ (UNIPHIER_PIN_IECTRL(iectrl) | \ UNIPHIER_PIN_DRVCTRL(drvctrl) | \ - UNIPHIER_PIN_DRV_STR(drv_str) | \ + UNIPHIER_PIN_DRV_TYPE(drv_type) | \ UNIPHIER_PIN_PUPDCTRL(pupdctrl) | \ UNIPHIER_PIN_PULL_DIR(pull_dir)) @@ -115,10 +120,10 @@ static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data) UNIPHIER_PIN_DRVCTRL_MASK; } -static inline unsigned int uniphier_pin_get_drv_str(void *drv_data) +static inline unsigned int uniphier_pin_get_drv_type(void *drv_data) { - return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) & - UNIPHIER_PIN_DRV_STR_MASK; + return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_TYPE_SHIFT) & + UNIPHIER_PIN_DRV_TYPE_MASK; } static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data) @@ -143,7 +148,7 @@ struct uniphier_pinctrl_group { const char *name; const unsigned *pins; unsigned num_pins; - const unsigned *muxvals; + const int *muxvals; enum uniphier_pinmux_gpio_range_type range_type; }; @@ -154,13 +159,15 @@ struct uniphier_pinmux_function { }; struct uniphier_pinctrl_socdata { + const struct pinctrl_pin_desc *pins; + unsigned int npins; const struct uniphier_pinctrl_group *groups; int groups_count; const struct 
uniphier_pinmux_function *functions; int functions_count; - unsigned mux_bits; - unsigned reg_stride; - bool load_pinctrl; + unsigned int caps; +#define UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL BIT(1) +#define UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE BIT(0) }; #define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g) \ @@ -205,11 +212,7 @@ struct uniphier_pinctrl_socdata { .num_groups = ARRAY_SIZE(func##_groups), \ } -struct platform_device; -struct pinctrl_desc; - int uniphier_pinctrl_probe(struct platform_device *pdev, - struct pinctrl_desc *desc, struct uniphier_pinctrl_socdata *socdata); #endif /* __PINCTRL_UNIPHIER_H__ */ diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 4034d2d4c507..a66be137324c 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -31,19 +31,21 @@ /** * DOC: Overview * - * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx - * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp - * * gmux is a microcontroller built into the MacBook Pro to support dual GPUs: - * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas. + * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas. * * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has * dual GPUs but no built-in display.) * * gmux is connected to the LPC bus of the southbridge. Its I/O ports are * accessed differently depending on the microcontroller: Driver functions - * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux - * are infixed `_index_`. + * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux + * are infixed ``_index_``. + * + * .. _Lattice XP2: + * http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx + * .. _Renesas R4F2113: + * http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp */ struct apple_gmux_data { @@ -272,15 +274,15 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) /** * DOC: Backlight control * - * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf - * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf - * * On single GPU MacBooks, the PWM signal for the backlight is generated by * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended * to conserve energy. Hence the PWM signal needs to be generated by a separate * backlight driver which is controlled by gmux. The earliest generation - * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models - * use a {4}[TI LP8545]. + * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models + * use a `TI LP8545`_. + * + * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf + * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf */ static int gmux_get_brightness(struct backlight_device *bd) @@ -312,28 +314,20 @@ static const struct backlight_ops gmux_bl_ops = { /** * DOC: Graphics mux * - * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf - * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf - * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf - * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf - * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf - * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf - * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf - * * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes * either of them to the panel. 
One of the tricks gmux has up its sleeve is * to lengthen the blanking interval of its output during a switch to * synchronize it with the GPU switched to. This allows for a flicker-free - * switch that is imperceptible by the user ({5}[US 8,687,007 B2]). + * switch that is imperceptible by the user (`US 8,687,007 B2`_). * * On retinas, muxing is no longer done by gmux itself, but by a separate * chip which is controlled by gmux. The chip is triple sourced, it is - * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412]. + * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_. * The panel is driven with eDP instead of LVDS since the pixel clock * required for retina resolution exceeds LVDS' limits. * * Pre-retinas are able to switch the panel's DDC pins separately. - * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux. + * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux. * The inactive GPU can thus probe the panel's EDID without switching over * the entire panel. Retinas lack this functionality as the chips used for * eDP muxing are incapable of switching the AUX channel separately (see @@ -344,15 +338,15 @@ static const struct backlight_ops gmux_bl_ops = { * * The external DP port is only fully switchable on the first two unibody * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an - * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the + * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s. * * The following MacBook Pro generations replaced the external DP port with a * combined DP/Thunderbolt port and lost the ability to switch it between GPUs, * connecting it either to the discrete GPU or the Thunderbolt controller. * Oddly enough, while the full port is no longer switchable, AUX and HPD - * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas - * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the + * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas + * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the * control of gmux. Since the integrated GPU is missing the main link, * external displays appear to it as phantoms which fail to link-train. * @@ -365,10 +359,19 @@ static const struct backlight_ops gmux_bl_ops = { * of this feature. * * gmux' initial switch state on bootup is user configurable via the EFI - * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte, + * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte, * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to * switch the panel and the external DP connector and allocates a framebuffer * for the selected GPU. + * + * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf + * .. _NXP CBTL06141: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf + * .. _NXP CBTL06142: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf + * .. _TI HD3SS212: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf + * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf + * .. _TI SN74LV4066A: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf + * .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf + * .. 
_TI TS3DS10224: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf */ static void gmux_read_switch_state(struct apple_gmux_data *gmux_data) diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 6f497e80c9df..b86e1bcaa055 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -85,7 +85,7 @@ * platform device and to export resources for those functions. */ #define TCO_DEVICE_NAME "iTCO_wdt" -#define SMI_EN_OFFSET 0x30 +#define SMI_EN_OFFSET 0x40 #define SMI_EN_SIZE 4 #define TCO_BASE_OFFSET 0x60 #define TCO_REGS_SIZE 16 @@ -94,6 +94,8 @@ #define TELEM_SSRAM_SIZE 240 #define TELEM_PMC_SSRAM_OFFSET 0x1B00 #define TELEM_PUNIT_SSRAM_OFFSET 0x1A00 +#define TCO_PMC_OFFSET 0x8 +#define TCO_PMC_SIZE 0x4 static const int iTCO_version = 3; @@ -502,7 +504,7 @@ static struct resource tco_res[] = { static struct itco_wdt_platform_data tco_info = { .name = "Apollo Lake SoC", - .version = 3, + .version = 5, }; #define TELEMETRY_RESOURCE_PUNIT_SSRAM 0 @@ -572,8 +574,8 @@ static int ipc_create_tco_device(void) res->end = res->start + SMI_EN_SIZE - 1; res = tco_res + TCO_RESOURCE_GCR_MEM; - res->start = ipcdev.gcr_base; - res->end = res->start + ipcdev.gcr_size - 1; + res->start = ipcdev.gcr_base + TCO_PMC_OFFSET; + res->end = res->start + TCO_PMC_SIZE - 1; ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); if (ret) { diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 7053abced0bc..3bfac539334b 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -46,6 +46,16 @@ config POWER_RESET_AXXIA Say Y if you have an Axxia family SoC. +config POWER_RESET_BRCMKONA + bool "Broadcom Kona reset driver" + depends on ARM || COMPILE_TEST + default ARCH_BCM_MOBILE + help + This driver provides restart support for Broadcom Kona chips. + + Say Y here if you have a Broadcom Kona-based board and you wish + to have restart support. + config POWER_RESET_BRCMSTB bool "Broadcom STB reset driver" depends on ARM || MIPS || COMPILE_TEST diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index d6b2560d5c4a..1be307c7fc25 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o obj-$(CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC) += at91-sama5d2_shdwc.o obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o +obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c new file mode 100644 index 000000000000..8eaa959d8be6 --- /dev/null +++ b/drivers/power/reset/brcm-kona-reset.c @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/reboot.h> + +#define RSTMGR_REG_WR_ACCESS_OFFSET 0 +#define RSTMGR_REG_CHIP_SOFT_RST_OFFSET 4 + +#define RSTMGR_WR_PASSWORD 0xa5a5 +#define RSTMGR_WR_PASSWORD_SHIFT 8 +#define RSTMGR_WR_ACCESS_ENABLE 1 + +static void __iomem *kona_reset_base; + +static int kona_reset_handler(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + /* + * A soft reset is triggered by writing a 0 to bit 0 of the soft reset + * register. To write to that register we must first write the password + * and the enable bit in the write access enable register. + */ + writel((RSTMGR_WR_PASSWORD << RSTMGR_WR_PASSWORD_SHIFT) | + RSTMGR_WR_ACCESS_ENABLE, + kona_reset_base + RSTMGR_REG_WR_ACCESS_OFFSET); + writel(0, kona_reset_base + RSTMGR_REG_CHIP_SOFT_RST_OFFSET); + + return NOTIFY_DONE; +} + +static struct notifier_block kona_reset_nb = { + .notifier_call = kona_reset_handler, + .priority = 128, +}; + +static int kona_reset_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + kona_reset_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(kona_reset_base)) + return PTR_ERR(kona_reset_base); + + return register_restart_handler(&kona_reset_nb); +} + +static const struct of_device_id of_match[] = { + { .compatible = "brcm,bcm21664-resetmgr" }, + {}, +}; + +static struct platform_driver bcm_kona_reset_driver = { + .probe = kona_reset_probe, + .driver = { + .name = "brcm-kona-reset", + .of_match_table = of_match, + }, +}; + +builtin_platform_driver(bcm_kona_reset_driver); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 6a9bf7089373..102f95a09460 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -74,8 +74,8 @@ static ssize_t vexpress_reset_active_store(struct device *dev, return err ? 
err : count; } -DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show, - vexpress_reset_active_store); +static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show, + vexpress_reset_active_store); enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT }; diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index 7d335422cfda..26ec24e457b1 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -155,7 +155,7 @@ static int clps711x_pwm_remove(struct platform_device *pdev) } static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = { - { .compatible = "cirrus,clps711x-pwm", }, + { .compatible = "cirrus,ep7209-pwm", }, { } }; MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids); diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index 3e95090cd7cf..5ad42f33e70c 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -245,7 +245,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) struct pwm_omap_dmtimer_chip *omap; struct pwm_omap_dmtimer_pdata *pdata; pwm_omap_dmtimer *dm_timer; - u32 prescaler; + u32 v; int status; pdata = dev_get_platdata(&pdev->dev); @@ -306,10 +306,12 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) if (pm_runtime_active(&omap->dm_timer_pdev->dev)) omap->pdata->stop(omap->dm_timer); - /* setup dmtimer prescaler */ - if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler", - &prescaler)) - omap->pdata->set_prescaler(omap->dm_timer, prescaler); + if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler", &v)) + omap->pdata->set_prescaler(omap->dm_timer, v); + + /* setup dmtimer clock source */ + if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v)) + omap->pdata->set_source(omap->dm_timer, v); omap->chip.dev = &pdev->dev; omap->chip.ops = &pwm_omap_dmtimer_ops; diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index b5a10d3c92c7..d6d2f20c4597 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -67,6 +67,15 @@ config RAPIDIO_ENUM_BASIC endchoice +config RAPIDIO_CHMAN + tristate "RapidIO Channelized Messaging driver" + depends on RAPIDIO + help + This option includes the RapidIO channelized messaging driver, + which provides a socket-like interface to allow sharing of a + single RapidIO messaging mailbox between multiple user-space + applications. + See "Documentation/rapidio/rio_cm.txt" for the driver description.
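To make the socket-like model concrete: user-space applications reach this driver through the /dev/rio_cm character device created by rio_cm.c (added later in this patch). Below is a minimal, hypothetical client-side sketch; the RIO_CM_* ioctl names and structure layouts are assumed from the driver's uapi header (include/uapi/linux/rio_cm_cdev.h), which is not shown in this diff, so treat the exact field names and the example channel/destination IDs as illustrative assumptions rather than a reference.

/* Sketch only: assumes RIO_CM_* ioctls and structs from the uapi header. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/rio_cm_cdev.h>	/* assumed uapi header for RIO_CM_* ioctls */

int main(void)
{
	__u16 ch = 0;	/* 0 = let the driver pick an ID at/above chstart */
	int fd = open("/dev/rio_cm", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch) < 0)
		goto out;

	/* Connect to a (hypothetical) peer channel on a remote endpoint. */
	struct rio_cm_channel chan = {
		.id = ch,
		.remote_channel = 200,	/* example server channel */
		.remote_destid = 0x01,	/* example remote destID */
		.mport_id = 0,
	};
	if (ioctl(fd, RIO_CM_CHAN_CONNECT, &chan) == 0) {
		char payload[] = "ping";
		struct rio_cm_msg msg = {
			.ch_num = ch,
			.size = sizeof(payload),
			.msg = (__u64)(unsigned long)payload,
		};
		ioctl(fd, RIO_CM_CHAN_SEND, &msg);
	}

	ioctl(fd, RIO_CM_CHAN_CLOSE, &ch);
out:
	close(fd);
	return 0;
}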
+ config RAPIDIO_MPORT_CDEV tristate "RapidIO /dev mport device driver" depends on RAPIDIO diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index 6271ada6993f..74dcea45ad49 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_RAPIDIO) += rapidio.o rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o +obj-$(CONFIG_RAPIDIO_CHMAN) += rio_cm.o obj-$(CONFIG_RAPIDIO) += switches/ obj-$(CONFIG_RAPIDIO) += devices/ diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index e165b7ce29d7..436dfe871d32 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1813,7 +1813,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = rval & 0xffff; rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, - hopcount); + hopcount, &rdev->phys_rmap); rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, hopcount, RIO_EFB_ERR_MGMNT); @@ -2242,7 +2242,7 @@ static void mport_mm_open(struct vm_area_struct *vma) { struct rio_mport_mapping *map = vma->vm_private_data; -rmcd_debug(MMAP, "0x%pad", &map->phys_addr); + rmcd_debug(MMAP, "%pad", &map->phys_addr); kref_get(&map->ref); } @@ -2250,7 +2250,7 @@ static void mport_mm_close(struct vm_area_struct *vma) { struct rio_mport_mapping *map = vma->vm_private_data; -rmcd_debug(MMAP, "0x%pad", &map->phys_addr); + rmcd_debug(MMAP, "%pad", &map->phys_addr); mutex_lock(&map->md->buf_mutex); kref_put(&map->ref, mport_release_mapping); mutex_unlock(&map->md->buf_mutex); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index b5b455614f8a..32f0f014a067 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -37,11 +37,20 @@ #include "tsi721.h" #ifdef DEBUG -u32 dbg_level = DBG_INIT | DBG_EXIT; +u32 dbg_level; module_param(dbg_level, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif +static int pcie_mrrs = -1; +module_param(pcie_mrrs, int, S_IRUGO); +MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)"); + +static u8 mbox_sel = 0x0f; +module_param(mbox_sel, byte, S_IRUGO); +MODULE_PARM_DESC(mbox_sel, + "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); + static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); @@ -1081,7 +1090,7 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) * from rstart to lstart. 
*/ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, - u64 rstart, u32 size, u32 flags) + u64 rstart, u64 size, u32 flags) { struct tsi721_device *priv = mport->priv; int i, avail = -1; @@ -1094,6 +1103,10 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, struct tsi721_ib_win_mapping *map = NULL; int ret = -EBUSY; + /* Max IBW size supported by HW is 16GB */ + if (size > 0x400000000UL) + return -EINVAL; + if (direct) { /* Calculate minimal acceptable window size and base address */ @@ -1101,15 +1114,15 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, ibw_start = lstart & ~(ibw_size - 1); tsi_debug(IBW, &priv->pdev->dev, - "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx", + "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx", rstart, &lstart, size, ibw_start); while ((lstart + size) > (ibw_start + ibw_size)) { ibw_size *= 2; ibw_start = lstart & ~(ibw_size - 1); - if (ibw_size > 0x80000000) { /* Limit max size to 2GB */ + /* Check for crossing IBW max size 16GB */ + if (ibw_size > 0x400000000UL) return -EBUSY; - } } loc_start = ibw_start; @@ -1120,7 +1133,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, } else { tsi_debug(IBW, &priv->pdev->dev, - "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x", + "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx", rstart, &lstart, size); if (!is_power_of_2(size) || size < 0x1000 || @@ -1215,7 +1228,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, priv->ibwin_cnt--; tsi_debug(IBW, &priv->pdev->dev, - "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx", + "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx", i, ibw_start, &loc_start, ibw_size); return 0; @@ -1237,7 +1250,7 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, int i; tsi_debug(IBW, &priv->pdev->dev, - "Unmap IBW mapped to PCIe_0x%pad", &lstart); + "Unmap IBW mapped to PCIe_%pad", &lstart); /* Search for matching active inbound translation window */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { @@ -1877,6 +1890,11 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, goto out; } + if ((mbox_sel & (1 << mbox)) == 0) { + rc = -ENODEV; + goto out; + } + priv->omsg_ring[mbox].dev_id = dev_id; priv->omsg_ring[mbox].size = entries; priv->omsg_ring[mbox].sts_rdptr = 0; @@ -2161,6 +2179,11 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, goto out; } + if ((mbox_sel & (1 << mbox)) == 0) { + rc = -ENODEV; + goto out; + } + /* Initialize IB Messaging Ring */ priv->imsg_ring[mbox].dev_id = dev_id; priv->imsg_ring[mbox].size = entries; @@ -2532,11 +2555,11 @@ static int tsi721_query_mport(struct rio_mport *mport, struct tsi721_device *priv = mport->priv; u32 rval; - rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0))); + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0)); if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { - rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0))); + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0)); attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; - rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0))); + rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0)); attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; } else attr->link_speed = RIO_LINK_DOWN; @@ -2650,9 +2673,9 @@ static int tsi721_setup_mport(struct tsi721_device *priv) mport->ops = 
&tsi721_rio_ops; mport->index = 0; mport->sys_size = 0; /* small system */ - mport->phy_type = RIO_PHY_SERIAL; mport->priv = (void *)priv; mport->phys_efptr = 0x100; + mport->phys_rmap = 1; mport->dev.parent = &pdev->dev; mport->dev.release = tsi721_mport_release; @@ -2840,6 +2863,16 @@ static int tsi721_probe(struct pci_dev *pdev, pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); + /* Override PCIe Maximum Read Request Size setting if requested */ + if (pcie_mrrs >= 0) { + if (pcie_mrrs <= 5) + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12); + else + tsi_info(&pdev->dev, + "Invalid MRRS override value %d", pcie_mrrs); + } + /* Adjust PCIe completion timeout. */ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 5456dbddc929..5941437cbdd1 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -661,7 +661,7 @@ enum dma_rtype { */ #define TSI721_DMA_CHNUM TSI721_DMA_MAXCH -#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ +#define TSI721_DMACH_MAINT 7 /* DMA channel for maint requests */ #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 155cae1e62de..e2a418598129 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -36,18 +36,26 @@ #include "tsi721.h" -#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */ - #ifdef CONFIG_PCI_MSI static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); #endif static int tsi721_submit_sg(struct tsi721_tx_desc *desc); static unsigned int dma_desc_per_channel = 128; -module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO); +module_param(dma_desc_per_channel, uint, S_IRUGO); MODULE_PARM_DESC(dma_desc_per_channel, "Number of DMA descriptors per channel (default: 128)"); +static unsigned int dma_txqueue_sz = 16; +module_param(dma_txqueue_sz, uint, S_IRUGO); +MODULE_PARM_DESC(dma_txqueue_sz, + "DMA Transactions Queue Size (default: 16)"); + +static u8 dma_sel = 0x7f; +module_param(dma_sel, byte, S_IRUGO); +MODULE_PARM_DESC(dma_sel, + "DMA Channel Selection Mask (default: 0x7f = all)"); + static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) { return container_of(chan, struct tsi721_bdma_chan, dchan); @@ -718,6 +726,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) cookie = dma_cookie_assign(txd); desc->status = DMA_IN_PROGRESS; list_add_tail(&desc->desc_node, &bdma_chan->queue); + tsi721_advance_work(bdma_chan, NULL); spin_unlock_bh(&bdma_chan->lock); return cookie; @@ -732,7 +741,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); if (bdma_chan->bd_base) - return TSI721_DMA_TX_QUEUE_SZ; + return dma_txqueue_sz; /* Initialize BDMA channel */ if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { @@ -742,7 +751,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) } /* Allocate queue of transaction descriptors */ - desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), + desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc), GFP_ATOMIC); if (!desc) { tsi_err(&dchan->dev->device, @@ -754,7 
+763,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) bdma_chan->tx_desc = desc; - for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) { + for (i = 0; i < dma_txqueue_sz; i++) { dma_async_tx_descriptor_init(&desc[i].txd, dchan); desc[i].txd.tx_submit = tsi721_tx_submit; desc[i].txd.flags = DMA_CTRL_ACK; @@ -766,7 +775,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) bdma_chan->active = true; tsi721_bdma_interrupt_enable(bdma_chan, 1); - return TSI721_DMA_TX_QUEUE_SZ; + return dma_txqueue_sz; } static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) @@ -962,7 +971,7 @@ void tsi721_dma_stop_all(struct tsi721_device *priv) int i; for (i = 0; i < TSI721_DMA_MAXCH; i++) { - if (i != TSI721_DMACH_MAINT) + if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) tsi721_dma_stop(&priv->bdma[i]); } } @@ -979,7 +988,7 @@ int tsi721_register_dma(struct tsi721_device *priv) for (i = 0; i < TSI721_DMA_MAXCH; i++) { struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; - if (i == TSI721_DMACH_MAINT) + if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0) continue; bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index a63a380809d1..23429bdaca84 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -49,15 +49,6 @@ struct rio_id_table { static int next_destid = 0; static int next_comptag = 1; -static int rio_mport_phys_table[] = { - RIO_EFB_PAR_EP_ID, - RIO_EFB_PAR_EP_REC_ID, - RIO_EFB_SER_EP_ID, - RIO_EFB_SER_EP_REC_ID, - -1, -}; - - /** * rio_destid_alloc - Allocate next available destID for given network * @net: RIO network @@ -380,10 +371,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = result & 0xffff; rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, - hopcount); + hopcount, &rdev->phys_rmap); + pr_debug("RIO: %s Register Map %d device\n", + __func__, rdev->phys_rmap); rdev->em_efptr = rio_mport_get_feature(port, 0, destid, hopcount, RIO_EFB_ERR_MGMNT); + if (!rdev->em_efptr) + rdev->em_efptr = rio_mport_get_feature(port, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT_HS); } rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, @@ -445,7 +441,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); } else { if (do_enum) - /*Enable Input Output Port (transmitter reviever)*/ + /*Enable Input Output Port (transmitter receiver)*/ rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, @@ -481,10 +477,8 @@ cleanup: /** * rio_sport_is_active- Tests if a switch port has an active connection. - * @port: Master port to send transaction - * @destid: Associated destination ID for switch - * @hopcount: Hopcount to reach switch - * @sport: Switch port number + * @rdev: RapidIO device object + * @sp: Switch port number * * Reads the port error status CSR for a particular switch port to * determine if the port has an active link. Returns @@ -492,31 +486,12 @@ cleanup: * inactive. 
*/ static int -rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) +rio_sport_is_active(struct rio_dev *rdev, int sp) { u32 result = 0; - u32 ext_ftr_ptr; - ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0); - - while (ext_ftr_ptr) { - rio_mport_read_config_32(port, destid, hopcount, - ext_ftr_ptr, &result); - result = RIO_GET_BLOCK_ID(result); - if ((result == RIO_EFB_SER_EP_FREE_ID) || - (result == RIO_EFB_SER_EP_FREE_ID_V13P) || - (result == RIO_EFB_SER_EP_FREC_ID)) - break; - - ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, - ext_ftr_ptr); - } - - if (ext_ftr_ptr) - rio_mport_read_config_32(port, destid, hopcount, - ext_ftr_ptr + - RIO_PORT_N_ERR_STS_CSR(sport), - &result); + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp), + &result); return result & RIO_PORT_N_ERR_STS_PORT_OK; } @@ -655,9 +630,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, cur_destid = next_destid; - if (rio_sport_is_active - (port, RIO_ANY_DESTID(port->sys_size), hopcount, - port_num)) { + if (rio_sport_is_active(rdev, port_num)) { pr_debug( "RIO: scanning device on port %d\n", port_num); @@ -785,8 +758,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) continue; - if (rio_sport_is_active - (port, destid, hopcount, port_num)) { + if (rio_sport_is_active(rdev, port_num)) { pr_debug( "RIO: scanning device on port %d\n", port_num); @@ -831,21 +803,11 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, static int rio_mport_is_active(struct rio_mport *port) { u32 result = 0; - u32 ext_ftr_ptr; - int *entry = rio_mport_phys_table; - - do { - if ((ext_ftr_ptr = - rio_mport_get_feature(port, 1, 0, 0, *entry))) - break; - } while (*++entry >= 0); - - if (ext_ftr_ptr) - rio_local_read_config_32(port, - ext_ftr_ptr + - RIO_PORT_N_ERR_STS_CSR(port->index), - &result); + rio_local_read_config_32(port, + port->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap), + &result); return result & RIO_PORT_N_ERR_STS_PORT_OK; } diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 0dcaa660cba1..37042858c2db 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -268,6 +268,12 @@ int rio_request_inb_mbox(struct rio_mport *mport, mport->inb_msg[mbox].mcback = minb; rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); + if (rc) { + mport->inb_msg[mbox].mcback = NULL; + mport->inb_msg[mbox].res = NULL; + release_resource(res); + kfree(res); + } } else rc = -ENOMEM; @@ -285,13 +291,22 @@ int rio_request_inb_mbox(struct rio_mport *mport, */ int rio_release_inb_mbox(struct rio_mport *mport, int mbox) { - if (mport->ops->close_inb_mbox) { - mport->ops->close_inb_mbox(mport, mbox); + int rc; - /* Release the mailbox resource */ - return release_resource(mport->inb_msg[mbox].res); - } else - return -ENOSYS; + if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res) + return -EINVAL; + + mport->ops->close_inb_mbox(mport, mbox); + mport->inb_msg[mbox].mcback = NULL; + + rc = release_resource(mport->inb_msg[mbox].res); + if (rc) + return rc; + + kfree(mport->inb_msg[mbox].res); + mport->inb_msg[mbox].res = NULL; + + return 0; } /** @@ -336,6 +351,12 @@ int rio_request_outb_mbox(struct rio_mport *mport, mport->outb_msg[mbox].mcback = moutb; rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); + if (rc) { + mport->outb_msg[mbox].mcback = NULL; + mport->outb_msg[mbox].res = NULL; + release_resource(res); + 
kfree(res); + } } else rc = -ENOMEM; @@ -353,13 +374,22 @@ int rio_request_outb_mbox(struct rio_mport *mport, */ int rio_release_outb_mbox(struct rio_mport *mport, int mbox) { - if (mport->ops->close_outb_mbox) { - mport->ops->close_outb_mbox(mport, mbox); + int rc; - /* Release the mailbox resource */ - return release_resource(mport->outb_msg[mbox].res); - } else - return -ENOSYS; + if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res) + return -EINVAL; + + mport->ops->close_outb_mbox(mport, mbox); + mport->outb_msg[mbox].mcback = NULL; + + rc = release_resource(mport->outb_msg[mbox].res); + if (rc) + return rc; + + kfree(mport->outb_msg[mbox].res); + mport->outb_msg[mbox].res = NULL; + + return 0; } /** @@ -756,10 +786,11 @@ EXPORT_SYMBOL_GPL(rio_unmap_outb_region); * @local: Indicate a local master port or remote device access * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device + * @rmap: pointer to location to store register map type info */ u32 rio_mport_get_physefb(struct rio_mport *port, int local, - u16 destid, u8 hopcount) + u16 destid, u8 hopcount, u32 *rmap) { u32 ext_ftr_ptr; u32 ftr_header; @@ -777,14 +808,21 @@ rio_mport_get_physefb(struct rio_mport *port, int local, ftr_header = RIO_GET_BLOCK_ID(ftr_header); switch (ftr_header) { - case RIO_EFB_SER_EP_ID_V13P: - case RIO_EFB_SER_EP_REC_ID_V13P: - case RIO_EFB_SER_EP_FREE_ID_V13P: case RIO_EFB_SER_EP_ID: case RIO_EFB_SER_EP_REC_ID: case RIO_EFB_SER_EP_FREE_ID: - case RIO_EFB_SER_EP_FREC_ID: + case RIO_EFB_SER_EP_M1_ID: + case RIO_EFB_SER_EP_SW_M1_ID: + case RIO_EFB_SER_EPF_M1_ID: + case RIO_EFB_SER_EPF_SW_M1_ID: + *rmap = 1; + return ext_ftr_ptr; + case RIO_EFB_SER_EP_M2_ID: + case RIO_EFB_SER_EP_SW_M2_ID: + case RIO_EFB_SER_EPF_M2_ID: + case RIO_EFB_SER_EPF_SW_M2_ID: + *rmap = 2; return ext_ftr_ptr; default: @@ -843,16 +881,16 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) u32 regval; rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), - &regval); + RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), + &regval); if (lock) regval |= RIO_PORT_N_CTL_LOCKOUT; else regval &= ~RIO_PORT_N_CTL_LOCKOUT; rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), - regval); + RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), + regval); return 0; } EXPORT_SYMBOL_GPL(rio_set_port_lockout); @@ -876,6 +914,7 @@ int rio_enable_rx_tx_port(struct rio_mport *port, #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS u32 regval; u32 ext_ftr_ptr; + u32 rmap; /* * enable rx input tx output port @@ -883,34 +922,29 @@ int rio_enable_rx_tx_port(struct rio_mport *port, pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " "%d, port_num = %d)\n", local, destid, hopcount, port_num); - ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, + hopcount, &rmap); if (local) { - rio_local_read_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), + rio_local_read_config_32(port, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), + &regval); } else { if (rio_mport_read_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0) + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), + &regval) < 0) return -EIO; } - if (regval & RIO_PORT_N_CTL_P_TYP_SER) { - /* serial */ - regval = regval | RIO_PORT_N_CTL_EN_RX_SER - | RIO_PORT_N_CTL_EN_TX_SER; - } else { - /* parallel */ - regval = regval | RIO_PORT_N_CTL_EN_RX_PAR - | RIO_PORT_N_CTL_EN_TX_PAR; - } + regval = regval | RIO_PORT_N_CTL_EN_RX |
RIO_PORT_N_CTL_EN_TX; if (local) { - rio_local_write_config_32(port, ext_ftr_ptr + - RIO_PORT_N_CTL_CSR(0), regval); + rio_local_write_config_32(port, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval); } else { if (rio_mport_write_config_32(port, destid, hopcount, - ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), + regval) < 0) return -EIO; } #endif @@ -1012,14 +1046,14 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) /* Read from link maintenance response register * to clear valid bit */ rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum), &regval); udelay(50); } /* Issue Input-status command */ rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum), + RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum), RIO_MNT_REQ_CMD_IS); /* Exit if the response is not expected */ @@ -1030,7 +1064,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) while (checkcount--) { udelay(50); rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum), &regval); if (regval & RIO_PORT_N_MNT_RSP_RVAL) { *lnkresp = regval; @@ -1046,6 +1080,13 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) * @rdev: Pointer to RIO device control structure * @pnum: Switch port number to clear errors * @err_status: port error status (if 0 reads register from device) + * + * TODO: Currently this routine is not compatible with the recovery process + * specified for idt_gen3 RapidIO switch devices. It has to be reviewed + * to implement a universal recovery process that is compatible with the + * full range of available devices. + * The IDT gen3 switch driver now implements an HW-specific error handler + * that issues a soft port reset to the port to reset ERR_STOP bits and ackIDs. */ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) { @@ -1055,10 +1096,10 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) if (err_status == 0) rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), &err_status); - if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) { + if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) { pr_debug("RIO_EM: servicing Output Error-Stopped state\n"); /* * Send a Link-Request/Input-Status control symbol @@ -1073,7 +1114,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5; far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT; rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), + RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum), &regval); pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval); near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24; @@ -1091,43 +1132,43 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) * far inbound. */ rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), + RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum), (near_ackid << 24) | (far_ackid << 8) | far_ackid); /* Align far outstanding/outbound ackIDs with * near inbound.
*/ far_ackid++; - if (nextdev) - rio_write_config_32(nextdev, - nextdev->phys_efptr + - RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)), - (far_ackid << 24) | - (near_ackid << 8) | near_ackid); - else - pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n"); + if (!nextdev) { + pr_debug("RIO_EM: nextdev pointer == NULL\n"); + goto rd_err; + } + + rio_write_config_32(nextdev, + RIO_DEV_PORT_N_ACK_STS_CSR(nextdev, + RIO_GET_PORT_NUM(nextdev->swpinfo)), + (far_ackid << 24) | + (near_ackid << 8) | near_ackid); } rd_err: - rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), - &err_status); + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), + &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); } - if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) { + if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) { pr_debug("RIO_EM: servicing Input Error-Stopped state\n"); rio_get_input_status(nextdev, RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); udelay(50); - rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), - &err_status); + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), + &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); } - return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | - RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0; + return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0; } /** @@ -1227,9 +1268,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) rdev->rswitch->ops->em_handle(rdev, portnum); - rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), - &err_status); + rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + &err_status); pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { @@ -1246,8 +1286,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) * Depending on the link partner state, two attempts * may be needed for successful recovery. 
*/ - if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | - RIO_PORT_N_ERR_STS_PW_INP_ES)) { + if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES)) { if (rio_clr_err_stopped(rdev, portnum, err_status)) rio_clr_err_stopped(rdev, portnum, 0); } @@ -1257,10 +1297,18 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) rdev->rswitch->port_ok &= ~(1 << portnum); rio_set_port_lockout(rdev, portnum, 1); + if (rdev->phys_rmap == 1) { rio_write_config_32(rdev, - rdev->phys_efptr + - RIO_PORT_N_ACK_STS_CSR(portnum), + RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum), RIO_PORT_N_ACK_CLEAR); + } else { + rio_write_config_32(rdev, + RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum), + RIO_PORT_N_OB_ACK_CLEAR); + rio_write_config_32(rdev, + RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum), + 0); + } /* Schedule Extraction Service */ pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", @@ -1289,9 +1337,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) } /* Clear remaining error bits and Port-Write Pending bit */ - rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), - err_status); + rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), + err_status); return 0; } @@ -1342,20 +1389,7 @@ EXPORT_SYMBOL_GPL(rio_mport_get_efb); * Tell if a device supports a given RapidIO capability. * Returns the offset of the requested extended feature * block within the device's RIO configuration space or - * 0 in case the device does not support it. Possible - * values for @ftr: - * - * %RIO_EFB_PAR_EP_ID LP/LVDS EP Devices - * - * %RIO_EFB_PAR_EP_REC_ID LP/LVDS EP Recovery Devices - * - * %RIO_EFB_PAR_EP_FREE_ID LP/LVDS EP Free Devices - * - * %RIO_EFB_SER_EP_ID LP/Serial EP Devices - * - * %RIO_EFB_SER_EP_REC_ID LP/Serial EP Recovery Devices - * - * %RIO_EFB_SER_EP_FREE_ID LP/Serial EP Free Devices + * 0 in case the device does not support it. */ u32 rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, @@ -1848,7 +1882,9 @@ EXPORT_SYMBOL_GPL(rio_release_dma); * Initializes RapidIO capable DMA channel for the specified data transfer. * Uses DMA channel private extension to pass information related to remote * target RIO device. - * Returns pointer to DMA transaction descriptor or NULL if failed. + * + * Returns: pointer to DMA transaction descriptor if successful, + * error-valued pointer or NULL if failed. */ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, u16 destid, struct rio_dma_data *data, @@ -1883,7 +1919,9 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); * Initializes RapidIO capable DMA channel for the specified data transfer. * Uses DMA channel private extension to pass information related to remote * target RIO device. - * Returns pointer to DMA transaction descriptor or NULL if failed. + * + * Returns: pointer to DMA transaction descriptor if successful, + * error-valued pointer or NULL if failed. 
*/ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, struct dma_chan *dchan, struct rio_dma_data *data, diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index 625d09add001..9796b3fee70d 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -22,7 +22,7 @@ extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, u8 hopcount, int ftr); extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, - u16 destid, u8 hopcount); + u16 destid, u8 hopcount, u32 *rmap); extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, u8 hopcount, u32 from); extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c new file mode 100644 index 000000000000..cecc15a880de --- /dev/null +++ b/drivers/rapidio/rio_cm.c @@ -0,0 +1,2366 @@ +/* + * rio_cm - RapidIO Channelized Messaging Driver + * + * Copyright 2013-2016 Integrated Device Technology, Inc. + * Copyright (c) 2015, Prodrive Technologies + * Copyright (c) 2015, RapidIO Trade Association + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, + * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE + * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/reboot.h> +#include <linux/bitops.h> +#include <linux/printk.h> +#include <linux/rio_cm_cdev.h> + +#define DRV_NAME "rio_cm" +#define DRV_VERSION "1.0.0" +#define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" +#define DRV_DESC "RapidIO Channelized Messaging Driver" +#define DEV_NAME "rio_cm" + +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_RDEV = BIT(3), /* RapidIO device add/remove */ + DBG_CHOP = BIT(4), /* channel operations */ + DBG_WAIT = BIT(5), /* waiting for events */ + DBG_TX = BIT(6), /* message TX */ + DBG_TX_EVENT = BIT(7), /* message TX event */ + DBG_RX_DATA = BIT(8), /* inbound data messages */ + DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +#define riocm_debug(level, fmt, arg...) \ + do { \ + if (DBG_##level & dbg_level) \ + pr_debug(DRV_NAME ": %s " fmt "\n", \ + __func__, ##arg); \ + } while (0) +#else +#define riocm_debug(level, fmt, arg...) \ + no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) +#endif + +#define riocm_warn(fmt, arg...) \ + pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) + +#define riocm_error(fmt, arg...) 
\ + pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) + + +static int cmbox = 1; +module_param(cmbox, int, S_IRUGO); +MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); + +static int chstart = 256; +module_param(chstart, int, S_IRUGO); +MODULE_PARM_DESC(chstart, + "Start channel number for dynamic allocation (default 256)"); + +#ifdef DEBUG +static u32 dbg_level = DBG_NONE; +module_param(dbg_level, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define RIOCM_TX_RING_SIZE 128 +#define RIOCM_RX_RING_SIZE 128 +#define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ + +#define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ +#define RIOCM_CHNUM_AUTO 0 +#define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ + +enum rio_cm_state { + RIO_CM_IDLE, + RIO_CM_CONNECT, + RIO_CM_CONNECTED, + RIO_CM_DISCONNECT, + RIO_CM_CHAN_BOUND, + RIO_CM_LISTEN, + RIO_CM_DESTROYING, +}; + +enum rio_cm_pkt_type { + RIO_CM_SYS = 0xaa, + RIO_CM_CHAN = 0x55, +}; + +enum rio_cm_chop { + CM_CONN_REQ, + CM_CONN_ACK, + CM_CONN_CLOSE, + CM_DATA_MSG, +}; + +struct rio_ch_base_bhdr { + u32 src_id; + u32 dst_id; +#define RIO_HDR_LETTER_MASK 0xffff0000 +#define RIO_HDR_MBOX_MASK 0x0000ffff + u8 src_mbox; + u8 dst_mbox; + u8 type; +} __attribute__((__packed__)); + +struct rio_ch_chan_hdr { + struct rio_ch_base_bhdr bhdr; + u8 ch_op; + u16 dst_ch; + u16 src_ch; + u16 msg_len; + u16 rsrvd; +} __attribute__((__packed__)); + +struct tx_req { + struct list_head node; + struct rio_dev *rdev; + void *buffer; + size_t len; +}; + +struct cm_dev { + struct list_head list; + struct rio_mport *mport; + void *rx_buf[RIOCM_RX_RING_SIZE]; + int rx_slots; + struct mutex rx_lock; + + void *tx_buf[RIOCM_TX_RING_SIZE]; + int tx_slot; + int tx_cnt; + int tx_ack_slot; + struct list_head tx_reqs; + spinlock_t tx_lock; + + struct list_head peers; + u32 npeers; + struct workqueue_struct *rx_wq; + struct work_struct rx_work; +}; + +struct chan_rx_ring { + void *buf[RIOCM_RX_RING_SIZE]; + int head; + int tail; + int count; + + /* Tracking RX buffers reported to upper level */ + void *inuse[RIOCM_RX_RING_SIZE]; + int inuse_cnt; +}; + +struct rio_channel { + u16 id; /* local channel ID */ + struct kref ref; /* channel refcount */ + struct file *filp; + struct cm_dev *cmdev; /* associated CM device object */ + struct rio_dev *rdev; /* remote RapidIO device */ + enum rio_cm_state state; + int error; + spinlock_t lock; + void *context; + u32 loc_destid; /* local destID */ + u32 rem_destid; /* remote destID */ + u16 rem_channel; /* remote channel ID */ + struct list_head accept_queue; + struct list_head ch_node; + struct completion comp; + struct completion comp_close; + struct chan_rx_ring rx_ring; +}; + +struct cm_peer { + struct list_head node; + struct rio_dev *rdev; +}; + +struct rio_cm_work { + struct work_struct work; + struct cm_dev *cm; + void *data; +}; + +struct conn_req { + struct list_head node; + u32 destid; /* requester destID */ + u16 chan; /* requester channel ID */ + struct cm_dev *cmdev; +}; + +/* + * A channel_dev structure represents a CM_CDEV + * @cdev Character device + * @dev Associated device object + */ +struct channel_dev { + struct cdev cdev; + struct device *dev; +}; + +static struct rio_channel *riocm_ch_alloc(u16 ch_num); +static void riocm_ch_free(struct kref *ref); +static int riocm_post_send(struct cm_dev 
*cm, struct rio_dev *rdev, + void *buffer, size_t len); +static int riocm_ch_close(struct rio_channel *ch); + +static DEFINE_SPINLOCK(idr_lock); +static DEFINE_IDR(ch_idr); + +static LIST_HEAD(cm_dev_list); +static DECLARE_RWSEM(rdev_sem); + +static struct class *dev_class; +static unsigned int dev_major; +static unsigned int dev_minor_base; +static dev_t dev_number; +static struct channel_dev riocm_cdev; + +#define is_msg_capable(src_ops, dst_ops) \ + ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ + (dst_ops & RIO_DST_OPS_DATA_MSG)) +#define dev_cm_capable(dev) \ + is_msg_capable(dev->src_ops, dev->dst_ops) + +static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) +{ + int ret; + + spin_lock_bh(&ch->lock); + ret = (ch->state == cmp); + spin_unlock_bh(&ch->lock); + return ret; +} + +static int riocm_cmp_exch(struct rio_channel *ch, + enum rio_cm_state cmp, enum rio_cm_state exch) +{ + int ret; + + spin_lock_bh(&ch->lock); + ret = (ch->state == cmp); + if (ret) + ch->state = exch; + spin_unlock_bh(&ch->lock); + return ret; +} + +static enum rio_cm_state riocm_exch(struct rio_channel *ch, + enum rio_cm_state exch) +{ + enum rio_cm_state old; + + spin_lock_bh(&ch->lock); + old = ch->state; + ch->state = exch; + spin_unlock_bh(&ch->lock); + return old; +} + +static struct rio_channel *riocm_get_channel(u16 nr) +{ + struct rio_channel *ch; + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, nr); + if (ch) + kref_get(&ch->ref); + spin_unlock_bh(&idr_lock); + return ch; +} + +static void riocm_put_channel(struct rio_channel *ch) +{ + kref_put(&ch->ref, riocm_ch_free); +} + +static void *riocm_rx_get_msg(struct cm_dev *cm) +{ + void *msg; + int i; + + msg = rio_get_inb_message(cm->mport, cmbox); + if (msg) { + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (cm->rx_buf[i] == msg) { + cm->rx_buf[i] = NULL; + cm->rx_slots++; + break; + } + } + + if (i == RIOCM_RX_RING_SIZE) + riocm_warn("no record for buffer 0x%p", msg); + } + + return msg; +} + +/* + * riocm_rx_fill - fills a ring of receive buffers for given cm device + * @cm: cm_dev object + * @nent: max number of entries to fill + * + * Returns: none + */ +static void riocm_rx_fill(struct cm_dev *cm, int nent) +{ + int i; + + if (cm->rx_slots == 0) + return; + + for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { + if (cm->rx_buf[i] == NULL) { + cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); + if (cm->rx_buf[i] == NULL) + break; + rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); + cm->rx_slots--; + nent--; + } + } +} + +/* + * riocm_rx_free - frees all receive buffers associated with given cm device + * @cm: cm_dev object + * + * Returns: none + */ +static void riocm_rx_free(struct cm_dev *cm) +{ + int i; + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (cm->rx_buf[i] != NULL) { + kfree(cm->rx_buf[i]); + cm->rx_buf[i] = NULL; + } + } +} + +/* + * riocm_req_handler - connection request handler + * @cm: cm_dev object + * @req_data: pointer to the request packet + * + * Returns: 0 if success, or + * -EINVAL if channel is not in correct state, + * -ENODEV if cannot find a channel with specified ID, + * -ENOMEM if unable to allocate memory to store the request + */ +static int riocm_req_handler(struct cm_dev *cm, void *req_data) +{ + struct rio_channel *ch; + struct conn_req *req; + struct rio_ch_chan_hdr *hh = req_data; + u16 chnum; + + chnum = ntohs(hh->dst_ch); + + ch = riocm_get_channel(chnum); + + if (!ch) + return -ENODEV; + + if (ch->state != RIO_CM_LISTEN) { + riocm_debug(RX_CMD, "channel %d is not in 
listen state", chnum); + riocm_put_channel(ch); + return -EINVAL; + } + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + riocm_put_channel(ch); + return -ENOMEM; + } + + req->destid = ntohl(hh->bhdr.src_id); + req->chan = ntohs(hh->src_ch); + req->cmdev = cm; + + spin_lock_bh(&ch->lock); + list_add_tail(&req->node, &ch->accept_queue); + spin_unlock_bh(&ch->lock); + complete(&ch->comp); + riocm_put_channel(ch); + + return 0; +} + +/* + * riocm_resp_handler - response to connection request handler + * @resp_data: pointer to the response packet + * + * Returns: 0 if success, or + * -EINVAL if channel is not in correct state, + * -ENODEV if cannot find a channel with specified ID, + */ +static int riocm_resp_handler(void *resp_data) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hh = resp_data; + u16 chnum; + + chnum = ntohs(hh->dst_ch); + ch = riocm_get_channel(chnum); + if (!ch) + return -ENODEV; + + if (ch->state != RIO_CM_CONNECT) { + riocm_put_channel(ch); + return -EINVAL; + } + + riocm_exch(ch, RIO_CM_CONNECTED); + ch->rem_channel = ntohs(hh->src_ch); + complete(&ch->comp); + riocm_put_channel(ch); + + return 0; +} + +/* + * riocm_close_handler - channel close request handler + * @req_data: pointer to the request packet + * + * Returns: 0 if success, or + * -ENODEV if cannot find a channel with specified ID, + * + error codes returned by riocm_ch_close. + */ +static int riocm_close_handler(void *data) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hh = data; + int ret; + + riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); + if (!ch) { + spin_unlock_bh(&idr_lock); + return -ENODEV; + } + idr_remove(&ch_idr, ch->id); + spin_unlock_bh(&idr_lock); + + riocm_exch(ch, RIO_CM_DISCONNECT); + + ret = riocm_ch_close(ch); + if (ret) + riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); + + return 0; +} + +/* + * rio_cm_handler - function that services request (non-data) packets + * @cm: cm_dev object + * @data: pointer to the packet + */ +static void rio_cm_handler(struct cm_dev *cm, void *data) +{ + struct rio_ch_chan_hdr *hdr; + + if (!rio_mport_is_running(cm->mport)) + goto out; + + hdr = data; + + riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", + hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); + + switch (hdr->ch_op) { + case CM_CONN_REQ: + riocm_req_handler(cm, data); + break; + case CM_CONN_ACK: + riocm_resp_handler(data); + break; + case CM_CONN_CLOSE: + riocm_close_handler(data); + break; + default: + riocm_error("Invalid packet header"); + break; + } +out: + kfree(data); +} + +/* + * rio_rx_data_handler - received data packet handler + * @cm: cm_dev object + * @buf: data packet + * + * Returns: 0 if success, or + * -ENODEV if cannot find a channel with specified ID, + * -EIO if channel is not in CONNECTED state, + * -ENOMEM if channel RX queue is full (packet discarded) + */ +static int rio_rx_data_handler(struct cm_dev *cm, void *buf) +{ + struct rio_ch_chan_hdr *hdr; + struct rio_channel *ch; + + hdr = buf; + + riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch)); + + ch = riocm_get_channel(ntohs(hdr->dst_ch)); + if (!ch) { + /* Discard data message for non-existing channel */ + kfree(buf); + return -ENODEV; + } + + /* Place pointer to the buffer into channel's RX queue */ + spin_lock(&ch->lock); + + if (ch->state != RIO_CM_CONNECTED) { + /* Channel is not ready to receive data, discard a packet */ + riocm_debug(RX_DATA, "ch=%d is in wrong state=%d", + ch->id, ch->state); + 
spin_unlock(&ch->lock); + kfree(buf); + riocm_put_channel(ch); + return -EIO; + } + + if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { + /* If RX ring is full, discard a packet */ + riocm_debug(RX_DATA, "ch=%d is full", ch->id); + spin_unlock(&ch->lock); + kfree(buf); + riocm_put_channel(ch); + return -ENOMEM; + } + + ch->rx_ring.buf[ch->rx_ring.head] = buf; + ch->rx_ring.head++; + ch->rx_ring.count++; + ch->rx_ring.head %= RIOCM_RX_RING_SIZE; + + complete(&ch->comp); + + spin_unlock(&ch->lock); + riocm_put_channel(ch); + + return 0; +} + +/* + * rio_ibmsg_handler - inbound message packet handler + */ +static void rio_ibmsg_handler(struct work_struct *work) +{ + struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); + void *data; + struct rio_ch_chan_hdr *hdr; + + if (!rio_mport_is_running(cm->mport)) + return; + + while (1) { + mutex_lock(&cm->rx_lock); + data = riocm_rx_get_msg(cm); + if (data) + riocm_rx_fill(cm, 1); + mutex_unlock(&cm->rx_lock); + + if (data == NULL) + break; + + hdr = data; + + if (hdr->bhdr.type != RIO_CM_CHAN) { + /* For now simply discard packets other than channel */ + riocm_error("Unsupported TYPE code (0x%x). Msg dropped", + hdr->bhdr.type); + kfree(data); + continue; + } + + /* Process a channel message */ + if (hdr->ch_op == CM_DATA_MSG) + rio_rx_data_handler(cm, data); + else + rio_cm_handler(cm, data); + } +} + +static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, + int mbox, int slot) +{ + struct cm_dev *cm = dev_id; + + if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) + queue_work(cm->rx_wq, &cm->rx_work); +} + +/* + * rio_txcq_handler - TX completion handler + * @cm: cm_dev object + * @slot: TX queue slot + * + * TX completion handler also ensures that pending request packets are placed + * into transmit queue as soon as a free slot becomes available. This is done + * to give higher priority to request packets during high intensity data flow. + */ +static void rio_txcq_handler(struct cm_dev *cm, int slot) +{ + int ack_slot; + + /* ATTN: Add TX completion notification if/when direct buffer + * transfer is implemented. At this moment only correct tracking + * of tx_count is important. 
+ */ + riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d", + cm->mport->id, slot, cm->tx_cnt); + + spin_lock(&cm->tx_lock); + ack_slot = cm->tx_ack_slot; + + if (ack_slot == slot) + riocm_debug(TX_EVENT, "slot == ack_slot"); + + while (cm->tx_cnt && ((ack_slot != slot) || + (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { + + cm->tx_buf[ack_slot] = NULL; + ++ack_slot; + ack_slot &= (RIOCM_TX_RING_SIZE - 1); + cm->tx_cnt--; + } + + if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) + riocm_error("tx_cnt %d out of sync", cm->tx_cnt); + + WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); + + cm->tx_ack_slot = ack_slot; + + /* + * If there are pending requests, insert them into transmit queue + */ + if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { + struct tx_req *req, *_req; + int rc; + + list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { + list_del(&req->node); + cm->tx_buf[cm->tx_slot] = req->buffer; + rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, + req->buffer, req->len); + kfree(req->buffer); + kfree(req); + + ++cm->tx_cnt; + ++cm->tx_slot; + cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); + if (cm->tx_cnt == RIOCM_TX_RING_SIZE) + break; + } + } + + spin_unlock(&cm->tx_lock); +} + +static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, + int mbox, int slot) +{ + struct cm_dev *cm = dev_id; + + if (cm && rio_mport_is_running(cm->mport)) + rio_txcq_handler(cm, slot); +} + +static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, + void *buffer, size_t len) +{ + unsigned long flags; + struct tx_req *treq; + + treq = kzalloc(sizeof(*treq), GFP_KERNEL); + if (treq == NULL) + return -ENOMEM; + + treq->rdev = rdev; + treq->buffer = buffer; + treq->len = len; + + spin_lock_irqsave(&cm->tx_lock, flags); + list_add_tail(&treq->node, &cm->tx_reqs); + spin_unlock_irqrestore(&cm->tx_lock, flags); + return 0; +} + +/* + * riocm_post_send - helper function that places packet into msg TX queue + * @cm: cm_dev object + * @rdev: target RapidIO device object (required by outbound msg interface) + * @buffer: pointer to a packet buffer to send + * @len: length of data to transfer + * + * Returns: 0 if success, or error code otherwise. + */ +static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, + void *buffer, size_t len) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&cm->tx_lock, flags); + + if (cm->mport == NULL) { + rc = -ENODEV; + goto err_out; + } + + if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { + riocm_debug(TX, "Tx Queue is full"); + rc = -EBUSY; + goto err_out; + } + + cm->tx_buf[cm->tx_slot] = buffer; + rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); + + riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d", + buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); + + ++cm->tx_cnt; + ++cm->tx_slot; + cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); + +err_out: + spin_unlock_irqrestore(&cm->tx_lock, flags); + return rc; +} + +/* + * riocm_ch_send - sends a data packet to a remote device + * @ch_id: local channel ID + * @buf: pointer to a data buffer to send (including CM header) + * @len: length of data to transfer (including CM header) + * + * ATTN: ASSUMES THAT THE HEADER SPACE IS A RESERVED PART OF THE DATA PACKET + * + * Returns: 0 if success, or + * -EINVAL if one or more input parameters are not valid, + * -ENODEV if cannot find a channel with specified ID, + * -EAGAIN if a channel is not in CONNECTED state, + * + error codes returned by HW send routine. 
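+ * (riocm_post_send() fails with -EBUSY when the TX ring is full and + * with -ENODEV when the mport is gone.)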
+ */ +static int riocm_ch_send(u16 ch_id, void *buf, int len) +{ + struct rio_channel *ch; + struct rio_ch_chan_hdr *hdr; + int ret; + + if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) + return -EINVAL; + + ch = riocm_get_channel(ch_id); + if (!ch) { + riocm_error("%s(%d) ch_%d not found", current->comm, + task_pid_nr(current), ch_id); + return -ENODEV; + } + + if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { + ret = -EAGAIN; + goto err_out; + } + + /* + * Fill buffer header section with corresponding channel data + */ + hdr = buf; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_DATA_MSG; + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + hdr->msg_len = htons((u16)len); + + /* ATTN: the function call below relies on the fact that underlying + * HW-specific add_outb_message() routine copies TX data into its own + * internal transfer buffer (true for all RIONET compatible mport + * drivers). Must be reviewed if mport driver uses the buffer directly. + */ + + ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); + if (ret) + riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); +err_out: + riocm_put_channel(ch); + return ret; +} + +static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) +{ + int i, ret = -EINVAL; + + spin_lock_bh(&ch->lock); + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (ch->rx_ring.inuse[i] == buf) { + ch->rx_ring.inuse[i] = NULL; + ch->rx_ring.inuse_cnt--; + ret = 0; + break; + } + } + + spin_unlock_bh(&ch->lock); + + if (!ret) + kfree(buf); + + return ret; +} + +/* + * riocm_ch_receive - fetch a data packet received for the specified channel + * @ch: local channel object + * @buf: pointer to a packet buffer + * @timeout: timeout to wait for incoming packet (in jiffies) + * + * Returns: 0 and a valid buffer pointer if success, or NULL pointer and one of: + * -EAGAIN if a channel is not in CONNECTED state, + * -ENOMEM if in-use tracking queue is full, + * -ETIME if wait timeout expired, + * -EINTR if wait was interrupted, + * -ECONNRESET if channel was disconnected while waiting. + */ +static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) +{ + void *rxmsg = NULL; + int i, ret = 0; + long wret; + + if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { + ret = -EAGAIN; + goto out; + } + + if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { + /* If we do not have entries to track buffers given to upper + * layer, reject request. + */ + ret = -ENOMEM; + goto out; + } + + wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); + + riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); + + if (!wret) + ret = -ETIME; + else if (wret == -ERESTARTSYS) + ret = -EINTR; + else + ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 
0 : -ECONNRESET; + + if (ret) + goto out; + + spin_lock_bh(&ch->lock); + + rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; + ch->rx_ring.buf[ch->rx_ring.tail] = NULL; + ch->rx_ring.count--; + ch->rx_ring.tail++; + ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; + ret = -ENOMEM; + + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { + if (ch->rx_ring.inuse[i] == NULL) { + ch->rx_ring.inuse[i] = rxmsg; + ch->rx_ring.inuse_cnt++; + ret = 0; + break; + } + } + + if (ret) { + /* We have no entry to store pending message: drop it */ + kfree(rxmsg); + rxmsg = NULL; + } + + spin_unlock_bh(&ch->lock); +out: + *buf = rxmsg; + return ret; +} + +/* + * riocm_ch_connect - sends a connect request to a remote device + * @loc_ch: local channel ID + * @cm: CM device to send connect request + * @peer: target RapidIO device + * @rem_ch: remote channel ID + * + * Returns: 0 if success, or + * -ENODEV if cannot find a channel with specified ID, + * -EINVAL if the channel is not in IDLE state, + * -ENOMEM if packet allocation failed, + * -ETIME if ACK response timeout expired, + * -EINTR if wait for response was interrupted, + * -1 if the connection was rejected by the remote side. + */ +static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, + struct cm_peer *peer, u16 rem_ch) +{ + struct rio_channel *ch = NULL; + struct rio_ch_chan_hdr *hdr; + int ret; + long wret; + + ch = riocm_get_channel(loc_ch); + if (!ch) + return -ENODEV; + + if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) { + ret = -EINVAL; + goto conn_done; + } + + ch->cmdev = cm; + ch->rdev = peer->rdev; + ch->context = NULL; + ch->loc_destid = cm->mport->host_deviceid; + ch->rem_channel = rem_ch; + + /* + * Send connect request to the remote RapidIO device + */ + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) { + ret = -ENOMEM; + goto conn_done; + } + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(peer->rdev->destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_REQ; + hdr->dst_ch = htons(rem_ch); + hdr->src_ch = htons(loc_ch); + + /* ATTN: the function call below relies on the fact that underlying + * HW-specific add_outb_message() routine copies TX data into its + * internal transfer buffer. Must be reviewed if mport driver uses + * this buffer directly. + */ + ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr)); + + if (ret != -EBUSY) { + kfree(hdr); + } else { + ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr)); + if (ret) + kfree(hdr); + } + + if (ret) { + riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE); + goto conn_done; + } + + /* Wait for connect response from the remote device */ + wret = wait_for_completion_interruptible_timeout(&ch->comp, + RIOCM_CONNECT_TO * HZ); + riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); + + if (!wret) + ret = -ETIME; + else if (wret == -ERESTARTSYS) + ret = -EINTR; + else + ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1; + +conn_done: + riocm_put_channel(ch); + return ret; +} + +static int riocm_send_ack(struct rio_channel *ch) +{ + struct rio_ch_chan_hdr *hdr; + int ret; + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_ACK; + + /* ATTN: the function call below relies on the fact that underlying + * add_outb_message() routine copies TX data into its internal transfer + * buffer. 
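+ * Ownership of hdr therefore stays with this function: it is freed + * below unless it was handed off to the TX request queue on the + * -EBUSY path.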
Review if switching to direct buffer version. + */ + ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); + + if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, + ch->rdev, hdr, sizeof(*hdr))) + return 0; + kfree(hdr); + + if (ret) + riocm_error("send ACK to ch_%d on %s failed (ret=%d)", + ch->id, rio_name(ch->rdev), ret); + return ret; +} + +/* + * riocm_ch_accept - accept incoming connection request + * @ch_id: channel ID + * @new_ch_id: pointer for returning the ID of the new channel + * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection + * request is not available). + * + * Returns: pointer to new channel struct if success, or error-valued pointer: + * -EINVAL - the channel does not exist or is not in LISTEN state, + * -EAGAIN - no connection request available immediately (timeout=0), + * -ENOMEM - unable to allocate new channel, + * -ENODEV - cannot find device object for the connecting peer, + * -ETIME - wait timeout expired, + * -EINTR - wait was interrupted, + * -ECANCELED/-EIO - channel left LISTEN state or accept queue was + * empty on wakeup. + */ +static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, + long timeout) +{ + struct rio_channel *ch = NULL; + struct rio_channel *new_ch = NULL; + struct conn_req *req; + struct cm_peer *peer; + int found = 0; + int err = 0; + long wret; + + ch = riocm_get_channel(ch_id); + if (!ch) + return ERR_PTR(-EINVAL); + + if (!riocm_cmp(ch, RIO_CM_LISTEN)) { + err = -EINVAL; + goto err_put; + } + + /* Don't sleep if this is a non blocking call */ + if (!timeout) { + if (!try_wait_for_completion(&ch->comp)) { + err = -EAGAIN; + goto err_put; + } + } else { + riocm_debug(WAIT, "on %d", ch->id); + + wret = wait_for_completion_interruptible_timeout(&ch->comp, + timeout); + if (!wret) { + err = -ETIME; + goto err_put; + } else if (wret == -ERESTARTSYS) { + err = -EINTR; + goto err_put; + } + } + + spin_lock_bh(&ch->lock); + + if (ch->state != RIO_CM_LISTEN) { + err = -ECANCELED; + } else if (list_empty(&ch->accept_queue)) { + riocm_debug(WAIT, "on %d accept_queue is empty on completion", + ch->id); + err = -EIO; + } + + spin_unlock_bh(&ch->lock); + + if (err) { + riocm_debug(WAIT, "on %d returns %d", ch->id, err); + goto err_put; + } + + /* Create new channel for this connection */ + new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO); + + if (IS_ERR(new_ch)) { + riocm_error("failed to get channel for new req (%ld)", + PTR_ERR(new_ch)); + err = -ENOMEM; + goto err_put; + } + + spin_lock_bh(&ch->lock); + + req = list_first_entry(&ch->accept_queue, struct conn_req, node); + list_del(&req->node); + new_ch->cmdev = ch->cmdev; + new_ch->loc_destid = ch->loc_destid; + new_ch->rem_destid = req->destid; + new_ch->rem_channel = req->chan; + + spin_unlock_bh(&ch->lock); + riocm_put_channel(ch); + kfree(req); + + down_read(&rdev_sem); + /* Find requester's device object */ + list_for_each_entry(peer, &new_ch->cmdev->peers, node) { + if (peer->rdev->destid == new_ch->rem_destid) { + riocm_debug(RX_CMD, "found matching device(%s)", + rio_name(peer->rdev)); + found = 1; + break; + } + } + up_read(&rdev_sem); + + if (!found) { + /* If peer device object not found, simply ignore the request */ + err = -ENODEV; + goto err_nodev; + } + + new_ch->rdev = peer->rdev; + new_ch->state = RIO_CM_CONNECTED; + spin_lock_init(&new_ch->lock); + + /* Acknowledge the connection request. 
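+ * The ACK carries the new channel's ID in src_ch; the requester's + * riocm_resp_handler() stores it as rem_channel on its side.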
*/ + riocm_send_ack(new_ch); + + *new_ch_id = new_ch->id; + return new_ch; +err_put: + riocm_put_channel(ch); +err_nodev: + if (new_ch) { + spin_lock_bh(&idr_lock); + idr_remove(&ch_idr, new_ch->id); + spin_unlock_bh(&idr_lock); + riocm_put_channel(new_ch); + } + *new_ch_id = 0; + return ERR_PTR(err); +} + +/* + * riocm_ch_listen - puts a channel into LISTEN state + * @ch_id: channel ID + * + * Returns: 0 if success, or + * -EINVAL if the specified channel does not exist or + * is not in CHAN_BOUND state. + */ +static int riocm_ch_listen(u16 ch_id) +{ + struct rio_channel *ch = NULL; + int ret = 0; + + riocm_debug(CHOP, "(ch_%d)", ch_id); + + ch = riocm_get_channel(ch_id); + if (!ch) + return -EINVAL; + if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) + ret = -EINVAL; + riocm_put_channel(ch); + return ret; +} + +/* + * riocm_ch_bind - associate a channel object and an mport device + * @ch_id: channel ID + * @mport_id: local mport device ID + * @context: pointer to the additional caller's context + * + * Returns: 0 if success, or + * -ENODEV if cannot find specified mport, + * -EINVAL if the specified channel does not exist or + * is not in IDLE state. + */ +static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context) +{ + struct rio_channel *ch = NULL; + struct cm_dev *cm; + int rc = -ENODEV; + + riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id); + + /* Find matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if ((cm->mport->id == mport_id) && + rio_mport_is_running(cm->mport)) { + rc = 0; + break; + } + } + + if (rc) + goto exit; + + ch = riocm_get_channel(ch_id); + if (!ch) { + rc = -EINVAL; + goto exit; + } + + spin_lock_bh(&ch->lock); + if (ch->state != RIO_CM_IDLE) { + spin_unlock_bh(&ch->lock); + rc = -EINVAL; + goto err_put; + } + + ch->cmdev = cm; + ch->loc_destid = cm->mport->host_deviceid; + ch->context = context; + ch->state = RIO_CM_CHAN_BOUND; + spin_unlock_bh(&ch->lock); +err_put: + riocm_put_channel(ch); +exit: + up_read(&rdev_sem); + return rc; +} + +/* + * riocm_ch_alloc - channel object allocation helper routine + * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) + * + * Return value: pointer to newly created channel object, + * or error-valued pointer + */ +static struct rio_channel *riocm_ch_alloc(u16 ch_num) +{ + int id; + int start, end; + struct rio_channel *ch; + + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) + return ERR_PTR(-ENOMEM); + + if (ch_num) { + /* If requested, try to obtain the specified channel ID */ + start = ch_num; + end = ch_num + 1; + } else { + /* Obtain channel ID from the dynamic allocation range */ + start = chstart; + end = RIOCM_MAX_CHNUM + 1; + } + + idr_preload(GFP_KERNEL); + spin_lock_bh(&idr_lock); + id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); + spin_unlock_bh(&idr_lock); + idr_preload_end(); + + if (id < 0) { + kfree(ch); + return ERR_PTR(id == -ENOSPC ? -EBUSY : id); + } + + ch->id = (u16)id; + ch->state = RIO_CM_IDLE; + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->accept_queue); + INIT_LIST_HEAD(&ch->ch_node); + init_completion(&ch->comp); + init_completion(&ch->comp_close); + kref_init(&ch->ref); + ch->rx_ring.head = 0; + ch->rx_ring.tail = 0; + ch->rx_ring.count = 0; + ch->rx_ring.inuse_cnt = 0; + + return ch; +} + +/* + * riocm_ch_create - creates a new channel object and allocates ID for it + * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) + * + * Allocates and initializes a new channel object. 
If the parameter ch_num > 0 + * and is within the valid range, riocm_ch_create tries to allocate the + * specified ID for the new channel. If ch_num = 0, channel ID will be assigned + * automatically from the range (chstart ... RIOCM_MAX_CHNUM). + * Module parameter 'chstart' defines start of an ID range available for dynamic + * allocation. Range below 'chstart' is reserved for pre-defined ID numbers. + * Available channel numbers are limited by 16-bit size of channel numbers used + * in the packet header. + * + * Return value: PTR to rio_channel structure if successful (with channel number + * updated via pointer) or error-valued pointer if error. + */ +static struct rio_channel *riocm_ch_create(u16 *ch_num) +{ + struct rio_channel *ch = NULL; + + ch = riocm_ch_alloc(*ch_num); + + if (IS_ERR(ch)) + riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)", + *ch_num, PTR_ERR(ch)); + else + *ch_num = ch->id; + + return ch; +} + +/* + * riocm_ch_free - channel object release routine + * @ref: pointer to a channel's kref structure + */ +static void riocm_ch_free(struct kref *ref) +{ + struct rio_channel *ch = container_of(ref, struct rio_channel, ref); + int i; + + riocm_debug(CHOP, "(ch_%d)", ch->id); + + if (ch->rx_ring.inuse_cnt) { + for (i = 0; + i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { + if (ch->rx_ring.inuse[i] != NULL) { + kfree(ch->rx_ring.inuse[i]); + ch->rx_ring.inuse_cnt--; + } + } + } + + if (ch->rx_ring.count) + for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { + if (ch->rx_ring.buf[i] != NULL) { + kfree(ch->rx_ring.buf[i]); + ch->rx_ring.count--; + } + } + + complete(&ch->comp_close); +} + +static int riocm_send_close(struct rio_channel *ch) +{ + struct rio_ch_chan_hdr *hdr; + int ret; + + /* + * Send CH_CLOSE notification to the remote RapidIO device + */ + + hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->bhdr.src_id = htonl(ch->loc_destid); + hdr->bhdr.dst_id = htonl(ch->rem_destid); + hdr->bhdr.src_mbox = cmbox; + hdr->bhdr.dst_mbox = cmbox; + hdr->bhdr.type = RIO_CM_CHAN; + hdr->ch_op = CM_CONN_CLOSE; + hdr->dst_ch = htons(ch->rem_channel); + hdr->src_ch = htons(ch->id); + + /* ATTN: the function call below relies on the fact that underlying + * add_outb_message() routine copies TX data into its internal transfer + * buffer. Needs to be reviewed if switched to direct buffer mode. 
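+ * As with ACK packets, hdr is freed below unless ownership was passed + * to the TX request queue on the -EBUSY path.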
+ */ + ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); + + if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, + hdr, sizeof(*hdr))) + return 0; + kfree(hdr); + + if (ret) + riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret); + + return ret; +} + +/* + * riocm_ch_close - closes a channel object with specified ID (by local request) + * @ch: channel to be closed + */ +static int riocm_ch_close(struct rio_channel *ch) +{ + unsigned long tmo = msecs_to_jiffies(3000); + enum rio_cm_state state; + long wret; + int ret = 0; + + riocm_debug(CHOP, "ch_%d by %s(%d)", + ch->id, current->comm, task_pid_nr(current)); + + state = riocm_exch(ch, RIO_CM_DESTROYING); + if (state == RIO_CM_CONNECTED) + riocm_send_close(ch); + + complete_all(&ch->comp); + + riocm_put_channel(ch); + wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo); + + riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); + + if (wret == 0) { + /* Timeout on wait occurred */ + riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d", + current->comm, task_pid_nr(current), ch->id); + ret = -ETIMEDOUT; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal */ + riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted", + current->comm, task_pid_nr(current), ch->id); + ret = -EINTR; + } + + if (!ret) { + riocm_debug(CHOP, "ch_%d resources released", ch->id); + kfree(ch); + } else { + riocm_debug(CHOP, "failed to release ch_%d resources", ch->id); + } + + return ret; +} + +/* + * riocm_cdev_open() - Open character device + */ +static int riocm_cdev_open(struct inode *inode, struct file *filp) +{ + riocm_debug(INIT, "by %s(%d) filp=%p ", + current->comm, task_pid_nr(current), filp); + + if (list_empty(&cm_dev_list)) + return -ENODEV; + + return 0; +} + +/* + * riocm_cdev_release() - Release character device + */ +static int riocm_cdev_release(struct inode *inode, struct file *filp) +{ + struct rio_channel *ch, *_c; + unsigned int i; + LIST_HEAD(list); + + riocm_debug(EXIT, "by %s(%d) filp=%p", + current->comm, task_pid_nr(current), filp); + + /* Check if there are channels associated with this file descriptor */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch && ch->filp == filp) { + riocm_debug(EXIT, "ch_%d not released by %s(%d)", + ch->id, current->comm, + task_pid_nr(current)); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } + + return 0; +} + +/* + * cm_ep_get_list_size() - Reports number of endpoints in the network + */ +static int cm_ep_get_list_size(void __user *arg) +{ + u32 __user *p = arg; + u32 mport_id; + u32 count = 0; + struct cm_dev *cm; + + if (get_user(mport_id, p)) + return -EFAULT; + if (mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + /* Find a matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport->id == mport_id) { + count = cm->npeers; + up_read(&rdev_sem); + if (copy_to_user(arg, &count, sizeof(u32))) + return -EFAULT; + return 0; + } + } + up_read(&rdev_sem); + + return -ENODEV; +} + +/* + * cm_ep_get_list() - Returns list of attached endpoints + */ +static int cm_ep_get_list(void __user *arg) +{ + struct cm_dev *cm; + struct cm_peer *peer; + u32 info[2]; + void *buf; + u32 nent; + u32 *entry_ptr; + u32 i = 0; + int ret = 0; + + if (copy_from_user(&info, 
arg, sizeof(info))) + return -EFAULT; + + if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT) + return -EINVAL; + + /* Find a matching cm_dev object */ + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) + if (cm->mport->id == (u8)info[1]) + goto found; + + up_read(&rdev_sem); + return -ENODEV; + +found: + nent = min(info[0], cm->npeers); + buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); + if (!buf) { + up_read(&rdev_sem); + return -ENOMEM; + } + + entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32)); + + list_for_each_entry(peer, &cm->peers, node) { + *entry_ptr = (u32)peer->rdev->destid; + entry_ptr++; + if (++i == nent) + break; + } + up_read(&rdev_sem); + + ((u32 *)buf)[0] = i; /* report an updated number of entries */ + ((u32 *)buf)[1] = info[1]; /* put back an mport ID */ + if (copy_to_user(arg, buf, sizeof(u32) * (nent + 2))) + ret = -EFAULT; + + kfree(buf); + return ret; +} + +/* + * cm_mport_get_list() - Returns list of available local mport devices + */ +static int cm_mport_get_list(void __user *arg) +{ + int ret = 0; + u32 entries; + void *buf; + struct cm_dev *cm; + u32 *entry_ptr; + int count = 0; + + if (copy_from_user(&entries, arg, sizeof(entries))) + return -EFAULT; + if (entries == 0 || entries > RIO_MAX_MPORTS) + return -EINVAL; + buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Scan all registered cm_dev objects */ + entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32)); + down_read(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (count++ < entries) { + *entry_ptr = (cm->mport->id << 16) | + cm->mport->host_deviceid; + entry_ptr++; + } + } + up_read(&rdev_sem); + + *((u32 *)buf) = count; /* report a real number of entries */ + if (copy_to_user(arg, buf, sizeof(u32) * + (min_t(u32, count, entries) + 1))) + ret = -EFAULT; + + kfree(buf); + return ret; +} + +/* + * cm_chan_create() - Create a message exchange channel + */ +static int cm_chan_create(struct file *filp, void __user *arg) +{ + u16 __user *p = arg; + u16 ch_num; + struct rio_channel *ch; + + if (get_user(ch_num, p)) + return -EFAULT; + + riocm_debug(CHOP, "ch_%d requested by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + ch = riocm_ch_create(&ch_num); + if (IS_ERR(ch)) + return PTR_ERR(ch); + + ch->filp = filp; + riocm_debug(CHOP, "ch_%d created by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + return put_user(ch_num, p); +} + +/* + * cm_chan_close() - Close channel + * @filp: Pointer to file object + * @arg: Channel to close + */ +static int cm_chan_close(struct file *filp, void __user *arg) +{ + u16 __user *p = arg; + u16 ch_num; + struct rio_channel *ch; + + if (get_user(ch_num, p)) + return -EFAULT; + + riocm_debug(CHOP, "ch_%d by %s(%d)", + ch_num, current->comm, task_pid_nr(current)); + + spin_lock_bh(&idr_lock); + ch = idr_find(&ch_idr, ch_num); + if (!ch) { + spin_unlock_bh(&idr_lock); + return 0; + } + if (ch->filp != filp) { + spin_unlock_bh(&idr_lock); + return -EINVAL; + } + idr_remove(&ch_idr, ch->id); + spin_unlock_bh(&idr_lock); + + return riocm_ch_close(ch); +} + +/* + * cm_chan_bind() - Bind channel + * @arg: Channel number + */ +static int cm_chan_bind(void __user *arg) +{ + struct rio_cm_channel chan; + + if (copy_from_user(&chan, arg, sizeof(chan))) + return -EFAULT; + if (chan.mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + return riocm_ch_bind(chan.id, chan.mport_id, NULL); +} + +/* + * cm_chan_listen() - Listen on channel + * @arg: Channel number + */ +static int cm_chan_listen(void __user *arg) +{ + 
u16 __user *p = arg; + u16 ch_num; + + if (get_user(ch_num, p)) + return -EFAULT; + + return riocm_ch_listen(ch_num); +} + +/* + * cm_chan_accept() - Accept incoming connection + * @filp: Pointer to file object + * @arg: Channel number + */ +static int cm_chan_accept(struct file *filp, void __user *arg) +{ + struct rio_cm_accept param; + long accept_to; + struct rio_channel *ch; + + if (copy_from_user(&param, arg, sizeof(param))) + return -EFAULT; + + riocm_debug(CHOP, "on ch_%d by %s(%d)", + param.ch_num, current->comm, task_pid_nr(current)); + + accept_to = param.wait_to ? + msecs_to_jiffies(param.wait_to) : 0; + + ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to); + if (IS_ERR(ch)) + return PTR_ERR(ch); + ch->filp = filp; + + riocm_debug(CHOP, "new ch_%d for %s(%d)", + ch->id, current->comm, task_pid_nr(current)); + + if (copy_to_user(arg, &param, sizeof(param))) + return -EFAULT; + return 0; +} + +/* + * cm_chan_connect() - Connect on channel + * @arg: Channel information + */ +static int cm_chan_connect(void __user *arg) +{ + struct rio_cm_channel chan; + struct cm_dev *cm; + struct cm_peer *peer; + int ret = -ENODEV; + + if (copy_from_user(&chan, arg, sizeof(chan))) + return -EFAULT; + if (chan.mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + down_read(&rdev_sem); + + /* Find matching cm_dev object */ + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport->id == chan.mport_id) { + ret = 0; + break; + } + } + + if (ret) + goto err_out; + + if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) { + ret = -EINVAL; + goto err_out; + } + + /* Find corresponding RapidIO endpoint device object */ + ret = -ENODEV; + + list_for_each_entry(peer, &cm->peers, node) { + if (peer->rdev->destid == chan.remote_destid) { + ret = 0; + break; + } + } + + if (ret) + goto err_out; + + up_read(&rdev_sem); + + return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel); +err_out: + up_read(&rdev_sem); + return ret; +} + +/* + * cm_chan_msg_send() - Send a message through channel + * @arg: Outbound message information + */ +static int cm_chan_msg_send(void __user *arg) +{ + struct rio_cm_msg msg; + void *buf; + int ret = 0; + + if (copy_from_user(&msg, arg, sizeof(msg))) + return -EFAULT; + if (msg.size > RIO_MAX_MSG_SIZE) + return -EINVAL; + + buf = kmalloc(msg.size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) { + ret = -EFAULT; + goto out; + } + + ret = riocm_ch_send(msg.ch_num, buf, msg.size); +out: + kfree(buf); + return ret; +} + +/* + * cm_chan_msg_rcv() - Receive a message through channel + * @arg: Inbound message information + */ +static int cm_chan_msg_rcv(void __user *arg) +{ + struct rio_cm_msg msg; + struct rio_channel *ch; + void *buf; + long rxto; + int ret = 0, msg_size; + + if (copy_from_user(&msg, arg, sizeof(msg))) + return -EFAULT; + + if (msg.ch_num == 0 || msg.size == 0) + return -EINVAL; + + ch = riocm_get_channel(msg.ch_num); + if (!ch) + return -ENODEV; + + rxto = msg.rxto ? 
msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT; + + ret = riocm_ch_receive(ch, &buf, rxto); + if (ret) + goto out; + + msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE)); + + if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size)) + ret = -EFAULT; + + riocm_ch_free_rxbuf(ch, buf); +out: + riocm_put_channel(ch); + return ret; +} + +/* + * riocm_cdev_ioctl() - IOCTL requests handler + */ +static long +riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case RIO_CM_EP_GET_LIST_SIZE: + return cm_ep_get_list_size((void __user *)arg); + case RIO_CM_EP_GET_LIST: + return cm_ep_get_list((void __user *)arg); + case RIO_CM_CHAN_CREATE: + return cm_chan_create(filp, (void __user *)arg); + case RIO_CM_CHAN_CLOSE: + return cm_chan_close(filp, (void __user *)arg); + case RIO_CM_CHAN_BIND: + return cm_chan_bind((void __user *)arg); + case RIO_CM_CHAN_LISTEN: + return cm_chan_listen((void __user *)arg); + case RIO_CM_CHAN_ACCEPT: + return cm_chan_accept(filp, (void __user *)arg); + case RIO_CM_CHAN_CONNECT: + return cm_chan_connect((void __user *)arg); + case RIO_CM_CHAN_SEND: + return cm_chan_msg_send((void __user *)arg); + case RIO_CM_CHAN_RECEIVE: + return cm_chan_msg_rcv((void __user *)arg); + case RIO_CM_MPORT_GET_LIST: + return cm_mport_get_list((void __user *)arg); + default: + break; + } + + return -EINVAL; +} + +static const struct file_operations riocm_cdev_fops = { + .owner = THIS_MODULE, + .open = riocm_cdev_open, + .release = riocm_cdev_release, + .unlocked_ioctl = riocm_cdev_ioctl, +}; + +/* + * riocm_add_dev - add new remote RapidIO device into channel management core + * @dev: device object associated with RapidIO device + * @sif: subsystem interface + * + * Adds the specified RapidIO device (if applicable) into peers list of + * the corresponding channel management device (cm_dev). + */ +static int riocm_add_dev(struct device *dev, struct subsys_interface *sif) +{ + struct cm_peer *peer; + struct rio_dev *rdev = to_rio_dev(dev); + struct cm_dev *cm; + + /* Check if the remote device has capabilities required to support CM */ + if (!dev_cm_capable(rdev)) + return 0; + + riocm_debug(RDEV, "(%s)", rio_name(rdev)); + + peer = kmalloc(sizeof(*peer), GFP_KERNEL); + if (!peer) + return -ENOMEM; + + /* Find a corresponding cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == rdev->net->hport) + goto found; + } + + up_write(&rdev_sem); + kfree(peer); + return -ENODEV; + +found: + peer->rdev = rdev; + list_add_tail(&peer->node, &cm->peers); + cm->npeers++; + + up_write(&rdev_sem); + return 0; +} + +/* + * riocm_remove_dev - remove remote RapidIO device from channel management core + * @dev: device object associated with RapidIO device + * @sif: subsystem interface + * + * Removes the specified RapidIO device (if applicable) from peers list of + * the corresponding channel management device (cm_dev). 
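+ * Channels still associated with the removed device are closed too + * (moved to DISCONNECT state first unless the device is already + * shut down).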
+ */ +static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct cm_dev *cm; + struct cm_peer *peer; + struct rio_channel *ch, *_c; + unsigned int i; + bool found = false; + LIST_HEAD(list); + + /* Check if the remote device has capabilities required to support CM */ + if (!dev_cm_capable(rdev)) + return; + + riocm_debug(RDEV, "(%s)", rio_name(rdev)); + + /* Find matching cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == rdev->net->hport) { + found = true; + break; + } + } + + if (!found) { + up_write(&rdev_sem); + return; + } + + /* Remove remote device from the list of peers */ + found = false; + list_for_each_entry(peer, &cm->peers, node) { + if (peer->rdev == rdev) { + riocm_debug(RDEV, "removing peer %s", rio_name(rdev)); + found = true; + list_del(&peer->node); + cm->npeers--; + kfree(peer); + break; + } + } + + up_write(&rdev_sem); + + if (!found) + return; + + /* + * Release channels associated with this peer + */ + + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch && ch->rdev == rdev) { + if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN) + riocm_exch(ch, RIO_CM_DISCONNECT); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } +} + +/* + * riocm_cdev_add() - Create rio_cm char device + * @devno: device number assigned to device (MAJ + MIN) + */ +static int riocm_cdev_add(dev_t devno) +{ + int ret; + + cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops); + riocm_cdev.cdev.owner = THIS_MODULE; + ret = cdev_add(&riocm_cdev.cdev, devno, 1); + if (ret < 0) { + riocm_error("Cannot register a device with error %d", ret); + return ret; + } + + riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME); + if (IS_ERR(riocm_cdev.dev)) { + cdev_del(&riocm_cdev.cdev); + return PTR_ERR(riocm_cdev.dev); + } + + riocm_debug(MPORT, "Added %s cdev(%d:%d)", + DEV_NAME, MAJOR(devno), MINOR(devno)); + + return 0; +} + +/* + * riocm_add_mport - add new local mport device into channel management core + * @dev: device object associated with mport + * @class_intf: class interface + * + * When a new mport device is added, CM immediately reserves inbound and + * outbound RapidIO mailboxes that will be used. 
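+ * Returns: 0 if success, -ENOMEM if object or workqueue allocation fails, + * or -ENODEV if a mailbox cannot be obtained.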
+ */ +static int riocm_add_mport(struct device *dev, + struct class_interface *class_intf) +{ + int rc; + int i; + struct cm_dev *cm; + struct rio_mport *mport = to_rio_mport(dev); + + riocm_debug(MPORT, "add mport %s", mport->name); + + cm = kzalloc(sizeof(*cm), GFP_KERNEL); + if (!cm) + return -ENOMEM; + + cm->mport = mport; + + rc = rio_request_outb_mbox(mport, cm, cmbox, + RIOCM_TX_RING_SIZE, riocm_outb_msg_event); + if (rc) { + riocm_error("failed to allocate OBMBOX_%d on %s", + cmbox, mport->name); + kfree(cm); + return -ENODEV; + } + + rc = rio_request_inb_mbox(mport, cm, cmbox, + RIOCM_RX_RING_SIZE, riocm_inb_msg_event); + if (rc) { + riocm_error("failed to allocate IBMBOX_%d on %s", + cmbox, mport->name); + rio_release_outb_mbox(mport, cmbox); + kfree(cm); + return -ENODEV; + } + + /* + * Allocate and register inbound messaging buffers to be ready + * to receive channel and system management requests + */ + for (i = 0; i < RIOCM_RX_RING_SIZE; i++) + cm->rx_buf[i] = NULL; + + cm->rx_slots = RIOCM_RX_RING_SIZE; + mutex_init(&cm->rx_lock); + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); + cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); + if (!cm->rx_wq) { + riocm_error("failed to allocate RX workqueue for %s", + mport->name); + rio_release_inb_mbox(mport, cmbox); + rio_release_outb_mbox(mport, cmbox); + riocm_rx_free(cm); + kfree(cm); + return -ENOMEM; + } + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); + + cm->tx_slot = 0; + cm->tx_cnt = 0; + cm->tx_ack_slot = 0; + spin_lock_init(&cm->tx_lock); + + INIT_LIST_HEAD(&cm->peers); + cm->npeers = 0; + INIT_LIST_HEAD(&cm->tx_reqs); + + down_write(&rdev_sem); + list_add_tail(&cm->list, &cm_dev_list); + up_write(&rdev_sem); + + return 0; +} + +/* + * riocm_remove_mport - remove local mport device from channel management core + * @dev: device object associated with mport + * @class_intf: class interface + * + * Removes a local mport device from the list of registered devices that provide + * channel management services, and closes any channels that were bound to it. + * Does nothing if the specified mport is not registered with the CM core. 
+ */ +static void riocm_remove_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = to_rio_mport(dev); + struct cm_dev *cm; + struct cm_peer *peer, *temp; + struct rio_channel *ch, *_c; + unsigned int i; + bool found = false; + LIST_HEAD(list); + + riocm_debug(MPORT, "%s", mport->name); + + /* Find a matching cm_dev object */ + down_write(&rdev_sem); + list_for_each_entry(cm, &cm_dev_list, list) { + if (cm->mport == mport) { + list_del(&cm->list); + found = true; + break; + } + } + up_write(&rdev_sem); + if (!found) + return; + + flush_workqueue(cm->rx_wq); + destroy_workqueue(cm->rx_wq); + + /* Release channels bound to this mport */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch->cmdev == cm) { + riocm_debug(RDEV, "%s drop ch_%d", + mport->name, ch->id); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + if (!list_empty(&list)) { + list_for_each_entry_safe(ch, _c, &list, ch_node) { + list_del(&ch->ch_node); + riocm_ch_close(ch); + } + } + + rio_release_inb_mbox(mport, cmbox); + rio_release_outb_mbox(mport, cmbox); + + /* Remove and free peer entries */ + if (!list_empty(&cm->peers)) + riocm_debug(RDEV, "ATTN: peer list not empty"); + list_for_each_entry_safe(peer, temp, &cm->peers, node) { + riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev)); + list_del(&peer->node); + kfree(peer); + } + + riocm_rx_free(cm); + kfree(cm); + riocm_debug(MPORT, "%s done", mport->name); +} + +static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, + void *unused) +{ + struct rio_channel *ch; + unsigned int i; + LIST_HEAD(list); + + riocm_debug(EXIT, "."); + + /* + * If there are any channels left in connected state send + * close notification to the connection partner. Build a list + * first because riocm_send_close() cannot be called while + * holding the spinlock (it allocates packets with GFP_KERNEL). + */ + spin_lock_bh(&idr_lock); + idr_for_each_entry(&ch_idr, ch, i) { + if (ch->state == RIO_CM_CONNECTED) { + riocm_debug(EXIT, "close ch %d", ch->id); + idr_remove(&ch_idr, ch->id); + list_add(&ch->ch_node, &list); + } + } + spin_unlock_bh(&idr_lock); + + list_for_each_entry(ch, &list, ch_node) + riocm_send_close(ch); + + return NOTIFY_DONE; +} + +/* + * riocm_interface handles addition/removal of remote RapidIO devices + */ +static struct subsys_interface riocm_interface = { + .name = "rio_cm", + .subsys = &rio_bus_type, + .add_dev = riocm_add_dev, + .remove_dev = riocm_remove_dev, +}; + +/* + * rio_mport_interface handles addition/removal of local mport devices + */ +static struct class_interface rio_mport_interface __refdata = { + .class = &rio_mport_class, + .add_dev = riocm_add_mport, + .remove_dev = riocm_remove_mport, +}; + +static struct notifier_block rio_cm_notifier = { + .notifier_call = rio_cm_shutdown, +}; + +static int __init riocm_init(void) +{ + int ret; + + /* Create device class needed by udev */ + dev_class = class_create(THIS_MODULE, DRV_NAME); + if (IS_ERR(dev_class)) { + riocm_error("Cannot create " DRV_NAME " class"); + return PTR_ERR(dev_class); + } + + ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME); + if (ret) { + class_destroy(dev_class); + return ret; + } + + dev_major = MAJOR(dev_number); + dev_minor_base = MINOR(dev_number); + riocm_debug(INIT, "Registered class with %d major", dev_major); + + /* + * Register as rapidio_port class interface to get notifications about + * mport additions and removals. + */ + ret = class_interface_register(&rio_mport_interface); + if (ret) { + riocm_error("class_interface_register error: %d", ret); + goto err_reg; + } + + /* + * Register as RapidIO bus interface to get notifications about + * addition/removal of remote RapidIO devices. 
+ */ + ret = subsys_interface_register(&riocm_interface); + if (ret) { + riocm_error("subsys_interface_register error: %d", ret); + goto err_cl; + } + + ret = register_reboot_notifier(&rio_cm_notifier); + if (ret) { + riocm_error("failed to register reboot notifier (err=%d)", ret); + goto err_sif; + } + + ret = riocm_cdev_add(dev_number); + if (ret) { + unregister_reboot_notifier(&rio_cm_notifier); + ret = -ENODEV; + goto err_sif; + } + + return 0; +err_sif: + subsys_interface_unregister(&riocm_interface); +err_cl: + class_interface_unregister(&rio_mport_interface); +err_reg: + unregister_chrdev_region(dev_number, 1); + class_destroy(dev_class); + return ret; +} + +static void __exit riocm_exit(void) +{ + riocm_debug(EXIT, "enter"); + unregister_reboot_notifier(&rio_cm_notifier); + subsys_interface_unregister(&riocm_interface); + class_interface_unregister(&rio_mport_interface); + idr_destroy(&ch_idr); + + device_unregister(riocm_cdev.dev); + cdev_del(&(riocm_cdev.cdev)); + + class_destroy(dev_class); + unregister_chrdev_region(dev_number, 1); +} + +late_initcall(riocm_init); +module_exit(riocm_exit); diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig index 345841562f95..92767fd3b541 100644 --- a/drivers/rapidio/switches/Kconfig +++ b/drivers/rapidio/switches/Kconfig @@ -22,3 +22,9 @@ config RAPIDIO_CPS_GEN2 default n ---help--- Includes support for ITD CPS Gen.2 serial RapidIO switches. + +config RAPIDIO_RXS_GEN3 + tristate "IDT RXS Gen.3 SRIO switch support" + default n + ---help--- + Includes support for IDT RXS Gen.3 serial RapidIO switches. diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index 051cc6b38188..6bdd54c4e733 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o +obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 9f7fe21580bb..e67b923b1ca6 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -436,10 +436,11 @@ static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); } + spin_unlock(&rdev->rswitch->lock); + /* Create device-specific sysfs attributes */ idtg2_sysfs(rdev, true); - spin_unlock(&rdev->rswitch->lock); return 0; } @@ -452,11 +453,9 @@ static void idtg2_remove(struct rio_dev *rdev) return; } rdev->rswitch->ops = NULL; - + spin_unlock(&rdev->rswitch->lock); /* Remove device-specific sysfs attributes */ idtg2_sysfs(rdev, false); - - spin_unlock(&rdev->rswitch->lock); } static struct rio_device_id idtg2_id_table[] = { diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c new file mode 100644 index 000000000000..c5923a547bed --- /dev/null +++ b/drivers/rapidio/switches/idt_gen3.c @@ -0,0 +1,382 @@ +/* + * IDT RXS Gen.3 Serial RapidIO switch family support + * + * Copyright 2016 Integrated Device Technology, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/stat.h> +#include <linux/module.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> + +#include <asm/page.h> +#include "../rio.h" + +#define RIO_EM_PW_STAT 0x40020 +#define RIO_PW_CTL 0x40204 +#define RIO_PW_CTL_PW_TMR 0xffffff00 +#define RIO_PW_ROUTE 0x40208 + +#define RIO_EM_DEV_INT_EN 0x40030 + +#define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100) +#define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000 + +#define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100) +#define RIO_PLM_SPx_PW_EN_OK2U 0x40000000 +#define RIO_PLM_SPx_PW_EN_LINIT 0x10000000 + +#define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) +#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ + (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) + +static int +idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + u32 rval; + u32 entry = route_port; + int err = 0; + + pr_debug("RIO: %s t=0x%x did_%x to p_%x\n", + __func__, table, route_destid, entry); + + if (route_destid > 0xFF) + return -EINVAL; + + if (route_port == RIO_INVALID_ROUTE) + entry = RIO_RT_ENTRY_DROP_PKT; + + if (table == RIO_GLOBAL_TABLE) { + /* Use broadcast register to update all per-port tables */ + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid), + entry); + return err; + } + + /* + * Verify that specified port/table number is valid + */ + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), + entry); + return err; +} + +static int +idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 rval; + int err; + + if (route_destid > 0xFF) + return -EINVAL; + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + /* + * This switch device does not have the dedicated global routing table. + * It is substituted by reading routing table of the ingress port of + * maintenance read requests. + */ + if (table == RIO_GLOBAL_TABLE) + table = RIO_GET_PORT_NUM(rval); + else if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), + &rval); + if (err) + return err; + + if (rval == RIO_RT_ENTRY_DROP_PKT) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)rval; + + return 0; +} + +static int +idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + u32 rval; + int err; + + if (table == RIO_GLOBAL_TABLE) { + for (i = 0; i <= 0xff; i++) { + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_BC_L2_Gn_ENTRYx_CSR(0, i), + RIO_RT_ENTRY_DROP_PKT); + if (err) + break; + } + + return err; + } + + err = rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &rval); + if (err) + return err; + + if (table >= RIO_GET_TOTAL_PORTS(rval)) + return -EINVAL; + + for (i = 0; i <= 0xff; i++) { + err = rio_mport_write_config_32(mport, destid, hopcount, + RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), + RIO_RT_ENTRY_DROP_PKT); + if (err) + break; + } + + return err; +} + +/* + * This routine performs device-specific initialization only. 
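+ * For RXS this means: mask the device interrupt, enable port-write + * reporting of hot-swap (OK2U/LINIT) events on initialized ports, and + * route port-writes to the port facing the host.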
+ * All standard EM configuration should be performed at upper level. + */ +static int +idtg3_em_init(struct rio_dev *rdev) +{ + int i, tmp; + u32 rval; + + pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + /* Disable assertion of interrupt signal */ + rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0); + + /* Disable port-write event notifications during initialization */ + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, + RIO_EM_PW_TX_CTRL_PW_DIS); + + /* Configure Port-Write notifications for hot-swap events */ + tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); + for (i = 0; i < tmp; i++) { + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i), + &rval); + if (rval & RIO_PORT_N_ERR_STS_PORT_UA) + continue; + + /* Clear events signaled before enabling notification */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0); + + /* Enable event notifications */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i), + RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK); + /* Enable port-write generation on events */ + rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i), + RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT); + + } + + /* Set Port-Write destination port */ + tmp = RIO_GET_PORT_NUM(rdev->swpinfo); + rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp); + + + /* Enable sending port-write event notifications */ + rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); + + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + return 0; +} + + +/* + * idtg3_em_handler - device-specific error handler + * + * If the link is down (PORT_UNINIT), nothing is done - this is treated + * as link partner removal from the port. + * + * If the link is up (PORT_OK), the situation is handled as a *new* device insertion. + * In this case ERR_STOP bits are cleared by issuing soft reset command to the + * reporting port. Inbound and outbound ackIDs are cleared by the reset as well. + * This way the port is synchronized with the freshly inserted device (assuming it + * was reset/powered-up on insertion). + * + * TODO: This is not sufficient in a situation when a link between two devices + * was down and up again (e.g. cable disconnect). For that situation full ackID + * realignment process has to be implemented. + */ +static int +idtg3_em_handler(struct rio_dev *rdev, u8 pnum) +{ + u32 err_status; + u32 rval; + + rio_read_config_32(rdev, + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), + &err_status); + + /* Do nothing for device/link removal */ + if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) + return 0; + + /* When link is OK we have a device insertion. + * Request a port soft reset to clear errors if they are present. + * Inbound and outbound ackIDs will be 0 after reset. 
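+ * The reset pulse below is held for 10 us and the port is then given + * 500 ms to recover before enumeration continues.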
+ */ + if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES)) { + rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval); + rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), + rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST); + udelay(10); + rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval); + msleep(500); + } + + return 0; +} + +static struct rio_switch_ops idtg3_switch_ops = { + .owner = THIS_MODULE, + .add_entry = idtg3_route_add_entry, + .get_entry = idtg3_route_get_entry, + .clr_table = idtg3_route_clr_table, + .em_init = idtg3_em_init, + .em_handle = idtg3_em_handler, +}; + +static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &idtg3_switch_ops; + + if (rdev->do_enum) { + /* Disable hierarchical routing support: Existing fabric + * enumeration/discovery process (see rio-scan.c) uses 8-bit + * flat destination ID routing only. + */ + rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0); + } + + spin_unlock(&rdev->rswitch->lock); + + return 0; +} + +static void idtg3_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops == &idtg3_switch_ops) + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +/* + * Gen3 switches repeat sending PW messages until a corresponding event flag + * is cleared. Use shutdown notification to disable generation of port-write + * messages if their destination node is shut down. + */ +static void idtg3_shutdown(struct rio_dev *rdev) +{ + int i; + u32 rval; + u16 destid; + + /* Currently the enumerator node also acts as the PW handler */ + if (!rdev->do_enum) + return; + + pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev)); + + rio_read_config_32(rdev, RIO_PW_ROUTE, &rval); + i = RIO_GET_PORT_NUM(rdev->swpinfo); + + /* Check port-write destination port */ + if (!((1 << i) & rval)) + return; + + /* Disable sending port-write event notifications if the PW destID + * matches that of the enumerator node + */ + rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval); + + if (rval & RIO_EM_PW_TGT_DEVID_DEV16) + destid = rval >> 16; + else + destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16); + + if (rdev->net->hport->host_deviceid == destid) { + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); + pr_debug("RIO: %s(%s) PW transmission disabled\n", + __func__, rio_name(rdev)); + } +} + +static struct rio_device_id idtg3_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtg3_driver = { + .name = "idt_gen3", + .id_table = idtg3_id_table, + .probe = idtg3_probe, + .remove = idtg3_remove, + .shutdown = idtg3_shutdown, +}; + +static int __init idtg3_init(void) +{ + return rio_register_driver(&idtg3_driver); +} + +static void __exit idtg3_exit(void) +{ + pr_debug("RIO: %s\n", __func__); + rio_unregister_driver(&idtg3_driver); + pr_debug("RIO: %s done\n", __func__); +} + +device_initcall(idtg3_init); +module_exit(idtg3_exit); + +MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi57x.c 
b/drivers/rapidio/switches/tsi57x.c index 42c8b014fe15..2700d15f7584 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ -175,12 +175,10 @@ tsi57x_em_init(struct rio_dev *rdev) /* Clear all pending interrupts */ rio_read_config_32(rdev, - rdev->phys_efptr + - RIO_PORT_N_ERR_STS_CSR(portnum), + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), ®val); rio_write_config_32(rdev, - rdev->phys_efptr + - RIO_PORT_N_ERR_STS_CSR(portnum), + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), regval & 0x07120214); rio_read_config_32(rdev, @@ -198,7 +196,7 @@ tsi57x_em_init(struct rio_dev *rdev) /* Skip next (odd) port if the current port is in x4 mode */ rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), ®val); if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) portnum++; @@ -221,23 +219,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) u32 regval; rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), &err_status); if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) && - (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | - RIO_PORT_N_ERR_STS_PW_INP_ES))) { + (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | + RIO_PORT_N_ERR_STS_INP_ES))) { /* Remove any queued packets by locking/unlocking port */ rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), ®val); if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), regval | RIO_PORT_N_CTL_LOCKOUT); udelay(50); rio_write_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + RIO_DEV_PORT_N_CTL_CSR(rdev, portnum), regval); } @@ -245,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) * valid bit */ rio_read_config_32(rdev, - rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum), ®val); /* Send a Packet-Not-Accepted/Link-Request-Input-Status control @@ -259,8 +257,8 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) while (checkcount--) { udelay(50); rio_read_config_32(rdev, - rdev->phys_efptr + - RIO_PORT_N_MNT_RSP_CSR(portnum), + RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, + portnum), ®val); if (regval & RIO_PORT_N_MNT_RSP_RVAL) goto exit_es; diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c index 84c1ee39ddae..2ca00045eb99 100644 --- a/drivers/regulator/ab8500-ext.c +++ b/drivers/regulator/ab8500-ext.c @@ -25,6 +25,456 @@ #include <linux/mfd/abx500/ab8500.h> #include <linux/regulator/ab8500.h> +static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { + /* Main display, u8500 R3 uib */ + REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"), + /* Main display, u8500 uib and ST uib */ + REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"), + /* Secondary display, ST uib */ + REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"), + /* SFH7741 proximity sensor */ + REGULATOR_SUPPLY("vcc", "gpio-keys.0"), + /* BH1780GLS ambient light sensor */ + REGULATOR_SUPPLY("vcc", "2-0029"), + /* lsm303dlh accelerometer */ + REGULATOR_SUPPLY("vdd", "2-0018"), + /* lsm303dlhc accelerometer */ + REGULATOR_SUPPLY("vdd", "2-0019"), + /* lsm303dlh magnetometer */ + REGULATOR_SUPPLY("vdd", "2-001e"), + /* Rohm BU21013 Touchscreen devices */ + REGULATOR_SUPPLY("avdd", "3-005c"), + REGULATOR_SUPPLY("avdd", "3-005d"), + /* Synaptics RMI4 Touchscreen device */ + 
REGULATOR_SUPPLY("vdd", "3-004b"), + /* L3G4200D Gyroscope device */ + REGULATOR_SUPPLY("vdd", "2-0068"), + /* Ambient light sensor device */ + REGULATOR_SUPPLY("vdd", "3-0029"), + /* Pressure sensor device */ + REGULATOR_SUPPLY("vdd", "2-005c"), + /* Cypress TrueTouch Touchscreen device */ + REGULATOR_SUPPLY("vcpin", "spi8.0"), + /* Camera device */ + REGULATOR_SUPPLY("vaux12v5", "mmio_camera"), +}; + +static struct regulator_consumer_supply ab8500_vaux2_consumers[] = { + /* On-board eMMC power */ + REGULATOR_SUPPLY("vmmc", "sdi4"), + /* AB8500 audio codec */ + REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"), + /* AB8500 accessory detect 1 */ + REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"), + /* AB8500 Tv-out device */ + REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"), + /* AV8100 HDMI device */ + REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"), +}; + +static struct regulator_consumer_supply ab8500_vaux3_consumers[] = { + REGULATOR_SUPPLY("v-SD-STM", "stm"), + /* External MMC slot power */ + REGULATOR_SUPPLY("vmmc", "sdi0"), +}; + +static struct regulator_consumer_supply ab8500_vtvout_consumers[] = { + /* TV-out DENC supply */ + REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"), + /* Internal general-purpose ADC */ + REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"), + /* ADC for charger */ + REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"), + /* AB8500 Tv-out device */ + REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"), +}; + +static struct regulator_consumer_supply ab8500_vaud_consumers[] = { + /* AB8500 audio-codec main supply */ + REGULATOR_SUPPLY("vaud", "ab8500-codec.0"), +}; + +static struct regulator_consumer_supply ab8500_vamic1_consumers[] = { + /* AB8500 audio-codec Mic1 supply */ + REGULATOR_SUPPLY("vamic1", "ab8500-codec.0"), +}; + +static struct regulator_consumer_supply ab8500_vamic2_consumers[] = { + /* AB8500 audio-codec Mic2 supply */ + REGULATOR_SUPPLY("vamic2", "ab8500-codec.0"), +}; + +static struct regulator_consumer_supply ab8500_vdmic_consumers[] = { + /* AB8500 audio-codec DMic supply */ + REGULATOR_SUPPLY("vdmic", "ab8500-codec.0"), +}; + +static struct regulator_consumer_supply ab8500_vintcore_consumers[] = { + /* SoC core supply, no device */ + REGULATOR_SUPPLY("v-intcore", NULL), + /* USB Transceiver */ + REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"), + /* Handled by abx500 clk driver */ + REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"), +}; + +static struct regulator_consumer_supply ab8500_vana_consumers[] = { + /* DB8500 DSI */ + REGULATOR_SUPPLY("vdddsi1v2", "mcde"), + REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"), + REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"), + REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"), + /* DB8500 CSI */ + REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"), +}; + +/* ab8500 regulator register initialization */ +static struct ab8500_regulator_reg_init ab8500_reg_init[] = { + /* + * VanaRequestCtrl = HP/LP depending on VxRequest + * VextSupply1RequestCtrl = HP/LP depending on VxRequest + */ + INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00), + /* + * VextSupply2RequestCtrl = HP/LP depending on VxRequest + * VextSupply3RequestCtrl = HP/LP depending on VxRequest + * Vaux1RequestCtrl = HP/LP depending on VxRequest + * Vaux2RequestCtrl = HP/LP depending on VxRequest + */ + INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00), + /* + * Vaux3RequestCtrl = HP/LP depending on VxRequest + * SwHPReq = Control through SWValid disabled + */ + 
INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00), + /* + * VanaSysClkReq1HPValid = disabled + * Vaux1SysClkReq1HPValid = disabled + * Vaux2SysClkReq1HPValid = disabled + * Vaux3SysClkReq1HPValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00), + /* + * VextSupply1SysClkReq1HPValid = disabled + * VextSupply2SysClkReq1HPValid = disabled + * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40), + /* + * VanaHwHPReq1Valid = disabled + * Vaux1HwHPreq1Valid = disabled + * Vaux2HwHPReq1Valid = disabled + * Vaux3HwHPReqValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00), + /* + * VextSupply1HwHPReq1Valid = disabled + * VextSupply2HwHPReq1Valid = disabled + * VextSupply3HwHPReq1Valid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00), + /* + * VanaHwHPReq2Valid = disabled + * Vaux1HwHPReq2Valid = disabled + * Vaux2HwHPReq2Valid = disabled + * Vaux3HwHPReq2Valid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00), + /* + * VextSupply1HwHPReq2Valid = disabled + * VextSupply2HwHPReq2Valid = disabled + * VextSupply3HwHPReq2Valid = HWReq2 controlled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04), + /* + * VanaSwHPReqValid = disabled + * Vaux1SwHPReqValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00), + /* + * Vaux2SwHPReqValid = disabled + * Vaux3SwHPReqValid = disabled + * VextSupply1SwHPReqValid = disabled + * VextSupply2SwHPReqValid = disabled + * VextSupply3SwHPReqValid = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00), + /* + * SysClkReq2Valid1 = SysClkReq2 controlled + * SysClkReq3Valid1 = disabled + * SysClkReq4Valid1 = SysClkReq4 controlled + * SysClkReq5Valid1 = disabled + * SysClkReq6Valid1 = SysClkReq6 controlled + * SysClkReq7Valid1 = disabled + * SysClkReq8Valid1 = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a), + /* + * SysClkReq2Valid2 = disabled + * SysClkReq3Valid2 = disabled + * SysClkReq4Valid2 = disabled + * SysClkReq5Valid2 = disabled + * SysClkReq6Valid2 = SysClkReq6 controlled + * SysClkReq7Valid2 = disabled + * SysClkReq8Valid2 = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20), + /* + * VTVoutEna = disabled + * Vintcore12Ena = disabled + * Vintcore12Sel = 1.25 V + * Vintcore12LP = inactive (HP) + * VTVoutLP = inactive (HP) + */ + INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10), + /* + * VaudioEna = disabled + * VdmicEna = disabled + * Vamic1Ena = disabled + * Vamic2Ena = disabled + */ + INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00), + /* + * Vamic1_dzout = high-Z when Vamic1 is disabled + * Vamic2_dzout = high-Z when Vamic2 is disabled + */ + INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00), + /* + * VPll = Hw controlled (NOTE! 
PRCMU bits) + * VanaRegu = force off + */ + INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02), + /* + * VrefDDREna = disabled + * VrefDDRSleepMode = inactive (no pulldown) + */ + INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00), + /* + * VextSupply1Regu = force LP + * VextSupply2Regu = force OFF + * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP) + * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 + * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 + */ + INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13), + /* + * Vaux1Regu = force HP + * Vaux2Regu = force off + */ + INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01), + /* + * Vaux3Regu = force off + */ + INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00), + /* + * Vaux1Sel = 2.8 V + */ + INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C), + /* + * Vaux2Sel = 2.9 V + */ + INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d), + /* + * Vaux3Sel = 2.91 V + */ + INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07), + /* + * VextSupply12LP = disabled (no LP) + */ + INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00), + /* + * Vaux1Disch = short discharge time + * Vaux2Disch = short discharge time + * Vaux3Disch = short discharge time + * Vintcore12Disch = short discharge time + * VTVoutDisch = short discharge time + * VaudioDisch = short discharge time + */ + INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00), + /* + * VanaDisch = short discharge time + * VdmicPullDownEna = pulldown disabled when Vdmic is disabled + * VdmicDisch = short discharge time + */ + INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00), +}; + +/* AB8500 regulators */ +static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { + /* supplies to the display/camera */ + [AB8500_LDO_AUX1] = { + .supply_regulator = "ab8500-ext-supply3", + .constraints = { + .name = "V-DISPLAY", + .min_uV = 2800000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS, + .boot_on = 1, /* display is on at boot */ + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), + .consumer_supplies = ab8500_vaux1_consumers, + }, + /* supplies to the on-board eMMC */ + [AB8500_LDO_AUX2] = { + .supply_regulator = "ab8500-ext-supply3", + .constraints = { + .name = "V-eMMC1", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers), + .consumer_supplies = ab8500_vaux2_consumers, + }, + /* supply for VAUX3, supplies to SDcard slots */ + [AB8500_LDO_AUX3] = { + .supply_regulator = "ab8500-ext-supply3", + .constraints = { + .name = "V-MMC-SD", + .min_uV = 1100000, + .max_uV = 3300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers), + .consumer_supplies = ab8500_vaux3_consumers, + }, + /* supply for tvout, gpadc, TVOUT LDO */ + [AB8500_LDO_TVOUT] = { + .constraints = { + .name = "V-TVOUT", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers), + .consumer_supplies = ab8500_vtvout_consumers, + }, + /* supply for ab8500-vaudio, VAUDIO LDO */ + [AB8500_LDO_AUDIO] = { + .constraints = { + .name = "V-AUD", + 
.valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers), + .consumer_supplies = ab8500_vaud_consumers, + }, + /* supply for v-anamic1 VAMic1-LDO */ + [AB8500_LDO_ANAMIC1] = { + .constraints = { + .name = "V-AMIC1", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers), + .consumer_supplies = ab8500_vamic1_consumers, + }, + /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */ + [AB8500_LDO_ANAMIC2] = { + .constraints = { + .name = "V-AMIC2", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers), + .consumer_supplies = ab8500_vamic2_consumers, + }, + /* supply for v-dmic, VDMIC LDO */ + [AB8500_LDO_DMIC] = { + .constraints = { + .name = "V-DMIC", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers), + .consumer_supplies = ab8500_vdmic_consumers, + }, + /* supply for v-intcore12, VINTCORE12 LDO */ + [AB8500_LDO_INTCORE] = { + .constraints = { + .name = "V-INTCORE", + .min_uV = 1250000, + .max_uV = 1350000, + .input_uV = 1800000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_MODE | + REGULATOR_CHANGE_DRMS, + .valid_modes_mask = REGULATOR_MODE_NORMAL | + REGULATOR_MODE_IDLE, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers), + .consumer_supplies = ab8500_vintcore_consumers, + }, + /* supply for U8500 CSI-DSI, VANA LDO */ + [AB8500_LDO_ANA] = { + .constraints = { + .name = "V-CSI-DSI", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers), + .consumer_supplies = ab8500_vana_consumers, + }, +}; + +/* supply for VextSupply3 */ +static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = { + /* SIM supply for 3 V SIM cards */ + REGULATOR_SUPPLY("vinvsim", "sim-detect.0"), +}; + +/* + * AB8500 external regulators + */ +static struct regulator_init_data ab8500_ext_regulators[] = { + /* fixed Vbat supplies VSMPS1_EXT_1V8 */ + [AB8500_EXT_SUPPLY1] = { + .constraints = { + .name = "ab8500-ext-supply1", + .min_uV = 1800000, + .max_uV = 1800000, + .initial_mode = REGULATOR_MODE_IDLE, + .boot_on = 1, + .always_on = 1, + }, + }, + /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */ + [AB8500_EXT_SUPPLY2] = { + .constraints = { + .name = "ab8500-ext-supply2", + .min_uV = 1360000, + .max_uV = 1360000, + }, + }, + /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */ + [AB8500_EXT_SUPPLY3] = { + .constraints = { + .name = "ab8500-ext-supply3", + .min_uV = 3400000, + .max_uV = 3400000, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + .boot_on = 1, + }, + .num_consumer_supplies = + ARRAY_SIZE(ab8500_ext_supply3_consumers), + .consumer_supplies = ab8500_ext_supply3_consumers, + }, +}; + +static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = { + .reg_init = ab8500_reg_init, + .num_reg_init = ARRAY_SIZE(ab8500_reg_init), + .regulator = ab8500_regulators, + .num_regulator = ARRAY_SIZE(ab8500_regulators), + .ext_regulator = ab8500_ext_regulators, + .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators), +}; + /** * struct ab8500_ext_regulator_info - ab8500 regulator information * @dev: device pointer @@ -344,8 +794,7 @@ static struct of_regulator_match ab8500_ext_regulator_match[] = { static int ab8500_ext_regulator_probe(struct platform_device *pdev) { struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 
- struct ab8500_platform_data *ppdata; - struct ab8500_regulator_platform_data *pdata; + struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data; struct device_node *np = pdev->dev.of_node; struct regulator_config config = { }; int i, err; @@ -366,18 +815,6 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev) return -EINVAL; } - ppdata = dev_get_platdata(ab8500->dev); - if (!ppdata) { - dev_err(&pdev->dev, "null parent pdata\n"); - return -EINVAL; - } - - pdata = ppdata->regulator; - if (!pdata) { - dev_err(&pdev->dev, "null pdata\n"); - return -EINVAL; - } - /* make sure the platform data has the correct size */ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) { dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 72e97d7a5209..1a8bf76a925f 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -77,6 +77,20 @@ config DA8XX_REMOTEPROC It's safe to say n here if you're not interested in multimedia offloading. +config QCOM_MDT_LOADER + tristate + +config QCOM_Q6V5_PIL + tristate "Qualcomm Hexagon V5 Peripheral Image Loader" + depends on OF && ARCH_QCOM + depends on QCOM_SMEM + select MFD_SYSCON + select QCOM_MDT_LOADER + select REMOTEPROC + help + Say y here to support the Qualcomm Peripheral Image Loader for the + Hexagon V5-based remote processors. + config ST_REMOTEPROC tristate "ST remoteproc support" depends on ARCH_STI diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 279cb2edc880..92d3758bd15c 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -11,4 +11,6 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o +obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o +obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c new file mode 100644 index 000000000000..114e8e4cef67 --- /dev/null +++ b/drivers/remoteproc/qcom_mdt_loader.c @@ -0,0 +1,179 @@ +/* + * Qualcomm Peripheral Image Loader + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2015 Sony Mobile Communications Inc + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/elf.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/remoteproc.h> +#include <linux/slab.h> + +#include "remoteproc_internal.h" +#include "qcom_mdt_loader.h" + +/** + * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc + * @rproc: remoteproc handle + * @fw: firmware header + * @tablesz: outgoing size of the table + * + * Returns a dummy table.
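+ * The MDT image format carries no remoteproc resource table, so a static, + * empty table (version 1, zero entries) is handed back to satisfy the + * remoteproc core.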
+ */ +struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz) +{ + static struct resource_table table = { .ver = 1, }; + + *tablesz = sizeof(table); + return &table; +} +EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table); + +/** + * qcom_mdt_parse() - extract useful parameters from the mdt header + * @fw: firmware handle + * @fw_addr: optional reference for base of the firmware's memory region + * @fw_size: optional reference for size of the firmware's memory region + * @fw_relocate: optional reference for flagging if the firmware is relocatable + * + * Returns 0 on success, negative errno otherwise. + */ +int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, + size_t *fw_size, bool *fw_relocate) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; + phys_addr_t max_addr = 0; + bool relocate = false; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (phdr->p_type != PT_LOAD) + continue; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + continue; + + if (!phdr->p_memsz) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + if (fw_addr) + *fw_addr = min_addr; + if (fw_size) + *fw_size = max_addr - min_addr; + if (fw_relocate) + *fw_relocate = relocate; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_mdt_parse); + +/** + * qcom_mdt_load() - load the firmware whose header is defined in fw + * @rproc: rproc handle + * @fw: firmware object for the header + * @firmware: filename of the firmware, for building .bXX names + * + * Returns 0 on success, negative errno otherwise.
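+ * + * For example, a @firmware of "modem.mdt" causes the loadable segments to + * be requested as "modem.b00", "modem.b01", ..., the two-digit suffix being + * the index of the corresponding ELF program header.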
+ */ +int qcom_mdt_load(struct rproc *rproc, + const struct firmware *fw, + const char *firmware) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + size_t fw_name_len; + char *fw_name; + void *ptr; + int ret = 0; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + fw_name_len = strlen(firmware); + if (fw_name_len <= 4) + return -EINVAL; + + fw_name = kstrdup(firmware, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (phdr->p_type != PT_LOAD) + continue; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + continue; + + if (!phdr->p_memsz) + continue; + + ptr = rproc_da_to_va(rproc, phdr->p_paddr, phdr->p_memsz); + if (!ptr) { + dev_err(&rproc->dev, "segment outside memory range\n"); + ret = -EINVAL; + break; + } + + if (phdr->p_filesz) { + sprintf(fw_name + fw_name_len - 3, "b%02d", i); + ret = request_firmware(&fw, fw_name, &rproc->dev); + if (ret) { + dev_err(&rproc->dev, "failed to load %s\n", + fw_name); + break; + } + + memcpy(ptr, fw->data, fw->size); + + release_firmware(fw); + } + + if (phdr->p_memsz > phdr->p_filesz) + memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); + } + + kfree(fw_name); + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_mdt_load); + +MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_mdt_loader.h b/drivers/remoteproc/qcom_mdt_loader.h new file mode 100644 index 000000000000..c5d7122755b6 --- /dev/null +++ b/drivers/remoteproc/qcom_mdt_loader.h @@ -0,0 +1,13 @@ +#ifndef __QCOM_MDT_LOADER_H__ +#define __QCOM_MDT_LOADER_H__ + +#define QCOM_MDT_TYPE_MASK (7 << 24) +#define QCOM_MDT_TYPE_HASH (2 << 24) +#define QCOM_MDT_RELOCATABLE BIT(27) + +struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, const struct firmware *fw, int *tablesz); +int qcom_mdt_load(struct rproc *rproc, const struct firmware *fw, const char *fw_name); + +int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, size_t *fw_size, bool *fw_relocate); + +#endif diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c new file mode 100644 index 000000000000..24791886219a --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -0,0 +1,908 @@ +/* + * Qualcomm Peripheral Image Loader + * + * Copyright (C) 2016 Linaro Ltd. + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> + +#include "remoteproc_internal.h" +#include "qcom_mdt_loader.h" + +#include <linux/qcom_scm.h> + +#define MBA_FIRMWARE_NAME "mba.b00" +#define MPSS_FIRMWARE_NAME "modem.mdt" + +#define MPSS_CRASH_REASON_SMEM 421 + +/* RMB Status Register Values */ +#define RMB_PBL_SUCCESS 0x1 + +#define RMB_MBA_XPU_UNLOCKED 0x1 +#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 +#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 +#define RMB_MBA_AUTH_COMPLETE 0x4 + +/* PBL/MBA interface registers */ +#define RMB_MBA_IMAGE_REG 0x00 +#define RMB_PBL_STATUS_REG 0x04 +#define RMB_MBA_COMMAND_REG 0x08 +#define RMB_MBA_STATUS_REG 0x0C +#define RMB_PMI_META_DATA_REG 0x10 +#define RMB_PMI_CODE_START_REG 0x14 +#define RMB_PMI_CODE_LENGTH_REG 0x18 + +#define RMB_CMD_META_DATA_READY 0x1 +#define RMB_CMD_LOAD_READY 0x2 + +/* QDSP6SS Register Offsets */ +#define QDSP6SS_RESET_REG 0x014 +#define QDSP6SS_GFMUX_CTL_REG 0x020 +#define QDSP6SS_PWR_CTL_REG 0x030 + +/* AXI Halt Register Offsets */ +#define AXI_HALTREQ_REG 0x0 +#define AXI_HALTACK_REG 0x4 +#define AXI_IDLE_REG 0x8 + +#define HALT_ACK_TIMEOUT_MS 100 + +/* QDSP6SS_RESET */ +#define Q6SS_STOP_CORE BIT(0) +#define Q6SS_CORE_ARES BIT(1) +#define Q6SS_BUS_ARES_ENABLE BIT(2) + +/* QDSP6SS_GFMUX_CTL */ +#define Q6SS_CLK_ENABLE BIT(1) + +/* QDSP6SS_PWR_CTL */ +#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) +#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) +#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) +#define Q6SS_L2TAG_SLP_NRET_N BIT(16) +#define Q6SS_ETB_SLP_NRET_N BIT(17) +#define Q6SS_L2DATA_STBY_N BIT(18) +#define Q6SS_SLP_RET_N BIT(19) +#define Q6SS_CLAMP_IO BIT(20) +#define QDSS_BHS_ON BIT(21) +#define QDSS_LDO_BYP BIT(22) + +struct q6v5 { + struct device *dev; + struct rproc *rproc; + + void __iomem *reg_base; + void __iomem *rmb_base; + + struct regmap *halt_map; + u32 halt_q6; + u32 halt_modem; + u32 halt_nc; + + struct reset_control *mss_restart; + + struct qcom_smem_state *state; + unsigned stop_bit; + + struct regulator_bulk_data supply[4]; + + struct clk *ahb_clk; + struct clk *axi_clk; + struct clk *rom_clk; + + struct completion start_done; + struct completion stop_done; + bool running; + + phys_addr_t mba_phys; + void *mba_region; + size_t mba_size; + + phys_addr_t mpss_phys; + phys_addr_t mpss_reloc; + void *mpss_region; + size_t mpss_size; +}; + +enum { + Q6V5_SUPPLY_CX, + Q6V5_SUPPLY_MX, + Q6V5_SUPPLY_MSS, + Q6V5_SUPPLY_PLL, +}; + +static int q6v5_regulator_init(struct q6v5 *qproc) +{ + int ret; + + qproc->supply[Q6V5_SUPPLY_CX].supply = "cx"; + qproc->supply[Q6V5_SUPPLY_MX].supply = "mx"; + qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss"; + qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll"; + + ret = devm_regulator_bulk_get(qproc->dev, + ARRAY_SIZE(qproc->supply), qproc->supply); + if (ret < 0) { + dev_err(qproc->dev, "failed to get supplies\n"); + return ret; + } + + regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000); + regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000); + regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000); + + return 0; +} + +static int 
q6v5_regulator_enable(struct q6v5 *qproc) +{ + struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; + struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; + int ret; + + /* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */ + + ret = regulator_set_voltage(mx, 1050000, INT_MAX); + if (ret) + return ret; + + regulator_set_voltage(mss, 1000000, 1150000); + + return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply); +} + +static void q6v5_regulator_disable(struct q6v5 *qproc) +{ + struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; + struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; + + /* TODO: Q6V5_SUPPLY_CX corner votes should be released */ + + regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply); + regulator_set_voltage(mx, 0, INT_MAX); + regulator_set_voltage(mss, 0, 1150000); +} + +static int q6v5_load(struct rproc *rproc, const struct firmware *fw) +{ + struct q6v5 *qproc = rproc->priv; + + memcpy(qproc->mba_region, fw->data, fw->size); + + return 0; +} + +static const struct rproc_fw_ops q6v5_fw_ops = { + .find_rsc_table = qcom_mdt_find_rsc_table, + .load = q6v5_load, +}; + +static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) +{ + unsigned long timeout; + s32 val; + + timeout = jiffies + msecs_to_jiffies(ms); + for (;;) { + val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); + if (val) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + msleep(1); + } + + return val; +} + +static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) +{ + + unsigned long timeout; + s32 val; + + timeout = jiffies + msecs_to_jiffies(ms); + for (;;) { + val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); + if (val < 0) + break; + + if (!status && val) + break; + else if (status && val == status) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + msleep(1); + } + + return val; +} + +static int q6v5proc_reset(struct q6v5 *qproc) +{ + u32 val; + int ret; + + /* Assert resets, stop core */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE); + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* Enable power block headswitch, and wait for it to stabilize */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= QDSS_BHS_ON | QDSS_LDO_BYP; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + udelay(1); + + /* + * Turn on memories. L2 banks should be done individually + * to minimize inrush current. 
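+ * The sequence below first releases the retention and L2 tag/ETB controls, + * then powers the three L2 data banks with one register write each, and + * only removes the IO clamp once all memories are up.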
+ */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | + Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_2; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_1; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_0; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Remove IO clamp */ + val &= ~Q6SS_CLAMP_IO; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Bring core out of reset */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val &= ~Q6SS_CORE_ARES; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* Turn on core clock */ + val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); + val |= Q6SS_CLK_ENABLE; + writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); + + /* Start core execution */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val &= ~Q6SS_STOP_CORE; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* Wait for PBL status */ + ret = q6v5_rmb_pbl_wait(qproc, 1000); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "PBL boot timed out\n"); + } else if (ret != RMB_PBL_SUCCESS) { + dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); + ret = -EINVAL; + } else { + ret = 0; + } + + return ret; +} + +static void q6v5proc_halt_axi_port(struct q6v5 *qproc, + struct regmap *halt_map, + u32 offset) +{ + unsigned long timeout; + unsigned int val; + int ret; + + /* Check if we're already idle */ + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (!ret && val) + return; + + /* Assert halt request */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); + + /* Wait for halt */ + timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS); + for (;;) { + ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val); + if (ret || val || time_after(jiffies, timeout)) + break; + + msleep(1); + } + + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (ret || !val) + dev_err(qproc->dev, "port failed halt\n"); + + /* Clear halt request (port will remain halted until reset) */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); +} + +static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) +{ + DEFINE_DMA_ATTRS(attrs); + dma_addr_t phys; + void *ptr; + int ret; + + dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); + ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs); + if (!ptr) { + dev_err(qproc->dev, "failed to allocate mdt buffer\n"); + return -ENOMEM; + } + + memcpy(ptr, fw->data, fw->size); + + writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); + writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); + + ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); + if (ret == -ETIMEDOUT) + dev_err(qproc->dev, "MPSS header authentication timed out\n"); + else if (ret < 0) + dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); + + dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs); + + return ret < 0 ? 
ret : 0; +} + +static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + struct elf32_hdr *ehdr; + phys_addr_t boot_addr; + phys_addr_t fw_addr; + bool relocate; + size_t size; + int ret; + int i; + + ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate); + if (ret) { + dev_err(qproc->dev, "failed to parse mdt header\n"); + return ret; + } + + if (relocate) + boot_addr = qproc->mpss_phys; + else + boot_addr = fw_addr; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (phdr->p_type != PT_LOAD) + continue; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + continue; + + if (!phdr->p_memsz) + continue; + + size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + if (!size) { + writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); + } + + size += phdr->p_memsz; + writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + } + + ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); + if (ret == -ETIMEDOUT) + dev_err(qproc->dev, "MPSS authentication timed out\n"); + else if (ret < 0) + dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); + + return ret < 0 ? ret : 0; +} + +static int q6v5_mpss_load(struct q6v5 *qproc) +{ + const struct firmware *fw; + phys_addr_t fw_addr; + bool relocate; + int ret; + + ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev); + if (ret < 0) { + dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n"); + return ret; + } + + ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate); + if (ret) { + dev_err(qproc->dev, "failed to parse mdt header\n"); + goto release_firmware; + } + + if (relocate) + qproc->mpss_reloc = fw_addr; + + /* Initialize the RMB validator */ + writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + + ret = q6v5_mpss_init_image(qproc, fw); + if (ret) + goto release_firmware; + + ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME); + if (ret) + goto release_firmware; + + ret = q6v5_mpss_validate(qproc, fw); + +release_firmware: + release_firmware(fw); + + return ret < 0 ?
ret : 0; +} + +static int q6v5_start(struct rproc *rproc) +{ + struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + int ret; + + ret = q6v5_regulator_enable(qproc); + if (ret) { + dev_err(qproc->dev, "failed to enable supplies\n"); + return ret; + } + + ret = reset_control_deassert(qproc->mss_restart); + if (ret) { + dev_err(qproc->dev, "failed to deassert mss restart\n"); + goto disable_vdd; + } + + ret = clk_prepare_enable(qproc->ahb_clk); + if (ret) + goto assert_reset; + + ret = clk_prepare_enable(qproc->axi_clk); + if (ret) + goto disable_ahb_clk; + + ret = clk_prepare_enable(qproc->rom_clk); + if (ret) + goto disable_axi_clk; + + writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); + + ret = q6v5proc_reset(qproc); + if (ret) + goto halt_axi_ports; + + ret = q6v5_rmb_mba_wait(qproc, 0, 5000); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "MBA boot timed out\n"); + goto halt_axi_ports; + } else if (ret != RMB_MBA_XPU_UNLOCKED && + ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { + dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); + ret = -EINVAL; + goto halt_axi_ports; + } + + dev_info(qproc->dev, "MBA booted, loading mpss\n"); + + ret = q6v5_mpss_load(qproc); + if (ret) + goto halt_axi_ports; + + ret = wait_for_completion_timeout(&qproc->start_done, + msecs_to_jiffies(5000)); + if (ret == 0) { + dev_err(qproc->dev, "start timed out\n"); + ret = -ETIMEDOUT; + goto halt_axi_ports; + } + + qproc->running = true; + + /* TODO: All done, release the handover resources */ + + return 0; + +halt_axi_ports: + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); + + clk_disable_unprepare(qproc->rom_clk); +disable_axi_clk: + clk_disable_unprepare(qproc->axi_clk); +disable_ahb_clk: + clk_disable_unprepare(qproc->ahb_clk); +assert_reset: + reset_control_assert(qproc->mss_restart); +disable_vdd: + q6v5_regulator_disable(qproc); + + return ret; +} + +static int q6v5_stop(struct rproc *rproc) +{ + struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + int ret; + + qproc->running = false; + + qcom_smem_state_update_bits(qproc->state, + BIT(qproc->stop_bit), BIT(qproc->stop_bit)); + + ret = wait_for_completion_timeout(&qproc->stop_done, + msecs_to_jiffies(5000)); + if (ret == 0) + dev_err(qproc->dev, "timed out on wait\n"); + + qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0); + + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); + + reset_control_assert(qproc->mss_restart); + clk_disable_unprepare(qproc->rom_clk); + clk_disable_unprepare(qproc->axi_clk); + clk_disable_unprepare(qproc->ahb_clk); + q6v5_regulator_disable(qproc); + + return 0; +} + +static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct q6v5 *qproc = rproc->priv; + int offset; + + offset = da - qproc->mpss_reloc; + if (offset < 0 || offset + len > qproc->mpss_size) + return NULL; + + return qproc->mpss_region + offset; +} + +static const struct rproc_ops q6v5_ops = { + .start = q6v5_start, + .stop = q6v5_stop, + .da_to_va = q6v5_da_to_va, +}; + +static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev) +{ + struct q6v5 *qproc = dev; + size_t len; + char *msg; + + /* Sometimes the stop triggers a watchdog rather than a stop-ack */ + if (!qproc->running) { + complete(&qproc->stop_done); + return 
IRQ_HANDLED; + } + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(qproc->dev, "watchdog received: %s\n", msg); + else + dev_err(qproc->dev, "watchdog without message\n"); + + rproc_report_crash(qproc->rproc, RPROC_WATCHDOG); + + if (!IS_ERR(msg)) + msg[0] = '\0'; + + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev) +{ + struct q6v5 *qproc = dev; + size_t len; + char *msg; + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(qproc->dev, "fatal error received: %s\n", msg); + else + dev_err(qproc->dev, "fatal error without message\n"); + + rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR); + + if (!IS_ERR(msg)) + msg[0] = '\0'; + + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_handover_interrupt(int irq, void *dev) +{ + struct q6v5 *qproc = dev; + + complete(&qproc->start_done); + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev) +{ + struct q6v5 *qproc = dev; + + complete(&qproc->stop_done); + return IRQ_HANDLED; +} + +static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) +{ + struct of_phandle_args args; + struct resource *res; + int ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); + qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qproc->reg_base)) + return PTR_ERR(qproc->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); + qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qproc->rmb_base)) + return PTR_ERR(qproc->rmb_base); + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,halt-regs", 3, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); + return -EINVAL; + } + + qproc->halt_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->halt_map)) + return PTR_ERR(qproc->halt_map); + + qproc->halt_q6 = args.args[0]; + qproc->halt_modem = args.args[1]; + qproc->halt_nc = args.args[2]; + + return 0; +} + +static int q6v5_init_clocks(struct q6v5 *qproc) +{ + qproc->ahb_clk = devm_clk_get(qproc->dev, "iface"); + if (IS_ERR(qproc->ahb_clk)) { + dev_err(qproc->dev, "failed to get iface clock\n"); + return PTR_ERR(qproc->ahb_clk); + } + + qproc->axi_clk = devm_clk_get(qproc->dev, "bus"); + if (IS_ERR(qproc->axi_clk)) { + dev_err(qproc->dev, "failed to get bus clock\n"); + return PTR_ERR(qproc->axi_clk); + } + + qproc->rom_clk = devm_clk_get(qproc->dev, "mem"); + if (IS_ERR(qproc->rom_clk)) { + dev_err(qproc->dev, "failed to get mem clock\n"); + return PTR_ERR(qproc->rom_clk); + } + + return 0; +} + +static int q6v5_init_reset(struct q6v5 *qproc) +{ + qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL); + if (IS_ERR(qproc->mss_restart)) { + dev_err(qproc->dev, "failed to acquire mss restart\n"); + return PTR_ERR(qproc->mss_restart); + } + + return 0; +} + +static int q6v5_request_irq(struct q6v5 *qproc, + struct platform_device *pdev, + const char *name, + irq_handler_t thread_fn) +{ + int ret; + + ret = platform_get_irq_byname(pdev, name); + if (ret < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", name); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, ret, + NULL, thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5", qproc); + if (ret) + dev_err(&pdev->dev, "request %s IRQ failed\n", name); + + return ret; +} + +static int 
q6v5_alloc_memory_region(struct q6v5 *qproc) +{ + struct device_node *child; + struct device_node *node; + struct resource r; + int ret; + + child = of_get_child_by_name(qproc->dev->of_node, "mba"); + node = of_parse_phandle(child, "memory-region", 0); + ret = of_address_to_resource(node, 0, &r); + if (ret) { + dev_err(qproc->dev, "unable to resolve mba region\n"); + return ret; + } + + qproc->mba_phys = r.start; + qproc->mba_size = resource_size(&r); + qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); + if (!qproc->mba_region) { + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &r.start, qproc->mba_size); + return -EBUSY; + } + + child = of_get_child_by_name(qproc->dev->of_node, "mpss"); + node = of_parse_phandle(child, "memory-region", 0); + ret = of_address_to_resource(node, 0, &r); + if (ret) { + dev_err(qproc->dev, "unable to resolve mpss region\n"); + return ret; + } + + qproc->mpss_phys = qproc->mpss_reloc = r.start; + qproc->mpss_size = resource_size(&r); + qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); + if (!qproc->mpss_region) { + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &r.start, qproc->mpss_size); + return -EBUSY; + } + + return 0; +} + +static int q6v5_probe(struct platform_device *pdev) +{ + struct q6v5 *qproc; + struct rproc *rproc; + int ret; + + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, + MBA_FIRMWARE_NAME, sizeof(*qproc)); + if (!rproc) { + dev_err(&pdev->dev, "failed to allocate rproc\n"); + return -ENOMEM; + } + + rproc->fw_ops = &q6v5_fw_ops; + + qproc = (struct q6v5 *)rproc->priv; + qproc->dev = &pdev->dev; + qproc->rproc = rproc; + platform_set_drvdata(pdev, qproc); + + init_completion(&qproc->start_done); + init_completion(&qproc->stop_done); + + ret = q6v5_init_mem(qproc, pdev); + if (ret) + goto free_rproc; + + ret = q6v5_alloc_memory_region(qproc); + if (ret) + goto free_rproc; + + ret = q6v5_init_clocks(qproc); + if (ret) + goto free_rproc; + + ret = q6v5_regulator_init(qproc); + if (ret) + goto free_rproc; + + ret = q6v5_init_reset(qproc); + if (ret) + goto free_rproc; + + ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt); + if (ret < 0) + goto free_rproc; + + ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt); + if (ret < 0) + goto free_rproc; + + ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt); + if (ret < 0) + goto free_rproc; + + ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt); + if (ret < 0) + goto free_rproc; + + qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit); + if (IS_ERR(qproc->state)) { + ret = PTR_ERR(qproc->state); + goto free_rproc; + } + + ret = rproc_add(rproc); + if (ret) + goto free_rproc; + + return 0; + +free_rproc: + rproc_put(rproc); + + return ret; +} + +static int q6v5_remove(struct platform_device *pdev) +{ + struct q6v5 *qproc = platform_get_drvdata(pdev); + + rproc_del(qproc->rproc); + rproc_put(qproc->rproc); + + return 0; +} + +static const struct of_device_id q6v5_of_match[] = { + { .compatible = "qcom,q6v5-pil", }, + { }, +}; + +static struct platform_driver q6v5_driver = { + .probe = q6v5_probe, + .remove = q6v5_remove, + .driver = { + .name = "qcom-q6v5-pil", + .of_match_table = q6v5_of_match, + }, +}; +module_platform_driver(q6v5_driver); + +MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index
db3958b3f094..fe0539ed9cb5 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1264,11 +1264,6 @@ int rproc_add(struct rproc *rproc) if (ret < 0) return ret; - /* expose to rproc_get_by_phandle users */ - mutex_lock(&rproc_list_mutex); - list_add(&rproc->node, &rproc_list); - mutex_unlock(&rproc_list_mutex); - dev_info(dev, "%s is available\n", rproc->name); dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); @@ -1276,8 +1271,16 @@ int rproc_add(struct rproc *rproc) /* create debugfs entries */ rproc_create_debug_dir(rproc); + ret = rproc_add_virtio_devices(rproc); + if (ret < 0) + return ret; - return rproc_add_virtio_devices(rproc); + /* expose to rproc_get_by_phandle users */ + mutex_lock(&rproc_list_mutex); + list_add(&rproc->node, &rproc_list); + mutex_unlock(&rproc_list_mutex); + + return 0; } EXPORT_SYMBOL(rproc_add); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 0b2733db0e9e..4be1b8c21f6f 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -12,8 +12,22 @@ menuconfig RESET_CONTROLLER If unsure, say no. +if RESET_CONTROLLER + config RESET_OXNAS bool +config TI_SYSCON_RESET + tristate "TI SYSCON Reset Driver" + depends on HAS_IOMEM + select MFD_SYSCON + help + This enables the reset driver support for TI devices with + memory-mapped reset registers as part of a syscon device node. If + you wish to use the reset framework for such memory-mapped devices, + say Y here. Otherwise, say N. + source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" + +endif diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index f173fc3847b4..5d65a93d3c43 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -3,9 +3,11 @@ obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o +obj-$(CONFIG_ARCH_MESON) += reset-meson.o obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o obj-$(CONFIG_ARCH_STI) += sti/ obj-$(CONFIG_ARCH_HISI) += hisilicon/ obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o obj-$(CONFIG_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o +obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 72b32bd15549..395dc9ce492e 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -93,6 +93,43 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev) } EXPORT_SYMBOL_GPL(reset_controller_unregister); +static void devm_reset_controller_release(struct device *dev, void *res) +{ + reset_controller_unregister(*(struct reset_controller_dev **)res); +} + +/** + * devm_reset_controller_register - resource managed reset_controller_register() + * @dev: device that is registering this reset controller + * @rcdev: a pointer to the initialized reset controller device + * + * Managed reset_controller_register(). For reset controllers registered by + * this function, reset_controller_unregister() is automatically called on + * driver detach. See reset_controller_register() for more information. 
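+ * + * A minimal probe sketch (hypothetical foo_reset driver embedding its + * reset_controller_dev; the names are illustrative only): + * + * static int foo_reset_probe(struct platform_device *pdev) + * { + * struct foo_reset *data; + * + * data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + * if (!data) + * return -ENOMEM; + * + * data->rcdev.owner = THIS_MODULE; + * data->rcdev.nr_resets = 32; + * data->rcdev.ops = &foo_reset_ops; + * data->rcdev.of_node = pdev->dev.of_node; + * + * return devm_reset_controller_register(&pdev->dev, &data->rcdev); + * } + * + * No .remove callback is needed; unregistration happens on driver detach.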
+ */ +int devm_reset_controller_register(struct device *dev, + struct reset_controller_dev *rcdev) +{ + struct reset_controller_dev **rcdevp; + int ret; + + rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp), + GFP_KERNEL); + if (!rcdevp) + return -ENOMEM; + + ret = reset_controller_register(rcdev); + if (!ret) { + *rcdevp = rcdev; + devres_add(dev, rcdevp); + } else { + devres_free(rcdevp); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_reset_controller_register); + /** * reset_control_reset - reset the controlled device * @rstc: reset controller diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c index 8f55fd4a2630..35ce53edabf9 100644 --- a/drivers/reset/hisilicon/hi6220_reset.c +++ b/drivers/reset/hisilicon/hi6220_reset.c @@ -1,7 +1,8 @@ /* * Hisilicon Hi6220 reset controller driver * - * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2016 Linaro Limited. + * Copyright (c) 2015-2016 Hisilicon Limited. * * Author: Feng Chen <puck.chen@hisilicon.com> * @@ -15,81 +16,130 @@ #include <linux/module.h> #include <linux/bitops.h> #include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> #include <linux/reset-controller.h> #include <linux/reset.h> #include <linux/platform_device.h> -#define ASSERT_OFFSET 0x300 -#define DEASSERT_OFFSET 0x304 -#define MAX_INDEX 0x509 +#define PERIPH_ASSERT_OFFSET 0x300 +#define PERIPH_DEASSERT_OFFSET 0x304 +#define PERIPH_MAX_INDEX 0x509 + +#define SC_MEDIA_RSTEN 0x052C +#define SC_MEDIA_RSTDIS 0x0530 +#define MEDIA_MAX_INDEX 8 #define to_reset_data(x) container_of(x, struct hi6220_reset_data, rc_dev) +enum hi6220_reset_ctrl_type { + PERIPHERAL, + MEDIA, +}; + struct hi6220_reset_data { - void __iomem *assert_base; - void __iomem *deassert_base; - struct reset_controller_dev rc_dev; + struct reset_controller_dev rc_dev; + struct regmap *regmap; }; -static int hi6220_reset_assert(struct reset_controller_dev *rc_dev, - unsigned long idx) +static int hi6220_peripheral_assert(struct reset_controller_dev *rc_dev, + unsigned long idx) { struct hi6220_reset_data *data = to_reset_data(rc_dev); + struct regmap *regmap = data->regmap; + u32 bank = idx >> 8; + u32 offset = idx & 0xff; + u32 reg = PERIPH_ASSERT_OFFSET + bank * 0x10; - int bank = idx >> 8; - int offset = idx & 0xff; + return regmap_write(regmap, reg, BIT(offset)); +} - writel(BIT(offset), data->assert_base + (bank * 0x10)); +static int hi6220_peripheral_deassert(struct reset_controller_dev *rc_dev, + unsigned long idx) +{ + struct hi6220_reset_data *data = to_reset_data(rc_dev); + struct regmap *regmap = data->regmap; + u32 bank = idx >> 8; + u32 offset = idx & 0xff; + u32 reg = PERIPH_DEASSERT_OFFSET + bank * 0x10; - return 0; + return regmap_write(regmap, reg, BIT(offset)); } -static int hi6220_reset_deassert(struct reset_controller_dev *rc_dev, - unsigned long idx) +static const struct reset_control_ops hi6220_peripheral_reset_ops = { + .assert = hi6220_peripheral_assert, + .deassert = hi6220_peripheral_deassert, +}; + +static int hi6220_media_assert(struct reset_controller_dev *rc_dev, + unsigned long idx) { struct hi6220_reset_data *data = to_reset_data(rc_dev); + struct regmap *regmap = data->regmap; - int bank = idx >> 8; - int offset = idx & 0xff; + return regmap_write(regmap, SC_MEDIA_RSTEN, BIT(idx)); +} - writel(BIT(offset), data->deassert_base + (bank * 0x10)); +static int hi6220_media_deassert(struct reset_controller_dev *rc_dev, + unsigned long idx) +{ + struct 
hi6220_reset_data *data = to_reset_data(rc_dev); + struct regmap *regmap = data->regmap; - return 0; + return regmap_write(regmap, SC_MEDIA_RSTDIS, BIT(idx)); } -static const struct reset_control_ops hi6220_reset_ops = { - .assert = hi6220_reset_assert, - .deassert = hi6220_reset_deassert, +static const struct reset_control_ops hi6220_media_reset_ops = { + .assert = hi6220_media_assert, + .deassert = hi6220_media_deassert, }; static int hi6220_reset_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + enum hi6220_reset_ctrl_type type; struct hi6220_reset_data *data; - struct resource *res; - void __iomem *src_base; + struct regmap *regmap; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - src_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(src_base)) - return PTR_ERR(src_base); + type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to get reset controller regmap\n"); + return PTR_ERR(regmap); + } - data->assert_base = src_base + ASSERT_OFFSET; - data->deassert_base = src_base + DEASSERT_OFFSET; - data->rc_dev.nr_resets = MAX_INDEX; - data->rc_dev.ops = &hi6220_reset_ops; - data->rc_dev.of_node = pdev->dev.of_node; + data->regmap = regmap; + data->rc_dev.of_node = np; + if (type == MEDIA) { + data->rc_dev.ops = &hi6220_media_reset_ops; + data->rc_dev.nr_resets = MEDIA_MAX_INDEX; + } else { + data->rc_dev.ops = &hi6220_peripheral_reset_ops; + data->rc_dev.nr_resets = PERIPH_MAX_INDEX; + } return reset_controller_register(&data->rc_dev); } static const struct of_device_id hi6220_reset_match[] = { - { .compatible = "hisilicon,hi6220-sysctrl" }, - { }, + { + .compatible = "hisilicon,hi6220-sysctrl", + .data = (void *)PERIPHERAL, + }, + { + .compatible = "hisilicon,hi6220-mediactrl", + .data = (void *)MEDIA, + }, + { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, hi6220_reset_match); static struct platform_driver hi6220_reset_driver = { .probe = hi6220_reset_probe, diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c index ccb940a8d9fb..16d410cd6146 100644 --- a/drivers/reset/reset-ath79.c +++ b/drivers/reset/reset-ath79.c @@ -112,7 +112,7 @@ static int ath79_reset_probe(struct platform_device *pdev) ath79_reset->rcdev.of_reset_n_cells = 1; ath79_reset->rcdev.nr_resets = 32; - err = reset_controller_register(&ath79_reset->rcdev); + err = devm_reset_controller_register(&pdev->dev, &ath79_reset->rcdev); if (err) return err; @@ -131,7 +131,6 @@ static int ath79_reset_remove(struct platform_device *pdev) struct ath79_reset *ath79_reset = platform_get_drvdata(pdev); unregister_restart_handler(&ath79_reset->restart_nb); - reset_controller_unregister(&ath79_reset->rcdev); return 0; } diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c new file mode 100644 index 000000000000..c32f11a30c5f --- /dev/null +++ b/drivers/reset/reset-meson.c @@ -0,0 +1,136 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. 
+ * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include <linux/err.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> +#include <linux/types.h> + +#define REG_COUNT 8 +#define BITS_PER_REG 32 + +struct meson_reset { + void __iomem *reg_base; + struct reset_controller_dev rcdev; +}; + +static int meson_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct meson_reset *data = + container_of(rcdev, struct meson_reset, rcdev); + unsigned int bank = id / BITS_PER_REG; + unsigned int offset = id % BITS_PER_REG; + void __iomem *reg_addr = data->reg_base + (bank << 2); + + if (bank >= REG_COUNT) + return -EINVAL; + + writel(BIT(offset), reg_addr); + + return 0; +} + +static const struct reset_control_ops meson_reset_ops = { + .reset = meson_reset_reset, +}; + +static const struct of_device_id meson_reset_dt_ids[] = { + { .compatible = "amlogic,meson8b-reset", }, + { .compatible = "amlogic,meson-gxbb-reset", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_reset_dt_ids); + +static int meson_reset_probe(struct platform_device *pdev) +{ + struct meson_reset *data; + struct resource *res; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->reg_base)) + return PTR_ERR(data->reg_base); + + platform_set_drvdata(pdev, data); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG; + data->rcdev.ops = &meson_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + + return devm_reset_controller_register(&pdev->dev, &data->rcdev); +} + +static struct platform_driver meson_reset_driver = { + .probe = meson_reset_probe, + .driver = { + .name = "meson_reset", + .of_match_table = meson_reset_dt_ids, + }, +}; + +module_platform_driver(meson_reset_driver); + +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c index c60fb2dace3e..944980572f79 100644 --- a/drivers/reset/reset-oxnas.c +++ b/drivers/reset/reset-oxnas.c @@ -112,21 +112,11 @@ static int oxnas_reset_probe(struct platform_device *pdev) data->rcdev.ops = &oxnas_reset_ops; data->rcdev.of_node = pdev->dev.of_node; - return reset_controller_register(&data->rcdev); -} - -static int oxnas_reset_remove(struct platform_device *pdev) -{ - struct oxnas_reset *data = platform_get_drvdata(pdev); - - reset_controller_unregister(&data->rcdev); - - return 0; + return devm_reset_controller_register(&pdev->dev, &data->rcdev); } static struct platform_driver oxnas_reset_driver = { .probe = oxnas_reset_probe, - .remove = oxnas_reset_remove, .driver = { .name = "oxnas-reset", .of_match_table = oxnas_reset_dt_ids, diff --git a/drivers/reset/reset-pistachio.c b/drivers/reset/reset-pistachio.c index 72a97a15a4c8..bbc4c06dd33b 100644 --- a/drivers/reset/reset-pistachio.c +++ b/drivers/reset/reset-pistachio.c @@ -121,16 +121,7 @@ static int pistachio_reset_probe(struct platform_device *pdev) rd->rcdev.ops = &pistachio_reset_ops; rd->rcdev.of_node = np; - return reset_controller_register(&rd->rcdev); -} - -static int pistachio_reset_remove(struct platform_device *pdev) -{ - struct pistachio_reset_data *data = platform_get_drvdata(pdev); - - reset_controller_unregister(&data->rcdev); - - 
return 0; + return devm_reset_controller_register(dev, &rd->rcdev); } static const struct of_device_id pistachio_reset_dt_ids[] = { @@ -141,7 +132,6 @@ MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids); static struct platform_driver pistachio_reset_driver = { .probe = pistachio_reset_probe, - .remove = pistachio_reset_remove, .driver = { .name = "pistachio-reset", .of_match_table = pistachio_reset_dt_ids, diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index cd05a7032b17..12add9b0fa49 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -134,16 +134,7 @@ static int socfpga_reset_probe(struct platform_device *pdev) data->rcdev.ops = &socfpga_reset_ops; data->rcdev.of_node = pdev->dev.of_node; - return reset_controller_register(&data->rcdev); -} - -static int socfpga_reset_remove(struct platform_device *pdev) -{ - struct socfpga_reset_data *data = platform_get_drvdata(pdev); - - reset_controller_unregister(&data->rcdev); - - return 0; + return devm_reset_controller_register(dev, &data->rcdev); } static const struct of_device_id socfpga_reset_dt_ids[] = { @@ -153,7 +144,6 @@ static const struct of_device_id socfpga_reset_dt_ids[] = { static struct platform_driver socfpga_reset_driver = { .probe = socfpga_reset_probe, - .remove = socfpga_reset_remove, .driver = { .name = "socfpga-reset", .of_match_table = socfpga_reset_dt_ids, diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index 677f86555212..3080190b3f90 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -165,21 +165,11 @@ static int sunxi_reset_probe(struct platform_device *pdev) data->rcdev.ops = &sunxi_reset_ops; data->rcdev.of_node = pdev->dev.of_node; - return reset_controller_register(&data->rcdev); -} - -static int sunxi_reset_remove(struct platform_device *pdev) -{ - struct sunxi_reset_data *data = platform_get_drvdata(pdev); - - reset_controller_unregister(&data->rcdev); - - return 0; + return devm_reset_controller_register(&pdev->dev, &data->rcdev); } static struct platform_driver sunxi_reset_driver = { .probe = sunxi_reset_probe, - .remove = sunxi_reset_remove, .driver = { .name = "sunxi-reset", .of_match_table = sunxi_reset_dt_ids, diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c new file mode 100644 index 000000000000..47f0ffd3b013 --- /dev/null +++ b/drivers/reset/reset-ti-syscon.c @@ -0,0 +1,237 @@ +/* + * TI SYSCON regmap reset driver + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * Suman Anna <s-anna@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +#include <dt-bindings/reset/ti-syscon.h> + +/** + * struct ti_syscon_reset_control - reset control structure + * @assert_offset: reset assert control register offset from syscon base + * @assert_bit: reset assert bit in the reset assert control register + * @deassert_offset: reset deassert control register offset from syscon base + * @deassert_bit: reset deassert bit in the reset deassert control register + * @status_offset: reset status register offset from syscon base + * @status_bit: reset status bit in the reset status register + * @flags: reset flag indicating how the (de)assert and status are handled + */ +struct ti_syscon_reset_control { + unsigned int assert_offset; + unsigned int assert_bit; + unsigned int deassert_offset; + unsigned int deassert_bit; + unsigned int status_offset; + unsigned int status_bit; + u32 flags; +}; + +/** + * struct ti_syscon_reset_data - reset controller information structure + * @rcdev: reset controller entity + * @regmap: regmap handle containing the memory-mapped reset registers + * @controls: array of reset controls + * @nr_controls: number of controls in control array + */ +struct ti_syscon_reset_data { + struct reset_controller_dev rcdev; + struct regmap *regmap; + struct ti_syscon_reset_control *controls; + unsigned int nr_controls; +}; + +#define to_ti_syscon_reset_data(rcdev) \ + container_of(rcdev, struct ti_syscon_reset_data, rcdev) + +/** + * ti_syscon_reset_assert() - assert device reset + * @rcdev: reset controller entity + * @id: ID of the reset to be asserted + * + * This function implements the reset driver op to assert a device's reset. + * This asserts the reset in a manner prescribed by the reset flags. + * + * Return: 0 for successful request, else a corresponding error value + */ +static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev); + struct ti_syscon_reset_control *control; + unsigned int mask, value; + + if (id >= data->nr_controls) + return -EINVAL; + + control = &data->controls[id]; + + if (control->flags & ASSERT_NONE) + return -ENOTSUPP; /* assert not supported for this reset */ + + mask = BIT(control->assert_bit); + value = (control->flags & ASSERT_SET) ? mask : 0x0; + + return regmap_update_bits(data->regmap, control->assert_offset, mask, value); +} + +/** + * ti_syscon_reset_deassert() - deassert device reset + * @rcdev: reset controller entity + * @id: ID of reset to be deasserted + * + * This function implements the reset driver op to deassert a device's reset. + * This deasserts the reset in a manner prescribed by the reset flags. + * + * Return: 0 for successful request, else a corresponding error value + */ +static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev); + struct ti_syscon_reset_control *control; + unsigned int mask, value; + + if (id >= data->nr_controls) + return -EINVAL; + + control = &data->controls[id]; + + if (control->flags & DEASSERT_NONE) + return -ENOTSUPP; /* deassert not supported for this reset */ + + mask = BIT(control->deassert_bit); + value = (control->flags & DEASSERT_SET) ? 
mask : 0x0; + + return regmap_update_bits(data->regmap, control->deassert_offset, mask, value); +} + +/** + * ti_syscon_reset_status() - check device reset status + * @rcdev: reset controller entity + * @id: ID of the reset for which the status is being requested + * + * This function implements the reset driver op to return the status of a + * device's reset. + * + * Return: 0 if the reset is deasserted, a positive value if it is asserted, + * else a corresponding error value + */ +static int ti_syscon_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev); + struct ti_syscon_reset_control *control; + unsigned int reset_state; + int ret; + + if (id >= data->nr_controls) + return -EINVAL; + + control = &data->controls[id]; + + if (control->flags & STATUS_NONE) + return -ENOTSUPP; /* status not supported for this reset */ + + ret = regmap_read(data->regmap, control->status_offset, &reset_state); + if (ret) + return ret; + + return (reset_state & BIT(control->status_bit)) && + (control->flags & STATUS_SET); +} + +static const struct reset_control_ops ti_syscon_reset_ops = { + .assert = ti_syscon_reset_assert, + .deassert = ti_syscon_reset_deassert, + .status = ti_syscon_reset_status, +}; + +static int ti_syscon_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct ti_syscon_reset_data *data; + struct regmap *regmap; + const __be32 *list; + struct ti_syscon_reset_control *controls; + int size, nr_controls, i; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + regmap = syscon_node_to_regmap(np->parent); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + list = of_get_property(np, "ti,reset-bits", &size); + if (!list || (size / sizeof(*list)) % 7 != 0) { + dev_err(dev, "invalid DT reset description\n"); + return -EINVAL; + } + + nr_controls = (size / sizeof(*list)) / 7; + controls = devm_kzalloc(dev, nr_controls * sizeof(*controls), GFP_KERNEL); + if (!controls) + return -ENOMEM; + + for (i = 0; i < nr_controls; i++) { + controls[i].assert_offset = be32_to_cpup(list++); + controls[i].assert_bit = be32_to_cpup(list++); + controls[i].deassert_offset = be32_to_cpup(list++); + controls[i].deassert_bit = be32_to_cpup(list++); + controls[i].status_offset = be32_to_cpup(list++); + controls[i].status_bit = be32_to_cpup(list++); + controls[i].flags = be32_to_cpup(list++); + } + + data->rcdev.ops = &ti_syscon_reset_ops; + data->rcdev.owner = THIS_MODULE; + data->rcdev.of_node = np; + data->rcdev.nr_resets = nr_controls; + data->regmap = regmap; + data->controls = controls; + data->nr_controls = nr_controls; + + platform_set_drvdata(pdev, data); + + return devm_reset_controller_register(dev, &data->rcdev); +} + +static const struct of_device_id ti_syscon_reset_of_match[] = { + { .compatible = "ti,syscon-reset", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, ti_syscon_reset_of_match); + +static struct platform_driver ti_syscon_reset_driver = { + .probe = ti_syscon_reset_probe, + .driver = { + .name = "ti-syscon-reset", + .of_match_table = ti_syscon_reset_of_match, + }, +}; +module_platform_driver(ti_syscon_reset_driver); + +MODULE_AUTHOR("Andrew F.
Davis <afd@ti.com>"); +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); +MODULE_DESCRIPTION("TI SYSCON Regmap Reset Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c index a7e87bc45885..138f2f205662 100644 --- a/drivers/reset/reset-zynq.c +++ b/drivers/reset/reset-zynq.c @@ -122,16 +122,7 @@ static int zynq_reset_probe(struct platform_device *pdev) priv->rcdev.ops = &zynq_reset_ops; priv->rcdev.of_node = pdev->dev.of_node; - return reset_controller_register(&priv->rcdev); -} - -static int zynq_reset_remove(struct platform_device *pdev) -{ - struct zynq_reset_data *priv = platform_get_drvdata(pdev); - - reset_controller_unregister(&priv->rcdev); - - return 0; + return devm_reset_controller_register(&pdev->dev, &priv->rcdev); } static const struct of_device_id zynq_reset_dt_ids[] = { @@ -141,7 +132,6 @@ static const struct of_device_id zynq_reset_dt_ids[] = { static struct platform_driver zynq_reset_driver = { .probe = zynq_reset_probe, - .remove = zynq_reset_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = zynq_reset_dt_ids, diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig index f8c15a37fb35..613178553612 100644 --- a/drivers/reset/sti/Kconfig +++ b/drivers/reset/sti/Kconfig @@ -2,7 +2,6 @@ if ARCH_STI config STI_RESET_SYSCFG bool - select RESET_CONTROLLER config STIH415_RESET bool diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index 9c18d6fd8107..ea20f627dabe 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -134,7 +134,7 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) goto exit; } - rc = be64_to_cpu(msg.params[1]); + rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) { rc = -EIO; goto exit; @@ -181,7 +181,7 @@ static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) goto exit; } - rc = be64_to_cpu(msg.params[1]); + rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) rc = -EIO; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index fac1b51ea0de..9d66b4fb174b 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode); static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio); static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn, long size); + void **kaddr, pfn_t *pfn, long size); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -884,7 +884,7 @@ fail: static long dcssblk_direct_access (struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct dcssblk_dev_info *dev_info; unsigned long offset, dev_sz; @@ -894,7 +894,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum, return -ENODEV; dev_sz = dev_info->end - dev_info->start; offset = secnum * 512; - *kaddr = (void __pmem *) (dev_info->start + offset); + *kaddr = (void *) dev_info->start + offset; *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV); return dev_sz - offset; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 0ac520dd1b21..c71df0c7dedc 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -46,7 +46,8 @@ struct read_info_sccb { u64 rnmax2; /* 104-111 */ u8 _pad_112[116 - 112]; /* 112-115 */ u8 fac116; /* 116 */ - u8 _pad_117[119 - 117]; /* 117-118 */ + u8 fac117; /* 117 */ + u8 _pad_118; 
/* 118 */ u8 fac119; /* 119 */ u16 hcpua; /* 120-121 */ u8 _pad_122[124 - 122]; /* 122-123 */ @@ -114,7 +115,12 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.facilities = sccb->facilities; sclp.has_sprp = !!(sccb->fac84 & 0x02); sclp.has_core_type = !!(sccb->fac84 & 0x01); + sclp.has_gsls = !!(sccb->fac85 & 0x80); + sclp.has_64bscao = !!(sccb->fac116 & 0x80); + sclp.has_cmma = !!(sccb->fac116 & 0x40); sclp.has_esca = !!(sccb->fac116 & 0x08); + sclp.has_pfmfi = !!(sccb->fac117 & 0x40); + sclp.has_ibs = !!(sccb->fac117 & 0x20); sclp.has_hvs = !!(sccb->fac119 & 0x80); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; @@ -145,6 +151,10 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.has_siif = cpue->siif; sclp.has_sigpif = cpue->sigpif; sclp.has_sief2 = cpue->sief2; + sclp.has_gpere = cpue->gpere; + sclp.has_ib = cpue->ib; + sclp.has_cei = cpue->cei; + sclp.has_skey = cpue->skey; break; } diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c index 2553db0fdb52..f59b71776bbd 100644 --- a/drivers/s390/char/sclp_ocf.c +++ b/drivers/s390/char/sclp_ocf.c @@ -26,7 +26,7 @@ #define OCF_LENGTH_CPC_NAME 8UL static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1]; -static char cpc_name[OCF_LENGTH_CPC_NAME + 1]; +static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */ static DEFINE_SPINLOCK(sclp_ocf_lock); static struct work_struct sclp_ocf_change_work; @@ -72,9 +72,8 @@ static void sclp_ocf_handler(struct evbuf_header *evbuf) } if (cpc) { size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length); + memset(cpc_name, 0, OCF_LENGTH_CPC_NAME); memcpy(cpc_name, cpc + 1, size); - EBCASC(cpc_name, size); - cpc_name[size] = 0; } spin_unlock(&sclp_ocf_lock); schedule_work(&sclp_ocf_change_work); @@ -85,15 +84,23 @@ static struct sclp_register sclp_ocf_event = { .receiver_fn = sclp_ocf_handler, }; +void sclp_ocf_cpc_name_copy(char *dst) +{ + spin_lock_irq(&sclp_ocf_lock); + memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME); + spin_unlock_irq(&sclp_ocf_lock); +} +EXPORT_SYMBOL(sclp_ocf_cpc_name_copy); + static ssize_t cpc_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - int rc; + char name[OCF_LENGTH_CPC_NAME + 1]; - spin_lock_irq(&sclp_ocf_lock); - rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name); - spin_unlock_irq(&sclp_ocf_lock); - return rc; + sclp_ocf_cpc_name_copy(name); + name[OCF_LENGTH_CPC_NAME] = 0; + EBCASC(name, OCF_LENGTH_CPC_NAME); + return snprintf(page, PAGE_SIZE, "%s\n", name); } static struct kobj_attribute cpc_name_attr = diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index e96aced58627..46be25c7461e 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -37,8 +37,7 @@ enum cfg_task_t { /* Map for pending configure tasks. */ static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; -static DEFINE_MUTEX(cfg_lock); -static int cfg_busy; +static DEFINE_SPINLOCK(cfg_lock); /* Map for channel-path status. */ static struct sclp_chp_info chp_info; @@ -666,6 +665,20 @@ static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) chp_cfg_task[chpid.cssid][chpid.id] = cfg; } +/* Fetch the first configure task. Set chpid accordingly. */ +static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid) +{ + enum cfg_task_t t = cfg_none; + + chp_id_for_each(chpid) { + t = cfg_get_task(*chpid); + if (t != cfg_none) + break; + } + + return t; +} + /* Perform one configure/deconfigure request. Reschedule work function until * last request. 
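 * (This pairs with the new cfg_idle() further below: the cfg_busy flag is gone, so cfg_wait_idle() now sleeps until chp_cfg_fetch_task() finds no pending entry in the task map under cfg_lock.)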
*/ static void cfg_func(struct work_struct *work) @@ -674,16 +687,9 @@ static void cfg_func(struct work_struct *work) enum cfg_task_t t; int rc; - mutex_lock(&cfg_lock); - t = cfg_none; - chp_id_for_each(&chpid) { - t = cfg_get_task(chpid); - if (t != cfg_none) { - cfg_set_task(chpid, cfg_none); - break; - } - } - mutex_unlock(&cfg_lock); + spin_lock(&cfg_lock); + t = chp_cfg_fetch_task(&chpid); + spin_unlock(&cfg_lock); switch (t) { case cfg_configure: @@ -709,12 +715,13 @@ static void cfg_func(struct work_struct *work) case cfg_none: /* Get updated information after last change. */ info_update(); - mutex_lock(&cfg_lock); - cfg_busy = 0; - mutex_unlock(&cfg_lock); wake_up_interruptible(&cfg_wait_queue); return; } + spin_lock(&cfg_lock); + if (t == cfg_get_task(chpid)) + cfg_set_task(chpid, cfg_none); + spin_unlock(&cfg_lock); schedule_work(&cfg_work); } @@ -729,10 +736,9 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) { CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, configure); - mutex_lock(&cfg_lock); + spin_lock(&cfg_lock); cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); - cfg_busy = 1; - mutex_unlock(&cfg_lock); + spin_unlock(&cfg_lock); schedule_work(&cfg_work); } @@ -746,15 +752,27 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) void chp_cfg_cancel_deconfigure(struct chp_id chpid) { CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); - mutex_lock(&cfg_lock); + spin_lock(&cfg_lock); if (cfg_get_task(chpid) == cfg_deconfigure) cfg_set_task(chpid, cfg_none); - mutex_unlock(&cfg_lock); + spin_unlock(&cfg_lock); +} + +static bool cfg_idle(void) +{ + struct chp_id chpid; + enum cfg_task_t t; + + spin_lock(&cfg_lock); + t = chp_cfg_fetch_task(&chpid); + spin_unlock(&cfg_lock); + + return t == cfg_none; } static int cfg_wait_idle(void) { - if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) + if (wait_event_interruptible(cfg_wait_queue, cfg_idle())) return -ERESTARTSYS; return 0; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 4feb27215ab6..ed92fb09fc8e 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -468,6 +468,8 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) { struct ap_queue_status status; + if (msg == NULL) + return -EINVAL; status = __ap_recv(qid, psmid, msg, length); switch (status.response_code) { case AP_RESPONSE_NORMAL: @@ -617,6 +619,8 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) { struct ap_queue_status status; + if (!ap_dev->reply) + return AP_WAIT_NONE; status = ap_sm_recv(ap_dev); switch (status.response_code) { case AP_RESPONSE_NORMAL: @@ -638,6 +642,31 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) } /** + * ap_sm_suspend_read(): Receive pending reply messages from an AP device + * without changing the device state in between. In suspend mode we don't + * allow sending new requests, therefore just fetch pending replies. + * @ap_dev: pointer to the AP device + * + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN + */ +static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev) +{ + struct ap_queue_status status; + + if (!ap_dev->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(ap_dev); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (ap_dev->queue_count > 0) + return AP_WAIT_AGAIN; + /* fall through */ + default: + return AP_WAIT_NONE; + } +} + +/** * ap_sm_write(): Send messages from the request queue to an AP device. 
* @ap_dev: pointer to the AP device * @@ -738,7 +767,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) struct ap_queue_status status; unsigned long info; - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0 && ap_dev->reply) /* Try to read a completed message and get the status */ status = ap_sm_recv(ap_dev); else @@ -778,7 +807,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) struct ap_queue_status status; unsigned long info; - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0 && ap_dev->reply) /* Try to read a completed message and get the status */ status = ap_sm_recv(ap_dev); else @@ -834,7 +863,7 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { [AP_EVENT_TIMEOUT] = ap_sm_reset, }, [AP_STATE_SUSPEND_WAIT] = { - [AP_EVENT_POLL] = ap_sm_read, + [AP_EVENT_POLL] = ap_sm_suspend_read, [AP_EVENT_TIMEOUT] = ap_sm_nop, }, [AP_STATE_BORKED] = { @@ -1335,6 +1364,17 @@ static struct bus_type ap_bus_type = { .resume = ap_dev_resume, }; +void ap_device_init_reply(struct ap_device *ap_dev, + struct ap_message *reply) +{ + ap_dev->reply = reply; + + spin_lock_bh(&ap_dev->lock); + ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); + spin_unlock_bh(&ap_dev->lock); +} +EXPORT_SYMBOL(ap_device_init_reply); + static int ap_device_probe(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); @@ -1779,7 +1819,8 @@ int __init ap_module_init(void) if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { pr_warn("%d is not a valid cryptographic domain\n", ap_domain_index); - return -EINVAL; + rc = -EINVAL; + goto out_free; } /* In resume callback we need to know if the user had set the domain. * If so, we can not just reset it. @@ -1852,6 +1893,7 @@ out: unregister_reset_call(&ap_reset_call); if (ap_using_interrupts()) unregister_adapter_interrupt(&ap_airq); +out_free: kfree(ap_configuration); return rc; } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 6adcbdf225d1..d7fdf5c024d7 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -262,6 +262,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_flush_queue(struct ap_device *ap_dev); void ap_bus_force_rescan(void); +void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); int ap_module_init(void); void ap_module_exit(void); diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 1e849d6e1dfe..15104aaa075a 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -126,7 +126,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev) MSGTYPE50_VARIANT_DEFAULT); zdev->ap_dev = ap_dev; zdev->online = 1; - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) { diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index bb3908818505..ccb2e78ebf0e 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -147,7 +147,7 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev) return -ENODEV; zdev->ap_dev = ap_dev; zdev->online = 1; - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) { diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 
f41852768953..df8f0c4dacb7 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -327,7 +327,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) else zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index f54bbd5a6062..e43545c86bcf 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -102,8 +102,4 @@ struct asyc_intr_info { #define SCAN_HOST 0x04 }; -#ifndef CONFIG_CXL_EEH -#define cxl_perst_reloads_same_image(_a, _b) do { } while (0) -#endif - #endif /* _CXLFLASH_MAIN_H */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index adf61b43eb70..734a0428ef0e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4854,20 +4854,17 @@ static int lpfc_enable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; - int bars = 0; /* Obtain PCI device reference */ if (!phba->pcidev) goto out_error; else pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Enable PCI device */ if (pci_enable_device_mem(pdev)) goto out_error; /* Request PCI resource for the device */ - if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) goto out_disable_device; /* Set up device as PCI master and save state for EEH */ pci_set_master(pdev); @@ -4884,7 +4881,7 @@ out_disable_device: pci_disable_device(pdev); out_error: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1401 Failed to enable pci device, bars:x%x\n", bars); + "1401 Failed to enable pci device\n"); return -ENODEV; } @@ -4899,17 +4896,14 @@ static void lpfc_disable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; - int bars; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Release PCI resource and disable PCI device */ - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); pci_disable_device(pdev); return; @@ -9811,7 +9805,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) struct lpfc_vport **vports; struct lpfc_hba *phba = vport->phba; int i; - int bars = pci_select_bars(pdev, IORESOURCE_MEM); spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; @@ -9886,7 +9879,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) lpfc_hba_free(phba); - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); pci_disable_device(pdev); } diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 59c477883a73..6201dce3553b 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1183,7 +1183,6 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define qla82xx_get_temp_val(x) ((x) >> 16) -#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF) #define qla82xx_get_temp_state(x) ((x) & 0xffff) #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index cb58ef0d9b2c..fe42a2fdf351 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,7 +1,6 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/bcm/Kconfig" -source "drivers/soc/brcmstb/Kconfig" source 
"drivers/soc/fsl/qe/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" @@ -10,6 +9,7 @@ source "drivers/soc/samsung/Kconfig" source "drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" +source "drivers/soc/ux500/Kconfig" source "drivers/soc/versatile/Kconfig" endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 380230f03874..50c23d0bd457 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -3,7 +3,6 @@ # obj-y += bcm/ -obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ @@ -15,4 +14,5 @@ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_SOC_TI) += ti/ +obj-$(CONFIG_ARCH_U8500) += ux500/ obj-$(CONFIG_PLAT_VERSATILE) += versatile/ diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig index 3066edea184d..a39b0d58ddd0 100644 --- a/drivers/soc/bcm/Kconfig +++ b/drivers/soc/bcm/Kconfig @@ -1,9 +1,23 @@ +menu "Broadcom SoC drivers" + config RASPBERRYPI_POWER bool "Raspberry Pi power domain driver" - depends on ARCH_BCM2835 || COMPILE_TEST + depends on ARCH_BCM2835 || (COMPILE_TEST && OF) depends on RASPBERRYPI_FIRMWARE=y select PM_GENERIC_DOMAINS if PM - select PM_GENERIC_DOMAINS_OF if PM help This enables support for the RPi power domains which can be enabled or disabled via the RPi firmware. + +config SOC_BRCMSTB + bool "Broadcom STB SoC drivers" + depends on ARM + select SOC_BUS + help + Enables drivers for the Broadcom Set-Top Box (STB) series of chips. + This option alone enables only some support code, while the drivers + can be enabled individually within this menu. + + If unsure, say N. + +endmenu diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile index 63aa3eb23087..dc4fced72d21 100644 --- a/drivers/soc/bcm/Makefile +++ b/drivers/soc/bcm/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o +obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/ diff --git a/drivers/soc/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile index 9120b2715d3e..9120b2715d3e 100644 --- a/drivers/soc/brcmstb/Makefile +++ b/drivers/soc/bcm/brcmstb/Makefile diff --git a/drivers/soc/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c index 9049c076f9a1..3c39415d484f 100644 --- a/drivers/soc/brcmstb/biuctrl.c +++ b/drivers/soc/bcm/brcmstb/biuctrl.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/of_address.h> #include <linux/syscore_ops.h> +#include <linux/soc/brcmstb/brcmstb.h> #define CPU_CREDIT_REG_OFFSET 0x184 #define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000 diff --git a/drivers/soc/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c index 94e7335553f4..94e7335553f4 100644 --- a/drivers/soc/brcmstb/common.c +++ b/drivers/soc/bcm/brcmstb/common.c diff --git a/drivers/soc/brcmstb/Kconfig b/drivers/soc/brcmstb/Kconfig deleted file mode 100644 index 7fec3b4c80a1..000000000000 --- a/drivers/soc/brcmstb/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -menuconfig SOC_BRCMSTB - bool "Broadcom STB SoC drivers" - depends on ARM - select SOC_BUS - help - Enables drivers for the Broadcom Set-Top Box (STB) series of chips. - This option alone enables only some support code, while the drivers - can be enabled individually within this menu. - - If unsure, say N. 
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c index 54261decb369..d5437ca76ed9 100644 --- a/drivers/soc/qcom/smem_state.c +++ b/drivers/soc/qcom/smem_state.c @@ -104,26 +104,26 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev, if (con_id) { index = of_property_match_string(dev->of_node, - "qcom,state-names", + "qcom,smem-state-names", con_id); if (index < 0) { - dev_err(dev, "missing qcom,state-names\n"); + dev_err(dev, "missing qcom,smem-state-names\n"); return ERR_PTR(index); } } ret = of_parse_phandle_with_args(dev->of_node, - "qcom,state", - "#qcom,state-cells", + "qcom,smem-states", + "#qcom,smem-state-cells", index, &args); if (ret) { - dev_err(dev, "failed to parse qcom,state property\n"); + dev_err(dev, "failed to parse qcom,smem-states property\n"); return ERR_PTR(ret); } if (args.args_count != 1) { - dev_err(dev, "invalid #qcom,state-cells\n"); + dev_err(dev, "invalid #qcom,smem-state-cells\n"); return ERR_PTR(-EINVAL); } diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index f1eed7f9dd67..f51fb2ea7200 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -196,7 +196,7 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data) /* Match newly created entries */ for (i = smp2p->valid_entries; i < in->valid_entries; i++) { list_for_each_entry(entry, &smp2p->inbound, node) { - memcpy_fromio(buf, in->entries[i].name, sizeof(buf)); + memcpy(buf, in->entries[i].name, sizeof(buf)); if (!strcmp(buf, entry->name)) { entry->value = &in->entries[i].value; break; @@ -343,12 +343,13 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p, /* Allocate an entry from the smem item */ strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME); - memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME); - out->valid_entries++; + memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME); /* Make the logical entry reference the physical value */ entry->value = &out->entries[out->valid_entries].value; + out->valid_entries++; + entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry); if (IS_ERR(entry->state)) { dev_err(smp2p->dev, "failed to register qcom_smem_state\n"); diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index 6b777af1bc19..d0337b2a71c8 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -495,7 +495,7 @@ static int qcom_smsm_probe(struct platform_device *pdev) if (!smsm->hosts) return -ENOMEM; - local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,state-cells"); + local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells"); if (!local_node) { dev_err(&pdev->dev, "no state entry\n"); return -EINVAL; diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index c544f3d2c6ee..520aedd29965 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2016, Linaro Ltd. * Copyright (c) 2015, Sony Mobile Communications Inc. 
* * This program is free software; you can redistribute it and/or modify @@ -14,8 +15,16 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/soc/qcom/smd.h> +#include <linux/io.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/soc/qcom/wcnss_ctrl.h> #define WCNSS_REQUEST_TIMEOUT (5 * HZ) +#define WCNSS_CBC_TIMEOUT (10 * HZ) + +#define WCNSS_ACK_DONE_BOOTING 1 +#define WCNSS_ACK_COLD_BOOTING 2 #define NV_FRAGMENT_SIZE 3072 #define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" @@ -25,17 +34,19 @@ * @dev: device handle * @channel: SMD channel handle * @ack: completion for outstanding requests + * @cbc: completion for cbc complete indication * @ack_status: status of the outstanding request - * @download_nv_work: worker for uploading nv binary + * @probe_work: worker running the asynchronous probe (version request and nv upload) */ struct wcnss_ctrl { struct device *dev; struct qcom_smd_channel *channel; struct completion ack; + struct completion cbc; int ack_status; - struct work_struct download_nv_work; + struct work_struct probe_work; }; /* message types */ @@ -48,6 +59,11 @@ enum { WCNSS_UPLOAD_CAL_RESP, WCNSS_DOWNLOAD_CAL_REQ, WCNSS_DOWNLOAD_CAL_RESP, + WCNSS_VBAT_LEVEL_IND, + WCNSS_BUILD_VERSION_REQ, + WCNSS_BUILD_VERSION_RESP, + WCNSS_PM_CONFIG_REQ, + WCNSS_CBC_COMPLETE_IND, }; /** @@ -128,7 +144,7 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, version->major, version->minor, version->version, version->revision); - schedule_work(&wcnss->download_nv_work); + complete(&wcnss->ack); break; case WCNSS_DOWNLOAD_NV_RESP: if (count != sizeof(*nvresp)) { @@ -141,6 +157,10 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, wcnss->ack_status = nvresp->status; complete(&wcnss->ack); break; + case WCNSS_CBC_COMPLETE_IND: + dev_dbg(wcnss->dev, "cold boot complete\n"); + complete(&wcnss->cbc); + break; default: dev_info(wcnss->dev, "unknown message type %d\n", hdr->type); break; @@ -156,20 +176,32 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, static int wcnss_request_version(struct wcnss_ctrl *wcnss) { struct wcnss_msg_hdr msg; + int ret; msg.type = WCNSS_VERSION_REQ; msg.len = sizeof(msg); + ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg)); + if (ret < 0) + return ret; + + ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT); + if (!ret) { + dev_err(wcnss->dev, "timeout waiting for version response\n"); + return -ETIMEDOUT; + } - return qcom_smd_send(wcnss->channel, &msg, sizeof(msg)); + return 0; } /** * wcnss_download_nv() - send nv binary to WCNSS - * @work: work struct to acquire wcnss context + * @wcnss: wcnss_ctrl state handle + * @expect_cbc: indicator to caller that a cbc event is expected + * + * Returns 0 on success. Negative errno on failure.
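+ * (On WCNSS_DOWNLOAD_NV_RESP the SMD callback stores the firmware status in ack_status; WCNSS_ACK_COLD_BOOTING there means the device is still cold booting, which is reported through *expect_cbc so wcnss_async_probe() knows to keep waiting for WCNSS_CBC_COMPLETE_IND.)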
*/ -static void wcnss_download_nv(struct work_struct *work) +static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) { - struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, download_nv_work); struct wcnss_download_nv_req *req; const struct firmware *fw; const void *data; @@ -178,10 +210,10 @@ static void wcnss_download_nv(struct work_struct *work) req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL); if (!req) - return; + return -ENOMEM; ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev); - if (ret) { + if (ret < 0) { dev_err(wcnss->dev, "Failed to load nv file %s: %d\n", NVBIN_FILE, ret); goto free_req; @@ -207,7 +239,7 @@ static void wcnss_download_nv(struct work_struct *work) memcpy(req->fragment, data, req->frag_size); ret = qcom_smd_send(wcnss->channel, req, req->hdr.len); - if (ret) { + if (ret < 0) { dev_err(wcnss->dev, "failed to send smd packet\n"); goto release_fw; } @@ -220,16 +252,58 @@ static void wcnss_download_nv(struct work_struct *work) } while (left > 0); ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT); - if (!ret) + if (!ret) { dev_err(wcnss->dev, "timeout waiting for nv upload ack\n"); - else if (wcnss->ack_status != 1) - dev_err(wcnss->dev, "nv upload response failed err: %d\n", - wcnss->ack_status); + ret = -ETIMEDOUT; + } else { + *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING; + ret = 0; + } release_fw: release_firmware(fw); free_req: kfree(req); + + return ret; +} + +/** + * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS + * @wcnss: wcnss handle, retrieved from drvdata + * @name: SMD channel name + * @cb: callback to handle incoming data on the channel + */ +struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +{ + struct wcnss_ctrl *_wcnss = wcnss; + + return qcom_smd_open_channel(_wcnss->channel, name, cb); +} +EXPORT_SYMBOL(qcom_wcnss_open_channel); + +static void wcnss_async_probe(struct work_struct *work) +{ + struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work); + bool expect_cbc; + int ret; + + ret = wcnss_request_version(wcnss); + if (ret < 0) + return; + + ret = wcnss_download_nv(wcnss, &expect_cbc); + if (ret < 0) + return; + + /* Wait for pending cold boot completion if indicated by the nv downloader */ + if (expect_cbc) { + ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT); + if (!ret) + dev_err(wcnss->dev, "expected cold boot completion\n"); + } + + of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev); } static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) @@ -244,25 +318,38 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) wcnss->channel = sdev->channel; init_completion(&wcnss->ack); - INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv); + init_completion(&wcnss->cbc); + INIT_WORK(&wcnss->probe_work, wcnss_async_probe); qcom_smd_set_drvdata(sdev->channel, wcnss); + dev_set_drvdata(&sdev->dev, wcnss); + + schedule_work(&wcnss->probe_work); + + return 0; +} + +static void wcnss_ctrl_remove(struct qcom_smd_device *sdev) +{ + struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel); - return wcnss_request_version(wcnss); + cancel_work_sync(&wcnss->probe_work); + of_platform_depopulate(&sdev->dev); } -static const struct qcom_smd_id wcnss_ctrl_smd_match[] = { - { .name = "WCNSS_CTRL" }, +static const struct of_device_id wcnss_ctrl_of_match[] = { + { .compatible = "qcom,wcnss", }, {} }; static struct qcom_smd_driver wcnss_ctrl_driver = { .probe = 
wcnss_ctrl_probe, + .remove = wcnss_ctrl_remove, .callback = wcnss_ctrl_smd_callback, - .smd_match_table = wcnss_ctrl_smd_match, .driver = { .name = "qcom_wcnss_ctrl", .owner = THIS_MODULE, + .of_match_table = wcnss_ctrl_of_match, }, }; diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 151fcd3f025b..623039c3514c 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -1,7 +1,9 @@ obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o +obj-$(CONFIG_ARCH_R8A7792) += rcar-sysc.o r8a7792-sysc.o # R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. obj-$(CONFIG_ARCH_R8A7793) += rcar-sysc.o r8a7791-sysc.o obj-$(CONFIG_ARCH_R8A7794) += rcar-sysc.o r8a7794-sysc.o obj-$(CONFIG_ARCH_R8A7795) += rcar-sysc.o r8a7795-sysc.o +obj-$(CONFIG_ARCH_R8A7796) += rcar-sysc.o r8a7796-sysc.o diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c new file mode 100644 index 000000000000..ca7467d7b7ec --- /dev/null +++ b/drivers/soc/renesas/r8a7792-sysc.c @@ -0,0 +1,34 @@ +/* + * Renesas R-Car V2H (R8A7792) System Controller + * + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include <dt-bindings/power/r8a7792-sysc.h> + +#include "rcar-sysc.h" + +static const struct rcar_sysc_area r8a7792_areas[] __initconst = { + { "always-on", 0, 0, R8A7792_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, + { "ca15-scu", 0x180, 0, R8A7792_PD_CA15_SCU, R8A7792_PD_ALWAYS_ON, + PD_SCU }, + { "ca15-cpu0", 0x40, 0, R8A7792_PD_CA15_CPU0, R8A7792_PD_CA15_SCU, + PD_CPU_NOCR }, + { "ca15-cpu1", 0x40, 1, R8A7792_PD_CA15_CPU1, R8A7792_PD_CA15_SCU, + PD_CPU_NOCR }, + { "sgx", 0xc0, 0, R8A7792_PD_SGX, R8A7792_PD_ALWAYS_ON }, + { "imp", 0x140, 0, R8A7792_PD_IMP, R8A7792_PD_ALWAYS_ON }, +}; + +const struct rcar_sysc_info r8a7792_sysc_info __initconst = { + .areas = r8a7792_areas, + .num_areas = ARRAY_SIZE(r8a7792_areas), +}; diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c new file mode 100644 index 000000000000..f700c842b9e1 --- /dev/null +++ b/drivers/soc/renesas/r8a7796-sysc.c @@ -0,0 +1,48 @@ +/* + * Renesas R-Car M3-W System Controller + * + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include <linux/bug.h> +#include <linux/kernel.h> + +#include <dt-bindings/power/r8a7796-sysc.h> + +#include "rcar-sysc.h" + +static const struct rcar_sysc_area r8a7796_areas[] __initconst = { + { "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, + { "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON, + PD_SCU }, + { "ca57-cpu0", 0x80, 0, R8A7796_PD_CA57_CPU0, R8A7796_PD_CA57_SCU, + PD_CPU_NOCR }, + { "ca57-cpu1", 0x80, 1, R8A7796_PD_CA57_CPU1, R8A7796_PD_CA57_SCU, + PD_CPU_NOCR }, + { "ca53-scu", 0x140, 0, R8A7796_PD_CA53_SCU, R8A7796_PD_ALWAYS_ON, + PD_SCU }, + { "ca53-cpu0", 0x200, 0, R8A7796_PD_CA53_CPU0, R8A7796_PD_CA53_SCU, + PD_CPU_NOCR }, + { "ca53-cpu1", 0x200, 1, R8A7796_PD_CA53_CPU1, R8A7796_PD_CA53_SCU, + PD_CPU_NOCR }, + { "ca53-cpu2", 0x200, 2, R8A7796_PD_CA53_CPU2, R8A7796_PD_CA53_SCU, + PD_CPU_NOCR }, + { "ca53-cpu3", 0x200, 3, R8A7796_PD_CA53_CPU3, R8A7796_PD_CA53_SCU, + PD_CPU_NOCR }, + { "cr7", 0x240, 0, R8A7796_PD_CR7, R8A7796_PD_ALWAYS_ON }, + { "a3vc", 0x380, 0, R8A7796_PD_A3VC, R8A7796_PD_ALWAYS_ON }, + { "a2vc0", 0x3c0, 0, R8A7796_PD_A2VC0, R8A7796_PD_A3VC }, + { "a2vc1", 0x3c0, 1, R8A7796_PD_A2VC1, R8A7796_PD_A3VC }, + { "3dg-a", 0x100, 0, R8A7796_PD_3DG_A, R8A7796_PD_ALWAYS_ON }, + { "3dg-b", 0x100, 1, R8A7796_PD_3DG_B, R8A7796_PD_3DG_A }, + { "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON }, +}; + +const struct rcar_sysc_info r8a7796_sysc_info __initconst = { + .areas = r8a7796_areas, + .num_areas = ARRAY_SIZE(r8a7796_areas), +}; diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 79dbc770895f..65c8e1eb90c0 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -164,15 +164,6 @@ static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch) return false; } -void __iomem *rcar_sysc_init(phys_addr_t base) -{ - rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE); - if (!rcar_sysc_base) - panic("unable to ioremap R-Car SYSC hardware block\n"); - - return rcar_sysc_base; -} - struct rcar_sysc_pd { struct generic_pm_domain genpd; struct rcar_sysc_ch ch; @@ -293,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = { #ifdef CONFIG_ARCH_R8A7791 { .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info }, #endif +#ifdef CONFIG_ARCH_R8A7792 + { .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info }, +#endif #ifdef CONFIG_ARCH_R8A7793 /* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. 
*/ { .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info }, @@ -303,6 +297,9 @@ static const struct of_device_id rcar_sysc_matches[] = { #ifdef CONFIG_ARCH_R8A7795 { .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info }, #endif +#ifdef CONFIG_ARCH_R8A7796 + { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info }, +#endif { /* sentinel */ } }; @@ -322,6 +319,9 @@ static int __init rcar_sysc_pd_init(void) unsigned int i; int error; + if (rcar_sysc_base) + return 0; + np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match); if (!np) return -ENODEV; @@ -392,10 +392,35 @@ static int __init rcar_sysc_pd_init(void) domains->domains[area->isr_bit] = &pd->genpd; } - of_genpd_add_provider_onecell(np, &domains->onecell_data); + error = of_genpd_add_provider_onecell(np, &domains->onecell_data); out_put: of_node_put(np); return error; } early_initcall(rcar_sysc_pd_init); + +void __init rcar_sysc_init(phys_addr_t base, u32 syscier) +{ + u32 syscimr; + + if (!rcar_sysc_pd_init()) + return; + + rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE); + + /* + * Mask all interrupt sources to prevent the CPU from receiving them. + * Make sure not to clear reserved bits that were set before. + */ + syscimr = ioread32(rcar_sysc_base + SYSCIMR); + syscimr |= syscier; + pr_debug("%s: syscimr = 0x%08x\n", __func__, syscimr); + iowrite32(syscimr, rcar_sysc_base + SYSCIMR); + + /* + * SYSC needs all interrupt sources enabled to control power. + */ + pr_debug("%s: syscier = 0x%08x\n", __func__, syscier); + iowrite32(syscier, rcar_sysc_base + SYSCIER); +} diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 5e766174c2f4..77dbe861473f 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -53,6 +53,8 @@ struct rcar_sysc_info { extern const struct rcar_sysc_info r8a7779_sysc_info; extern const struct rcar_sysc_info r8a7790_sysc_info; extern const struct rcar_sysc_info r8a7791_sysc_info; +extern const struct rcar_sysc_info r8a7792_sysc_info; extern const struct rcar_sysc_info r8a7794_sysc_info; extern const struct rcar_sysc_info r8a7795_sysc_info; +extern const struct rcar_sysc_info r8a7796_sysc_info; #endif /* __SOC_RENESAS_RCAR_SYSC_H__ */ diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index d7fc123006a3..245533907d1b 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -10,4 +10,8 @@ config EXYNOS_PMU bool "Exynos PMU controller driver" if COMPILE_TEST depends on (ARM && ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST) +config EXYNOS_PM_DOMAINS + bool "Exynos PM domains" if COMPILE_TEST + depends on PM_GENERIC_DOMAINS || COMPILE_TEST + endif diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile index f64ac4d80564..3619f2ecddaa 100644 --- a/drivers/soc/samsung/Makefile +++ b/drivers/soc/samsung/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \ exynos5250-pmu.o exynos5420-pmu.o +obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c index 20b3ab8aa790..af2f54e14b83 100644 --- a/drivers/soc/samsung/exynos3250-pmu.c +++ b/drivers/soc/samsung/exynos3250-pmu.c @@ -14,7 +14,7 @@ #include "exynos-pmu.h" -static struct exynos_pmu_conf exynos3250_pmu_config[] = { +static const struct exynos_pmu_conf exynos3250_pmu_config[] = { /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } */ { EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 
0x0, 0x2} }, { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} }, diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c index b962fb6a5d22..3f2c64180ef8 100644 --- a/drivers/soc/samsung/exynos5420-pmu.c +++ b/drivers/soc/samsung/exynos5420-pmu.c @@ -17,7 +17,7 @@ #include "exynos-pmu.h" -static struct exynos_pmu_conf exynos5420_pmu_config[] = { +static const struct exynos_pmu_conf exynos5420_pmu_config[] = { /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} }, { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} }, diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c new file mode 100644 index 000000000000..4822346aadc6 --- /dev/null +++ b/drivers/soc/samsung/pm_domains.c @@ -0,0 +1,245 @@ +/* + * Exynos Generic power domain support. + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Implementation of Exynos specific power domain control which is used in + * conjunction with runtime-pm. Support for both device-tree and non-device-tree + * based power domain support is included. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/io.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/pm_domain.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/sched.h> + +#define MAX_CLK_PER_DOMAIN 4 + +struct exynos_pm_domain_config { + /* Value for LOCAL_PWR_CFG and STATUS fields for each domain */ + u32 local_pwr_cfg; +}; + +/* + * Exynos specific wrapper around the generic power domain + */ +struct exynos_pm_domain { + void __iomem *base; + char const *name; + bool is_off; + struct generic_pm_domain pd; + struct clk *oscclk; + struct clk *clk[MAX_CLK_PER_DOMAIN]; + struct clk *pclk[MAX_CLK_PER_DOMAIN]; + struct clk *asb_clk[MAX_CLK_PER_DOMAIN]; + u32 local_pwr_cfg; +}; + +static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) +{ + struct exynos_pm_domain *pd; + void __iomem *base; + u32 timeout, pwr; + char *op; + int i; + + pd = container_of(domain, struct exynos_pm_domain, pd); + base = pd->base; + + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + if (IS_ERR(pd->asb_clk[i])) + break; + clk_prepare_enable(pd->asb_clk[i]); + } + + /* Set oscclk before powering off a domain*/ + if (!power_on) { + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + if (IS_ERR(pd->clk[i])) + break; + pd->pclk[i] = clk_get_parent(pd->clk[i]); + if (clk_set_parent(pd->clk[i], pd->oscclk)) + pr_err("%s: error setting oscclk as parent to clock %d\n", + pd->name, i); + } + } + + pwr = power_on ? pd->local_pwr_cfg : 0; + writel_relaxed(pwr, base); + + /* Wait max 1ms */ + timeout = 10; + + while ((readl_relaxed(base + 0x4) & pd->local_pwr_cfg) != pwr) { + if (!timeout) { + op = (power_on) ? 
"enable" : "disable"; + pr_err("Power domain %s %s failed\n", domain->name, op); + return -ETIMEDOUT; + } + timeout--; + cpu_relax(); + usleep_range(80, 100); + } + + /* Restore clocks after powering on a domain*/ + if (power_on) { + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + if (IS_ERR(pd->clk[i])) + break; + + if (IS_ERR(pd->pclk[i])) + continue; /* Skip on first power up */ + if (clk_set_parent(pd->clk[i], pd->pclk[i])) + pr_err("%s: error setting parent to clock%d\n", + pd->name, i); + } + } + + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + if (IS_ERR(pd->asb_clk[i])) + break; + clk_disable_unprepare(pd->asb_clk[i]); + } + + return 0; +} + +static int exynos_pd_power_on(struct generic_pm_domain *domain) +{ + return exynos_pd_power(domain, true); +} + +static int exynos_pd_power_off(struct generic_pm_domain *domain) +{ + return exynos_pd_power(domain, false); +} + +static const struct exynos_pm_domain_config exynos4210_cfg __initconst = { + .local_pwr_cfg = 0x7, +}; + +static const struct of_device_id exynos_pm_domain_of_match[] __initconst = { + { + .compatible = "samsung,exynos4210-pd", + .data = &exynos4210_cfg, + }, + { }, +}; + +static __init int exynos4_pm_init_power_domain(void) +{ + struct device_node *np; + const struct of_device_id *match; + + for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) { + const struct exynos_pm_domain_config *pm_domain_cfg; + struct exynos_pm_domain *pd; + int on, i; + + pm_domain_cfg = match->data; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + pr_err("%s: failed to allocate memory for domain\n", + __func__); + of_node_put(np); + return -ENOMEM; + } + pd->pd.name = kstrdup_const(strrchr(np->full_name, '/') + 1, + GFP_KERNEL); + if (!pd->pd.name) { + kfree(pd); + of_node_put(np); + return -ENOMEM; + } + + pd->name = pd->pd.name; + pd->base = of_iomap(np, 0); + if (!pd->base) { + pr_warn("%s: failed to map memory\n", __func__); + kfree_const(pd->pd.name); + kfree(pd); + continue; + } + + pd->pd.power_off = exynos_pd_power_off; + pd->pd.power_on = exynos_pd_power_on; + pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg; + + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + char clk_name[8]; + + snprintf(clk_name, sizeof(clk_name), "asb%d", i); + pd->asb_clk[i] = of_clk_get_by_name(np, clk_name); + if (IS_ERR(pd->asb_clk[i])) + break; + } + + pd->oscclk = of_clk_get_by_name(np, "oscclk"); + if (IS_ERR(pd->oscclk)) + goto no_clk; + + for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { + char clk_name[8]; + + snprintf(clk_name, sizeof(clk_name), "clk%d", i); + pd->clk[i] = of_clk_get_by_name(np, clk_name); + if (IS_ERR(pd->clk[i])) + break; + /* + * Skip setting parent on first power up. + * The parent at this time may not be useful at all. 
+ */ + pd->pclk[i] = ERR_PTR(-EINVAL); + } + + if (IS_ERR(pd->clk[0])) + clk_put(pd->oscclk); + +no_clk: + on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg; + + pm_genpd_init(&pd->pd, NULL, !on); + of_genpd_add_provider_simple(np, &pd->pd); + } + + /* Assign the child power domains to their parents */ + for_each_matching_node(np, exynos_pm_domain_of_match) { + struct generic_pm_domain *child_domain, *parent_domain; + struct of_phandle_args args; + + args.np = np; + args.args_count = 0; + child_domain = of_genpd_get_from_provider(&args); + if (IS_ERR(child_domain)) + continue; + + if (of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", 0, &args) != 0) + continue; + + parent_domain = of_genpd_get_from_provider(&args); + if (IS_ERR(parent_domain)) + continue; + + if (pm_genpd_add_subdomain(parent_domain, child_domain)) + pr_warn("%s failed to add subdomain: %s\n", + parent_domain->name, child_domain->name); + else + pr_info("%s has as child subdomain: %s.\n", + parent_domain->name, child_domain->name); + } + + return 0; +} +core_initcall(exynos4_pm_init_power_domain); diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index bb173456bbff..71c834f3847e 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -51,6 +51,7 @@ #define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */ #define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */ #define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */ +#define PMC_CNTRL_MAIN_RST (1 << 4) #define DPD_SAMPLE 0x020 #define DPD_SAMPLE_ENABLE (1 << 0) @@ -80,6 +81,14 @@ #define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2) #define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1) +#define PMC_RST_STATUS 0x1b4 +#define PMC_RST_STATUS_POR 0 +#define PMC_RST_STATUS_WATCHDOG 1 +#define PMC_RST_STATUS_SENSOR 2 +#define PMC_RST_STATUS_SW_MAIN 3 +#define PMC_RST_STATUS_LP0 4 +#define PMC_RST_STATUS_AOTAG 5 + #define IO_DPD_REQ 0x1b8 #define IO_DPD_REQ_CODE_IDLE (0 << 30) #define IO_DPD_REQ_CODE_OFF (1 << 30) @@ -399,6 +408,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, disable_clks: tegra_powergate_disable_clocks(pg); usleep_range(10, 20); + powergate_off: tegra_powergate_set(pg->id, false); @@ -436,6 +446,7 @@ assert_resets: usleep_range(10, 20); tegra_powergate_reset_deassert(pg); usleep_range(10, 20); + disable_clks: tegra_powergate_disable_clocks(pg); @@ -540,6 +551,9 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, struct tegra_powergate pg; int err; + if (!tegra_powergate_is_available(id)) + return -EINVAL; + pg.id = id; pg.clks = &clk; pg.num_clks = 1; @@ -638,9 +652,10 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, tegra_pmc_writel(value, PMC_SCRATCH0); - value = tegra_pmc_readl(0); - value |= 0x10; - tegra_pmc_writel(value, 0); + /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */ + value = tegra_pmc_readl(PMC_CNTRL); + value |= PMC_CNTRL_MAIN_RST; + tegra_pmc_writel(value, PMC_CNTRL); return NOTIFY_DONE; } @@ -722,13 +737,14 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg, err: while (i--) clk_put(pg->clks[i]); + kfree(pg->clks); return err; } static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, - struct device_node *np) + struct device_node *np, bool off) { struct reset_control *rst; unsigned int i, count; @@ -748,6 +764,16 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, err = PTR_ERR(pg->resets[i]); goto error; } + + if (off) + err = 
reset_control_assert(pg->resets[i]); + else + err = reset_control_deassert(pg->resets[i]); + + if (err) { + reset_control_put(pg->resets[i]); + goto error; + } } pg->num_resets = count; @@ -757,6 +783,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, error: while (i--) reset_control_put(pg->resets[i]); + kfree(pg->resets); return err; @@ -765,16 +792,19 @@ error: static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) { struct tegra_powergate *pg; + int id, err; bool off; - int id; pg = kzalloc(sizeof(*pg), GFP_KERNEL); if (!pg) - goto error; + return; id = tegra_powergate_lookup(pmc, np->name); - if (id < 0) + if (id < 0) { + dev_err(pmc->dev, "powergate lookup failed for %s: %d\n", + np->name, id); goto free_mem; + } /* * Clear the bit for this powergate so it cannot be managed @@ -788,31 +818,64 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) pg->genpd.power_on = tegra_genpd_power_on; pg->pmc = pmc; - if (tegra_powergate_of_get_clks(pg, np)) + off = !tegra_powergate_is_powered(pg->id); + + err = tegra_powergate_of_get_clks(pg, np); + if (err < 0) { + dev_err(pmc->dev, "failed to get clocks for %s: %d\n", + np->name, err); goto set_available; + } - if (tegra_powergate_of_get_resets(pg, np)) + err = tegra_powergate_of_get_resets(pg, np, off); + if (err < 0) { + dev_err(pmc->dev, "failed to get resets for %s: %d\n", + np->name, err); goto remove_clks; + } - off = !tegra_powergate_is_powered(pg->id); + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) + goto power_on_cleanup; + + /* + * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB + * host and super-speed partitions. Once the XHCI driver + * manages the partitions itself this code can be removed. Note + * that we don't register these partitions with the genpd core + * to prevent it from powering down the partitions as they appear + * to be unused.
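For context on the PMC_CNTRL_MAIN_RST write in tegra_pmc_restart_notify() above: system reset is wired into the kernel's restart-handler notifier chain. A hedged, generic sketch of that mechanism follows; the my_* names are invented and the offset is illustrative rather than Tegra's verified register map:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>

#define MY_CNTRL		0x0	/* hypothetical control register */
#define MY_CNTRL_MAIN_RST	BIT(4)

static void __iomem *my_base;		/* mapped during probe */

static int my_restart_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	u32 value = readl(my_base + MY_CNTRL);

	/* resets the chip apart from scratch/reset-status registers */
	writel(value | MY_CNTRL_MAIN_RST, my_base + MY_CNTRL);

	return NOTIFY_DONE;
}

static struct notifier_block my_restart_handler = {
	.notifier_call = my_restart_notify,
	.priority = 128,	/* documented default for restart handlers */
};

/* in probe: err = register_restart_handler(&my_restart_handler); */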
+ */ + if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) && + (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) + goto power_on_cleanup; pm_genpd_init(&pg->genpd, NULL, off); - if (of_genpd_add_provider_simple(np, &pg->genpd)) + err = of_genpd_add_provider_simple(np, &pg->genpd); + if (err < 0) { + dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n", + np->name, err); goto remove_resets; + } dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name); return; +power_on_cleanup: + if (off) + WARN_ON(tegra_powergate_power_up(pg, true)); + remove_resets: while (pg->num_resets--) reset_control_put(pg->resets[pg->num_resets]); + kfree(pg->resets); remove_clks: while (pg->num_clks--) clk_put(pg->clks[pg->num_clks]); + kfree(pg->clks); set_available: @@ -820,16 +883,20 @@ set_available: free_mem: kfree(pg); - -error: - dev_err(pmc->dev, "failed to create power domain for %s\n", np->name); } -static void tegra_powergate_init(struct tegra_pmc *pmc) +static void tegra_powergate_init(struct tegra_pmc *pmc, + struct device_node *parent) { struct device_node *np, *child; + unsigned int i; + + /* Create a bitmap of the available and valid partitions */ + for (i = 0; i < pmc->soc->num_powergates; i++) + if (pmc->soc->powergates[i]) + set_bit(i, pmc->powergates_available); - np = of_get_child_by_name(pmc->dev->of_node, "powergates"); + np = of_get_child_by_name(parent, "powergates"); if (!np) return; @@ -1205,6 +1272,14 @@ static int tegra_pmc_probe(struct platform_device *pdev) struct resource *res; int err; + /* + * Early initialisation should have configured an initial + * register mapping and setup the soc data pointer. If these + * are not valid then something went badly wrong! + */ + if (WARN_ON(!pmc->base || !pmc->soc)) + return -ENODEV; + err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node); if (err < 0) return err; @@ -1242,8 +1317,6 @@ static int tegra_pmc_probe(struct platform_device *pdev) return err; } - tegra_powergate_init(pmc); - mutex_lock(&pmc->powergates_lock); iounmap(pmc->base); pmc->base = base; @@ -1477,10 +1550,11 @@ static int __init tegra_pmc_early_init(void) const struct of_device_id *match; struct device_node *np; struct resource regs; - unsigned int i; bool invert; u32 value; + mutex_init(&pmc->powergates_lock); + np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match); if (!np) { /* @@ -1515,39 +1589,40 @@ static int __init tegra_pmc_early_init(void) */ if (of_address_to_resource(np, 0, ®s) < 0) { pr_err("failed to get PMC registers\n"); + of_node_put(np); return -ENXIO; } - - pmc->soc = match->data; } pmc->base = ioremap_nocache(regs.start, resource_size(®s)); if (!pmc->base) { pr_err("failed to map PMC registers\n"); + of_node_put(np); return -ENXIO; } - /* Create a bit-map of the available and valid partitions */ - for (i = 0; i < pmc->soc->num_powergates; i++) - if (pmc->soc->powergates[i]) - set_bit(i, pmc->powergates_available); + if (np) { + pmc->soc = match->data; - mutex_init(&pmc->powergates_lock); + tegra_powergate_init(pmc, np); - /* - * Invert the interrupt polarity if a PMC device tree node exists and - * contains the nvidia,invert-interrupt property. - */ - invert = of_property_read_bool(np, "nvidia,invert-interrupt"); + /* + * Invert the interrupt polarity if a PMC device tree node + * exists and contains the nvidia,invert-interrupt property. 
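tegra_powergate_init() above builds a bitmap of the partitions that the SoC data actually names, and tegra_powergate_sequence_power_up() now rejects IDs outside it. The bookkeeping reduces to a pattern like this self-contained sketch (my_* names hypothetical):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_POWERGATES	32	/* illustrative partition count */

static DECLARE_BITMAP(my_powergates_available, MY_MAX_POWERGATES);

/* Mark every partition that the SoC data gives a name as usable. */
static void my_powergates_init(const char * const *names, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count && i < MY_MAX_POWERGATES; i++)
		if (names[i])
			set_bit(i, my_powergates_available);
}

/* Gate all public entry points on the bitmap, as the driver now does. */
static bool my_powergate_is_available(unsigned int id)
{
	return id < MY_MAX_POWERGATES &&
	       test_bit(id, my_powergates_available);
}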
+ */ + invert = of_property_read_bool(np, "nvidia,invert-interrupt"); - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(PMC_CNTRL); - if (invert) - value |= PMC_CNTRL_INTR_POLARITY; - else - value &= ~PMC_CNTRL_INTR_POLARITY; + if (invert) + value |= PMC_CNTRL_INTR_POLARITY; + else + value &= ~PMC_CNTRL_INTR_POLARITY; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(value, PMC_CNTRL); + + of_node_put(np); + } return 0; } diff --git a/drivers/soc/ux500/Kconfig b/drivers/soc/ux500/Kconfig new file mode 100644 index 000000000000..025a44aef5db --- /dev/null +++ b/drivers/soc/ux500/Kconfig @@ -0,0 +1,7 @@ +config UX500_SOC_ID + bool "SoC bus for ST-Ericsson ux500" + depends on ARCH_U8500 || COMPILE_TEST + default ARCH_U8500 + help + Include support for the SoC bus on the ST-Ericsson Ux500 platforms, + providing some sysfs information about the ASIC variant. diff --git a/drivers/soc/ux500/Makefile b/drivers/soc/ux500/Makefile new file mode 100644 index 000000000000..0b87ad04b018 --- /dev/null +++ b/drivers/soc/ux500/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_UX500_SOC_ID) += ux500-soc-id.o diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c new file mode 100644 index 000000000000..6c1be74e5fcc --- /dev/null +++ b/drivers/soc/ux500/ux500-soc-id.c @@ -0,0 +1,222 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/sys_soc.h> + +#include <asm/cputype.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> +#include <asm/mach/map.h> + +/** + * struct dbx500_asic_id - fields of the ASIC ID + * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard" + * @partnumber: hitherto 0x8500 for DB8500 + * @revision: version code in the series + */ +struct dbx500_asic_id { + u16 partnumber; + u8 revision; + u8 process; +}; + +static struct dbx500_asic_id dbx500_id; + +static unsigned int __init ux500_read_asicid(phys_addr_t addr) +{ + void __iomem *virt = ioremap(addr, 4); + unsigned int asicid; + + if (!virt) + return 0; + + asicid = readl(virt); + iounmap(virt); + + return asicid; +} + +static void ux500_print_soc_info(unsigned int asicid) +{ + unsigned int rev = dbx500_id.revision; + + pr_info("DB%4x ", dbx500_id.partnumber); + + if (rev == 0x01) + pr_cont("Early Drop"); + else if (rev >= 0xA0) + pr_cont("v%d.%d", (rev >> 4) - 0xA + 1, rev & 0xf); + else + pr_cont("Unknown"); + + pr_cont(" [%#010x]\n", asicid); +} + +static unsigned int partnumber(unsigned int asicid) +{ + return (asicid >> 8) & 0xffff; +} + +/* + * SOC MIDR ASICID ADDRESS ASICID VALUE + * DB8500ed 0x410fc090 0x9001FFF4 0x00850001 + * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0 + * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1 + * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0 + * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2 + * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0 + * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx + */ + +static void __init ux500_setup_id(void) +{ + unsigned int cpuid = read_cpuid_id(); + unsigned int asicid = 0; + phys_addr_t addr = 0; + + switch (cpuid) { + case 0x410fc090: /* DB8500ed */ + case 0x411fc091: /* DB8500v1 */ + addr = 0x9001FFF4; + break; + + case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */ + asicid = 
ux500_read_asicid(0x9001DBF4); + if (partnumber(asicid) == 0x8500 || + partnumber(asicid) == 0x8520) + /* DB8500v2 */ + break; + + /* DB5500v1 */ + addr = 0x9001FFF4; + break; + + case 0x413fc090: /* DB9540 */ + addr = 0xFFFFDBF4; + break; + } + + if (addr) + asicid = ux500_read_asicid(addr); + + if (!asicid) { + pr_err("Unable to identify SoC\n"); + BUG(); + } + + dbx500_id.process = asicid >> 24; + dbx500_id.partnumber = partnumber(asicid); + dbx500_id.revision = asicid & 0xff; + + ux500_print_soc_info(asicid); +} + +static const char * __init ux500_get_machine(void) +{ + return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber); +} + +static const char * __init ux500_get_family(void) +{ + return kasprintf(GFP_KERNEL, "ux500"); +} + +static const char * __init ux500_get_revision(void) +{ + unsigned int rev = dbx500_id.revision; + + if (rev == 0x01) + return kasprintf(GFP_KERNEL, "%s", "ED"); + else if (rev >= 0xA0) + return kasprintf(GFP_KERNEL, "%d.%d", + (rev >> 4) - 0xA + 1, rev & 0xf); + + return kasprintf(GFP_KERNEL, "%s", "Unknown"); +} + +static ssize_t ux500_get_process(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (dbx500_id.process == 0x00) + return sprintf(buf, "Standard\n"); + + return sprintf(buf, "%02xnm\n", dbx500_id.process); +} + +static const char *db8500_read_soc_id(struct device_node *backupram) +{ + void __iomem *base; + void __iomem *uid; + const char *retstr; + + base = of_iomap(backupram, 0); + if (!base) + return NULL; + uid = base + 0x1fc0; + + /* Throw these device-specific numbers into the entropy pool */ + add_device_randomness(uid, 0x14); + retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", + readl((u32 *)uid+0), + readl((u32 *)uid+1), readl((u32 *)uid+2), + readl((u32 *)uid+3), readl((u32 *)uid+4)); + iounmap(base); + return retstr; +} + +static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr, + struct device_node *backupram) +{ + soc_dev_attr->soc_id = db8500_read_soc_id(backupram); + soc_dev_attr->machine = ux500_get_machine(); + soc_dev_attr->family = ux500_get_family(); + soc_dev_attr->revision = ux500_get_revision(); +} + +static const struct device_attribute ux500_soc_attr = + __ATTR(process, S_IRUGO, ux500_get_process, NULL); + +static int __init ux500_soc_device_init(void) +{ + struct device *parent; + struct soc_device *soc_dev; + struct soc_device_attribute *soc_dev_attr; + struct device_node *backupram; + + backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram"); + if (!backupram) + return 0; + + ux500_setup_id(); + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + soc_info_populate(soc_dev_attr, backupram); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr); + return PTR_ERR(soc_dev); + } + + parent = soc_device_to_device(soc_dev); + device_create_file(parent, &ux500_soc_attr); + + return 0; +} +subsys_initcall(ux500_soc_device_init); diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index d5adf9f31602..a56eca0e95a6 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -68,32 +68,6 @@ struct spi_st { struct completion done; }; -static int spi_st_clk_enable(struct spi_st *spi_st) -{ - /* - * Current platforms use one of the core clocks for SPI and I2C. - * If we attempt to disable the clock, the system will hang. - * - * TODO: Remove this when platform supports power domains. 
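ux500-soc-id.c above ultimately feeds everything into the kernel's generic SoC bus. Reduced to its skeleton, the registration looks like this sketch; the hard-coded strings are purely illustrative and the error path is simplified (a full driver would also free the kasprintf() results):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static int __init my_soc_info_init(void)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->family = kasprintf(GFP_KERNEL, "ux500");
	attr->machine = kasprintf(GFP_KERNEL, "DB8500");
	attr->revision = kasprintf(GFP_KERNEL, "2.0");

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		kfree(attr);
		return PTR_ERR(soc_dev);
	}

	/* now exported as /sys/devices/soc0/{family,machine,revision} */
	return 0;
}
subsys_initcall(my_soc_info_init);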
- */ - return 0; - - return clk_prepare_enable(spi_st->clk); -} - -static void spi_st_clk_disable(struct spi_st *spi_st) -{ - /* - * Current platforms use one of the core clocks for SPI and I2C. - * If we attempt to disable the clock, the system will hang. - * - * TODO: Remove this when platform supports power domains. - */ - return; - - clk_disable_unprepare(spi_st->clk); -} - /* Load the TX FIFO */ static void ssc_write_tx_fifo(struct spi_st *spi_st) { @@ -349,7 +323,7 @@ static int spi_st_probe(struct platform_device *pdev) goto put_master; } - ret = spi_st_clk_enable(spi_st); + ret = clk_prepare_enable(spi_st->clk); if (ret) goto put_master; @@ -408,7 +382,7 @@ static int spi_st_probe(struct platform_device *pdev) return 0; clk_disable: - spi_st_clk_disable(spi_st); + clk_disable_unprepare(spi_st->clk); put_master: spi_master_put(master); return ret; @@ -419,7 +393,7 @@ static int spi_st_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct spi_st *spi_st = spi_master_get_devdata(master); - spi_st_clk_disable(spi_st); + clk_disable_unprepare(spi_st->clk); pinctrl_pm_select_sleep_state(&pdev->dev); @@ -435,7 +409,7 @@ static int spi_st_runtime_suspend(struct device *dev) writel_relaxed(0, spi_st->base + SSC_IEN); pinctrl_pm_select_sleep_state(dev); - spi_st_clk_disable(spi_st); + clk_disable_unprepare(spi_st->clk); return 0; } @@ -446,7 +420,7 @@ static int spi_st_runtime_resume(struct device *dev) struct spi_st *spi_st = spi_master_get_devdata(master); int ret; - ret = spi_st_clk_enable(spi_st); + ret = clk_prepare_enable(spi_st->clk); pinctrl_pm_select_default_state(dev); return ret; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 24d2745e9437..45a1b4ec4ca3 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -72,10 +72,10 @@ static unsigned long lowmem_deathpending_timeout; static unsigned long lowmem_count(struct shrinker *s, struct shrink_control *sc) { - return global_page_state(NR_ACTIVE_ANON) + - global_page_state(NR_ACTIVE_FILE) + - global_page_state(NR_INACTIVE_ANON) + - global_page_state(NR_INACTIVE_FILE); + return global_node_page_state(NR_ACTIVE_ANON) + + global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_ANON) + + global_node_page_state(NR_INACTIVE_FILE); } static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) @@ -91,8 +91,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) short selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; - int other_file = global_page_state(NR_FILE_PAGES) - - global_page_state(NR_SHMEM) - + int other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - total_swapcache_pages(); if (lowmem_adj_size < array_size) diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h index 425ebc5c32aa..fab66396d421 100644 --- a/drivers/staging/android/sync_debug.h +++ b/drivers/staging/android/sync_debug.h @@ -34,7 +34,8 @@ struct sync_timeline { char name[32]; /* protected by child_list_lock */ - int context, value; + u64 context; + int value; struct list_head child_list_head; spinlock_t child_list_lock; diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c index f77524294c27..8e52722266fe 100644 --- 
a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -170,7 +170,8 @@ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) * Insert it into sai_entries tail when init. */ static struct ll_sa_entry * -ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, +ll_sa_entry_alloc(struct dentry *parent, + struct ll_statahead_info *sai, __u64 index, const char *name, int len) { struct ll_inode_info *lli; @@ -217,7 +218,8 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, dname = (char *)entry + sizeof(struct ll_sa_entry); memcpy(dname, name, len); dname[len] = 0; - entry->se_qstr.hash = full_name_hash(name, len); + + entry->se_qstr.hash = full_name_hash(parent, name, len); entry->se_qstr.len = len; entry->se_qstr.name = dname; @@ -898,7 +900,7 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name, int rc; int rc1; - entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name, + entry = ll_sa_entry_alloc(parent, sai, sai->sai_index, entry_name, entry_name_len); if (IS_ERR(entry)) return; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index d4cc73bb6e1e..542801f04b0d 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -415,7 +415,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) return rc; } - rc = posix_acl_valid(acl); + rc = posix_acl_valid(&init_user_ns, acl); if (rc) { CERROR("validate acl: %d\n", rc); posix_acl_release(acl); diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index d1a7d6beee60..d011135802d5 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -1864,7 +1864,8 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req) LASSERT(page_count >= 0); for (i = 0; i < page_count; i++) - dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS); + dec_node_page_state(desc->bd_iov[i].kiov_page, + NR_UNSTABLE_NFS); atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr); LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0); @@ -1898,7 +1899,8 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req) LASSERT(page_count >= 0); for (i = 0; i < page_count; i++) - inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS); + inc_node_page_state(desc->bd_iov[i].kiov_page, + NR_UNSTABLE_NFS); LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0); atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr); diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h index 913101980827..798c48d0d32c 100644 --- a/drivers/tty/hvc/hvc_console.h +++ b/drivers/tty/hvc/hvc_console.h @@ -60,6 +60,7 @@ struct hvc_struct { struct winsize ws; struct work_struct tty_resize; struct list_head next; + unsigned long flags; }; /* implemented by a low level driver */ diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c index c9adb0559f61..bc7a96874637 100644 --- a/drivers/tty/hvc/hvc_irq.c +++ b/drivers/tty/hvc/hvc_irq.c @@ -14,6 +14,11 @@ static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance) /* if hvc_poll request a repoll, then kick the hvcd thread */ if (hvc_poll(dev_instance)) hvc_kick(); + + /* + * We're safe to always return IRQ_HANDLED as the hvcd thread will + * iterate through each hvc_struct. 
+ */ return IRQ_HANDLED; } @@ -28,8 +33,8 @@ int notifier_add_irq(struct hvc_struct *hp, int irq) hp->irq_requested = 0; return 0; } - rc = request_irq(irq, hvc_handle_interrupt, 0, - "hvc_console", hp); + rc = request_irq(irq, hvc_handle_interrupt, hp->flags, + "hvc_console", hp); if (!rc) hp->irq_requested = 1; return rc; diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c index 47b54c6aefd2..510799311099 100644 --- a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c @@ -214,7 +214,13 @@ static int hvc_opal_probe(struct platform_device *dev) dev->dev.of_node->full_name, boot ? " (boot console)" : ""); - irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT)); + irq = irq_of_parse_and_map(dev->dev.of_node, 0); + if (!irq) { + pr_info("hvc%d: No interrupts property, using OPAL event\n", + termno); + irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT)); + } + if (!irq) { pr_err("hvc_opal: Unable to map interrupt for device %s\n", dev->dev.of_node->full_name); @@ -224,6 +230,9 @@ static int hvc_opal_probe(struct platform_device *dev) hp = hvc_alloc(termno, irq, ops, MAX_VIO_PUT_CHARS); if (IS_ERR(hp)) return PTR_ERR(hp); + + /* hvc consoles on powernv may need to share a single irq */ + hp->flags = IRQF_SHARED; dev_set_drvdata(&dev->dev, hp); return 0; diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index 5beafd2d2218..ac1328629baa 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -539,7 +539,7 @@ static int uart_clps711x_remove(struct platform_device *pdev) } static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = { - { .compatible = "cirrus,clps711x-uart", }, + { .compatible = "cirrus,ep7209-uart", }, { } }; MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids); diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index ca0d3802f2af..4e603d060e80 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); - if (port->sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&port->lock); - } else - spin_lock(&port->lock); for (i = 0; i < n; i++) { if (*s == '\n') diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c10972fcc8e4..4fd041bec332 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -387,7 +387,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) * need to have the registers polled during D3, so avoid D3cold. */ if (xhci->quirks & XHCI_COMP_MODE_QUIRK) - pdev->no_d3cold = true; + pci_d3cold_disable(pdev); if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 6e705971d637..eb8f8d37cd95 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -79,15 +79,6 @@ config USB_LCD To compile this driver as a module, choose M here: the module will be called usblcd. -config USB_LED - tristate "USB LED driver support" - help - Say Y here if you want to connect an USBLED device to your - computer's USB port. - - To compile this driver as a module, choose M here: the - module will be called usbled. 
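The hvc change above passes IRQF_SHARED through hp->flags because several OPAL consoles may sit behind one interrupt line; hvc itself can return IRQ_HANDLED unconditionally since hvc_poll() walks every console anyway. For an ordinary device the shared-line contract is stricter, as in this hypothetical sketch:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct my_con {
	void __iomem *regs;	/* hypothetical device state */
};

static irqreturn_t my_con_interrupt(int irq, void *dev_id)
{
	struct my_con *con = dev_id;

	/* hypothetical "interrupt pending" status bit */
	if (!(readl(con->regs) & BIT(0)))
		return IRQ_NONE;	/* some other sharer owns this one */

	/* ... service the device ... */
	return IRQ_HANDLED;
}

/*
 * rc = request_irq(irq, my_con_interrupt, IRQF_SHARED, "my_con", con);
 * A shared line needs a unique, non-NULL dev_id so free_irq() can tell
 * the sharers apart.
 */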
- config USB_CYPRESS_CY7C63 tristate "Cypress CY7C63xxx USB driver support" help diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 2769cf6351b4..3d79faaad2fb 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o obj-$(CONFIG_USB_LCD) += usblcd.o obj-$(CONFIG_USB_LD) += ldusb.o -obj-$(CONFIG_USB_LED) += usbled.o obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o obj-$(CONFIG_USB_RIO500) += rio500.o obj-$(CONFIG_USB_TEST) += usbtest.o diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c deleted file mode 100644 index bdef0d6eb91d..000000000000 --- a/drivers/usb/misc/usbled.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * USB LED driver - * - * Copyright (C) 2004 Greg Kroah-Hartman (greg@kroah.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2. - * - */ - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/usb.h> - - -#define DRIVER_AUTHOR "Greg Kroah-Hartman, greg@kroah.com" -#define DRIVER_DESC "USB LED Driver" - -enum led_type { - DELCOM_VISUAL_SIGNAL_INDICATOR, - DREAM_CHEEKY_WEBMAIL_NOTIFIER, - RISO_KAGAKU_LED -}; - -/* the Webmail LED made by RISO KAGAKU CORP. decodes a color index - internally, we want to keep the red+green+blue sysfs api, so we decode - from 1-bit RGB to the riso kagaku color index according to this table... */ - -static unsigned const char riso_kagaku_tbl[] = { -/* R+2G+4B -> riso kagaku color index */ - [0] = 0, /* black */ - [1] = 2, /* red */ - [2] = 1, /* green */ - [3] = 5, /* yellow */ - [4] = 3, /* blue */ - [5] = 6, /* magenta */ - [6] = 4, /* cyan */ - [7] = 7 /* white */ -}; - -#define RISO_KAGAKU_IX(r,g,b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)] - -/* table of devices that work with this driver */ -static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x0fc5, 0x1223), - .driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR }, - { USB_DEVICE(0x1d34, 0x0004), - .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER }, - { USB_DEVICE(0x1d34, 0x000a), - .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER }, - { USB_DEVICE(0x1294, 0x1320), - .driver_info = RISO_KAGAKU_LED }, - { }, -}; -MODULE_DEVICE_TABLE(usb, id_table); - -struct usb_led { - struct usb_device *udev; - unsigned char blue; - unsigned char red; - unsigned char green; - enum led_type type; -}; - -static void change_color(struct usb_led *led) -{ - int retval = 0; - unsigned char *buffer; - int actlength; - - buffer = kmalloc(8, GFP_KERNEL); - if (!buffer) { - dev_err(&led->udev->dev, "out of memory\n"); - return; - } - - switch (led->type) { - case DELCOM_VISUAL_SIGNAL_INDICATOR: { - unsigned char color = 0x07; - - if (led->blue) - color &= ~0x04; - if (led->red) - color &= ~0x02; - if (led->green) - color &= ~0x01; - dev_dbg(&led->udev->dev, - "blue = %d, red = %d, green = %d, color = %.2x\n", - led->blue, led->red, led->green, color); - - retval = usb_control_msg(led->udev, - usb_sndctrlpipe(led->udev, 0), - 0x12, - 0xc8, - (0x02 * 0x100) + 0x0a, - (0x00 * 0x100) + color, - buffer, - 8, - 2000); - break; - } - - case DREAM_CHEEKY_WEBMAIL_NOTIFIER: - dev_dbg(&led->udev->dev, - "red = %d, green = %d, blue = %d\n", - led->red, led->green, led->blue); - - buffer[0] = led->red; - buffer[1] = led->green; - buffer[2] = led->blue; - buffer[3] = 
buffer[4] = buffer[5] = 0; - buffer[6] = 0x1a; - buffer[7] = 0x05; - - retval = usb_control_msg(led->udev, - usb_sndctrlpipe(led->udev, 0), - 0x09, - 0x21, - 0x200, - 0, - buffer, - 8, - 2000); - break; - - case RISO_KAGAKU_LED: - buffer[0] = RISO_KAGAKU_IX(led->red, led->green, led->blue); - buffer[1] = 0; - buffer[2] = 0; - buffer[3] = 0; - buffer[4] = 0; - - retval = usb_interrupt_msg(led->udev, - usb_sndctrlpipe(led->udev, 2), - buffer, 5, &actlength, 1000 /*ms timeout*/); - break; - - default: - dev_err(&led->udev->dev, "unknown device type %d\n", led->type); - } - - if (retval) - dev_dbg(&led->udev->dev, "retval = %d\n", retval); - kfree(buffer); -} - -#define show_set(value) \ -static ssize_t show_##value(struct device *dev, struct device_attribute *attr,\ - char *buf) \ -{ \ - struct usb_interface *intf = to_usb_interface(dev); \ - struct usb_led *led = usb_get_intfdata(intf); \ - \ - return sprintf(buf, "%d\n", led->value); \ -} \ -static ssize_t set_##value(struct device *dev, struct device_attribute *attr,\ - const char *buf, size_t count) \ -{ \ - struct usb_interface *intf = to_usb_interface(dev); \ - struct usb_led *led = usb_get_intfdata(intf); \ - int temp = simple_strtoul(buf, NULL, 10); \ - \ - led->value = temp; \ - change_color(led); \ - return count; \ -} \ -static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); -show_set(blue); -show_set(red); -show_set(green); - -static int led_probe(struct usb_interface *interface, - const struct usb_device_id *id) -{ - struct usb_device *udev = interface_to_usbdev(interface); - struct usb_led *dev = NULL; - int retval = -ENOMEM; - - dev = kzalloc(sizeof(struct usb_led), GFP_KERNEL); - if (dev == NULL) { - dev_err(&interface->dev, "out of memory\n"); - goto error_mem; - } - - dev->udev = usb_get_dev(udev); - dev->type = id->driver_info; - - usb_set_intfdata(interface, dev); - - retval = device_create_file(&interface->dev, &dev_attr_blue); - if (retval) - goto error; - retval = device_create_file(&interface->dev, &dev_attr_red); - if (retval) - goto error; - retval = device_create_file(&interface->dev, &dev_attr_green); - if (retval) - goto error; - - if (dev->type == DREAM_CHEEKY_WEBMAIL_NOTIFIER) { - unsigned char *enable; - - enable = kmemdup("\x1f\x02\0\x5f\0\0\x1a\x03", 8, GFP_KERNEL); - if (!enable) { - dev_err(&interface->dev, "out of memory\n"); - retval = -ENOMEM; - goto error; - } - - retval = usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - 0x09, - 0x21, - 0x200, - 0, - enable, - 8, - 2000); - - kfree(enable); - if (retval != 8) - goto error; - } - - dev_info(&interface->dev, "USB LED device now attached\n"); - return 0; - -error: - device_remove_file(&interface->dev, &dev_attr_blue); - device_remove_file(&interface->dev, &dev_attr_red); - device_remove_file(&interface->dev, &dev_attr_green); - usb_set_intfdata(interface, NULL); - usb_put_dev(dev->udev); - kfree(dev); -error_mem: - return retval; -} - -static void led_disconnect(struct usb_interface *interface) -{ - struct usb_led *dev; - - dev = usb_get_intfdata(interface); - - device_remove_file(&interface->dev, &dev_attr_blue); - device_remove_file(&interface->dev, &dev_attr_red); - device_remove_file(&interface->dev, &dev_attr_green); - - /* first remove the files, then set the pointer to NULL */ - usb_set_intfdata(interface, NULL); - - usb_put_dev(dev->udev); - - kfree(dev); - - dev_info(&interface->dev, "USB LED now disconnected\n"); -} - -static struct usb_driver led_driver = { - .name = "usbled", - .probe = led_probe, - .disconnect = 
led_disconnect, - .id_table = id_table, -}; - -module_usb_driver(led_driver); - -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 188b1ff03f5f..d624a527777f 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -110,6 +110,74 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev) return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; } +static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) +{ + struct resource *res; + int bar; + struct vfio_pci_dummy_resource *dummy_res; + + INIT_LIST_HEAD(&vdev->dummy_resources_list); + + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + res = vdev->pdev->resource + bar; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP)) + goto no_mmap; + + if (!(res->flags & IORESOURCE_MEM)) + goto no_mmap; + + /* + * The PCI core shouldn't set up a resource with a + * type but zero size. But there may be bugs that + * cause us to do that. + */ + if (!resource_size(res)) + goto no_mmap; + + if (resource_size(res) >= PAGE_SIZE) { + vdev->bar_mmap_supported[bar] = true; + continue; + } + + if (!(res->start & ~PAGE_MASK)) { + /* + * Add a dummy resource to reserve the remainder + * of the exclusive page in case that hot-add + * device's bar is assigned into it. + */ + dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL); + if (dummy_res == NULL) + goto no_mmap; + + dummy_res->resource.name = "vfio sub-page reserved"; + dummy_res->resource.start = res->end + 1; + dummy_res->resource.end = res->start + PAGE_SIZE - 1; + dummy_res->resource.flags = res->flags; + if (request_resource(res->parent, + &dummy_res->resource)) { + kfree(dummy_res); + goto no_mmap; + } + dummy_res->index = bar; + list_add(&dummy_res->res_next, + &vdev->dummy_resources_list); + vdev->bar_mmap_supported[bar] = true; + continue; + } + /* + * Here we don't handle the case when the BAR is not page + * aligned because we can't expect the BAR will be + * assigned into the same location in a page in guest + * when we passthrough the BAR. And it's hard to access + * this BAR in userspace because we have no way to get + * the BAR's location in a page. 
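vfio_pci_probe_mmaps() above claims the unused tail of the page holding a page-aligned sub-page BAR, so that a hot-added device's BAR cannot later be assigned into the same page. The core trick, as a hedged helper with error handling trimmed (my_* name hypothetical):

#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* bar must be page aligned and smaller than PAGE_SIZE, as checked above. */
static struct resource *my_reserve_page_tail(struct resource *bar)
{
	struct resource *dummy;

	dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
	if (!dummy)
		return NULL;

	dummy->name = "sub-page reserved";		/* illustrative label */
	dummy->start = bar->end + 1;			/* first unused byte */
	dummy->end = bar->start + PAGE_SIZE - 1;	/* end of the page */
	dummy->flags = bar->flags;

	if (request_resource(bar->parent, dummy)) {
		kfree(dummy);
		return NULL;
	}

	return dummy;	/* undo with release_resource() plus kfree() */
}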
+ */ +no_mmap: + vdev->bar_mmap_supported[bar] = false; + } +} + static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); @@ -218,12 +286,15 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) } } + vfio_pci_probe_mmaps(vdev); + return 0; } static void vfio_pci_disable(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_dummy_resource *dummy_res, *tmp; int i, bar; /* Stop the device from further DMA */ @@ -252,6 +323,13 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) vdev->barmap[bar] = NULL; } + list_for_each_entry_safe(dummy_res, tmp, + &vdev->dummy_resources_list, res_next) { + list_del(&dummy_res->res_next); + release_resource(&dummy_res->resource); + kfree(dummy_res); + } + vdev->needs_reset = true; /* @@ -623,9 +701,7 @@ static long vfio_pci_ioctl(void *device_data, info.flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; - if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) && - pci_resource_flags(pdev, info.index) & - IORESOURCE_MEM && info.size >= PAGE_SIZE) { + if (vdev->bar_mmap_supported[info.index]) { info.flags |= VFIO_REGION_INFO_FLAG_MMAP; if (info.index == vdev->msix_bar) { ret = msix_sparse_mmap_cap(vdev, &caps); @@ -1049,16 +1125,16 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) return -EINVAL; if (index >= VFIO_PCI_ROM_REGION_INDEX) return -EINVAL; - if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM)) + if (!vdev->bar_mmap_supported[index]) return -EINVAL; - phys_len = pci_resource_len(pdev, index); + phys_len = PAGE_ALIGN(pci_resource_len(pdev, index)); req_len = vma->vm_end - vma->vm_start; pgoff = vma->vm_pgoff & ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); req_start = pgoff << PAGE_SHIFT; - if (phys_len < PAGE_SIZE || req_start + req_len > phys_len) + if (req_start + req_len > phys_len) return -EINVAL; if (index == vdev->msix_bar) { diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 016c14a1b454..2128de86c80d 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -57,9 +57,16 @@ struct vfio_pci_region { u32 flags; }; +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; + bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1]; u8 *pci_config_map; u8 *vconfig; struct perm_bits *msi_perm; @@ -88,6 +95,7 @@ struct vfio_pci_device { int refcnt; struct eventfd_ctx *err_trigger; struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index a66479bd0edf..31372fbf6c5b 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -68,6 +68,7 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) vdev->get_resource = get_amba_resource; vdev->get_irq = get_amba_irq; vdev->parent_module = THIS_MODULE; + vdev->reset_required = false; ret = vfio_platform_probe_common(vdev, &adev->dev); if (ret) { diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index b1cc3a768784..6561751a1063 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -23,6 +23,10 @@ #define DRIVER_AUTHOR "Antonios Motakis 
<a.motakis@virtualopensystems.com>" #define DRIVER_DESC "VFIO for platform devices - User Level meta-driver" +static bool reset_required = true; +module_param(reset_required, bool, 0444); +MODULE_PARM_DESC(reset_required, "override reset requirement (default: 1)"); + /* probing devices from the linux platform bus */ static struct resource *get_platform_resource(struct vfio_platform_device *vdev, @@ -66,6 +70,7 @@ static int vfio_platform_probe(struct platform_device *pdev) vdev->get_resource = get_platform_resource; vdev->get_irq = get_platform_irq; vdev->parent_module = THIS_MODULE; + vdev->reset_required = reset_required; ret = vfio_platform_probe_common(vdev, &pdev->dev); if (ret) diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e65b142d3422..1cf2d462b53d 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -13,6 +13,7 @@ */ #include <linux/device.h> +#include <linux/acpi.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/mutex.h> @@ -27,6 +28,8 @@ #define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>" #define DRIVER_DESC "VFIO platform base module" +#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL) + static LIST_HEAD(reset_list); static DEFINE_MUTEX(driver_lock); @@ -41,7 +44,7 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat, if (!strcmp(iter->compat, compat) && try_module_get(iter->owner)) { *module = iter->owner; - reset_fn = iter->reset; + reset_fn = iter->of_reset; break; } } @@ -49,20 +52,91 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat, return reset_fn; } -static void vfio_platform_get_reset(struct vfio_platform_device *vdev) +static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev, + struct device *dev) { - vdev->reset = vfio_platform_lookup_reset(vdev->compat, - &vdev->reset_module); - if (!vdev->reset) { + struct acpi_device *adev; + + if (acpi_disabled) + return -ENOENT; + + adev = ACPI_COMPANION(dev); + if (!adev) { + pr_err("VFIO: ACPI companion device not found for %s\n", + vdev->name); + return -ENODEV; + } + +#ifdef CONFIG_ACPI + vdev->acpihid = acpi_device_hid(adev); +#endif + return WARN_ON(!vdev->acpihid) ? -EINVAL : 0; +} + +int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, + const char **extra_dbg) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = vdev->device; + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status acpi_ret; + + acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); + if (ACPI_FAILURE(acpi_ret)) { + if (extra_dbg) + *extra_dbg = acpi_format_exception(acpi_ret); + return -EINVAL; + } + + return 0; +#else + return -ENOENT; +#endif +} + +bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev) +{ +#ifdef CONFIG_ACPI + struct device *dev = vdev->device; + acpi_handle handle = ACPI_HANDLE(dev); + + return acpi_has_method(handle, "_RST"); +#else + return false; +#endif +} + +static bool vfio_platform_has_reset(struct vfio_platform_device *vdev) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return vfio_platform_acpi_has_reset(vdev); + + return vdev->of_reset ? true : false; +} + +static int vfio_platform_get_reset(struct vfio_platform_device *vdev) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return vfio_platform_acpi_has_reset(vdev) ? 
0 : -ENOENT; + + vdev->of_reset = vfio_platform_lookup_reset(vdev->compat, + &vdev->reset_module); + if (!vdev->of_reset) { request_module("vfio-reset:%s", vdev->compat); - vdev->reset = vfio_platform_lookup_reset(vdev->compat, - &vdev->reset_module); + vdev->of_reset = vfio_platform_lookup_reset(vdev->compat, + &vdev->reset_module); } + + return vdev->of_reset ? 0 : -ENOENT; } static void vfio_platform_put_reset(struct vfio_platform_device *vdev) { - if (vdev->reset) + if (VFIO_PLATFORM_IS_ACPI(vdev)) + return; + + if (vdev->of_reset) module_put(vdev->reset_module); } @@ -134,6 +208,21 @@ static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev) kfree(vdev->regions); } +static int vfio_platform_call_reset(struct vfio_platform_device *vdev, + const char **extra_dbg) +{ + if (VFIO_PLATFORM_IS_ACPI(vdev)) { + dev_info(vdev->device, "reset\n"); + return vfio_platform_acpi_call_reset(vdev, extra_dbg); + } else if (vdev->of_reset) { + dev_info(vdev->device, "reset\n"); + return vdev->of_reset(vdev); + } + + dev_warn(vdev->device, "no reset function found!\n"); + return -EINVAL; +} + static void vfio_platform_release(void *device_data) { struct vfio_platform_device *vdev = device_data; @@ -141,11 +230,14 @@ static void vfio_platform_release(void *device_data) mutex_lock(&driver_lock); if (!(--vdev->refcnt)) { - if (vdev->reset) { - dev_info(vdev->device, "reset\n"); - vdev->reset(vdev); - } else { - dev_warn(vdev->device, "no reset function found!\n"); + const char *extra_dbg = NULL; + int ret; + + ret = vfio_platform_call_reset(vdev, &extra_dbg); + if (ret && vdev->reset_required) { + dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n", + ret, extra_dbg ? extra_dbg : ""); + WARN_ON(1); } vfio_platform_regions_cleanup(vdev); vfio_platform_irq_cleanup(vdev); @@ -167,6 +259,8 @@ static int vfio_platform_open(void *device_data) mutex_lock(&driver_lock); if (!vdev->refcnt) { + const char *extra_dbg = NULL; + ret = vfio_platform_regions_init(vdev); if (ret) goto err_reg; @@ -175,11 +269,11 @@ static int vfio_platform_open(void *device_data) if (ret) goto err_irq; - if (vdev->reset) { - dev_info(vdev->device, "reset\n"); - vdev->reset(vdev); - } else { - dev_warn(vdev->device, "no reset function found!\n"); + ret = vfio_platform_call_reset(vdev, &extra_dbg); + if (ret && vdev->reset_required) { + dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n", + ret, extra_dbg ? 
extra_dbg : ""); + goto err_rst; } } @@ -188,6 +282,8 @@ static int vfio_platform_open(void *device_data) mutex_unlock(&driver_lock); return 0; +err_rst: + vfio_platform_irq_cleanup(vdev); err_irq: vfio_platform_regions_cleanup(vdev); err_reg: @@ -213,7 +309,7 @@ static long vfio_platform_ioctl(void *device_data, if (info.argsz < minsz) return -EINVAL; - if (vdev->reset) + if (vfio_platform_has_reset(vdev)) vdev->flags |= VFIO_DEVICE_FLAGS_RESET; info.flags = vdev->flags; info.num_regions = vdev->num_regions; @@ -312,10 +408,7 @@ static long vfio_platform_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - if (vdev->reset) - return vdev->reset(vdev); - else - return -EINVAL; + return vfio_platform_call_reset(vdev, NULL); } return -ENOTTY; @@ -544,6 +637,37 @@ static const struct vfio_device_ops vfio_platform_ops = { .mmap = vfio_platform_mmap, }; +int vfio_platform_of_probe(struct vfio_platform_device *vdev, + struct device *dev) +{ + int ret; + + ret = device_property_read_string(dev, "compatible", + &vdev->compat); + if (ret) + pr_err("VFIO: cannot retrieve compat for %s\n", + vdev->name); + + return ret; +} + +/* + * There can be two kernel build combinations. One build where + * ACPI is not selected in Kconfig and another one with the ACPI Kconfig. + * + * In the first case, vfio_platform_acpi_probe will return since + * acpi_disabled is 1. DT user will not see any kind of messages from + * ACPI. + * + * In the second case, both DT and ACPI is compiled in but the system is + * booting with any of these combinations. + * + * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine + * terminates immediately without any messages. + * + * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are + * valid checks. We cannot claim that this system is DT. 
+ */ int vfio_platform_probe_common(struct vfio_platform_device *vdev, struct device *dev) { @@ -553,15 +677,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, if (!vdev) return -EINVAL; - ret = device_property_read_string(dev, "compatible", &vdev->compat); - if (ret) { - pr_err("VFIO: cannot retrieve compat for %s\n", vdev->name); - return -EINVAL; - } + ret = vfio_platform_acpi_probe(vdev, dev); + if (ret) + ret = vfio_platform_of_probe(vdev, dev); + + if (ret) + return ret; vdev->device = dev; - group = iommu_group_get(dev); + ret = vfio_platform_get_reset(vdev); + if (ret && vdev->reset_required) { + pr_err("vfio: no reset function found for device %s\n", + vdev->name); + return ret; + } + + group = vfio_iommu_group_get(dev); if (!group) { pr_err("VFIO: No IOMMU group for device %s\n", vdev->name); return -EINVAL; @@ -569,12 +701,10 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev); if (ret) { - iommu_group_put(group); + vfio_iommu_group_put(group, dev); return ret; } - vfio_platform_get_reset(vdev); - mutex_init(&vdev->igate); return 0; @@ -589,7 +719,7 @@ struct vfio_platform_device *vfio_platform_remove_common(struct device *dev) if (vdev) { vfio_platform_put_reset(vdev); - iommu_group_put(dev->iommu_group); + vfio_iommu_group_put(dev->iommu_group, dev); } return vdev; @@ -611,7 +741,7 @@ void vfio_platform_unregister_reset(const char *compat, mutex_lock(&driver_lock); list_for_each_entry_safe(iter, temp, &reset_list, link) { - if (!strcmp(iter->compat, compat) && (iter->reset == fn)) { + if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) { list_del(&iter->link); break; } diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h index 42816dd280cb..85ffe5d9d1ab 100644 --- a/drivers/vfio/platform/vfio_platform_private.h +++ b/drivers/vfio/platform/vfio_platform_private.h @@ -58,6 +58,7 @@ struct vfio_platform_device { struct mutex igate; struct module *parent_module; const char *compat; + const char *acpihid; struct module *reset_module; struct device *device; @@ -71,7 +72,9 @@ struct vfio_platform_device { struct resource* (*get_resource)(struct vfio_platform_device *vdev, int i); int (*get_irq)(struct vfio_platform_device *vdev, int i); - int (*reset)(struct vfio_platform_device *vdev); + int (*of_reset)(struct vfio_platform_device *vdev); + + bool reset_required; }; typedef int (*vfio_platform_reset_fn_t)(struct vfio_platform_device *vdev); @@ -80,7 +83,7 @@ struct vfio_platform_reset_node { struct list_head link; char *compat; struct module *owner; - vfio_platform_reset_fn_t reset; + vfio_platform_reset_fn_t of_reset; }; extern int vfio_platform_probe_common(struct vfio_platform_device *vdev, @@ -103,7 +106,7 @@ extern void vfio_platform_unregister_reset(const char *compat, static struct vfio_platform_reset_node __reset ## _node = { \ .owner = THIS_MODULE, \ .compat = __compat, \ - .reset = __reset, \ + .of_reset = __reset, \ }; \ __vfio_platform_register_reset(&__reset ## _node) diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 6fd6fa5469de..d1d70e0b011b 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1711,8 +1711,8 @@ EXPORT_SYMBOL_GPL(vfio_group_get_external_user); void vfio_group_put_external_user(struct vfio_group *group) { - vfio_group_put(group); vfio_group_try_dissolve_container(group); + vfio_group_put(group); } EXPORT_SYMBOL_GPL(vfio_group_put_external_user); diff --git 
a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index e5b14f52628f..939f057836e1 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/i2c.h> #include <linux/backlight.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/of.h> #include <linux/platform_data/lp855x.h> @@ -74,6 +75,7 @@ struct lp855x { struct lp855x_platform_data *pdata; struct pwm_device *pwm; struct regulator *supply; /* regulator for VDD input */ + struct regulator *enable; /* regulator for EN/VDDIO input */ }; static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data) @@ -433,6 +435,19 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) lp->supply = NULL; } + lp->enable = devm_regulator_get_optional(lp->dev, "enable"); + if (IS_ERR(lp->enable)) { + ret = PTR_ERR(lp->enable); + if (ret == -ENODEV) { + lp->enable = NULL; + } else { + if (ret != -EPROBE_DEFER) + dev_err(lp->dev, "error getting enable regulator: %d\n", + ret); + return ret; + } + } + if (lp->supply) { ret = regulator_enable(lp->supply); if (ret < 0) { @@ -441,6 +456,20 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) } } + if (lp->enable) { + ret = regulator_enable(lp->enable); + if (ret < 0) { + dev_err(lp->dev, "failed to enable vddio: %d\n", ret); + return ret; + } + + /* + * LP8555 datasheet says t_RESPONSE (time between VDDIO and + * I2C) is 1ms. + */ + usleep_range(1000, 2000); + } + i2c_set_clientdata(cl, lp); ret = lp855x_configure(lp); diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c index 8fe41caac38e..e2d7d039ce3b 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.c +++ b/drivers/video/fbdev/bfin_adv7393fb.c @@ -10,6 +10,8 @@ * TODO: Code Cleanup */ +#define DRIVER_NAME "bfin-adv7393" + #define pr_fmt(fmt) DRIVER_NAME ": " fmt #include <linux/module.h> diff --git a/drivers/video/fbdev/bfin_adv7393fb.h b/drivers/video/fbdev/bfin_adv7393fb.h index cd591b5152a5..afd0380e19e1 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.h +++ b/drivers/video/fbdev/bfin_adv7393fb.h @@ -59,8 +59,6 @@ enum { BLANK_OFF, }; -#define DRIVER_NAME "bfin-adv7393" - struct adv7393fb_modes { const s8 name[25]; /* Full name */ u16 xres; /* Active Horizonzal Pixels */ diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index 649b32f78c08..ff561073ee4e 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -273,7 +273,7 @@ static int clps711x_fb_probe(struct platform_device *pdev) } cfb->syscon = - syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1"); + syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1"); if (IS_ERR(cfb->syscon)) { ret = PTR_ERR(cfb->syscon); goto out_fb_release; @@ -376,7 +376,7 @@ static int clps711x_fb_remove(struct platform_device *pdev) } static const struct of_device_id clps711x_fb_dt_ids[] = { - { .compatible = "cirrus,clps711x-fb", }, + { .compatible = "cirrus,ep7209-fb", }, { } }; MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c index 8511c648a15c..9d78411a3bf7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c @@ -14,7 +14,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include 
<video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> struct panel_drv_data { @@ -25,7 +25,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if (!ddata->dev->of_node) { - in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index d811e6dcaef7..06e1db34541e 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -16,8 +16,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings dvic_default_timings = { .x_res = 640, @@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = { .detect = dvic_detect, }; -static int dvic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_dvi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - int i2c_bus_num; - - pdata = dev_get_platdata(&pdev->dev); - i2c_bus_num = pdata->i2c_bus_num; - - if (i2c_bus_num != -1) { - struct i2c_adapter *adapter; - - adapter = i2c_get_adapter(i2c_bus_num); - if (!adapter) { - dev_err(&pdev->dev, - "Failed to get I2C adapter, bus %d\n", - i2c_bus_num); - return -EPROBE_DEFER; - } - - ddata->i2c_adapter = adapter; - } - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - i2c_put_adapter(ddata->i2c_adapter); - - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dvic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = dvic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dvic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dvic_probe_of(pdev); + if (r) + return r; ddata->timings = dvic_default_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c index 6ee4129bc0c0..58d5803ede67 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c +++ 
b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c @@ -17,8 +17,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, @@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = { .set_hdmi_infoframe = hdmic_set_infoframe, }; -static int hdmic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_hdmi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->hpd_gpio = -ENODEV; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int hdmic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - if (dev_get_platdata(&pdev->dev)) { - r = hdmic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = hdmic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = hdmic_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->hpd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c index 8c246c213e06..a9a67167cc3d 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c @@ -20,7 +20,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c index d9048b3df495..8c0953d069b7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c @@ -15,8 +15,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = { .get_timings = tfp410_get_timings, }; -static int tfp410_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct encoder_tfp410_platform_data *pdata; - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->pd_gpio = pdata->power_down_gpio; - - ddata->data_lines = pdata->data_lines; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int tfp410_probe_of(struct platform_device *pdev) { struct panel_drv_data 
*ddata = platform_get_drvdata(pdev); @@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = tfp410_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = tfp410_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = tfp410_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->pd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c index 677e2545fcbe..80dc47347e21 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c index e780fd4f8b46..ace3d818afe5 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c @@ -16,7 +16,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 3414c2609320..b58012b82b6f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -25,8 +25,7 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #include <video/mipi_display.h> /* DSI Virtual channel. Hardcoded for now. 
*/ @@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = { .memory_read = dsicm_memory_read, }; -static int dsicm_probe_pdata(struct platform_device *pdev) -{ - const struct panel_dsicm_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source\n"); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->reset_gpio = pdata->reset_gpio; - - if (pdata->use_ext_te) - ddata->ext_te_gpio = pdata->ext_te_gpio; - else - ddata->ext_te_gpio = -1; - - ddata->ulps_timeout = pdata->ulps_timeout; - - ddata->use_dsi_backlight = pdata->use_dsi_backlight; - - ddata->pin_config = pdata->pin_config; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dsicm_probe_of(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev) dev_dbg(dev, "probe\n"); + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->pdev = pdev; - if (dev_get_platdata(dev)) { - r = dsicm_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dsicm_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dsicm_probe_of(pdev); + if (r) + return r; ddata->timings.x_res = 864; ddata->timings.y_res = 480; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index 18eb60e9c9ec..f14691ce8d02 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -16,8 +16,7 @@ #include <linux/mutex.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int lb035q02_probe_pdata(struct spi_device *spi) -{ - const struct panel_lb035q02_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio, - GPIOF_OUT_INIT_LOW, "panel enable"); - if (r) - goto err_gpio; - - ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio); - - ddata->backlight_gpio = pdata->backlight_gpio; - - return 0; -err_gpio: - omap_dss_put_device(ddata->in); - return r; -} - static int lb035q02_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) struct omap_dss_device *dssdev; int r; + if (!spi->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 
if (ddata == NULL) return -ENOMEM; @@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = lb035q02_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = lb035q02_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = lb035q02_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->backlight_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index 8a928c9a2fc9..a2cbadd3eca3 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -18,8 +18,7 @@ #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = { }; -static int nec_8048_probe_pdata(struct spi_device *spi) -{ - const struct panel_nec_nl8048hl11_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->qvga_gpio = pdata->qvga_gpio; - ddata->res_gpio = pdata->res_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int nec_8048_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->mode = SPI_MODE_0; spi->bits_per_word = 32; @@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = nec_8048_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = nec_8048_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = nec_8048_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->qvga_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c index 1954ec913ce5..a8be18a87fa0 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, - char *desc, struct gpio_desc **gpiod) -{ - int r; - - r = devm_gpio_request_one(dev, gpio, flags, desc); - if (r) { - *gpiod = NULL; - return r == -ENOENT ? 
0 : r; - } - - *gpiod = gpio_to_desc(gpio); - - return 0; -} - -static int sharp_ls_probe_pdata(struct platform_device *pdev) -{ - const struct panel_sharp_ls037v7dw01_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW, - "lcd MO", &ddata->mo_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH, - "lcd LR", &ddata->lr_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH, - "lcd UD", &ddata->ud_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW, - "lcd RESB", &ddata->resb_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW, - "lcd INI", &ddata->ini_gpio); - if (r) - return r; - - return 0; -} - static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, const char *desc, struct gpio_desc **gpiod) { @@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (ddata == NULL) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = sharp_ls_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = sharp_ls_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = sharp_ls_probe_of(pdev); + if (r) + return r; ddata->videomode = sharp_ls_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 31efcca801bd..468560a6daae 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -33,7 +33,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #define MIPID_CMD_READ_DISP_ID 0x04 diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c index 4d657f3ab679..b529a8c2b652 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c @@ -28,8 +28,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = { .check_timings = td028ttec1_panel_check_timings, }; -static int td028ttec1_panel_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td028ttec1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - 
dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int td028ttec1_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 9; spi->mode = SPI_MODE_3; @@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) ddata->spi_dev = spi; - if (dev_get_platdata(&spi->dev)) { - r = td028ttec1_panel_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = td028ttec1_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = td028ttec1_probe_of(spi); + if (r) + return r; ddata->videomode = td028ttec1_panel_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index 68e3b68a2920..51e628b85f4a 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -19,8 +19,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 @@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = { }; -static int tpo_td043_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td043mtea1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->nreset_gpio = pdata->nreset_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int tpo_td043_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 16; spi->mode = SPI_MODE_0; @@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = tpo_td043_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = tpo_td043_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = tpo_td043_probe_of(spi); + if (r) + return r; ddata->mode = TPO_R02_MODE_800x480; memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c index 663ccc3bf4e5..2481f4871f66 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c @@ -23,7 +23,7 @@ #include <linux/spinlock.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index 5a87179b7312..29de4827589d 100644 --- 
a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -35,7 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" @@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; register_pm_notifier(&omap_dss_pm_notif_block); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c index 6607db37a5e4..3691bde4ce0a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c @@ -26,7 +26,7 @@ #include <linux/interrupt.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 5491e304f4fe..7a75dfda9845 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -41,7 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c index 038c15b04215..59c9a5c47ca9 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c @@ -18,7 +18,7 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dispc.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c index 75b5286029ee..b3fdbfd0b82d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c @@ -25,7 +25,7 @@ #include <linux/platform_device.h> #include <linux/sysfs.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c index ef5b9027985d..dd5468695c43 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/display.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c index 7953e6a52346..da09806b940c 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c @@ -34,7 +34,7 @@ #include <linux/clk.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index d63e59807707..9e4800a4e3d1 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -42,7 +42,7 @@ #include <linux/of_platform.h> #include 
<linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/mipi_display.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c index bf407b6ba15c..d356a252ab4a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c @@ -18,7 +18,7 @@ #include <linux/of.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 0078c4d1fc31..47d7f69ad9ad 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -41,7 +41,7 @@ #include <linux/suspend.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h index 0184a8461df1..a3cc0ca8f9d2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h @@ -73,6 +73,17 @@ #define FLD_MOD(orig, val, start, end) \ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) +enum omap_dss_clk_source { + OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK + * OMAP4: DSS_FCLK */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK + * OMAP4: PLL1_CLK1 */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK + * OMAP4: PLL1_CLK2 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ +}; + enum dss_io_pad_mode { DSS_IO_PAD_MODE_RESET, DSS_IO_PAD_MODE_RFBI, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c index c886a2927f73..8fc843b56b26 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c @@ -23,7 +23,7 @@ #include <linux/err.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h index 53616b02b613..f6de87e078b0 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h @@ -23,7 +23,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hdmi.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> +#include <sound/omap-hdmi-audio.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 2e71aec838b1..926a6f20dbb2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -33,7 +33,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <sound/omap-hdmi-audio.h> #include "hdmi4_core.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index aade6d99662a..0ee829a165c3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -38,7 +38,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include 
<video/omapfb_dss.h> #include <sound/omap-hdmi-audio.h> #include "hdmi5_core.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c index 1b8fcc6c4ba1..189a5ad125a3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c @@ -4,7 +4,7 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c index 1f5d19c119ce..9a13c35fd6d8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c index 06e23a7c432c..eac3665aba6c 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c index 7c544bc56fb5..705373e4cf38 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c @@ -14,7 +14,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c index a7414fb12830..9e2a67fdf4d2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c @@ -26,7 +26,7 @@ #include <linux/platform_device.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c index 08a67f4f6a20..69f86d2cc274 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" @@ -69,7 +69,6 @@ int dss_init_overlay_managers(void) break; } - mgr->caps = 0; mgr->supported_displays = dss_feat_get_supported_displays(mgr->id); mgr->supported_outputs = diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c index 16072159bd24..bed9a978269d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/output.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c @@ -21,7 +21,7 @@ #include <linux/slab.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c index 4cc5ddebfb34..f1f6c0aea752 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c +++ 
b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c @@ -26,7 +26,7 @@ #include <linux/kobject.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c index 2f7cee985cdd..d6c5d75d2ef8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c @@ -30,7 +30,7 @@ #include <linux/delay.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c index f974ddcd3b6e..0564c5606cd0 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c @@ -22,7 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/sched.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c index aea6a1d0fb20..562b0c4ae0c6 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c @@ -38,7 +38,7 @@ #include <linux/pm_runtime.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" struct rfbi_reg { u16 idx; }; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c index d747cc6b59e1..c4be732a4714 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c @@ -29,7 +29,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" static struct { diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index 26e0ee30adf8..392464da12e4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -37,7 +37,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c index b1ec59e42940..a890540f2037 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/sched.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 9ddfdd63b84c..ef69273074ba 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c @@ -30,7 +30,7 @@ #include <linux/export.h> #include <linux/sizes.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index d3af01c94a58..2fb90cb6803f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -30,7 +30,7 @@ #include <linux/platform_device.h> #include <linux/omapfb.h> -#include <video/omapdss.h> +#include 
<video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c index 18fa9e1d0033..8087a009c54f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c @@ -29,7 +29,7 @@ #include <linux/mm.h> #include <linux/omapfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h index 623cd872a367..bcb9ff4a607d 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb.h +++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h @@ -31,7 +31,7 @@ #include <linux/dma-attrs.h> #include <linux/dma-mapping.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #ifdef DEBUG extern bool omapfb_debug; diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c index 10fbfd8ab963..b6bc4a0bda2a 100644 --- a/drivers/video/logo/logo.c +++ b/drivers/video/logo/logo.c @@ -36,11 +36,11 @@ static int __init fb_logo_late_init(void) late_initcall(fb_logo_late_init); -/* logo's are marked __initdata. Use __init_refok to tell +/* logos are marked __initdata. Use __ref to tell * modpost that it is intended that this function uses data * marked __initdata. */ -const struct linux_logo * __init_refok fb_find_logo(int depth) +const struct linux_logo * __ref fb_find_logo(int depth) { const struct linux_logo *logo = NULL; diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index a2eec97d5064..bb09de633939 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) goto out; } - hdq_data->hdq_irqstatus = 0; - if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c index d488961a8c90..51f2f66d6555 100644 --- a/drivers/w1/slaves/w1_ds2406.c +++ b/drivers/w1/slaves/w1_ds2406.c @@ -153,16 +153,4 @@ static struct w1_family w1_family_12 = { .fid = W1_FAMILY_DS2406, .fops = &w1_f12_fops, }; - -static int __init w1_f12_init(void) -{ - return w1_register_family(&w1_family_12); -} - -static void __exit w1_f12_exit(void) -{ - w1_unregister_family(&w1_family_12); -} - -module_init(w1_f12_init); -module_exit(w1_f12_exit); +module_w1_family(w1_family_12); diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index 7dfa0e11688a..aec5958e66e9 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c @@ -351,16 +351,4 @@ static struct w1_family w1_family_29 = { .fid = W1_FAMILY_DS2408, .fops = &w1_f29_fops, }; - -static int __init w1_f29_init(void) -{ - return w1_register_family(&w1_family_29); -} - -static void __exit w1_f29_exit(void) -{ - w1_unregister_family(&w1_family_29); -} - -module_init(w1_f29_init); -module_exit(w1_f29_exit); +module_w1_family(w1_family_29); diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c index ee28fc1ff390..f2e1c51533b9 100644 --- a/drivers/w1/slaves/w1_ds2413.c +++ b/drivers/w1/slaves/w1_ds2413.c @@ -135,16 +135,4 @@ static struct w1_family w1_family_3a = { .fid = W1_FAMILY_DS2413, .fops = &w1_f3a_fops, }; - -static int __init w1_f3a_init(void) -{ - return w1_register_family(&w1_family_3a); -} - -static void __exit
w1_f3a_exit(void) -{ - w1_unregister_family(&w1_family_3a); -} - -module_init(w1_f3a_init); -module_exit(w1_f3a_exit); +module_w1_family(w1_family_3a); diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c index 7e41b7d91fb5..4ab54fd9dde2 100644 --- a/drivers/w1/slaves/w1_ds2423.c +++ b/drivers/w1/slaves/w1_ds2423.c @@ -138,19 +138,7 @@ static struct w1_family w1_family_1d = { .fid = W1_COUNTER_DS2423, .fops = &w1_f1d_fops, }; - -static int __init w1_f1d_init(void) -{ - return w1_register_family(&w1_family_1d); -} - -static void __exit w1_f1d_exit(void) -{ - w1_unregister_family(&w1_family_1d); -} - -module_init(w1_f1d_init); -module_exit(w1_f1d_exit); +module_w1_family(w1_family_1d); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>"); diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c index 9c4ff9d28adc..80572cb63ba8 100644 --- a/drivers/w1/slaves/w1_ds2431.c +++ b/drivers/w1/slaves/w1_ds2431.c @@ -288,19 +288,7 @@ static struct w1_family w1_family_2d = { .fid = W1_EEPROM_DS2431, .fops = &w1_f2d_fops, }; - -static int __init w1_f2d_init(void) -{ - return w1_register_family(&w1_family_2d); -} - -static void __exit w1_f2d_fini(void) -{ - w1_unregister_family(&w1_family_2d); -} - -module_init(w1_f2d_init); -module_exit(w1_f2d_fini); +module_w1_family(w1_family_2d); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>"); diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c index 72319a968a9e..6cf378c89ecb 100644 --- a/drivers/w1/slaves/w1_ds2433.c +++ b/drivers/w1/slaves/w1_ds2433.c @@ -305,16 +305,4 @@ static struct w1_family w1_family_23 = { .fid = W1_EEPROM_DS2433, .fops = &w1_f23_fops, }; - -static int __init w1_f23_init(void) -{ - return w1_register_family(&w1_family_23); -} - -static void __exit w1_f23_fini(void) -{ - w1_unregister_family(&w1_family_23); -} - -module_init(w1_f23_init); -module_exit(w1_f23_fini); +module_w1_family(w1_family_23); diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c index d9079d48d112..ffa37f773b3b 100644 --- a/drivers/w1/slaves/w1_ds2760.c +++ b/drivers/w1/slaves/w1_ds2760.c @@ -121,25 +121,14 @@ static const struct attribute_group *w1_ds2760_groups[] = { NULL, }; -static DEFINE_IDA(bat_ida); - static int w1_ds2760_add_slave(struct w1_slave *sl) { int ret; - int id; struct platform_device *pdev; - id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); - if (id < 0) { - ret = id; - goto noid; - } - - pdev = platform_device_alloc("ds2760-battery", id); - if (!pdev) { - ret = -ENOMEM; - goto pdev_alloc_failed; - } + pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); @@ -148,24 +137,19 @@ static int w1_ds2760_add_slave(struct w1_slave *sl) dev_set_drvdata(&sl->dev, pdev); - goto success; + return 0; pdev_add_failed: platform_device_put(pdev); -pdev_alloc_failed: - ida_simple_remove(&bat_ida, id); -noid: -success: + return ret; } static void w1_ds2760_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); - int id = pdev->id; platform_device_unregister(pdev); - ida_simple_remove(&bat_ida, id); } static struct w1_family_ops w1_ds2760_fops = { @@ -178,28 +162,13 @@ static struct w1_family w1_ds2760_family = { .fid = W1_FAMILY_DS2760, .fops = &w1_ds2760_fops, }; - -static int __init w1_ds2760_init(void) -{ - pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, 
Szabolcs Gyurko\n"); - ida_init(&bat_ida); - return w1_register_family(&w1_ds2760_family); -} - -static void __exit w1_ds2760_exit(void) -{ - w1_unregister_family(&w1_ds2760_family); - ida_destroy(&bat_ida); -} +module_w1_family(w1_ds2760_family); EXPORT_SYMBOL(w1_ds2760_read); EXPORT_SYMBOL(w1_ds2760_write); EXPORT_SYMBOL(w1_ds2760_store_eeprom); EXPORT_SYMBOL(w1_ds2760_recall_eeprom); -module_init(w1_ds2760_init); -module_exit(w1_ds2760_exit); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>"); MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip"); diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c index 50e85f7929d4..f5c2aa429a92 100644 --- a/drivers/w1/slaves/w1_ds2780.c +++ b/drivers/w1/slaves/w1_ds2780.c @@ -113,25 +113,14 @@ static const struct attribute_group *w1_ds2780_groups[] = { NULL, }; -static DEFINE_IDA(bat_ida); - static int w1_ds2780_add_slave(struct w1_slave *sl) { int ret; - int id; struct platform_device *pdev; - id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); - if (id < 0) { - ret = id; - goto noid; - } - - pdev = platform_device_alloc("ds2780-battery", id); - if (!pdev) { - ret = -ENOMEM; - goto pdev_alloc_failed; - } + pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); @@ -144,19 +133,15 @@ static int w1_ds2780_add_slave(struct w1_slave *sl) pdev_add_failed: platform_device_put(pdev); -pdev_alloc_failed: - ida_simple_remove(&bat_ida, id); -noid: + return ret; } static void w1_ds2780_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); - int id = pdev->id; platform_device_unregister(pdev); - ida_simple_remove(&bat_ida, id); } static struct w1_family_ops w1_ds2780_fops = { @@ -169,21 +154,7 @@ static struct w1_family w1_ds2780_family = { .fid = W1_FAMILY_DS2780, .fops = &w1_ds2780_fops, }; - -static int __init w1_ds2780_init(void) -{ - ida_init(&bat_ida); - return w1_register_family(&w1_ds2780_family); -} - -static void __exit w1_ds2780_exit(void) -{ - w1_unregister_family(&w1_ds2780_family); - ida_destroy(&bat_ida); -} - -module_init(w1_ds2780_init); -module_exit(w1_ds2780_exit); +module_w1_family(w1_ds2780_family); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>"); diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c index 1eb98fb1688d..9c03e014cf9e 100644 --- a/drivers/w1/slaves/w1_ds2781.c +++ b/drivers/w1/slaves/w1_ds2781.c @@ -17,7 +17,6 @@ #include <linux/types.h> #include <linux/platform_device.h> #include <linux/mutex.h> -#include <linux/idr.h> #include "../w1.h" #include "../w1_int.h" @@ -111,25 +110,14 @@ static const struct attribute_group *w1_ds2781_groups[] = { NULL, }; -static DEFINE_IDA(bat_ida); - static int w1_ds2781_add_slave(struct w1_slave *sl) { int ret; - int id; struct platform_device *pdev; - id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); - if (id < 0) { - ret = id; - goto noid; - } - - pdev = platform_device_alloc("ds2781-battery", id); - if (!pdev) { - ret = -ENOMEM; - goto pdev_alloc_failed; - } + pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); @@ -142,19 +130,15 @@ static int w1_ds2781_add_slave(struct w1_slave *sl) pdev_add_failed: platform_device_put(pdev); -pdev_alloc_failed: - ida_simple_remove(&bat_ida, id); -noid: + return ret; } static void 
w1_ds2781_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); - int id = pdev->id; platform_device_unregister(pdev); - ida_simple_remove(&bat_ida, id); } static struct w1_family_ops w1_ds2781_fops = { @@ -167,21 +151,7 @@ static struct w1_family w1_ds2781_family = { .fid = W1_FAMILY_DS2781, .fops = &w1_ds2781_fops, }; - -static int __init w1_ds2781_init(void) -{ - ida_init(&bat_ida); - return w1_register_family(&w1_ds2781_family); -} - -static void __exit w1_ds2781_exit(void) -{ - w1_unregister_family(&w1_ds2781_family); - ida_destroy(&bat_ida); -} - -module_init(w1_ds2781_init); -module_exit(w1_ds2781_exit); +module_w1_family(w1_ds2781_family); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>"); diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index 365d6dff21de..5e348d38ec5c 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c @@ -427,16 +427,4 @@ static struct w1_family w1_family_1C = { .fid = W1_FAMILY_DS28E04, .fops = &w1_f1C_fops, }; - -static int __init w1_f1C_init(void) -{ - return w1_register_family(&w1_family_1C); -} - -static void __exit w1_f1C_fini(void) -{ - w1_unregister_family(&w1_family_1C); -} - -module_init(w1_f1C_init); -module_exit(w1_f1C_fini); +module_w1_family(w1_family_1C); diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index ed5dcb80a1f7..10a7a0767187 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -88,4 +88,16 @@ struct w1_family * w1_family_registered(u8); void w1_unregister_family(struct w1_family *); int w1_register_family(struct w1_family *); +/** + * module_w1_family() - Helper macro for registering a 1-Wire family + * @__w1_family: w1_family struct + * + * Helper macro for 1-Wire families which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit(). + */ +#define module_w1_family(__w1_family) \ + module_driver(__w1_family, w1_register_family, \ + w1_unregister_family) + #endif /* __W1_FAMILY_H */ diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b4b3e256491b..1bffe006ca9a 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -48,7 +48,6 @@ config WATCHDOG_NOWAYOUT config WATCHDOG_SYSFS bool "Read different watchdog information through sysfs" - default n help Say Y here if you want to enable watchdog device status read through sysfs attributes. @@ -516,6 +515,15 @@ config MAX63XX_WATCHDOG help Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. +config MAX77620_WATCHDOG + tristate "Maxim Max77620 Watchdog Timer" + depends on MFD_MAX77620 + help + This is the driver for the Max77620 watchdog timer. + Say 'Y' here to enable the watchdog timer support for + MAX77620 chips. To compile this driver as a module, + choose M here: the module will be called max77620_wdt. + config IMX2_WDT tristate "IMX2+ Watchdog" depends on ARCH_MXC || ARCH_LAYERSCAPE @@ -609,6 +617,16 @@ config QCOM_WDT To compile this driver as a module, choose M here: the module will be called qcom_wdt. +config MESON_GXBB_WATCHDOG + tristate "Amlogic Meson GXBB SoCs watchdog support" + depends on ARCH_MESON + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Amlogic Meson GXBB SoCs. + To compile this driver as a module, choose M here: the + module will be called meson_gxbb_wdt.
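The module_w1_family() helper added to w1_family.h in the hunk above is what lets each of the w1 slave patches in this series collapse their hand-written registration boilerplate into one line. A minimal sketch of what a converted slave driver reduces to (the family code, fops contents, and names below are illustrative placeholders, not taken from any of these patches):

#include <linux/module.h>

#include "../w1.h"
#include "../w1_family.h"

#define W1_FAMILY_EXAMPLE	0x3e	/* hypothetical family code, for illustration */

static struct w1_family_ops w1_example_fops = {
	/* .add_slave, .remove_slave and .groups as the device requires */
};

static struct w1_family w1_example_family = {
	.fid = W1_FAMILY_EXAMPLE,
	.fops = &w1_example_fops,
};

/*
 * Expands through module_driver() into module_init()/module_exit()
 * stubs that call w1_register_family()/w1_unregister_family() on the
 * struct above, replacing the init/exit pairs deleted in these diffs.
 */
module_w1_family(w1_example_family);

MODULE_LICENSE("GPL");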
+ config MESON_WATCHDOG tristate "Amlogic Meson SoCs watchdog support" depends on ARCH_MESON @@ -669,6 +687,19 @@ config RENESAS_WDT This driver adds watchdog support for the integrated watchdogs in the Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT). +config ASPEED_WATCHDOG + tristate "Aspeed 2400 watchdog support" + depends on ARCH_ASPEED || COMPILE_TEST + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Aspeed BMC SoCs. + + This driver is required to reboot the SoC. + + To compile this driver as a module, choose M here: the + module will be called aspeed_wdt. + # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index a46e7c1380ac..c22ad3ea3539 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_ST_LPC_WATCHDOG) += st_lpc_wdt.o obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o +obj-$(CONFIG_MESON_GXBB_WATCHDOG) += meson_gxbb_wdt.o obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o @@ -74,6 +75,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o +obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -203,6 +205,7 @@ obj-$(CONFIG_TANGOX_WATCHDOG) += tangox_wdt.o obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o +obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c new file mode 100644 index 000000000000..f5ad8023c2e6 --- /dev/null +++ b/drivers/watchdog/aspeed_wdt.c @@ -0,0 +1,212 @@ +/* + * Copyright 2016 IBM Corporation + * + * Joel Stanley <joel@jms.id.au> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +struct aspeed_wdt { + struct watchdog_device wdd; + void __iomem *base; + u32 ctrl; +}; + +static const struct of_device_id aspeed_wdt_of_table[] = { + { .compatible = "aspeed,ast2400-wdt" }, + { .compatible = "aspeed,ast2500-wdt" }, + { }, +}; +MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); + +#define WDT_STATUS 0x00 +#define WDT_RELOAD_VALUE 0x04 +#define WDT_RESTART 0x08 +#define WDT_CTRL 0x0C +#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) +#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) +#define WDT_CTRL_1MHZ_CLK BIT(4) +#define WDT_CTRL_WDT_EXT BIT(3) +#define WDT_CTRL_WDT_INTR BIT(2) +#define WDT_CTRL_RESET_SYSTEM BIT(1) +#define WDT_CTRL_ENABLE BIT(0) + +#define WDT_RESTART_MAGIC 0x4755 + +/* 32 bits at 1MHz, in milliseconds */ +#define WDT_MAX_TIMEOUT_MS 4294967 +#define WDT_DEFAULT_TIMEOUT 30 +#define WDT_RATE_1MHZ 1000000 + +static struct aspeed_wdt *to_aspeed_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct aspeed_wdt, wdd); +} + +static void aspeed_wdt_enable(struct aspeed_wdt *wdt, int count) +{ + wdt->ctrl |= WDT_CTRL_ENABLE; + + writel(0, wdt->base + WDT_CTRL); + writel(count, wdt->base + WDT_RELOAD_VALUE); + writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); + writel(wdt->ctrl, wdt->base + WDT_CTRL); +} + +static int aspeed_wdt_start(struct watchdog_device *wdd) +{ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + + aspeed_wdt_enable(wdt, wdd->timeout * WDT_RATE_1MHZ); + + return 0; +} + +static int aspeed_wdt_stop(struct watchdog_device *wdd) +{ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + + wdt->ctrl &= ~WDT_CTRL_ENABLE; + writel(wdt->ctrl, wdt->base + WDT_CTRL); + + return 0; +} + +static int aspeed_wdt_ping(struct watchdog_device *wdd) +{ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + + writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); + + return 0; +} + +static int aspeed_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + u32 actual; + + wdd->timeout = timeout; + + actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000); + + writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE); + writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); + + return 0; +} + +static int aspeed_wdt_restart(struct watchdog_device *wdd, + unsigned long action, void *data) +{ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + + aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000); + + mdelay(1000); + + return 0; +} + +static const struct watchdog_ops aspeed_wdt_ops = { + .start = aspeed_wdt_start, + .stop = aspeed_wdt_stop, + .ping = aspeed_wdt_ping, + .set_timeout = aspeed_wdt_set_timeout, + .restart = aspeed_wdt_restart, + .owner = THIS_MODULE, +}; + +static const struct watchdog_info aspeed_wdt_info = { + .options = WDIOF_KEEPALIVEPING + | WDIOF_MAGICCLOSE + | WDIOF_SETTIMEOUT, + .identity = KBUILD_MODNAME, +}; + +static int aspeed_wdt_remove(struct platform_device *pdev) +{ + struct aspeed_wdt *wdt = platform_get_drvdata(pdev); + + watchdog_unregister_device(&wdt->wdd); + + return 0; +} + +static int aspeed_wdt_probe(struct platform_device *pdev) +{ + struct aspeed_wdt *wdt; + struct resource *res; + int ret; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base =
devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); + + /* + * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only + * runs at 1MHz. We chose to always run at 1MHz, as there's no + * good reason to have a faster watchdog counter. + */ + wdt->wdd.info = &aspeed_wdt_info; + wdt->wdd.ops = &aspeed_wdt_ops; + wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS; + wdt->wdd.parent = &pdev->dev; + + wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT; + watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev); + + /* + * Control reset on a per-device basis to ensure the + * host is not affected by a BMC reboot, so only reset + * the SOC and not the full chip + */ + wdt->ctrl = WDT_CTRL_RESET_MODE_SOC | + WDT_CTRL_1MHZ_CLK | + WDT_CTRL_RESET_SYSTEM; + + if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) { + aspeed_wdt_start(&wdt->wdd); + set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); + } + + ret = watchdog_register_device(&wdt->wdd); + if (ret) { + dev_err(&pdev->dev, "failed to register\n"); + return ret; + } + + platform_set_drvdata(pdev, wdt); + + return 0; +} + +static struct platform_driver aspeed_watchdog_driver = { + .probe = aspeed_wdt_probe, + .remove = aspeed_wdt_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(aspeed_wdt_of_table), + }, +}; +module_platform_driver(aspeed_watchdog_driver); + +MODULE_DESCRIPTION("Aspeed Watchdog Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 2e6164c4abc0..4dddd8298a22 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -82,12 +82,6 @@ static int bcm2835_wdt_stop(struct watchdog_device *wdog) return 0; } -static int bcm2835_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t) -{ - wdog->timeout = t; - return 0; -} - static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog) { struct bcm2835_wdt *wdt = watchdog_get_drvdata(wdog); @@ -96,15 +90,14 @@ static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog) return WDOG_TICKS_TO_SECS(ret & PM_WDOG_TIME_SET); } -static struct watchdog_ops bcm2835_wdt_ops = { +static const struct watchdog_ops bcm2835_wdt_ops = { .owner = THIS_MODULE, .start = bcm2835_wdt_start, .stop = bcm2835_wdt_stop, - .set_timeout = bcm2835_wdt_set_timeout, .get_timeleft = bcm2835_wdt_get_timeleft, }; -static struct watchdog_info bcm2835_wdt_info = { +static const struct watchdog_info bcm2835_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Broadcom BCM2835 Watchdog timer", diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index a100f648880d..5d6b4e5f7989 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -34,6 +34,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; #define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN] #define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX] #define DA9063_WDG_TIMEOUT wdt_timeout[3] +#define DA9063_RESET_PROTECTION_MS 256 struct da9063_watchdog { struct da9063 *da9063; @@ -171,6 +172,7 @@ static int da9063_wdt_probe(struct platform_device *pdev) wdt->wdtdev.ops = &da9063_watchdog_ops; wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT; wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT; + wdt->wdtdev.min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS; wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT; wdt->wdtdev.parent = &pdev->dev; diff --git a/drivers/watchdog/f71808e_wdt.c 
b/drivers/watchdog/f71808e_wdt.c index d4ba262da7ba..1b7e9169072f 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -45,9 +45,11 @@ #define SIO_REG_DEVREV 0x22 /* Device revision */ #define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */ +#define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */ #define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */ #define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */ #define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */ +#define SIO_F81866_REG_GPIO1 0x2c /* F81866 GPIO1 Enable Register */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ @@ -60,6 +62,7 @@ #define SIO_F71882_ID 0x0541 /* Chipset ID */ #define SIO_F71889_ID 0x0723 /* Chipset ID */ #define SIO_F81865_ID 0x0704 /* Chipset ID */ +#define SIO_F81866_ID 0x1010 /* Chipset ID */ #define F71808FG_REG_WDO_CONF 0xf0 #define F71808FG_REG_WDT_CONF 0xf5 @@ -116,7 +119,8 @@ module_param(start_withtimeout, uint, 0); MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with" " given initial timeout. Zero (default) disables this feature."); -enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 }; +enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865, + f81866}; static const char *f71808e_names[] = { "f71808fg", @@ -126,6 +130,7 @@ static const char *f71808e_names[] = { "f71882fg", "f71889fg", "f81865", + "f81866", }; /* Super-I/O Function prototypes */ @@ -370,6 +375,22 @@ static int watchdog_start(void) superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5); break; + case f81866: + /* Set pin 70 to WDTRST# */ + superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, + BIT(3) | BIT(0)); + superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, + BIT(2)); + /* + * GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0. 
+ * The PIN 70(GPIO15/WDTRST) is controlled by 2Ch: + * BIT5: 0 -> WDTRST# + * 1 -> GPIO15 + */ + superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1, + BIT(5)); + break; + default: /* * 'default' label to shut up the compiler and catch @@ -382,7 +403,7 @@ static int watchdog_start(void) superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0); - if (watchdog.type == f81865) + if (watchdog.type == f81865 || watchdog.type == f81866) superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF, F81865_FLAG_WDOUT_EN); else @@ -788,6 +809,9 @@ static int __init f71808e_find(int sioaddr) case SIO_F81865_ID: watchdog.type = f81865; break; + case SIO_F81866_ID: + watchdog.type = f81866; + break; default: pr_info("Unrecognized Fintek device: %04x\n", (unsigned int)devid); diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c index ba066e4a707b..93457cabc178 100644 --- a/drivers/watchdog/gpio_wdt.c +++ b/drivers/watchdog/gpio_wdt.c @@ -151,6 +151,8 @@ static int gpio_wdt_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + platform_set_drvdata(pdev, priv); + priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags); if (!gpio_is_valid(priv->gpio)) return priv->gpio; diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 0acc6c5f729d..54cab189a763 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -150,6 +150,7 @@ static inline u32 no_reboot_bit(void) u32 enable_bit; switch (iTCO_wdt_private.iTCO_version) { + case 5: case 3: enable_bit = 0x00000010; break; @@ -512,6 +513,7 @@ static int iTCO_wdt_probe(struct platform_device *dev) /* Clear out the (probably old) status */ switch (iTCO_wdt_private.iTCO_version) { + case 5: case 4: outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */ outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */ diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c new file mode 100644 index 000000000000..48b84df2afda --- /dev/null +++ b/drivers/watchdog/max77620_wdt.c @@ -0,0 +1,227 @@ +/* + * Maxim MAX77620 Watchdog Driver + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * Author: Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mfd/max77620.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/watchdog.h> + +static bool nowayout = WATCHDOG_NOWAYOUT; + +struct max77620_wdt { + struct device *dev; + struct regmap *rmap; + struct watchdog_device wdt_dev; +}; + +static int max77620_wdt_start(struct watchdog_device *wdt_dev) +{ + struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev); + + return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2, + MAX77620_WDTEN, MAX77620_WDTEN); +} + +static int max77620_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev); + + return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2, + MAX77620_WDTEN, 0); +} + +static int max77620_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev); + + return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3, + MAX77620_WDTC_MASK, 0x1); +} + +static int max77620_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev); + unsigned int wdt_timeout; + u8 regval; + int ret; + + switch (timeout) { + case 0 ... 2: + regval = MAX77620_TWD_2s; + wdt_timeout = 2; + break; + + case 3 ... 16: + regval = MAX77620_TWD_16s; + wdt_timeout = 16; + break; + + case 17 ... 64: + regval = MAX77620_TWD_64s; + wdt_timeout = 64; + break; + + default: + regval = MAX77620_TWD_128s; + wdt_timeout = 128; + break; + } + + ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3, + MAX77620_WDTC_MASK, 0x1); + if (ret < 0) + return ret; + + ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2, + MAX77620_TWD_MASK, regval); + if (ret < 0) + return ret; + + wdt_dev->timeout = wdt_timeout; + + return 0; +} + +static const struct watchdog_info max77620_wdt_info = { + .identity = "max77620-watchdog", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops max77620_wdt_ops = { + .start = max77620_wdt_start, + .stop = max77620_wdt_stop, + .ping = max77620_wdt_ping, + .set_timeout = max77620_wdt_set_timeout, +}; + +static int max77620_wdt_probe(struct platform_device *pdev) +{ + struct max77620_wdt *wdt; + struct watchdog_device *wdt_dev; + unsigned int regval; + int ret; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->dev = &pdev->dev; + wdt->rmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!wdt->rmap) { + dev_err(wdt->dev, "Failed to get parent regmap\n"); + return -ENODEV; + } + + wdt_dev = &wdt->wdt_dev; + wdt_dev->info = &max77620_wdt_info; + wdt_dev->ops = &max77620_wdt_ops; + wdt_dev->min_timeout = 2; + wdt_dev->max_timeout = 128; + wdt_dev->max_hw_heartbeat_ms = 128 * 1000; + + platform_set_drvdata(pdev, wdt); + + /* Enable WD_RST_WK - WDT expire results in a restart */ + ret = regmap_update_bits(wdt->rmap, MAX77620_REG_ONOFFCNFG2, + MAX77620_ONOFFCNFG2_WD_RST_WK, + MAX77620_ONOFFCNFG2_WD_RST_WK); + if (ret < 0) { + dev_err(wdt->dev, "Failed to set WD_RST_WK: %d\n", ret); + return ret; + } + + /* Set WDT clear in OFF and sleep mode */ + ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2, + MAX77620_WDTOFFC | MAX77620_WDTSLPC, + MAX77620_WDTOFFC | MAX77620_WDTSLPC); + if (ret < 0) { + dev_err(wdt->dev, "Failed to set WDT OFF mode: %d\n", ret); + return ret; + } + + /* Check if WDT 
running and if yes then set flags properly */ + ret = regmap_read(wdt->rmap, MAX77620_REG_CNFGGLBL2, &regval); + if (ret < 0) { + dev_err(wdt->dev, "Failed to read WDT CFG register: %d\n", ret); + return ret; + } + + switch (regval & MAX77620_TWD_MASK) { + case MAX77620_TWD_2s: + wdt_dev->timeout = 2; + break; + case MAX77620_TWD_16s: + wdt_dev->timeout = 16; + break; + case MAX77620_TWD_64s: + wdt_dev->timeout = 64; + break; + default: + wdt_dev->timeout = 128; + break; + } + + if (regval & MAX77620_WDTEN) + set_bit(WDOG_HW_RUNNING, &wdt_dev->status); + + watchdog_set_nowayout(wdt_dev, nowayout); + watchdog_set_drvdata(wdt_dev, wdt); + + ret = watchdog_register_device(wdt_dev); + if (ret < 0) { + dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int max77620_wdt_remove(struct platform_device *pdev) +{ + struct max77620_wdt *wdt = platform_get_drvdata(pdev); + + max77620_wdt_stop(&wdt->wdt_dev); + watchdog_unregister_device(&wdt->wdt_dev); + + return 0; +} + +static struct platform_device_id max77620_wdt_devtype[] = { + { .name = "max77620-watchdog", }, + { }, +}; + +static struct platform_driver max77620_wdt_driver = { + .driver = { + .name = "max77620-watchdog", + }, + .probe = max77620_wdt_probe, + .remove = max77620_wdt_remove, + .id_table = max77620_wdt_devtype, +}; + +module_platform_driver(max77620_wdt_driver); + +MODULE_DESCRIPTION("Max77620 watchdog timer driver"); + +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c new file mode 100644 index 000000000000..44d180a2c5e5 --- /dev/null +++ b/drivers/watchdog/meson_gxbb_wdt.c @@ -0,0 +1,270 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/watchdog.h> + +#define DEFAULT_TIMEOUT 30 /* seconds */ + +#define GXBB_WDT_CTRL_REG 0x0 +#define GXBB_WDT_TCNT_REG 0x8 +#define GXBB_WDT_RSET_REG 0xc + +#define GXBB_WDT_CTRL_CLKDIV_EN BIT(25) +#define GXBB_WDT_CTRL_CLK_EN BIT(24) +#define GXBB_WDT_CTRL_EE_RESET BIT(21) +#define GXBB_WDT_CTRL_EN BIT(18) +#define GXBB_WDT_CTRL_DIV_MASK (BIT(18) - 1) + +#define GXBB_WDT_TCNT_SETUP_MASK (BIT(16) - 1) +#define GXBB_WDT_TCNT_CNT_SHIFT 16 + +struct meson_gxbb_wdt { + void __iomem *reg_base; + struct watchdog_device wdt_dev; + struct clk *clk; +}; + +static int meson_gxbb_wdt_start(struct watchdog_device *wdt_dev) +{ + struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev); + + writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) | GXBB_WDT_CTRL_EN, + data->reg_base + GXBB_WDT_CTRL_REG); + + return 0; +} + +static int meson_gxbb_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev); + + writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) & ~GXBB_WDT_CTRL_EN, + data->reg_base + GXBB_WDT_CTRL_REG); + + return 0; +} + +static int meson_gxbb_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev); + + writel(0, data->reg_base + GXBB_WDT_RSET_REG); + + return 0; +} + +static int meson_gxbb_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev); + unsigned long tcnt = timeout * 1000; + + if (tcnt > GXBB_WDT_TCNT_SETUP_MASK) + tcnt = GXBB_WDT_TCNT_SETUP_MASK; + + wdt_dev->timeout = timeout; + + meson_gxbb_wdt_ping(wdt_dev); + + writel(tcnt, data->reg_base + GXBB_WDT_TCNT_REG); + + return 0; +} + +static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev) +{ + struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev); + unsigned long reg; + + reg = readl(data->reg_base + GXBB_WDT_TCNT_REG); + + return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) - + (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000; +} + +static const struct watchdog_ops meson_gxbb_wdt_ops = { + .start = meson_gxbb_wdt_start, + .stop = meson_gxbb_wdt_stop, + .ping = meson_gxbb_wdt_ping, + .set_timeout = meson_gxbb_wdt_set_timeout, + .get_timeleft = meson_gxbb_wdt_get_timeleft, +}; + +static const struct watchdog_info meson_gxbb_wdt_info = { + 
.identity = "Meson GXBB Watchdog", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, +}; + +static int __maybe_unused meson_gxbb_wdt_resume(struct device *dev) +{ + struct meson_gxbb_wdt *data = dev_get_drvdata(dev); + + if (watchdog_active(&data->wdt_dev)) + meson_gxbb_wdt_start(&data->wdt_dev); + + return 0; +} + +static int __maybe_unused meson_gxbb_wdt_suspend(struct device *dev) +{ + struct meson_gxbb_wdt *data = dev_get_drvdata(dev); + + if (watchdog_active(&data->wdt_dev)) + meson_gxbb_wdt_stop(&data->wdt_dev); + + return 0; +} + +static const struct dev_pm_ops meson_gxbb_wdt_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(meson_gxbb_wdt_suspend, meson_gxbb_wdt_resume) +}; + +static const struct of_device_id meson_gxbb_wdt_dt_ids[] = { + { .compatible = "amlogic,meson-gxbb-wdt", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids); + +static int meson_gxbb_wdt_probe(struct platform_device *pdev) +{ + struct meson_gxbb_wdt *data; + struct resource *res; + int ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->reg_base)) + return PTR_ERR(data->reg_base); + + data->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->clk)) + return PTR_ERR(data->clk); + + clk_prepare_enable(data->clk); + + platform_set_drvdata(pdev, data); + + data->wdt_dev.parent = &pdev->dev; + data->wdt_dev.info = &meson_gxbb_wdt_info; + data->wdt_dev.ops = &meson_gxbb_wdt_ops; + data->wdt_dev.max_hw_heartbeat_ms = GXBB_WDT_TCNT_SETUP_MASK; + data->wdt_dev.min_timeout = 1; + data->wdt_dev.timeout = DEFAULT_TIMEOUT; + watchdog_set_drvdata(&data->wdt_dev, data); + + /* Setup with 1ms timebase */ + writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) | + GXBB_WDT_CTRL_EE_RESET | + GXBB_WDT_CTRL_CLK_EN | + GXBB_WDT_CTRL_CLKDIV_EN, + data->reg_base + GXBB_WDT_CTRL_REG); + + meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout); + + ret = watchdog_register_device(&data->wdt_dev); + if (ret) { + clk_disable_unprepare(data->clk); + return ret; + } + + return 0; +} + +static int meson_gxbb_wdt_remove(struct platform_device *pdev) +{ + struct meson_gxbb_wdt *data = platform_get_drvdata(pdev); + + watchdog_unregister_device(&data->wdt_dev); + + clk_disable_unprepare(data->clk); + + return 0; +} + +static void meson_gxbb_wdt_shutdown(struct platform_device *pdev) +{ + struct meson_gxbb_wdt *data = platform_get_drvdata(pdev); + + meson_gxbb_wdt_stop(&data->wdt_dev); +} + +static struct platform_driver meson_gxbb_wdt_driver = { + .probe = meson_gxbb_wdt_probe, + .remove = meson_gxbb_wdt_remove, + .shutdown = meson_gxbb_wdt_shutdown, + .driver = { + .name = "meson-gxbb-wdt", + .pm = &meson_gxbb_wdt_pm_ops, + .of_match_table = meson_gxbb_wdt_dt_ids, + }, +}; + +module_platform_driver(meson_gxbb_wdt_driver); + +MODULE_ALIAS("platform:meson-gxbb-wdt"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_DESCRIPTION("Amlogic Meson GXBB Watchdog timer driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index bd917bb757b8..a0fabf6f92b0 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -294,6 +294,8 @@ static const struct pci_device_id tco_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, 
PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS, + PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* End of list */ diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c index e936f15dc7c7..3ad5206d7935 100644 --- a/drivers/watchdog/pcwd.c +++ b/drivers/watchdog/pcwd.c @@ -992,19 +992,7 @@ static struct isa_driver pcwd_isa_driver = { }, }; -static int __init pcwd_init_module(void) -{ - return isa_register_driver(&pcwd_isa_driver, PCWD_ISA_NR_CARDS); -} - -static void __exit pcwd_cleanup_module(void) -{ - isa_unregister_driver(&pcwd_isa_driver); - pr_info("Watchdog Module Unloaded\n"); -} - -module_init(pcwd_init_module); -module_exit(pcwd_cleanup_module); +module_isa_driver(pcwd_isa_driver, PCWD_ISA_NR_CARDS); MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, " "Wim Van Sebroeck <wim@iguana.be>"); diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c index 962f58c03353..c797305f8338 100644 --- a/drivers/watchdog/pic32-dmt.c +++ b/drivers/watchdog/pic32-dmt.c @@ -176,8 +176,8 @@ static int pic32_dmt_probe(struct platform_device *pdev) struct watchdog_device *wdd = &pic32_dmt_wdd; dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL); - if (IS_ERR(dmt)) - return PTR_ERR(dmt); + if (!dmt) + return -ENOMEM; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmt->regs = devm_ioremap_resource(&pdev->dev, mem); @@ -245,7 +245,6 @@ static struct platform_driver pic32_dmt_driver = { .remove = pic32_dmt_remove, .driver = { .name = "pic32-dmt", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(pic32_dmt_of_ids), } }; diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c index 6047aa89a4d3..e2761068dc6f 100644 --- a/drivers/watchdog/pic32-wdt.c +++ b/drivers/watchdog/pic32-wdt.c @@ -174,8 +174,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev) struct resource *mem; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); - if (IS_ERR(wdt)) - return PTR_ERR(wdt); + if (!wdt) + return -ENOMEM; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); wdt->regs = devm_ioremap_resource(&pdev->dev, mem); @@ -183,8 +183,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev) return PTR_ERR(wdt->regs); wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10); - if (IS_ERR(wdt->rst_base)) - return PTR_ERR(wdt->rst_base); + if (!wdt->rst_base) + return -ENOMEM; wdt->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(wdt->clk)) { @@ -251,7 +251,6 @@ static struct platform_driver pic32_wdt_driver = { .remove = pic32_wdt_drv_remove, .driver = { .name = "pic32-wdt", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(pic32_wdt_dt_ids), } }; diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index a043fa4f60e5..5796b5d1b3f2 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -18,19 +18,45 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/watchdog.h> +#include <linux/of_device.h> + +enum wdt_reg { + WDT_RST, + WDT_EN, + WDT_STS, + WDT_BARK_TIME, + WDT_BITE_TIME, +}; -#define WDT_RST 0x38 -#define WDT_EN 0x40 -#define WDT_STS 0x44 -#define WDT_BITE_TIME 0x5C +static const u32 reg_offset_data_apcs_tmr[] = { + [WDT_RST] = 0x38, + [WDT_EN] = 0x40, + [WDT_STS] = 0x44, + [WDT_BARK_TIME] = 0x4C, + [WDT_BITE_TIME] = 0x5C, +}; + +static const u32 reg_offset_data_kpss[] = { + [WDT_RST] = 0x4, + [WDT_EN] = 0x8, + [WDT_STS] = 0xC, + [WDT_BARK_TIME] = 0x10, + [WDT_BITE_TIME] = 0x14, +}; struct qcom_wdt 
{ struct watchdog_device wdd; struct clk *clk; unsigned long rate; void __iomem *base; + const u32 *layout; }; +static void __iomem *wdt_addr(struct qcom_wdt *wdt, enum wdt_reg reg) +{ + return wdt->base + wdt->layout[reg]; +} + static inline struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd) { @@ -41,10 +67,11 @@ static int qcom_wdt_start(struct watchdog_device *wdd) { struct qcom_wdt *wdt = to_qcom_wdt(wdd); - writel(0, wdt->base + WDT_EN); - writel(1, wdt->base + WDT_RST); - writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME); - writel(1, wdt->base + WDT_EN); + writel(0, wdt_addr(wdt, WDT_EN)); + writel(1, wdt_addr(wdt, WDT_RST)); + writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME)); + writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME)); + writel(1, wdt_addr(wdt, WDT_EN)); return 0; } @@ -52,7 +79,7 @@ static int qcom_wdt_stop(struct watchdog_device *wdd) { struct qcom_wdt *wdt = to_qcom_wdt(wdd); - writel(0, wdt->base + WDT_EN); + writel(0, wdt_addr(wdt, WDT_EN)); return 0; } @@ -60,7 +87,7 @@ static int qcom_wdt_ping(struct watchdog_device *wdd) { struct qcom_wdt *wdt = to_qcom_wdt(wdd); - writel(1, wdt->base + WDT_RST); + writel(1, wdt_addr(wdt, WDT_RST)); return 0; } @@ -83,10 +110,11 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action, */ timeout = 128 * wdt->rate / 1000; - writel(0, wdt->base + WDT_EN); - writel(1, wdt->base + WDT_RST); - writel(timeout, wdt->base + WDT_BITE_TIME); - writel(1, wdt->base + WDT_EN); + writel(0, wdt_addr(wdt, WDT_EN)); + writel(1, wdt_addr(wdt, WDT_RST)); + writel(timeout, wdt_addr(wdt, WDT_BARK_TIME)); + writel(timeout, wdt_addr(wdt, WDT_BITE_TIME)); + writel(1, wdt_addr(wdt, WDT_EN)); /* * Actually make sure the above sequence hits hardware before sleeping. @@ -119,9 +147,16 @@ static int qcom_wdt_probe(struct platform_device *pdev) struct qcom_wdt *wdt; struct resource *res; struct device_node *np = pdev->dev.of_node; + const u32 *regs; u32 percpu_offset; int ret; + regs = of_device_get_match_data(&pdev->dev); + if (!regs) { + dev_err(&pdev->dev, "Unsupported QCOM WDT module\n"); + return -ENODEV; + } + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); if (!wdt) return -ENOMEM; @@ -172,6 +207,7 @@ static int qcom_wdt_probe(struct platform_device *pdev) wdt->wdd.min_timeout = 1; wdt->wdd.max_timeout = 0x10000000U / wdt->rate; wdt->wdd.parent = &pdev->dev; + wdt->layout = regs; if (readl(wdt->base + WDT_STS) & 1) wdt->wdd.bootstatus = WDIOF_CARDRESET; @@ -208,8 +244,9 @@ static int qcom_wdt_remove(struct platform_device *pdev) } static const struct of_device_id qcom_wdt_of_table[] = { - { .compatible = "qcom,kpss-timer" }, - { .compatible = "qcom,scss-timer" }, + { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr }, + { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr }, + { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss }, { }, }; MODULE_DEVICE_TABLE(of, qcom_wdt_of_table); diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index ad383f6f15fc..ce0c38bd0f00 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -180,15 +180,6 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd) return 0; } -static unsigned int sbsa_gwdt_status(struct watchdog_device *wdd) -{ - struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); - u32 status = readl(gwdt->control_base + SBSA_GWDT_WCS); - - /* is the watchdog timer running? 
*/ - return (status & SBSA_GWDT_WCS_EN) << WDOG_ACTIVE; -} - static int sbsa_gwdt_start(struct watchdog_device *wdd) { struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); @@ -228,7 +219,6 @@ static struct watchdog_ops sbsa_gwdt_ops = { .owner = THIS_MODULE, .start = sbsa_gwdt_start, .stop = sbsa_gwdt_stop, - .status = sbsa_gwdt_status, .ping = sbsa_gwdt_keepalive, .set_timeout = sbsa_gwdt_set_timeout, .get_timeleft = sbsa_gwdt_get_timeleft, @@ -273,7 +263,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) wdd->info = &sbsa_gwdt_info; wdd->ops = &sbsa_gwdt_ops; wdd->min_timeout = 1; - wdd->max_timeout = U32_MAX / gwdt->clk; + wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000; wdd->timeout = DEFAULT_TIMEOUT; watchdog_set_drvdata(wdd, gwdt); watchdog_set_nowayout(wdd, nowayout); @@ -283,6 +273,8 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) dev_warn(dev, "System reset by WDT.\n"); wdd->bootstatus |= WDIOF_CARDRESET; } + if (status & SBSA_GWDT_WCS_EN) + set_bit(WDOG_HW_RUNNING, &wdd->status); if (action) { irq = platform_get_irq(pdev, 0); @@ -310,7 +302,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) * the timeout is (WOR * 2), so the maximum timeout should be doubled. */ if (!action) - wdd->max_timeout *= 2; + wdd->max_hw_heartbeat_ms *= 2; watchdog_init_timeout(wdd, timeout, dev); /* diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c index d0578ab2e636..3050a0031479 100644 --- a/drivers/watchdog/sirfsoc_wdt.c +++ b/drivers/watchdog/sirfsoc_wdt.c @@ -39,13 +39,18 @@ MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)"); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +static void __iomem *sirfsoc_wdt_base(struct watchdog_device *wdd) +{ + return (void __iomem __force *)watchdog_get_drvdata(wdd); +} + static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd) { u32 counter, match; void __iomem *wdt_base; int time_left; - wdt_base = watchdog_get_drvdata(wdd); + wdt_base = sirfsoc_wdt_base(wdd); counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO); match = readl(wdt_base + SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2)); @@ -61,7 +66,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd) void __iomem *wdt_base; timeout_ticks = wdd->timeout * CLOCK_FREQ; - wdt_base = watchdog_get_drvdata(wdd); + wdt_base = sirfsoc_wdt_base(wdd); /* Enable the latch before reading the LATCH_LO register */ writel(1, wdt_base + SIRFSOC_TIMER_LATCH); @@ -79,7 +84,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd) static int sirfsoc_wdt_enable(struct watchdog_device *wdd) { - void __iomem *wdt_base = watchdog_get_drvdata(wdd); + void __iomem *wdt_base = sirfsoc_wdt_base(wdd); sirfsoc_wdt_updatetimeout(wdd); /* @@ -96,7 +101,7 @@ static int sirfsoc_wdt_enable(struct watchdog_device *wdd) static int sirfsoc_wdt_disable(struct watchdog_device *wdd) { - void __iomem *wdt_base = watchdog_get_drvdata(wdd); + void __iomem *wdt_base = sirfsoc_wdt_base(wdd); writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN); writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN) @@ -150,7 +155,7 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); - watchdog_set_drvdata(&sirfsoc_wdd, base); + watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base); watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev); watchdog_set_nowayout(&sirfsoc_wdd, nowayout); diff --git a/drivers/watchdog/softdog.c 
b/drivers/watchdog/softdog.c index 99a06f9e3930..b067edf246df 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -17,36 +17,19 @@ * * Software only watchdog driver. Unlike its big brother the WDT501P * driver this won't always recover a failed machine. - * - * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> : - * Modularised. - * Added soft_margin; use upon insmod to change the timer delay. - * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate - * minors. - * - * 19980911 Alan Cox - * Made SMP safe for 2.3.x - * - * 20011127 Joel Becker (jlbec@evilplan.org> - * Added soft_noboot; Allows testing the softdog trigger without - * requiring a recompile. - * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT. - * - * 20020530 Joel Becker <joel.becker@oracle.com> - * Added Matt Domsch's nowayout module option. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/types.h> +#include <linux/reboot.h> #include <linux/timer.h> +#include <linux/types.h> #include <linux/watchdog.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/jiffies.h> -#include <linux/kernel.h> #define TIMER_MARGIN 60 /* Default is 60 seconds */ static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */ @@ -71,25 +54,12 @@ module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); -/* - * Our timer - */ - -static void watchdog_fire(unsigned long); - -static struct timer_list watchdog_ticktock = - TIMER_INITIALIZER(watchdog_fire, 0, 0); - -/* - * If the timer expires.. - */ - -static void watchdog_fire(unsigned long data) +static void softdog_fire(unsigned long data) { module_put(THIS_MODULE); - if (soft_noboot) + if (soft_noboot) { pr_crit("Triggered - Reboot ignored\n"); - else if (soft_panic) { + } else if (soft_panic) { pr_crit("Initiating panic\n"); panic("Software Watchdog Timer expired"); } else { @@ -99,35 +69,24 @@ static void watchdog_fire(unsigned long data) } } -/* - * Softdog operations - */ +static struct timer_list softdog_ticktock = + TIMER_INITIALIZER(softdog_fire, 0, 0); static int softdog_ping(struct watchdog_device *w) { - if (!mod_timer(&watchdog_ticktock, jiffies+(w->timeout*HZ))) + if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ))) __module_get(THIS_MODULE); return 0; } static int softdog_stop(struct watchdog_device *w) { - if (del_timer(&watchdog_ticktock)) + if (del_timer(&softdog_ticktock)) module_put(THIS_MODULE); return 0; } -static int softdog_set_timeout(struct watchdog_device *w, unsigned int t) -{ - w->timeout = t; - return 0; -} - -/* - * Kernel Interfaces - */ - static struct watchdog_info softdog_info = { .identity = "Software Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, @@ -137,29 +96,21 @@ static struct watchdog_ops softdog_ops = { .owner = THIS_MODULE, .start = softdog_ping, .stop = softdog_stop, - .set_timeout = softdog_set_timeout, }; static struct watchdog_device softdog_dev = { .info = &softdog_info, .ops = &softdog_ops, .min_timeout = 1, - .max_timeout = 0xFFFF + .max_timeout = 65535, + .timeout = TIMER_MARGIN, }; -static int __init watchdog_init(void) +static int __init softdog_init(void) { int ret; - /* Check that the soft_margin value is within it's range; - if not reset to the default */ - if (soft_margin < 1 || soft_margin > 65535) { - pr_info("soft_margin must be 0 < 
soft_margin < 65536, using %d\n", - TIMER_MARGIN); - return -EINVAL; - } - softdog_dev.timeout = soft_margin; - + watchdog_init_timeout(&softdog_dev, soft_margin, NULL); watchdog_set_nowayout(&softdog_dev, nowayout); watchdog_stop_on_reboot(&softdog_dev); @@ -167,19 +118,18 @@ static int __init watchdog_init(void) if (ret) return ret; - pr_info("Software Watchdog Timer: 0.08 initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n", - soft_noboot, soft_margin, soft_panic, nowayout); + pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n", + soft_noboot, softdog_dev.timeout, soft_panic, nowayout); return 0; } +module_init(softdog_init); -static void __exit watchdog_exit(void) +static void __exit softdog_exit(void) { watchdog_unregister_device(&softdog_dev); } - -module_init(watchdog_init); -module_exit(watchdog_exit); +module_exit(softdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Software Watchdog Device Driver"); diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c index cfbed7e051b6..202c4b9cc921 100644 --- a/drivers/watchdog/tangox_wdt.c +++ b/drivers/watchdog/tangox_wdt.c @@ -149,7 +149,7 @@ static int tangox_wdt_probe(struct platform_device *pdev) dev->wdt.ops = &tangox_wdt_ops; dev->wdt.timeout = DEFAULT_TIMEOUT; dev->wdt.min_timeout = 1; - dev->wdt.max_timeout = (U32_MAX - 1) / dev->clk_rate; + dev->wdt.max_hw_heartbeat_ms = (U32_MAX - 1) / dev->clk_rate; watchdog_init_timeout(&dev->wdt, timeout, &pdev->dev); watchdog_set_nowayout(&dev->wdt, nowayout); @@ -170,7 +170,7 @@ static int tangox_wdt_probe(struct platform_device *pdev) * already running. */ if (readl(dev->base + WD_COUNTER)) { - set_bit(WDOG_ACTIVE, &dev->wdt.status); + set_bit(WDOG_HW_RUNNING, &dev->wdt.status); tangox_wdt_start(&dev->wdt); } diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 7c3ba58ae1be..6abb83cd7681 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -88,7 +88,7 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd) * Check that we have valid min and max timeout values, if * not reset them both to 0 (=not used or unknown) */ - if (wdd->min_timeout > wdd->max_timeout) { + if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) { pr_info("Invalid min and max timeout values, resetting to 0!\n"); wdd->min_timeout = 0; wdd->max_timeout = 0; @@ -329,6 +329,43 @@ void watchdog_unregister_device(struct watchdog_device *wdd) EXPORT_SYMBOL_GPL(watchdog_unregister_device); +static void devm_watchdog_unregister_device(struct device *dev, void *res) +{ + watchdog_unregister_device(*(struct watchdog_device **)res); +} + +/** + * devm_watchdog_register_device() - resource managed watchdog_register_device() + * @dev: device that is registering this watchdog device + * @wdd: watchdog device + * + * Managed watchdog_register_device(). For watchdog device registered by this + * function, watchdog_unregister_device() is automatically called on driver + * detach. See watchdog_register_device() for more information. 
+ */ +int devm_watchdog_register_device(struct device *dev, + struct watchdog_device *wdd) +{ + struct watchdog_device **rcwdd; + int ret; + + rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd), + GFP_KERNEL); + if (!rcwdd) + return -ENOMEM; + + ret = watchdog_register_device(wdd); + if (!ret) { + *rcwdd = wdd; + devres_add(dev, rcwdd); + } else { + devres_free(rcwdd); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_watchdog_register_device); + static int __init watchdog_deferred_registration(void) { mutex_lock(&wtd_deferred_reg_mutex); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 3595cffa24ea..040bf8382f46 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -69,6 +69,7 @@ struct watchdog_core_data { unsigned long status; /* Internal status bits */ #define _WDOG_DEV_OPEN 0 /* Opened ? */ #define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */ +#define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */ }; /* the dev_t structure to store the dynamically allocated watchdog devices */ @@ -92,9 +93,13 @@ static inline bool watchdog_need_worker(struct watchdog_device *wdd) * thus is aware that the framework supports generating heartbeat * requests. * - Userspace requests a longer timeout than the hardware can handle. + * + * Alternatively, if userspace has not opened the watchdog + * device, we take care of feeding the watchdog if it is + * running. */ - return hm && ((watchdog_active(wdd) && t > hm) || - (t && !watchdog_active(wdd) && watchdog_hw_running(wdd))); + return (hm && watchdog_active(wdd) && t > hm) || + (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)); } static long watchdog_next_keepalive(struct watchdog_device *wdd) @@ -107,7 +112,7 @@ static long watchdog_next_keepalive(struct watchdog_device *wdd) unsigned int hw_heartbeat_ms; virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms); - hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms); + hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms); keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2); if (!watchdog_active(wdd)) @@ -180,6 +185,8 @@ static int watchdog_ping(struct watchdog_device *wdd) if (!watchdog_active(wdd) && !watchdog_hw_running(wdd)) return 0; + set_bit(_WDOG_KEEPALIVE, &wd_data->status); + wd_data->last_keepalive = jiffies; return __watchdog_ping(wdd); } @@ -219,6 +226,8 @@ static int watchdog_start(struct watchdog_device *wdd) if (watchdog_active(wdd)) return 0; + set_bit(_WDOG_KEEPALIVE, &wd_data->status); + started_at = jiffies; if (watchdog_hw_running(wdd) && wdd->ops->ping) err = wdd->ops->ping(wdd); @@ -258,10 +267,12 @@ static int watchdog_stop(struct watchdog_device *wdd) return -EBUSY; } - if (wdd->ops->stop) + if (wdd->ops->stop) { + clear_bit(WDOG_HW_RUNNING, &wdd->status); err = wdd->ops->stop(wdd); - else + } else { set_bit(WDOG_HW_RUNNING, &wdd->status); + } if (err == 0) { clear_bit(WDOG_ACTIVE, &wdd->status); @@ -282,10 +293,27 @@ static int watchdog_stop(struct watchdog_device *wdd) static unsigned int watchdog_get_status(struct watchdog_device *wdd) { - if (!wdd->ops->status) - return 0; + struct watchdog_core_data *wd_data = wdd->wd_data; + unsigned int status; + + if (wdd->ops->status) + status = wdd->ops->status(wdd); + else + status = wdd->bootstatus & (WDIOF_CARDRESET | + WDIOF_OVERHEAT | + WDIOF_FANFAULT | + WDIOF_EXTERN1 | + WDIOF_EXTERN2 | + WDIOF_POWERUNDER | + WDIOF_POWEROVER); - return wdd->ops->status(wdd); + if 
(test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status)) + status |= WDIOF_MAGICCLOSE; + + if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status)) + status |= WDIOF_KEEPALIVEPING; + + return status; } /* @@ -361,7 +389,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, status = watchdog_get_status(wdd); mutex_unlock(&wd_data->lock); - return sprintf(buf, "%u\n", status); + return sprintf(buf, "0x%x\n", status); } static DEVICE_ATTR_RO(status); @@ -429,9 +457,7 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr, struct watchdog_device *wdd = dev_get_drvdata(dev); umode_t mode = attr->mode; - if (attr == &dev_attr_status.attr && !wdd->ops->status) - mode = 0; - else if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft) + if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft) mode = 0; return mode; @@ -948,17 +974,22 @@ int __init watchdog_dev_init(void) err = class_register(&watchdog_class); if (err < 0) { pr_err("couldn't register class\n"); - return err; + goto err_register; } err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog"); if (err < 0) { pr_err("watchdog: unable to allocate char dev region\n"); - class_unregister(&watchdog_class); - return err; + goto err_alloc; } return 0; + +err_alloc: + class_unregister(&watchdog_class); +err_register: + destroy_workqueue(watchdog_wq); + return err; } /* diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c index cbe373de3659..fa1efef3c96e 100644 --- a/drivers/watchdog/ziirave_wdt.c +++ b/drivers/watchdog/ziirave_wdt.c @@ -339,7 +339,7 @@ static int ziirave_wdt_remove(struct i2c_client *client) } static struct i2c_device_id ziirave_wdt_id[] = { - { "ziirave-wdt", 0 }, + { "rave-wdt", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ziirave_wdt_id);
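
A recurring pattern in the conversions above (sbsa_gwdt, tangox_wdt, max77620_wdt): report the hardware counter limit through max_hw_heartbeat_ms instead of clamping max_timeout, and flag a watchdog that firmware left ticking with WDOG_HW_RUNNING so the core keeps it fed until userspace opens the device. The fragment below is a minimal sketch of that pattern, not code from any driver in this series; the foo_* names and register layout are hypothetical.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

/* Hypothetical control register and enable bit, for illustration only. */
#define FOO_WDT_CTRL	0x0
#define FOO_WDT_EN	BIT(0)

struct foo_wdt {
	struct watchdog_device wdd;
	void __iomem *base;
	unsigned long clk_rate;		/* counter ticks per second */
};

static int foo_wdt_probe_tail(struct foo_wdt *data)
{
	struct watchdog_device *wdd = &data->wdd;

	/* wdd->info, wdd->ops and wdd->timeout set up earlier, elided */
	wdd->min_timeout = 1;
	/*
	 * The counter wraps after U32_MAX ticks; express that as a
	 * hardware limit in milliseconds and let the core re-ping the
	 * device whenever userspace asks for a longer timeout.
	 */
	wdd->max_hw_heartbeat_ms = U32_MAX / data->clk_rate * 1000;

	/*
	 * If the bootloader left the watchdog ticking, tell the core so
	 * it keeps feeding the device until userspace opens it.
	 */
	if (readl(data->base + FOO_WDT_CTRL) & FOO_WDT_EN)
		set_bit(WDOG_HW_RUNNING, &wdd->status);

	watchdog_set_drvdata(wdd, data);
	return watchdog_register_device(wdd);
}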
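
The devm_watchdog_register_device() helper added to watchdog_core.c removes the need for an explicit unregister in the .remove path. A sketch of the resulting probe, again under the hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/watchdog.h>

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	/* devm allocation and wdd->info/ops/timeout setup elided */

	/*
	 * Devres entries are released in reverse order on detach, so the
	 * watchdog is unregistered before the devm-allocated memory
	 * backing it is freed; no .remove callback is needed for this.
	 */
	return devm_watchdog_register_device(&pdev->dev, wdd);
}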
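
qcom-wdt replaces hard-coded register offsets with per-compatible offset tables fetched through of_device_get_match_data(). The same idiom suits any driver spanning several SoC register layouts; a condensed sketch with hypothetical names and offsets:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum foo_reg { FOO_RST, FOO_EN, FOO_NR_REGS };

static const u32 foo_layout_v1[FOO_NR_REGS] = {
	[FOO_RST] = 0x38,
	[FOO_EN]  = 0x40,
};

static const u32 foo_layout_v2[FOO_NR_REGS] = {
	[FOO_RST] = 0x4,
	[FOO_EN]  = 0x8,
};

static const struct of_device_id foo_of_table[] = {
	{ .compatible = "vendor,foo-v1", .data = foo_layout_v1 },
	{ .compatible = "vendor,foo-v2", .data = foo_layout_v2 },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* NULL means the matched entry carried no .data for this node. */
	const u32 *layout = of_device_get_match_data(&pdev->dev);

	if (!layout)
		return -ENODEV;
	/* ... stash layout and index it with enum foo_reg thereafter ... */
	return 0;
}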
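
On the user-visible side, watchdog_get_status() now synthesizes WDIOF_KEEPALIVEPING and WDIOF_MAGICCLOSE in the core (a driver .status callback is optional), and the sysfs status attribute prints hex ("0x%x") rather than decimal. A small standalone check, assuming a device node at /dev/watchdog0 and nowayout disabled:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog0", O_RDWR);
	int status = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	ioctl(fd, WDIOC_KEEPALIVE);	/* sets _WDOG_KEEPALIVE in the core */
	ioctl(fd, WDIOC_GETSTATUS, &status);
	printf("status: 0x%x (keepalive seen: %s)\n", status,
	       (status & WDIOF_KEEPALIVEPING) ? "yes" : "no");

	write(fd, "V", 1);	/* magic close; honored only without nowayout */
	close(fd);
	return 0;
}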