Diffstat (limited to 'drivers')
-rw-r--r--   drivers/thunderbolt/Kconfig  |   1
-rw-r--r--   drivers/thunderbolt/domain.c |  18
-rw-r--r--   drivers/thunderbolt/icm.c    |  33
-rw-r--r--   drivers/thunderbolt/nhi.h    |   1
-rw-r--r--   drivers/thunderbolt/switch.c | 603
-rw-r--r--   drivers/thunderbolt/tb.c     |   7
-rw-r--r--   drivers/thunderbolt/tb.h     |  40
7 files changed, 680 insertions(+), 23 deletions(-)
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index a9cc724985ad..f4869c38c7e4 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -6,6 +6,7 @@ menuconfig THUNDERBOLT
 	select CRC32
 	select CRYPTO
 	select CRYPTO_HASH
+	select NVMEM
 	help
 	  Thunderbolt Controller driver. This driver is required if you
 	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index f71b63e90016..9f2dcd48974d 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -426,6 +426,23 @@ err_free_tfm:
 	return ret;
 }
 
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return: %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+	if (!tb->cm_ops->disconnect_pcie_paths)
+		return -EPERM;
+
+	return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
 int tb_domain_init(void)
 {
 	return bus_register(&tb_bus_type);
@@ -435,4 +452,5 @@ void tb_domain_exit(void)
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
+	tb_switch_exit();
 }
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 0ffa4ec249ac..8ee340290219 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -54,6 +54,7 @@
  *	     where ICM needs to be started manually
  * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
  *	     (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
  * @is_supported: Checks if we can support ICM on this controller
  * @get_mode: Read and return the ICM firmware mode (optional)
  * @get_route: Find a route string for given switch
@@ -65,6 +66,7 @@ struct icm {
 	struct delayed_work rescan_work;
 	struct pci_dev *upstream_port;
 	int vnd_cap;
+	bool safe_mode;
 	bool (*is_supported)(struct tb *tb);
 	int (*get_mode)(struct tb *tb);
 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
@@ -852,6 +854,10 @@ static int icm_firmware_init(struct tb *tb)
 		ret = icm->get_mode(tb);
 
 		switch (ret) {
+		case NHI_FW_SAFE_MODE:
+			icm->safe_mode = true;
+			break;
+
 		case NHI_FW_CM_MODE:
 			/* Ask ICM to accept all Thunderbolt devices */
 			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
@@ -879,12 +885,20 @@ static int icm_firmware_init(struct tb *tb)
 
 static int icm_driver_ready(struct tb *tb)
 {
+	struct icm *icm = tb_priv(tb);
 	int ret;
 
 	ret = icm_firmware_init(tb);
 	if (ret)
 		return ret;
 
+	if (icm->safe_mode) {
+		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+		return 0;
+	}
+
 	return __icm_driver_ready(tb, &tb->security_level);
 }
 
@@ -975,12 +989,23 @@ static void icm_complete(struct tb *tb)
 
 static int icm_start(struct tb *tb)
 {
+	struct icm *icm = tb_priv(tb);
 	int ret;
 
-	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (icm->safe_mode)
+		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+	else
+		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
 	if (!tb->root_switch)
 		return -ENODEV;
 
+	/*
+	 * NVM upgrade has not been tested on Apple systems and they
+	 * don't provide images publicly either. To be on the safe side
+	 * prevent root switch NVM upgrade on Macs for now.
+	 */
+	tb->root_switch->no_nvm_upgrade = is_apple();
+
 	ret = tb_switch_add(tb->root_switch);
 	if (ret)
 		tb_switch_put(tb->root_switch);
@@ -998,6 +1023,11 @@ static void icm_stop(struct tb *tb)
 	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 }
 
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
 /* Falcon Ridge and Alpine Ridge */
 static const struct tb_cm_ops icm_fr_ops = {
 	.driver_ready = icm_driver_ready,
@@ -1009,6 +1039,7 @@ static const struct tb_cm_ops icm_fr_ops = {
 	.approve_switch = icm_fr_approve_switch,
 	.add_switch_key = icm_fr_add_switch_key,
 	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
 };
 
 struct tb *icm_probe(struct tb_nhi *nhi)
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 953864ae0ab3..5b5bb2c436be 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -155,6 +155,7 @@ enum nhi_fw_mode {
 
 enum nhi_mailbox_cmd {
 	NHI_MAILBOX_SAVE_DEVS = 0x05,
+	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
 	NHI_MAILBOX_DRV_UNLOADS = 0x07,
 	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
 };
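The new opcode slots into the existing mailbox command set. A minimal sketch of where each command is issued over the driver's lifetime, using only calls that appear elsewhere in this patch (the sequencing around suspend is an assumption, not shown in this diff):

/* Sketch: mailbox commands over the driver lifetime (assumed sequencing) */
static void example_mailbox_lifecycle(struct tb *tb)
{
	/* System suspend: ask ICM firmware to save device state */
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);

	/* Host NVM upgrade: tear down PCIe tunnels first */
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);

	/* Driver unload: tell the firmware we are going away */
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}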
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1524edf42ee8..ab3e8f410444 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,13 +5,395 @@
  */
 
 #include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include "tb.h"
 
 /* Switch authorization from userspace is serialized by this lock */
 static DEFINE_MUTEX(switch_lock);
 
+/* Switch NVM support */
+
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_CSS			0x10
+#define NVM_FLASH_SIZE		0x45
+
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+	struct list_head list;
+	uuid_be uuid;
+	u32 status;
+};
+
+/*
+ * Hold NVM authentication failure status per switch. This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	list_for_each_entry(st, &nvm_auth_status_cache, list) {
+		if (!uuid_be_cmp(st->uuid, *sw->uuid))
+			return st;
+	}
+
+	return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	mutex_unlock(&nvm_auth_status_lock);
+
+	*status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+	struct nvm_auth_status *st;
+
+	if (WARN_ON(!sw->uuid))
+		return;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+
+	if (!st) {
+		st = kzalloc(sizeof(*st), GFP_KERNEL);
+		if (!st)
+			goto unlock;
+
+		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+		INIT_LIST_HEAD(&st->list);
+		list_add_tail(&st->list, &nvm_auth_status_cache);
+	}
+
+	st->status = status;
+unlock:
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	if (st) {
+		list_del(&st->list);
+		kfree(st);
+	}
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = sw->nvm->buf;
+	u16 ds_size;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	image_size = sw->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	if (!sw->safe_mode) {
+		u16 device_id;
+
+		/*
+		 * Make sure the device ID in the image matches the one
+		 * we read from the switch config space.
+		 */
+		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+		if (device_id != sw->config.device_id)
+			return -EINVAL;
+
+		if (sw->generation < 3) {
+			/* Write CSS headers first */
+			ret = dma_port_flash_write(sw->dma_port,
+				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+				DMA_PORT_CSS_MAX_SIZE);
+			if (ret)
+				return ret;
+		}
+
+		/* Skip headers in the image */
+		buf += hdr_size;
+		image_size -= hdr_size;
+	}
+
+	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
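To make the validation logic above easier to follow, here is the image layout it assumes, with the size and alignment checks condensed into one sketch. The offsets come straight from nvm_validate_and_write(); the section names are descriptive only:

/*
 * Assumed NVM image layout (offsets as used by nvm_validate_and_write()):
 *
 *   0x0             dword whose low 24 bits give hdr_size (FARB pointer)
 *   0x10 (NVM_CSS)  CSS headers, written separately on gen < 3 hardware
 *   hdr_size        u16 digital section size (must fit in the image;
 *                   hdr_size itself must be 4k aligned)
 *   hdr_size+0x05   u16 device ID (NVM_DEVID, must match config space)
 */
static bool example_check_image(const u8 *buf, size_t image_size)
{
	u32 hdr_size = (*(const u32 *)buf) & 0xffffff;

	return image_size >= NVM_MIN_SIZE && image_size <= NVM_MAX_SIZE &&
	       hdr_size + NVM_DEVID + 2 < image_size &&
	       IS_ALIGNED(hdr_size, SZ_4K);
}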
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+	int ret;
+
+	/*
+	 * Root switch NVM upgrade requires that we disconnect the
+	 * existing PCIe paths first (in case it is not in safe mode
+	 * already).
+	 */
+	if (!sw->safe_mode) {
+		ret = tb_domain_disconnect_pcie_paths(sw->tb);
+		if (ret)
+			return ret;
+		/*
+		 * The host controller goes away pretty soon after this if
+		 * everything goes well so getting timeout is expected.
+		 */
+		ret = dma_port_flash_update_auth(sw->dma_port);
+		return ret == -ETIMEDOUT ? 0 : ret;
+	}
+
+	/*
+	 * From safe mode we can get out by just power cycling the
+	 * switch.
+	 */
+	dma_port_power_cycle(sw->dma_port);
+	return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+	int ret, retries = 10;
+
+	ret = dma_port_flash_update_auth(sw->dma_port);
+	if (ret && ret != -ETIMEDOUT)
+		return ret;
+
+	/*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get timeout for a while). Once
+	 * we get a response the device needs to be power cycled for the
+	 * new NVM to be taken into use.
+	 */
+	do {
+		u32 status;
+
+		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+		if (ret < 0 && ret != -ETIMEDOUT)
+			return ret;
+		if (ret > 0) {
+			if (status) {
+				tb_sw_warn(sw, "failed to authenticate NVM\n");
+				nvm_set_auth_status(sw, status);
+			}
+
+			tb_sw_info(sw, "power cycling the switch now\n");
+			dma_port_power_cycle(sw->dma_port);
+			return 0;
+		}
+
+		msleep(500);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	struct tb_switch *sw = priv;
+
+	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/*
+	 * Since writing the NVM image might require some special steps,
+	 * for example when CSS headers are written, we cache the image
+	 * locally here and handle the special cases when the user asks
+	 * us to authenticate the image.
+	 */
+	if (!sw->nvm->buf) {
+		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!sw->nvm->buf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	sw->nvm->buf_data_size = offset + bytes;
+	memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+					   size_t size, bool active)
+{
+	struct nvmem_config config;
+
+	memset(&config, 0, sizeof(config));
+
+	if (active) {
+		config.name = "nvm_active";
+		config.reg_read = tb_switch_nvm_read;
+	} else {
+		config.name = "nvm_non_active";
+		config.reg_write = tb_switch_nvm_write;
+	}
+
+	config.id = id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = &sw->dev;
+	config.owner = THIS_MODULE;
+	config.root_only = true;
+	config.priv = sw;
+
+	return nvmem_register(&config);
+}
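Note that tb_switch_nvm_write() recomputes buf_data_size as offset + bytes on every call, so the cached image size is only correct when userspace writes the image in a single sequential pass. A hypothetical out-of-order writer shows why (chunk0/chunk1 stand in for 4 KiB halves of an image):

/* Hypothetical illustration: the buffer contents end up correct, but
 * buf_data_size does not, because the last write alone determines the
 * recorded image size. */
tb_switch_nvm_write(sw, SZ_4K, chunk1, SZ_4K);	/* buf_data_size = 8 KiB */
tb_switch_nvm_write(sw, 0, chunk0, SZ_4K);	/* buf_data_size = 4 KiB! */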
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+	struct nvmem_device *nvm_dev;
+	struct tb_switch_nvm *nvm;
+	u32 val;
+	int ret;
+
+	if (!sw->dma_port)
+		return 0;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return -ENOMEM;
+
+	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+	/*
+	 * If the switch is in safe-mode the only accessible portion of
+	 * the NVM is the non-active one where userspace is expected to
+	 * write new functional NVM.
+	 */
+	if (!sw->safe_mode) {
+		u32 nvm_size, hdr_size;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+		nvm_size = (SZ_1M << (val & 7)) / 8;
+		nvm_size = (nvm_size - hdr_size) / 2;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		nvm->major = val >> 16;
+		nvm->minor = val >> 8;
+
+		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_ida;
+		}
+		nvm->active = nvm_dev;
+	}
+
+	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+	if (IS_ERR(nvm_dev)) {
+		ret = PTR_ERR(nvm_dev);
+		goto err_nvm_active;
+	}
+	nvm->non_active = nvm_dev;
+
+	mutex_lock(&switch_lock);
+	sw->nvm = nvm;
+	mutex_unlock(&switch_lock);
+
+	return 0;
+
+err_nvm_active:
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+err_ida:
+	ida_simple_remove(&nvm_ida, nvm->id);
+	kfree(nvm);
+
+	return ret;
+}
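Two register decodings above deserve a worked example. NVM_FLASH_SIZE encodes the total flash size in its low three bits, and NVM_VERSION packs major/minor into one dword; the concrete register values below are made up for illustration, decoded the same way as tb_switch_nvm_add() does:

u32 flash = 0x00000003;		/* low 3 bits = 3 */
u32 nvm_size = (SZ_1M << (flash & 7)) / 8;	/* 8 MiB / 8 = 1 MiB total */
/* gen 3 header is 16 KiB; two NVM portions share the remainder */
nvm_size = (nvm_size - SZ_16K) / 2;		/* 516096 bytes each */

u32 ver = 0x00211000;		/* hypothetical NVM_VERSION value */
u8 major = ver >> 16;		/* 0x21 */
u8 minor = ver >> 8;		/* 0x10 (low byte truncated away) */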
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+	struct tb_switch_nvm *nvm;
+
+	mutex_lock(&switch_lock);
+	nvm = sw->nvm;
+	sw->nvm = NULL;
+	mutex_unlock(&switch_lock);
+
+	if (!nvm)
+		return;
+
+	/* Remove authentication status in case the switch is unplugged */
+	if (!nvm->authenticating)
+		nvm_clear_auth_status(sw);
+
+	nvmem_unregister(nvm->non_active);
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+	ida_simple_remove(&nvm_ida, nvm->id);
+	vfree(nvm->buf);
+	kfree(nvm);
+}
+
 /* port utility functions */
 
 static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -448,6 +830,83 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(key);
 
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u32 status;
+
+	nvm_get_auth_status(sw, &status);
+	return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	bool val;
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/* If NVMem devices are not yet added */
+	if (!sw->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear the authentication status */
+	nvm_clear_auth_status(sw);
+
+	if (val) {
+		ret = nvm_validate_and_write(sw);
+		if (ret)
+			goto exit_unlock;
+
+		sw->nvm->authenticating = true;
+
+		if (!tb_route(sw))
+			ret = nvm_authenticate_host(sw);
+		else
+			ret = nvm_authenticate_device(sw);
+	}
+
+exit_unlock:
+	mutex_unlock(&switch_lock);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->safe_mode)
+		ret = -ENODATA;
+	else if (!sw->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -480,6 +939,8 @@ static struct attribute *switch_attrs[] = {
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
 	&dev_attr_key.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	&dev_attr_unique_id.attr,
@@ -498,9 +959,14 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate.attr ||
+		   attr == &dev_attr_nvm_version.attr) {
+		if (sw->dma_port)
+			return attr->mode;
+		return 0;
 	}
 
-	return attr->mode;
+	return sw->safe_mode ? 0 : attr->mode;
 }
 
 static struct attribute_group switch_group = {
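With the attributes and NVMem devices in place, the expected userspace flow is: write the new image into the non-active NVMem device in one sequential pass, then write 1 to nvm_authenticate. A sketch, assuming a root switch named 0-0 and nvm id 0 (the actual sysfs names depend on enumeration order and follow from config.dev/config.name/config.id above):

#include <fcntl.h>
#include <unistd.h>

#define NON_ACTIVE "/sys/bus/thunderbolt/devices/0-0/nvm_non_active0/nvmem"
#define AUTH       "/sys/bus/thunderbolt/devices/0-0/nvm_authenticate"

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int img, nvm, auth;

	if (argc < 2)
		return 1;
	img = open(argv[1], O_RDONLY);
	nvm = open(NON_ACTIVE, O_WRONLY);
	auth = open(AUTH, O_WRONLY);
	if (img < 0 || nvm < 0 || auth < 0)
		return 1;

	/* Sequential copy: offsets must increase monotonically */
	while ((n = read(img, buf, sizeof(buf))) > 0)
		if (write(nvm, buf, n) != n)
			return 1;
	close(nvm);

	/* Validate, flash and authenticate the cached image */
	return write(auth, "1", 1) == 1 ? 0 : 1;
}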
@@ -652,6 +1118,45 @@ err_free_sw_ports:
 }
 
 /**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except DMA configuration port before it is
+ * flashed with a valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+	struct tb_switch *sw;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->safe_mode = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+}
+
+/**
  * tb_switch_configure() - Uploads configuration to the switch
  * @sw: Switch to configure
  *
@@ -717,8 +1222,11 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
 }
 
-static void tb_switch_add_dma_port(struct tb_switch *sw)
+static int tb_switch_add_dma_port(struct tb_switch *sw)
 {
+	u32 status;
+	int ret;
+
 	switch (sw->generation) {
 	case 3:
 		break;
@@ -726,14 +1234,49 @@ static void tb_switch_add_dma_port(struct tb_switch *sw)
 	case 2:
 		/* Only root switch can be upgraded */
 		if (tb_route(sw))
-			return;
+			return 0;
 		break;
 
 	default:
-		return;
+		/*
+		 * DMA port is the only thing available when the switch
+		 * is in safe mode.
+		 */
+		if (!sw->safe_mode)
+			return 0;
+		break;
 	}
 
+	if (sw->no_nvm_upgrade)
+		return 0;
+
 	sw->dma_port = dma_port_alloc(sw);
+	if (!sw->dma_port)
+		return 0;
+
+	/*
+	 * Check status of the previous flash authentication. If there
+	 * is one we need to power cycle the switch in any case to make
+	 * it functional again.
+	 */
+	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+	if (ret <= 0)
+		return ret;
+
+	if (status) {
+		tb_sw_info(sw, "switch flash authentication failed\n");
+		tb_switch_set_uuid(sw);
+		nvm_set_auth_status(sw, status);
+	}
+
+	tb_sw_info(sw, "power cycling the switch now\n");
+	dma_port_power_cycle(sw->dma_port);
+
+	/*
+	 * We return an error here which causes the switch add to fail.
+	 * The switch should appear back after the power cycle is
+	 * complete.
+	 */
+	return -ESHUTDOWN;
 }
 
 /**
@@ -759,29 +1302,41 @@ int tb_switch_add(struct tb_switch *sw)
 	 * to the userspace. NVM can be accessed through DMA
 	 * configuration based mailbox.
 	 */
-	tb_switch_add_dma_port(sw);
-
-	/* read drom */
-	ret = tb_drom_read(sw);
-	if (ret) {
-		tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+	ret = tb_switch_add_dma_port(sw);
+	if (ret)
 		return ret;
-	}
 
-	tb_sw_info(sw, "uid: %#llx\n", sw->uid);
-
-	tb_switch_set_uuid(sw);
+	if (!sw->safe_mode) {
+		/* read drom */
+		ret = tb_drom_read(sw);
+		if (ret) {
+			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			return ret;
+		}
+		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (sw->ports[i].disabled) {
-			tb_port_info(&sw->ports[i], "disabled by eeprom\n");
-			continue;
+		tb_switch_set_uuid(sw);
+
+		for (i = 0; i <= sw->config.max_port_number; i++) {
+			if (sw->ports[i].disabled) {
+				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				continue;
+			}
+			ret = tb_init_port(&sw->ports[i]);
+			if (ret)
+				return ret;
 		}
-		ret = tb_init_port(&sw->ports[i]);
-		if (ret)
-			return ret;
 	}
 
-	return device_add(&sw->dev);
+	ret = device_add(&sw->dev);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_nvm_add(sw);
+	if (ret)
+		device_del(&sw->dev);
+
+	return ret;
 }
 
 /**
@@ -808,6 +1363,7 @@ void tb_switch_remove(struct tb_switch *sw)
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
 
+	tb_switch_nvm_remove(sw);
 	device_unregister(&sw->dev);
 }
 
@@ -976,3 +1532,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
 
 	return NULL;
 }
+
+void tb_switch_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ad2304bad592..1b02ca0b6129 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -369,6 +369,13 @@ static int tb_start(struct tb *tb)
 	if (!tb->root_switch)
 		return -ENOMEM;
 
+	/*
+	 * ICM firmware upgrade needs running firmware and in native
+	 * mode that is not available so disable firmware upgrade of the
+	 * root switch.
+	 */
+	tb->root_switch->no_nvm_upgrade = true;
+
 	ret = tb_switch_configure(tb->root_switch);
 	if (ret) {
 		tb_switch_put(tb->root_switch);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index a998b3a251d5..3d9f64676e58 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -7,6 +7,7 @@
 #ifndef TB_H_
 #define TB_H_
 
+#include <linux/nvmem-provider.h>
 #include <linux/pci.h>
 #include <linux/uuid.h>
 
@@ -15,6 +16,30 @@
 #include "dma_port.h"
 
 /**
+ * struct tb_switch_nvm - Structure holding switch NVM information
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ *	 the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ *		   image
+ * @authenticating: The switch is authenticating the new NVM
+ */
+struct tb_switch_nvm {
+	u8 major;
+	u8 minor;
+	int id;
+	struct nvmem_device *active;
+	struct nvmem_device *non_active;
+	void *buf;
+	size_t buf_data_size;
+	bool authenticating;
+};
+
+/**
  * enum tb_security_level - Thunderbolt security level
  * @TB_SECURITY_NONE: No security, legacy mode
  * @TB_SECURITY_USER: User approval required at minimum
@@ -39,7 +64,8 @@ enum tb_security_level {
  * @ports: Ports in this switch
  * @dma_port: If the switch has port supporting DMA configuration based
  *	      mailbox this will hold the pointer to that (%NULL
- *	      otherwise).
+ *	      otherwise). If set it also means the switch has
+ *	      upgradeable NVM.
  * @tb: Pointer to the domain the switch belongs to
  * @uid: Unique ID of the switch
  * @uuid: UUID of the switch (or %NULL if not supported)
@@ -51,6 +77,9 @@ enum tb_security_level {
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
  * @is_unplugged: The switch is going away
  * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
  * @authorized: Whether the switch is authorized by user or policy
  * @work: Work used to automatically authorize a switch
  * @security_level: Switch supported security level
@@ -81,6 +110,9 @@ struct tb_switch {
 	int cap_plug_events;
 	bool is_unplugged;
 	u8 *drom;
+	struct tb_switch_nvm *nvm;
+	bool no_nvm_upgrade;
+	bool safe_mode;
 	unsigned int authorized;
 	struct work_struct work;
 	enum tb_security_level security_level;
@@ -172,6 +204,7 @@ struct tb_path {
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 */
 struct tb_cm_ops {
 	int (*driver_ready)(struct tb *tb);
@@ -187,6 +220,7 @@ struct tb_cm_ops {
 	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
 				    const u8 *challenge, u8 *response);
+	int (*disconnect_pcie_paths)(struct tb *tb);
 };
 
 /**
@@ -340,6 +374,7 @@ extern struct device_type tb_switch_type;
 
 int tb_domain_init(void);
 void tb_domain_exit(void);
+void tb_switch_exit(void);
 
 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
 int tb_domain_add(struct tb *tb);
@@ -351,6 +386,7 @@ void tb_domain_complete(struct tb *tb);
 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
 
 static inline void tb_domain_put(struct tb *tb)
 {
@@ -359,6 +395,8 @@ static inline void tb_domain_put(struct tb *tb)
 
 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 				  u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+					    struct device *parent, u64 route);
 int tb_switch_configure(struct tb_switch *sw);
 int tb_switch_add(struct tb_switch *sw);
 void tb_switch_remove(struct tb_switch *sw);
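For review purposes, the host upgrade path introduced by this patch chains together as follows; the device path differs only in the final step. Everything here is derived from the hunks above:

/*
 * Host NVM upgrade call chain:
 *
 *   nvm_authenticate_store()             sysfs write of "1"
 *     -> nvm_validate_and_write()        check image, flash via DMA port
 *     -> nvm_authenticate_host()         root switch (tb_route(sw) == 0)
 *        -> tb_domain_disconnect_pcie_paths()
 *           -> icm_disconnect_pcie_paths()
 *              -> nhi_mailbox_cmd(NHI_MAILBOX_DISCONNECT_PCIE_PATHS)
 *        -> dma_port_flash_update_auth() controller drops off the bus
 *
 * nvm_authenticate_device() instead polls
 * dma_port_flash_update_auth_status() and power cycles the switch.
 */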