Diffstat (limited to 'drivers')
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 120
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 177
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 101
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 94
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 138
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 302
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 6
-rw-r--r-- drivers/gpu/drm/amd/scheduler/sched_fence.c | 10
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_cursor.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 18
-rw-r--r-- drivers/hwmon/Kconfig | 2
-rw-r--r-- drivers/hwmon/applesmc.c | 2
-rw-r--r-- drivers/hwmon/scpi-hwmon.c | 21
-rw-r--r-- drivers/iommu/s390-iommu.c | 23
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 114
-rw-r--r-- drivers/net/dsa/mv88e6060.h | 111
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 10
-rw-r--r-- drivers/net/ethernet/dlink/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 55
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/icplus/Makefile | 5
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2300
-rw-r--r-- drivers/net/ethernet/icplus/ipg.h | 748
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 76
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 10
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 24
-rw-r--r-- drivers/net/fjes/fjes_hw.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 14
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/phy/at803x.c | 4
-rw-r--r-- drivers/net/phy/marvell.c | 16
-rw-r--r-- drivers/net/phy/phy.c | 3
-rw-r--r-- drivers/net/phy/vitesse.c | 16
-rw-r--r-- drivers/net/usb/cdc_ether.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 7
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/s390/cio/chsc.c | 37
-rw-r--r-- drivers/s390/cio/chsc.h | 15
-rw-r--r-- drivers/s390/cio/cio.c | 14
-rw-r--r-- drivers/s390/cio/css.c | 5
-rw-r--r-- drivers/s390/crypto/Makefile | 7
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 6
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 10
-rw-r--r-- drivers/s390/crypto/zcrypt_api.h | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype50.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 3
81 files changed, 1308 insertions(+), 3744 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad280b9..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
goto out_err;
}
- params_head = section_head->params;
+ params_head = section.params;
while (params_head) {
if (copy_from_user(&key_val, (void __user *)params_head,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d464fb..306f75700bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@ struct amdgpu_clock {
* Fences.
*/
struct amdgpu_fence_driver {
- struct amdgpu_ring *ring;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
- struct delayed_work lockup_work;
+ struct timer_list fallback_timer;
wait_queue_head_t fence_queue;
};
@@ -917,8 +916,8 @@ struct amdgpu_ring {
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ struct amdgpu_bo *bo;
+ uint64_t addr;
};
struct amdgpu_vm_id {
@@ -926,8 +925,6 @@ struct amdgpu_vm_id {
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
- /* last use of vmid */
- struct fence *last_id_use;
};
struct amdgpu_vm {
@@ -957,24 +954,70 @@ struct amdgpu_vm {
/* for id and flush management per ring */
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
+ /* for interval tree */
+ spinlock_t it_lock;
};
struct amdgpu_vm_manager {
- struct fence *active[AMDGPU_NUM_VM];
- uint32_t max_pfn;
+ struct {
+ struct fence *active;
+ atomic_long_t owner;
+ } ids[AMDGPU_NUM_VM];
+
+ uint32_t max_pfn;
/* number of VMIDs */
- unsigned nvm;
+ unsigned nvm;
/* vram base address for page table entry */
- u64 vram_base_offset;
+ u64 vram_base_offset;
/* is vm enabled? */
- bool enabled;
- /* for hw to save the PD addr on suspend/resume */
- uint32_t saved_table_addr[AMDGPU_NUM_VM];
+ bool enabled;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct amdgpu_ring *vm_pte_funcs_ring;
};
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+ struct amdgpu_vm *vm,
+ struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
/*
* context related structures
*/
@@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser {
/* relocations */
struct amdgpu_bo_list_entry *vm_bos;
struct list_head validated;
+ struct fence *fence;
struct amdgpu_ib *ibs;
uint32_t num_ibs;
@@ -1226,7 +1270,7 @@ struct amdgpu_job {
struct amdgpu_device *adev;
struct amdgpu_ib *ibs;
uint32_t num_ibs;
- struct mutex job_lock;
+ void *owner;
struct amdgpu_user_fence uf;
int (*free_job)(struct amdgpu_job *job);
};
@@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_ctx *ctx,
- struct amdgpu_ib *ibs,
- uint32_t num_ibs);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2358,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm,
- struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
-/*
* functions used by amdgpu_encoder.c
*/
struct amdgpu_afmt_acr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..3afcf0237c25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_ctx *ctx,
- struct amdgpu_ib *ibs,
- uint32_t num_ibs)
-{
- struct amdgpu_cs_parser *parser;
- int i;
-
- parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
- if (!parser)
- return NULL;
-
- parser->adev = adev;
- parser->filp = filp;
- parser->ctx = ctx;
- parser->ibs = ibs;
- parser->num_ibs = num_ibs;
- for (i = 0; i < num_ibs; i++)
- ibs[i].ctx = ctx;
-
- return parser;
-}
-
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
+ unsigned i;
+
if (!error) {
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &parser->ibs[parser->num_ibs-1].fence->base);
+ &parser->validated,
+ parser->fence);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
-}
+ fence_put(parser->fence);
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
if (parser->bo_list)
@@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (!amdgpu_enable_scheduler)
- {
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
- if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
- }
-
- kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
- amdgpu_cs_parser_fini_early(parser, error, backoff);
- amdgpu_cs_parser_fini_late(parser);
+ if (parser->ibs)
+ for (i = 0; i < parser->num_ibs; i++)
+ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+ kfree(parser->ibs);
+ if (parser->uf.bo)
+ drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
r = amdgpu_bo_vm_update_pte(parser, vm);
- if (r) {
- goto out;
- }
- amdgpu_cs_sync_rings(parser);
- if (!amdgpu_enable_scheduler)
- r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
- parser->filp);
+ if (!r)
+ amdgpu_cs_sync_rings(parser);
-out:
return r;
}
@@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_cs *cs = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_cs_parser *parser;
+ struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
int i, r;
if (!adev->accel_working)
return -EBUSY;
- parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
- if (!parser)
- return -ENOMEM;
- r = amdgpu_cs_parser_init(parser, data);
+ parser.adev = adev;
+ parser.filp = filp;
+
+ r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(parser, r, false);
+ amdgpu_cs_parser_fini(&parser, r, false);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
mutex_lock(&vm->mutex);
- r = amdgpu_cs_parser_relocs(parser);
+ r = amdgpu_cs_parser_relocs(&parser);
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
else if (!r) {
reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, parser);
+ r = amdgpu_cs_ib_fill(adev, &parser);
}
if (!r) {
- r = amdgpu_cs_dependencies(adev, parser);
+ r = amdgpu_cs_dependencies(adev, &parser);
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
}
@@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r)
goto out;
- for (i = 0; i < parser->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
+ for (i = 0; i < parser.num_ibs; i++)
+ trace_amdgpu_cs(&parser, i);
- r = amdgpu_cs_ib_vm_chunk(adev, parser);
+ r = amdgpu_cs_ib_vm_chunk(adev, &parser);
if (r)
goto out;
- if (amdgpu_enable_scheduler && parser->num_ibs) {
+ if (amdgpu_enable_scheduler && parser.num_ibs) {
+ struct amdgpu_ring * ring = parser.ibs->ring;
+ struct amd_sched_fence *fence;
struct amdgpu_job *job;
- struct amdgpu_ring * ring = parser->ibs->ring;
+
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job) {
r = -ENOMEM;
goto out;
}
+
job->base.sched = &ring->sched;
- job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
- job->adev = parser->adev;
- job->ibs = parser->ibs;
- job->num_ibs = parser->num_ibs;
- job->base.owner = parser->filp;
- mutex_init(&job->job_lock);
+ job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+ job->adev = parser.adev;
+ job->owner = parser.filp;
+ job->free_job = amdgpu_cs_free_job;
+
+ job->ibs = parser.ibs;
+ job->num_ibs = parser.num_ibs;
+ parser.ibs = NULL;
+ parser.num_ibs = 0;
+
if (job->ibs[job->num_ibs - 1].user) {
- memcpy(&job->uf, &parser->uf,
- sizeof(struct amdgpu_user_fence));
+ job->uf = parser.uf;
job->ibs[job->num_ibs - 1].user = &job->uf;
+ parser.uf.bo = NULL;
}
- job->free_job = amdgpu_cs_free_job;
- mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job(&job->base);
- if (r) {
- mutex_unlock(&job->job_lock);
+ fence = amd_sched_fence_create(job->base.s_entity,
+ parser.filp);
+ if (!fence) {
+ r = -ENOMEM;
amdgpu_cs_free_job(job);
kfree(job);
goto out;
}
- cs->out.handle =
- amdgpu_ctx_add_fence(parser->ctx, ring,
- &job->base.s_fence->base);
- parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+ job->base.s_fence = fence;
+ parser.fence = fence_get(&fence->base);
- list_sort(NULL, &parser->validated, cmp_size_smaller_first);
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &job->base.s_fence->base);
+ cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+ &fence->base);
+ job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
- mutex_unlock(&job->job_lock);
- amdgpu_cs_parser_fini_late(parser);
- mutex_unlock(&vm->mutex);
- return 0;
+ trace_amdgpu_cs_ioctl(job);
+ amd_sched_entity_push_job(&job->base);
+
+ } else {
+ struct amdgpu_fence *fence;
+
+ r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+ parser.filp);
+ fence = parser.ibs[parser.num_ibs - 1].fence;
+ parser.fence = fence_get(&fence->base);
+ cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
}
- cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
- amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+ amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
mutex_unlock(&vm->mutex);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d72205bb5..3671f9f220bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
* that the the relevant GPU caches have been flushed.
*/
+static struct kmem_cache *amdgpu_fence_slab;
+static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+
/**
* amdgpu_fence_write - write a fence value
*
@@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
}
/**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
- /*
- * Do not reset the timer here with mod_delayed_work,
- * this can livelock in an interaction with TTM delayed destroy.
- */
- queue_delayed_work(system_power_efficient_wq,
- &ring->fence_drv.lockup_work,
- AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
-/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_device *adev = ring->adev;
/* we are protected by the ring emission mutex */
- *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
@@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
(*fence)->seq,
AMDGPU_FENCE_FLAG_INT);
- trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
return 0;
}
/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+ mod_timer(&ring->fence_drv.fallback_timer,
+ jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
* amdgpu_fence_activity - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
if (seq < last_emitted)
- amdgpu_fence_schedule_check(ring);
+ amdgpu_fence_schedule_fallback(ring);
return wake;
}
/**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
*
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
*
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
*/
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
{
- struct amdgpu_fence_driver *fence_drv;
- struct amdgpu_ring *ring;
-
- fence_drv = container_of(work, struct amdgpu_fence_driver,
- lockup_work.work);
- ring = fence_drv->ring;
-
if (amdgpu_fence_activity(ring))
wake_up_all(&ring->fence_drv.fence_queue);
}
/**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
*
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
*
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
*/
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
{
- if (amdgpu_fence_activity(ring))
- wake_up_all(&ring->fence_drv.fence_queue);
+ struct amdgpu_ring *ring = (void *)arg;
+
+ amdgpu_fence_process(ring);
}
/**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
return 0;
- amdgpu_fence_schedule_check(ring);
+ amdgpu_fence_schedule_fallback(ring);
wait_event(ring->fence_drv.fence_queue, (
(signaled = amdgpu_fence_seq_signaled(ring, seq))));
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
atomic64_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
- amdgpu_fence_check_lockup);
- ring->fence_drv.ring = ring;
+ setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+ (unsigned long)ring);
init_waitqueue_head(&ring->fence_drv.fence_queue);
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
+ if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
+ amdgpu_fence_slab = kmem_cache_create(
+ "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_fence_slab)
+ return -ENOMEM;
+ }
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+ kmem_cache_destroy(amdgpu_fence_slab);
mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
amd_sched_fini(&ring->sched);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
fence->fence_wake.func = amdgpu_fence_check_signaled;
__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
fence_get(f);
- amdgpu_fence_schedule_check(ring);
+ if (!timer_pending(&ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(ring);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
+static void amdgpu_fence_release(struct fence *f)
+{
+ struct amdgpu_fence *fence = to_amdgpu_fence(f);
+ kmem_cache_free(amdgpu_fence_slab, fence);
+}
+
const struct fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
.signaled = amdgpu_fence_is_signaled,
.wait = fence_default_wait,
- .release = NULL,
+ .release = amdgpu_fence_release,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..00c5b580f56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
+ r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_clear_freed(adev, bo_va->vm);
if (r)
@@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *rbo;
struct amdgpu_bo_va *bo_va;
+ struct ttm_validate_buffer tv, tv_pd;
+ struct ww_acquire_ctx ticket;
+ struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
int r = 0;
@@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
mutex_lock(&fpriv->vm.mutex);
rbo = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(rbo, false);
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
+ tv.bo = &rbo->tbo;
+ tv.shared = true;
+ list_add(&tv.head, &list);
+
+ if (args->operation == AMDGPU_VA_OP_MAP) {
+ tv_pd.bo = &fpriv->vm.page_directory->tbo;
+ tv_pd.shared = true;
+ list_add(&tv_pd.head, &list);
+ }
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
mutex_unlock(&fpriv->vm.mutex);
drm_gem_object_unreference_unlocked(gobj);
@@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ drm_gem_object_unreference_unlocked(gobj);
mutex_unlock(&fpriv->vm.mutex);
return -ENOENT;
}
@@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
-
+ ttm_eu_backoff_reservation(&ticket, &list);
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
mutex_unlock(&fpriv->vm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e65987743871..9e25edafa721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
int r;
if (size) {
- r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+ r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
}
if (ib->vm)
- amdgpu_vm_fence(adev, ib->vm, ib->fence);
+ amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
amdgpu_ring_unlock_commit(ring);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff4567798..ea756e77b023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ struct amdgpu_sa_bo **sa_bo,
+ unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31dc194..8b88edb0434b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
return false;
}
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager,
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
- mutex_lock(&job->job_lock);
- r = amdgpu_ib_schedule(job->adev,
- job->num_ibs,
- job->ibs,
- job->base.owner);
+ trace_amdgpu_sched_run_job(job);
+ r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
@@ -61,8 +59,6 @@ err:
if (job->free_job)
job->free_job(job);
- mutex_unlock(&job->job_lock);
- fence_put(&job->base.s_fence->base);
kfree(job);
return fence ? &fence->base : NULL;
}
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
return -ENOMEM;
job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+ job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+ if (!job->base.s_fence) {
+ kfree(job);
+ return -ENOMEM;
+ }
+ *f = fence_get(&job->base.s_fence->base);
+
job->adev = adev;
job->ibs = ibs;
job->num_ibs = num_ibs;
- job->base.owner = owner;
- mutex_init(&job->job_lock);
+ job->owner = owner;
job->free_job = free_job;
- mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job(&job->base);
- if (r) {
- mutex_unlock(&job->job_lock);
- kfree(job);
- return r;
- }
- *f = fence_get(&job->base.s_fence->base);
- mutex_unlock(&job->job_lock);
+ amd_sched_entity_push_job(&job->base);
} else {
r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52ec6fe..1caaf201b708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+ r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd05217..dd005c336c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
return -EINVAL;
}
- if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
- (count >= AMDGPU_NUM_SYNCS)) {
+ if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+ r = fence_wait(&fence->base, true);
+ if (r)
+ return r;
+ continue;
+ }
+
+ if (count >= AMDGPU_NUM_SYNCS) {
/* not enough room, wait manually */
r = fence_wait(&fence->base, false);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf72a2e..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
__entry->fences)
);
+TRACE_EVENT(amdgpu_cs_ioctl,
+ TP_PROTO(struct amdgpu_job *job),
+ TP_ARGS(job),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_device *, adev)
+ __field(struct amd_sched_job *, sched_job)
+ __field(struct amdgpu_ib *, ib)
+ __field(struct fence *, fence)
+ __field(char *, ring_name)
+ __field(u32, num_ibs)
+ ),
+
+ TP_fast_assign(
+ __entry->adev = job->adev;
+ __entry->sched_job = &job->base;
+ __entry->ib = job->ibs;
+ __entry->fence = &job->base.s_fence->base;
+ __entry->ring_name = job->ibs[0].ring->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+ __entry->adev, __entry->sched_job, __entry->ib,
+ __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+TRACE_EVENT(amdgpu_sched_run_job,
+ TP_PROTO(struct amdgpu_job *job),
+ TP_ARGS(job),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_device *, adev)
+ __field(struct amd_sched_job *, sched_job)
+ __field(struct amdgpu_ib *, ib)
+ __field(struct fence *, fence)
+ __field(char *, ring_name)
+ __field(u32, num_ibs)
+ ),
+
+ TP_fast_assign(
+ __entry->adev = job->adev;
+ __entry->sched_job = &job->base;
+ __entry->ib = job->ibs;
+ __entry->fence = &job->base.s_fence->base;
+ __entry->ring_name = job->ibs[0].ring->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+ __entry->adev, __entry->sched_job, __entry->ib,
+ __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+
TRACE_EVENT(amdgpu_vm_grab_id,
TP_PROTO(unsigned vmid, int ring),
TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
);
-DECLARE_EVENT_CLASS(amdgpu_fence_request,
-
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
- TP_ARGS(dev, ring, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(int, ring)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->ring = ring;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, ring=%d, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
-
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
- TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
-
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
- TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
-
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
- TP_ARGS(dev, ring, seqno)
-);
-
DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
TP_PROTO(int ring, struct amdgpu_semaphore *sem),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9fc26d..d4bac5f49939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
ret = drm_mm_dump_table(m, mm);
spin_unlock(&glob->lru_lock);
if (ttm_pl == TTM_PL_VRAM)
- seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+ seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
adev->mman.bdev.man[ttm_pl].size,
- atomic64_read(&adev->vram_usage) >> 20,
- atomic64_read(&adev->vram_vis_usage) >> 20);
+ (u64)atomic64_read(&adev->vram_usage) >> 20,
+ (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..159ce54bbd8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
/* check if the id is still valid */
- if (vm_id->id && vm_id->last_id_use &&
- vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
- trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
- return 0;
+ if (vm_id->id) {
+ unsigned id = vm_id->id;
+ long owner;
+
+ owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+ if (owner == (long)vm) {
+ trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+ return 0;
+ }
}
/* we definately need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.nvm; ++i) {
- struct fence *fence = adev->vm_manager.active[i];
+ struct fence *fence = adev->vm_manager.ids[i].active;
struct amdgpu_ring *fring;
if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (choices[i]) {
struct fence *fence;
- fence = adev->vm_manager.active[choices[i]];
+ fence = adev->vm_manager.ids[choices[i]].active;
vm_id->id = choices[i];
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct fence *flushed_updates = vm_id->flushed_updates;
- bool is_earlier = false;
-
- if (flushed_updates && updates) {
- BUG_ON(flushed_updates->context != updates->context);
- is_earlier = (updates->seqno - flushed_updates->seqno <=
- INT_MAX) ? true : false;
- }
+ bool is_later;
- if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
- is_earlier) {
+ if (!flushed_updates)
+ is_later = true;
+ else if (!updates)
+ is_later = false;
+ else
+ is_later = fence_is_later(updates, flushed_updates);
+ if (pd_addr != vm_id->pd_gpu_addr || is_later) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
- if (is_earlier) {
+ if (is_later) {
vm_id->flushed_updates = fence_get(updates);
fence_put(flushed_updates);
}
- if (!flushed_updates)
- vm_id->flushed_updates = fence_get(updates);
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
}
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
- unsigned ridx = fence->ring->idx;
- unsigned vm_id = vm->ids[ridx].id;
-
- fence_put(adev->vm_manager.active[vm_id]);
- adev->vm_manager.active[vm_id] = fence_get(&fence->base);
+ struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+ unsigned vm_id = vm->ids[ring->idx].id;
- fence_put(vm->ids[ridx].last_id_use);
- vm->ids[ridx].last_id_use = fence_get(&fence->base);
+ fence_put(adev->vm_manager.ids[vm_id].active);
+ adev->vm_manager.ids[vm_id].active = fence_get(fence);
+ atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
}
/**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
*
* @adev: amdgpu_device pointer
* @bo: bo to clear
+ *
+ * need to reserve bo first before calling it.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- return r;
-
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
- goto error_unreserve;
+ goto error;
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
- goto error_unreserve;
+ goto error;
r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (!r)
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
- if (amdgpu_enable_scheduler) {
- amdgpu_bo_unreserve(bo);
+ if (amdgpu_enable_scheduler)
return 0;
- }
+
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+error:
return r;
}
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
* Add a mapping of the BO at the specefied addr into the VM.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
- amdgpu_bo_unreserve(bo_va->bo);
+ size == 0 || size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
- }
/* make sure object fit at this offset */
eaddr = saddr + size;
- if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
- amdgpu_bo_unreserve(bo_va->bo);
+ if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
- }
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
- amdgpu_bo_unreserve(bo_va->bo);
return -EINVAL;
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+ spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
tmp->it.start, tmp->it.last + 1);
- amdgpu_bo_unreserve(bo_va->bo);
r = -EINVAL;
goto error;
}
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
- amdgpu_bo_unreserve(bo_va->bo);
r = -ENOMEM;
goto error;
}
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
+ spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_map(bo_va, mapping);
/* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (eaddr > vm->max_pde_used)
vm->max_pde_used = eaddr;
- amdgpu_bo_unreserve(bo_va->bo);
-
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (vm->page_tables[pt_idx].bo)
continue;
- ww_mutex_lock(&resv->lock, NULL);
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
NULL, resv, &pt);
- ww_mutex_unlock(&resv->lock);
if (r)
goto error_free;
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
error_free:
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
@@ -1119,7 +1110,7 @@ error:
* Remove a mapping of the BO at the specefied addr from the VM.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}
- if (&mapping->list == &bo_va->invalids) {
- amdgpu_bo_unreserve(bo_va->bo);
+ if (&mapping->list == &bo_va->invalids)
return -ENOENT;
- }
}
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
list_add(&mapping->list, &vm->freed);
else
kfree(mapping);
- amdgpu_bo_unreserve(bo_va->bo);
return 0;
}
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
+ spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
+ spin_unlock(&vm->it_lock);
kfree(mapping);
}
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL;
- vm->ids[i].last_id_use = NULL;
}
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
-
+ spin_lock_init(&vm->it_lock);
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
NULL, NULL, &vm->page_directory);
if (r)
return r;
-
+ r = amdgpu_bo_reserve(vm->page_directory, false);
+ if (r) {
+ amdgpu_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
+ return r;
+ }
r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+ amdgpu_bo_unreserve(vm->page_directory);
if (r) {
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
-
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ unsigned id = vm->ids[i].id;
+
+ atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+ (long)vm, 0);
fence_put(vm->ids[i].flushed_updates);
- fence_put(vm->ids[i].last_id_use);
}
mutex_destroy(&vm->mutex);
}
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < AMDGPU_NUM_VM; ++i)
+ fence_put(adev->vm_manager.ids[i].active);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5df8e7..57a2e347f04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf756d40..e1dcab98e249 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
- mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
- mmPCIE_INDEX, 0xffffffff, 0x0140001c,
- mmPCIE_DATA, 0x000f0000, 0x00000000,
- mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
- mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
- adev->gfx.config.max_texture_channel_caches = 8;
+ adev->gfx.config.max_texture_channel_caches = 16;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
}
case CHIP_FIJI:
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 18:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 19:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 20:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 23:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 26:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 30:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 7:
+ /* unused idx */
+ continue;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ break;
case CHIP_TONGA:
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TONGA:
- case CHIP_FIJI:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x0000002A);
break;
+ case CHIP_FIJI:
+ amdgpu_ring_write(ring, 0x3a00161a);
+ amdgpu_ring_write(ring, 0x0000002e);
+ break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
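
Both tile-mode loops above follow one pattern: build a register value per index by ORing bitfield macros (ARRAY_MODE, PIPE_CONFIG, MICRO_TILE_MODE_NEW, SAMPLE_SPLIT, or the bank macros), cache it in adev->gfx.config, then write it to the register block starting at mmGB_TILE_MODE0 (or mmGB_MACROTILE_MODE0) plus the index. Reduced to its core (a sketch, not driver code; tile_modes[] is a hypothetical stand-in for the values the switch builds):

	static void write_tile_modes(struct amdgpu_device *adev,
				     const u32 *tile_modes, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			/* keep a CPU-side copy for later queries */
			adev->gfx.config.tile_mode_array[i] = tile_modes[i];
			WREG32(mmGB_TILE_MODE0 + i, tile_modes[i]);
		}
	}
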
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc73fff..7427d8cd4c43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/boniare_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
/**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
WREG32(mmVM_L2_CNTL, tmp);
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
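
REG_SET_FIELD() is amdgpu's read-modify-write idiom: clear the field's mask out of the current value, then OR in the new value shifted into place. A self-contained sketch of the same operation (EXAMPLE_FIELD_MASK/SHIFT are illustrative; the real macro derives them from generated register headers):

	#define EXAMPLE_FIELD_MASK	0x00000070u
	#define EXAMPLE_FIELD_SHIFT	4

	static inline u32 example_set_field(u32 reg_val, u32 field_val)
	{
		reg_val &= ~EXAMPLE_FIELD_MASK;		/* clear the old field */
		reg_val |= (field_val << EXAMPLE_FIELD_SHIFT) &
			   EXAMPLE_FIELD_MASK;		/* insert the new one */
		return reg_val;
	}
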
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
static int gmc_v7_0_sw_fini(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
- for (i = 0; i < AMDGPU_NUM_VM; ++i)
- fence_put(adev->vm_manager.active[i]);
+ amdgpu_vm_manager_fini(adev);
gmc_v7_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
static int gmc_v7_0_suspend(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
- for (i = 0; i < AMDGPU_NUM_VM; ++i)
- fence_put(adev->vm_manager.active[i]);
+ amdgpu_vm_manager_fini(adev);
gmc_v7_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
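
The open-coded loops removed here (and in gmc_v8_0.c below) all did the same thing: drop the fence reference held for each hardware VM ID. Centralizing that in amdgpu_vm_manager_fini() keeps the teardown in one place. Judging from the loop it replaces, the helper amounts to something like this sketch (the real implementation lives in amdgpu_vm.c and may clear additional state):

	void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
	{
		unsigned int i;

		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			fence_put(adev->vm_manager.active[i]);
	}
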
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e74e3b4..cb0e50ebb528 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
WREG32(mmVM_L2_CNTL, tmp);
tmp = RREG32(mmVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
static int gmc_v8_0_sw_fini(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
- for (i = 0; i < AMDGPU_NUM_VM; ++i)
- fence_put(adev->vm_manager.active[i]);
+ amdgpu_vm_manager_fini(adev);
gmc_v8_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
static int gmc_v8_0_suspend(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
- for (i = 0; i < AMDGPU_NUM_VM; ++i)
- fence_put(adev->vm_manager.active[i]);
+ amdgpu_vm_manager_fini(adev);
gmc_v8_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
TP_ARGS(sched_job),
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
+ __field(struct amd_sched_job *, sched_job)
+ __field(struct fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
+ __entry->sched_job = sched_job;
+ __entry->fence = &sched_job->s_fence->base;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
- TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->name, __entry->job_count,
- __entry->hw_job_count)
+ TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
);
+
+TRACE_EVENT(amd_sched_process_job,
+ TP_PROTO(struct amd_sched_fence *fence),
+ TP_ARGS(fence),
+ TP_STRUCT__entry(
+ __field(struct fence *, fence)
+ ),
+
+ TP_fast_assign(
+ __entry->fence = &fence->base;
+ ),
+ TP_printk("fence=%p signaled", __entry->fence)
+);
+
#endif
/* This part must be outside protection */
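
Each TRACE_EVENT(name, ...) above expands into a trace_name() function callable from the driver plus the ftrace record layout. The three clauses divide the work: TP_STRUCT__entry declares the stored record, TP_fast_assign fills it at the trace point, and TP_printk formats it when the buffer is read. A minimal event in the same shape (illustrative name):

	TRACE_EVENT(example_fence_event,
		    TP_PROTO(struct fence *f),
		    TP_ARGS(f),
		    TP_STRUCT__entry(
			    __field(struct fence *, fence)
			    ),
		    TP_fast_assign(
			    __entry->fence = f;
			    ),
		    TP_printk("fence=%p", __entry->fence)
	);
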
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..ea30d6ad4c13 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@ static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
@@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
*
* Returns 0 for success, negative error code otherwise.
*/
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
- struct amd_sched_fence *fence = amd_sched_fence_create(
- entity, sched_job->owner);
-
- if (!fence)
- return -ENOMEM;
-
- fence_get(&fence->base);
- sched_job->s_fence = fence;
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
trace_amd_sched_job(sched_job);
- return 0;
}
/**
@@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
list_del_init(&s_fence->list);
spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
+ trace_amd_sched_process_job(s_fence);
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);
+ if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+ sched_fence_slab = kmem_cache_create(
+ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+ }
	/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
+ if (atomic_dec_and_test(&sched_fence_slab_ref))
+ kmem_cache_destroy(sched_fence_slab);
}
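
The fence slab is shared by every scheduler instance, so it is guarded by a plain atomic reference count: the first amd_sched_init() creates the cache, the last amd_sched_fini() destroys it. The pattern in isolation (my_cache/my_cache_ref are illustrative names):

	static struct kmem_cache *my_cache;
	static atomic_t my_cache_ref = ATOMIC_INIT(0);

	static int my_cache_get(size_t obj_size)
	{
		if (atomic_inc_return(&my_cache_ref) == 1) {	/* first user */
			my_cache = kmem_cache_create("my_cache", obj_size, 0,
						     SLAB_HWCACHE_ALIGN, NULL);
			if (!my_cache)
				return -ENOMEM;
		}
		return 0;
	}

	static void my_cache_put(void)
	{
		if (atomic_dec_and_test(&my_cache_ref))		/* last user */
			kmem_cache_destroy(my_cache);
	}
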
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9aced041..939692b14f4b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,6 +30,9 @@
struct amd_gpu_scheduler;
struct amd_sched_rq;
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@@ -76,7 +79,6 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- void *owner;
};
extern const struct fence_ops amd_sched_fence_ops;
@@ -128,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..8d2130b9ff05 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
struct amd_sched_fence *fence = NULL;
unsigned seq;
- fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
@@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
return true;
}
+static void amd_sched_fence_release(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
const struct fence_ops amd_sched_fence_ops = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = NULL,
+ .release = amd_sched_fence_release,
};
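
Once fences come from the slab, freeing has to go back through it: leaving .release as NULL would let the fence core free with kfree() memory that kmem_cache_zalloc() allocated. The alloc/free pairing, reduced to a sketch:

	static void fence_lifecycle_sketch(void)
	{
		struct amd_sched_fence *f;

		f = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
		if (!f)
			return;
		/* ... fence is initialized, refcounted, then released ... */
		kmem_cache_free(sched_fence_slab, f);	/* must match the allocator */
	}
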
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068fe5d88..a7bf6a90eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
BUG_ON(pixels_current == pixels_prev);
+ if (!handle || !file_priv) {
+ mga_hide_cursor(mdev);
+ return 0;
+ }
+
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
goto out_unreserve1;
}
- if (!handle) {
- mga_hide_cursor(mdev);
- ret = 0;
- goto out1;
- }
-
/* Move cursor buffers into VRAM if they aren't already */
if (!pixels_1->pin_count) {
ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3024883b844..84d45633d28c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+ /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+ */
+ if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
- bo->flags &= ~RADEON_GEM_GTT_WC;
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
/* Don't try to enable write-combining when it can't work, or things
* may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
- DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
- "better performance thanks to write-combining\n");
- bo->flags &= ~RADEON_GEM_GTT_WC;
+ if (bo->flags & RADEON_GEM_GTT_WC)
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#endif
radeon_ttm_placement_from_domain(bo, domain);
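
All three hunks clear the same pair of flags, so the decision reads naturally as a predicate over the chip family. A condensed sketch of the RV6xx case (illustrative helper name, not in the driver):

	static bool gtt_wc_broken(struct radeon_device *rdev)
	{
		/* Write-combined GTT mappings hang RV610-RV635, see fdo #91268 */
		return rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635;
	}

With such a helper, radeon_bo_create() would clear RADEON_GEM_GTT_WC and RADEON_GEM_GTT_UC whenever it returns true, exactly as the added hunk does inline.
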
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde23400..f4f03dcc1530 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
- if (!ret)
- rdev->pm.sysfs_initialized = true;
+ rdev->pm.sysfs_initialized = true;
}
mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46042e0..a82b891ae1fe 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f4768591e..265064c62d49 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
struct drm_connector *connector;
drm_for_each_connector(connector, crtc->dev) {
- if (connector && connector->state->crtc == crtc) {
+ if (connector->state->crtc == crtc) {
struct drm_encoder *encoder = connector->encoder;
struct vc4_encoder *vc4_encoder =
to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
dlist_next++;
HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+ (u32 __iomem *)vc4_crtc->dlist -
+ (u32 __iomem *)vc4->hvs->dlist);
/* Make the next display list start after ours. */
vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
* that will take too much.
*/
primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
- if (!primary_plane) {
+ if (IS_ERR(primary_plane)) {
dev_err(dev, "failed to construct primary plane\n");
ret = PTR_ERR(primary_plane);
goto err;
}
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
- if (!cursor_plane) {
+ if (IS_ERR(cursor_plane)) {
dev_err(dev, "failed to construct cursor plane\n");
ret = PTR_ERR(cursor_plane);
goto err_primary;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e730605edcc..d5db9e0f3b73 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = {
.remove = vc4_platform_drm_remove,
.driver = {
.name = "vc4-drm",
- .owner = THIS_MODULE,
.of_match_table = vc4_of_match,
},
};
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f672a4..8098c5b21ba4 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
for (i = 0; i < 64; i += 4) {
DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
- ((uint32_t *)vc4->hvs->dlist)[i + 0],
- ((uint32_t *)vc4->hvs->dlist)[i + 1],
- ((uint32_t *)vc4->hvs->dlist)[i + 2],
- ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+ readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+ readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+ readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+ readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
}
}
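
The dlist lives in HVS memory mapped as MMIO, so it carries the __iomem annotation and must be accessed through readl() rather than dereferenced directly; sparse flags the old plain indexing. The rule in miniature (a sketch; dlist_word is an illustrative helper):

	static u32 dlist_word(struct vc4_dev *vc4, unsigned int i)
	{
		u32 __iomem *dlist = (u32 __iomem *)vc4->hvs->dlist;

		return readl(dlist + i);	/* not: dlist[i] */
	}
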
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10c0147..887f3caad0be 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}
-struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
@@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
return &vc4_state->base;
}
-void vc4_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void vc4_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
@@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
}
/* Called during init to allocate the plane's atomic state. */
-void vc4_plane_reset(struct drm_plane *plane)
+static void vc4_plane_reset(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
@@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
int crtc_w = state->crtc_w;
int crtc_h = state->crtc_h;
+ if (state->crtc_w << 16 != state->src_w ||
+ state->crtc_h << 16 != state->src_h) {
+ /* We don't support scaling yet, which involves
+ * allocating the LBM memory for scaling temporary
+ * storage, and putting filter kernels in the HVS
+ * context.
+ */
+ return -EINVAL;
+ }
+
if (crtc_x < 0) {
offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
crtc_w += crtc_x;
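
The scaling check works because DRM plane source coordinates (src_w/src_h) are in 16.16 fixed point while the CRTC destination (crtc_w/crtc_h) is in whole pixels, so "no scaling" means the source size equals the destination size shifted up by 16 bits. As a predicate (illustrative helper):

	static bool plane_is_unscaled(const struct drm_plane_state *state)
	{
		return (u32)state->crtc_w << 16 == state->src_w &&
		       (u32)state->crtc_h << 16 == state->src_h;
	}
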
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b0043ad94..8f59f057cdf4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@ config SENSORS_APPLESMC
config SENSORS_ARM_SCPI
tristate "ARM SCPI Sensors"
depends on ARM_SCPI_PROTOCOL
+ depends on THERMAL || !THERMAL_OF
help
This driver provides support for temperature, voltage, current
and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1471,6 +1472,7 @@ config SENSORS_INA209
config SENSORS_INA2XX
tristate "Texas Instruments INA219 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for INA219, INA220, INA226,
INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956941b1..0af7fd311979 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
static int applesmc_init_smcreg_try(void)
{
struct applesmc_registers *s = &smcreg;
- bool left_light_sensor, right_light_sensor;
+ bool left_light_sensor = 0, right_light_sensor = 0;
unsigned int count;
u8 tmp[1];
int ret;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241bbf9af..7e20567bc369 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
- int ret;
+ int ret, idx;
scpi_ops = get_scpi_ops();
if (!scpi_ops)
@@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
scpi_sensors->scpi_ops = scpi_ops;
- for (i = 0; i < nr_sensors; i++) {
- struct sensor_data *sensor = &scpi_sensors->data[i];
+ for (i = 0, idx = 0; i < nr_sensors; i++) {
+ struct sensor_data *sensor = &scpi_sensors->data[idx];
ret = scpi_ops->sensor_get_info(i, &sensor->info);
if (ret)
@@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
num_power++;
break;
default:
- break;
+ continue;
}
sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
sensor->dev_attr_label.show = scpi_show_label;
sensor->dev_attr_label.attr.name = sensor->label;
- scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr;
- scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr;
+ scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
+ scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
- sysfs_attr_init(scpi_sensors->attrs[i << 1]);
- sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]);
+ sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
+ sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
+ idx++;
}
scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
zone->sensor_id = i;
zone->scpi_sensors = scpi_sensors;
- zone->tzd = thermal_zone_of_sensor_register(dev, i, zone,
- &scpi_sensor_ops);
+ zone->tzd = thermal_zone_of_sensor_register(dev,
+ sensor->info.sensor_id, zone, &scpi_sensor_ops);
/*
* The call to thermal_zone_of_sensor_register returns
* an error for sensors that are not associated with
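
The i/idx split introduced above is the standard in-place compaction idiom: i walks every probed sensor, idx advances only for the ones kept, so the attribute slots stay dense even when unknown sensor classes are skipped with continue. The reduced form (is_supported() is a stand-in predicate):

	static bool is_supported(int v);	/* stand-in predicate */

	static size_t compact_supported(int *vals, size_t n)
	{
		size_t i, idx;

		for (i = 0, idx = 0; i < n; i++) {
			if (!is_supported(vals[i]))
				continue;	/* skipped: idx not advanced */
			vals[idx++] = vals[i];
		}
		return idx;			/* number of entries kept */
	}
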
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags, nr_pages, i;
+ unsigned long *entry;
int rc = 0;
if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
for (i = 0; i < nr_pages; i++) {
- dma_update_cpu_trans(s390_domain->dma_table, page_addr,
- dma_addr, flags);
+ entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto undo_cpu_trans;
+ }
+ dma_update_cpu_trans(entry, page_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
break;
}
spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+ flags = ZPCI_PTE_INVALID;
+ while (i-- > 0) {
+ page_addr -= PAGE_SIZE;
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(s390_domain->dma_table,
+ dma_addr);
+ if (!entry)
+ break;
+ dma_update_cpu_trans(entry, page_addr, flags);
+ }
+ }
spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
return rc;
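
The new undo_cpu_trans path is the usual partial-failure rollback: when the forward loop stops at entry i, walk i back down and invert each step that was actually applied, leaving the table as it was found. The shape in isolation (map_one/unmap_one are stand-ins for the dma_walk_cpu_trans/dma_update_cpu_trans pair):

	static int map_one(unsigned long i);	/* stand-in, may fail */
	static void unmap_one(unsigned long i);	/* stand-in, inverts map_one */

	static int map_range(unsigned long nr_pages)
	{
		unsigned long i;
		int rc = 0;

		for (i = 0; i < nr_pages; i++) {
			rc = map_one(i);
			if (rc)
				break;
		}
		if (rc)
			while (i-- > 0)		/* undo only what was applied */
				unmap_one(i);
		return rc;
	}
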
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577755f6..0527f485c3dc 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
-
-#define REG_PORT(p) (8 + (p))
-#define REG_GLOBAL 0x0f
+#include "mv88e6060.h"
static int reg_read(struct dsa_switch *ds, int addr, int reg)
{
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
if (bus == NULL)
return NULL;
- ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+ ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
- if (ret == 0x0600)
+ if (ret == PORT_SWITCH_ID_6060)
return "Marvell 88E6060 (A0)";
- if (ret == 0x0601 || ret == 0x0602)
+ if (ret == PORT_SWITCH_ID_6060_R1 ||
+ ret == PORT_SWITCH_ID_6060_R2)
return "Marvell 88E6060 (B0)";
- if ((ret & 0xfff0) == 0x0600)
+ if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
return "Marvell 88E6060";
}
@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
unsigned long timeout;
/* Set all ports to the disabled state. */
- for (i = 0; i < 6; i++) {
- ret = REG_READ(REG_PORT(i), 0x04);
- REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ for (i = 0; i < MV88E6060_PORTS; i++) {
+ ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+ REG_WRITE(REG_PORT(i), PORT_CONTROL,
+ ret & ~PORT_CONTROL_STATE_MASK);
}
/* Wait for transmit queues to drain. */
usleep_range(2000, 4000);
/* Reset the switch. */
- REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_SWRESET |
+ GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+ GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & 0x8000) == 0x0000)
+ ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ if (ret & GLOBAL_STATUS_INIT_READY)
break;
usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
/* Enable automatic address learning, set the address
* database size to 1024 entries, and set the default aging
* time to 5 minutes.
*/
- REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+ GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
return 0;
}
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
- REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
+ REG_WRITE(addr, PORT_CONTROL,
+ dsa_is_cpu_port(ds, p) ?
+ PORT_CONTROL_TRAILER |
+ PORT_CONTROL_INGRESS_MODE |
+ PORT_CONTROL_STATE_FORWARDING :
+ PORT_CONTROL_STATE_FORWARDING);
/* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
*/
- REG_WRITE(addr, 0x06,
- ((p & 0xf) << 12) |
- (dsa_is_cpu_port(ds, p) ?
- ds->phys_port_mask :
- (1 << ds->dst->cpu_port)));
+ REG_WRITE(addr, PORT_VLAN_MAP,
+ ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
+ (dsa_is_cpu_port(ds, p) ?
+ ds->phys_port_mask :
+ BIT(ds->dst->cpu_port)));
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
- REG_WRITE(addr, 0x0b, 1 << p);
+ REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
return 0;
}
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < MV88E6060_PORTS; i++) {
ret = mv88e6060_setup_port(ds, i);
if (ret < 0)
return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)
static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
{
- REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
- REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+ /* Use the same MAC Address as FD Pause frames for all ports */
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
return 0;
}
static int mv88e6060_port_to_phy_addr(int port)
{
- if (port >= 0 && port <= 5)
+ if (port >= 0 && port < MV88E6060_PORTS)
return port;
return -1;
}
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(ds, addr, regnum, val);
}
-static void mv88e6060_poll_link(struct dsa_switch *ds)
-{
- int i;
-
- for (i = 0; i < DSA_MAX_PORTS; i++) {
- struct net_device *dev;
- int uninitialized_var(port_status);
- int link;
- int speed;
- int duplex;
- int fc;
-
- dev = ds->ports[i];
- if (dev == NULL)
- continue;
-
- link = 0;
- if (dev->flags & IFF_UP) {
- port_status = reg_read(ds, REG_PORT(i), 0x00);
- if (port_status < 0)
- continue;
-
- link = !!(port_status & 0x1000);
- }
-
- if (!link) {
- if (netif_carrier_ok(dev)) {
- netdev_info(dev, "link down\n");
- netif_carrier_off(dev);
- }
- continue;
- }
-
- speed = (port_status & 0x0100) ? 100 : 10;
- duplex = (port_status & 0x0200) ? 1 : 0;
- fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
-
- if (!netif_carrier_ok(dev)) {
- netdev_info(dev,
- "link up, %d Mb/s, %s duplex, flow control %sabled\n",
- speed,
- duplex ? "full" : "half",
- fc ? "en" : "dis");
- netif_carrier_on(dev);
- }
- }
-}
-
static struct dsa_switch_driver mv88e6060_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_TRAILER,
.probe = mv88e6060_probe,
@@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
.set_addr = mv88e6060_set_addr,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
- .poll_link = mv88e6060_poll_link,
};
static int __init mv88e6060_init(void)
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000000..cc9b2ed4aff4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
+/*
+ * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
+ * Copyright (c) 2015 Neil Armstrong
+ *
+ * Based on mv88e6xxx.h
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6060_H
+#define __MV88E6060_H
+
+#define MV88E6060_PORTS 6
+
+#define REG_PORT(p) (0x8 + (p))
+#define PORT_STATUS 0x00
+#define PORT_STATUS_PAUSE_EN BIT(15)
+#define PORT_STATUS_MY_PAUSE BIT(14)
+#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
+#define PORT_STATUS_RESOLVED BIT(13)
+#define PORT_STATUS_LINK BIT(12)
+#define PORT_STATUS_PORTMODE BIT(11)
+#define PORT_STATUS_PHYMODE BIT(10)
+#define PORT_STATUS_DUPLEX BIT(9)
+#define PORT_STATUS_SPEED BIT(8)
+#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_6060 0x0600
+#define PORT_SWITCH_ID_6060_MASK 0xfff0
+#define PORT_SWITCH_ID_6060_R1 0x0601
+#define PORT_SWITCH_ID_6060_R2 0x0602
+#define PORT_CONTROL 0x04
+#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
+#define PORT_CONTROL_TRAILER BIT(14)
+#define PORT_CONTROL_HEADER BIT(11)
+#define PORT_CONTROL_INGRESS_MODE BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
+#define PORT_CONTROL_STATE_MASK 0x03
+#define PORT_CONTROL_STATE_DISABLED 0x00
+#define PORT_CONTROL_STATE_BLOCKING 0x01
+#define PORT_CONTROL_STATE_LEARNING 0x02
+#define PORT_CONTROL_STATE_FORWARDING 0x03
+#define PORT_VLAN_MAP 0x06
+#define PORT_VLAN_MAP_DBNUM_SHIFT 12
+#define PORT_VLAN_MAP_TABLE_MASK 0x1f
+#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
+#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
+#define PORT_RX_CNTR 0x10
+#define PORT_TX_CNTR 0x11
+
+#define REG_GLOBAL 0x0f
+#define GLOBAL_STATUS 0x00
+#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
+#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
+#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
+#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
+#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
+#define GLOBAL_STATUS_INIT_READY BIT(11)
+#define GLOBAL_STATUS_ATU_FULL BIT(3)
+#define GLOBAL_STATUS_ATU_DONE BIT(2)
+#define GLOBAL_STATUS_PHY_INT BIT(1)
+#define GLOBAL_STATUS_EEINT BIT(0)
+#define GLOBAL_MAC_01 0x01
+#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
+#define GLOBAL_MAC_23 0x02
+#define GLOBAL_MAC_45 0x03
+#define GLOBAL_CONTROL 0x04
+#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
+#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
+#define GLOBAL_CONTROL_CTRMODE BIT(8)
+#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
+#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
+#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
+#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
+#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
+#define GLOBAL_ATU_OP 0x0b
+#define GLOBAL_ATU_OP_BUSY BIT(15)
+#define GLOBAL_ATU_OP_NOP (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA 0x0c
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
+#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
+#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
+#define GLOBAL_ATU_MAC_01 0x0d
+#define GLOBAL_ATU_MAC_23 0x0e
+#define GLOBAL_ATU_MAC_45 0x0f
+
+#endif
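
With the register map named, the driver code above reads as documentation: putting a port into a given STP state becomes a masked update of PORT_CONTROL instead of a bare write to register 0x04. A sketch using the REG_READ/REG_WRITE helpers from mv88e6060.c (illustrative function, not part of the patch):

	static int example_set_port_state(struct dsa_switch *ds, int p, u16 state)
	{
		int ret;

		ret = REG_READ(REG_PORT(p), PORT_CONTROL);
		REG_WRITE(REG_PORT(p), PORT_CONTROL,
			  (ret & ~PORT_CONTROL_STATE_MASK) | state);
		return 0;
	}
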
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..955d06b9cdba 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
-source "drivers/net/ethernet/icplus/Kconfig"
config JME
tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..4a2ee98738f0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
-obj-$(CONFIG_IP1000) += icplus/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5dbaff..c9b036789184 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
- if (CHIP_IS_E1x(bp))
+ if (chip_is_e1x)
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97d7614..b89504405b72 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
-static struct pci_error_handlers liquidio_err_handler = {
+static const struct pci_error_handlers liquidio_err_handler = {
.error_detected = liquidio_pcie_error_detected,
.mmio_enabled = liquidio_pcie_mmio_enabled,
.slot_reset = liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..7f709cbdcd87 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1583,8 +1583,14 @@ err_disable_device:
static void nicvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct nicvf *nic = netdev_priv(netdev);
- struct net_device *pnetdev = nic->pnicvf->netdev;
+ struct nicvf *nic;
+ struct net_device *pnetdev;
+
+ if (!netdev)
+ return;
+
+ nic = netdev_priv(netdev);
+ pnetdev = nic->pnicvf->netdev;
/* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d0b9d4..ebdc83247bb6 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
if NET_VENDOR_DLINK
config DL2K
- tristate "DL2000/TC902x-based Gigabit Ethernet support"
+ tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
depends on PCI
select CRC32
---help---
- This driver supports DL2000/TC902x-based Gigabit ethernet cards,
+ This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
which includes
D-Link DGE-550T Gigabit Ethernet Adapter.
D-Link DL2000-based Gigabit Ethernet Adapter.
Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
+	  ICPlus IP1000A-based cards.
To compile this driver as a module, choose M here: the
module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fcdaaaf..ccca4799c27b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_unmap_rx;
+ if (np->chip_id == CHIP_IP1000A &&
+ (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
+ /* PHY magic taken from ipg driver, undocumented registers */
+ mii_write(dev, np->phy_addr, 31, 0x0001);
+ mii_write(dev, np->phy_addr, 27, 0x01e0);
+ mii_write(dev, np->phy_addr, 31, 0x0002);
+ mii_write(dev, np->phy_addr, 27, 0xeb8e);
+ mii_write(dev, np->phy_addr, 31, 0x0000);
+ mii_write(dev, np->phy_addr, 30, 0x005e);
+ /* advertise 1000BASE-T half & full duplex, prefer MASTER */
+ mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
+ }
+
/* Fiber device? */
np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
for (i = 0; i < 6; i++)
dev->dev_addr[i] = psrom->mac_addr[i];
+ if (np->chip_id == CHIP_IP1000A) {
+ np->led_mode = psrom->led_mode;
+ return 0;
+ }
+
if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
return 0;
}
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
return 0;
}
+static void rio_set_led_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->ioaddr;
+ u32 mode;
+
+ if (np->chip_id != CHIP_IP1000A)
+ return;
+
+ mode = dr32(ASICCtrl);
+ mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
+
+ if (np->led_mode & 0x01)
+ mode |= IPG_AC_LED_MODE;
+ if (np->led_mode & 0x02)
+ mode |= IPG_AC_LED_MODE_BIT_1;
+ if (np->led_mode & 0x08)
+ mode |= IPG_AC_LED_SPEED;
+
+ dw32(ASICCtrl, mode);
+}
+
static int
rio_open (struct net_device *dev)
{
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
mdelay(10);
+ rio_set_led_mode(dev);
+
/* DebugCtrl bit 4, 5, 9 must set */
dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)
alloc_list (dev);
- /* Get station address */
- for (i = 0; i < 6; i++)
- dw8(StationAddr0 + i, dev->dev_addr[i]);
+ /* Set station address */
+	/* 16- or 32-bit access is required by the TC9020 datasheet but 8-bit
+	 * works too. However, it doesn't work on IP1000A so we use 16-bit access.
+ */
+ for (i = 0; i < 3; i++)
+ dw16(StationAddr0 + 2 * i,
+ cpu_to_le16(((u16 *)dev->dev_addr)[i]));
set_multicast (dev);
if (np->coalesce) {
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
break;
mdelay (1);
}
+ rio_set_led_mode(dev);
rio_free_tx (dev, 1);
/* Reset TFDListPtr */
dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
break;
mdelay (1);
}
+ rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
dev->name, int_status);
dw16(ASICCtrl + 2, GlobalReset | HostReset);
mdelay (500);
+ rio_set_led_mode(dev);
}
}
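
The "PHY magic" sequence added to rio_probe1() reads like paged access: each write to register 31 appears to select a vendor page before the following register 27/30 write, ending with a return to page 0. If that reading is right, the pattern generalizes to the helper below (a guess at undocumented behaviour, not a documented interface; mii_write is the driver's own accessor):

	static void phy_paged_write(struct net_device *dev, int phy_addr,
				    u16 page, int reg, u16 val)
	{
		mii_write(dev, phy_addr, 31, page);	/* select vendor page (assumed) */
		mii_write(dev, phy_addr, reg, val);
		mii_write(dev, phy_addr, 31, 0x0000);	/* back to the standard page */
	}
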
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b007069..8f4f61262d5c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
ResetBusy = 0x0400,
};
+#define IPG_AC_LED_MODE BIT(14)
+#define IPG_AC_LED_SPEED BIT(27)
+#define IPG_AC_LED_MODE_BIT_1 BIT(29)
+
/* Transmit Frame Control bits */
enum TFC_bits {
DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
u16 asic_ctrl; /* 0x02 */
u16 sub_vendor_id; /* 0x04 */
u16 sub_system_id; /* 0x06 */
- u16 reserved1[12]; /* 0x08-0x1f */
+ u16 pci_base_1; /* 0x08 (IP1000A only) */
+ u16 pci_base_2; /* 0x0a (IP1000A only) */
+ u16 led_mode; /* 0x0c (IP1000A only) */
+ u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
u16 advertising; /* NWay media advertisement */
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
+ u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
};
/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
class_mask of the class are honored during the comparison.
driver_data Data private to the driver.
*/
+#define CHIP_IP1000A 1
static const struct pci_device_id rio_pci_tbl[] = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
+ { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
+ { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
+ { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
{ }
};
MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
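
The new table entries carry CHIP_IP1000A in their driver_data, which is how rio_probe1() above can key the PHY fixup and LED handling on np->chip_id. The PCI core hands the matched id to probe, so the value is read like this (sketch with an illustrative probe name):

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		unsigned long chip_id = ent->driver_data;	/* 0 or CHIP_IP1000A */

		if (chip_id == CHIP_IP1000A)
			dev_info(&pdev->dev, "IP1000A variant detected\n");
		return 0;
	}
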
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e425853..734f655c99c1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
- struct be_rx_obj *rxo;
- int status = 0, i, j;
- u8 rsstable[128];
+ int status;
u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
if (rss_flags == adapter->rss_info.rss_flags)
- return status;
-
- if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rss_qs) {
- for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
- break;
- rsstable[j + i] = rxo->rss_id;
- }
- }
- }
+ return 0;
status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
- rss_flags, 128, adapter->rss_info.rss_hkey);
+ rss_flags, RSS_INDIR_TABLE_LEN,
+ adapter->rss_info.rss_hkey);
if (!status)
adapter->rss_info.rss_flags = rss_flags;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a977f8da..b6ad02909d6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
- 128, rss_key);
+ RSS_INDIR_TABLE_LEN, rss_key);
if (rc) {
rss->rss_flags = RSS_ENABLE_NONE;
return rc;
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# IC Plus device configuration
-#
-
-config IP1000
- tristate "IP1000 Gigabit Ethernet support"
- depends on PCI
- select MII
- ---help---
- This driver supports IP1000 gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the IC Plus device drivers
-#
-
-obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
-/*
- * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
- *
- * Copyright (C) 2003, 2007 IC Plus Corp
- *
- * Original Author:
- *
- * Craig Rich
- * Sundance Technology, Inc.
- * www.sundanceti.com
- * craig_rich@sundanceti.com
- *
- * Current Maintainer:
- *
- * Sorbica Shieh.
- * http://www.icplus.com.tw
- * sorbica@icplus.com.tw
- *
- * Jesse Huang
- * http://www.icplus.com.tw
- * jesse@icplus.com.tw
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/crc32.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/mii.h>
-#include <linux/mutex.h>
-
-#include <asm/div64.h>
-
-#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
-#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
-#define IPG_RESET_MASK \
- (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
- IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
- IPG_AC_AUTO_INIT)
-
-#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
-#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
-#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
-
-#define ipg_r32(reg) ioread32(ioaddr + (reg))
-#define ipg_r16(reg) ioread16(ioaddr + (reg))
-#define ipg_r8(reg) ioread8(ioaddr + (reg))
-
-enum {
- netdev_io_size = 128
-};
-
-#include "ipg.h"
-#define DRV_NAME "ipg"
-
-MODULE_AUTHOR("IC Plus Corp. 2003");
-MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
-MODULE_LICENSE("GPL");
-
-/*
- * Defaults
- */
-#define IPG_MAX_RXFRAME_SIZE 0x0600
-#define IPG_RXFRAG_SIZE 0x0600
-#define IPG_RXSUPPORT_SIZE 0x0600
-#define IPG_IS_JUMBO false
-
-/*
- * Variable record -- index by leading revision/length
- * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
- */
-static const unsigned short DefaultPhyParam[] = {
- /* 11/12/03 IP1000A v1-3 rev=0x40 */
- /*--------------------------------------------------------------------------
- (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
- 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
- 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
- --------------------------------------------------------------------------*/
- /* 12/17/03 IP1000A v1-4 rev=0x40 */
- (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
- 0x0000,
- 30, 0x005e, 9, 0x0700,
- /* 01/09/04 IP1000A v1-5 rev=0x41 */
- (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
- 0x0000,
- 30, 0x005e, 9, 0x0700,
- 0x0000
-};
-
-static const char * const ipg_brand_name[] = {
- "IC PLUS IP1000 1000/100/10 based NIC",
- "Sundance Technology ST2021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
- "D-Link NIC IP1000A"
-};
-
-static const struct pci_device_id ipg_pci_tbl[] = {
- { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
- { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(DLINK, 0x9021), 2 },
- { PCI_VDEVICE(DLINK, 0x4020), 3 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
-
-static inline void __iomem *ipg_ioaddr(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- return sp->ioaddr;
-}
-
-#ifdef IPG_DEBUG
-static void ipg_dump_rfdlist(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
- u32 offset;
-
- IPG_DEBUG_MSG("_dump_rfdlist\n");
-
- netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
- netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
- netdev_info(dev, "RFDList start address = %016lx\n",
- (unsigned long)sp->rxd_map);
- netdev_info(dev, "RFDListPtr register = %08x%08x\n",
- ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
-
- for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
- offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
- netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
- i, offset, (unsigned long)sp->rxd[i].next_desc);
- offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
- netdev_info(dev, "%02x %04x RFS = %016lx\n",
- i, offset, (unsigned long)sp->rxd[i].rfs);
- offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
- netdev_info(dev, "%02x %04x frag_info = %016lx\n",
- i, offset, (unsigned long)sp->rxd[i].frag_info);
- }
-}
-
-static void ipg_dump_tfdlist(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
- u32 offset;
-
- IPG_DEBUG_MSG("_dump_tfdlist\n");
-
- netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
- netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
- netdev_info(dev, "TFDList start address = %016lx\n",
- (unsigned long) sp->txd_map);
- netdev_info(dev, "TFDListPtr register = %08x%08x\n",
- ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
-
- for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
- offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
- netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
- i, offset, (unsigned long)sp->txd[i].next_desc);
-
- offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
- netdev_info(dev, "%02x %04x TFC = %016lx\n",
- i, offset, (unsigned long) sp->txd[i].tfc);
- offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
- netdev_info(dev, "%02x %04x frag_info = %016lx\n",
- i, offset, (unsigned long) sp->txd[i].frag_info);
- }
-}
-#endif
-
-static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
-{
- ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
- ndelay(IPG_PC_PHYCTRLWAIT_NS);
-}
-
-static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
-{
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
-}
-
-static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
- phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
-
- ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
-}
-
-static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
- ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
- phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
-}
-
-static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
- u16 bit_data;
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
-
- bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
-
- return bit_data;
-}
-
-/*
- * Read a register from the Physical Layer device located
- * on the IPG NIC, using the IPG PHYCTRL register.
- */
-static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
-{
- void __iomem *ioaddr = ipg_ioaddr(dev);
- /*
- * The GMII mangement frame structure for a read is as follows:
- *
- * |Preamble|st|op|phyad|regad|ta| data |idle|
- * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
- *
- * <32 1s> = 32 consecutive logic 1 values
- * A = bit of Physical Layer device address (MSB first)
- * R = bit of register address (MSB first)
- * z = High impedance state
- * D = bit of read data (MSB first)
- *
- * Transmission order is 'Preamble' field first, bits transmitted
- * left to right (first to last).
- */
- struct {
- u32 field;
- unsigned int len;
- } p[] = {
- { GMII_PREAMBLE, 32 }, /* Preamble */
- { GMII_ST, 2 }, /* ST */
- { GMII_READ, 2 }, /* OP */
- { phy_id, 5 }, /* PHYAD */
- { phy_reg, 5 }, /* REGAD */
- { 0x0000, 2 }, /* TA */
- { 0x0000, 16 }, /* DATA */
- { 0x0000, 1 } /* IDLE */
- };
- unsigned int i, j;
- u8 polarity, data;
-
- polarity = ipg_r8(PHY_CTRL);
- polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
-
- /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
- for (j = 0; j < 5; j++) {
- for (i = 0; i < p[j].len; i++) {
- /* For each variable length field, the MSB must be
- * transmitted first. Rotate through the field bits,
- * starting with the MSB, and move each bit into the
- * the 1st (2^1) bit position (this is the bit position
- * corresponding to the MgmtData bit of the PhyCtrl
- * register for the IPG).
- *
- * Example: ST = 01;
- *
- * First write a '0' to bit 1 of the PhyCtrl
- * register, then write a '1' to bit 1 of the
- * PhyCtrl register.
- *
- * To do this, right shift the MSB of ST by the value:
- * [field length - 1 - #ST bits already written]
- * then left shift this result by 1.
- */
- data = (p[j].field >> (p[j].len - 1 - i)) << 1;
- data &= IPG_PC_MGMTDATA;
- data |= polarity | IPG_PC_MGMTDIR;
-
- ipg_drive_phy_ctl_low_high(ioaddr, data);
- }
- }
-
- send_three_state(ioaddr, polarity);
-
- read_phy_bit(ioaddr, polarity);
-
- /*
- * For a read cycle, the bits for the next two fields (TA and
- * DATA) are driven by the PHY (the IPG reads these bits).
- */
- for (i = 0; i < p[6].len; i++) {
- p[6].field |=
- (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
- }
-
- send_three_state(ioaddr, polarity);
- send_three_state(ioaddr, polarity);
- send_three_state(ioaddr, polarity);
- send_end(ioaddr, polarity);
-
- /* Return the value of the DATA field. */
- return p[6].field;
-}
-
-/*
- * Write to a register from the Physical Layer device located
- * on the IPG NIC, using the IPG PHYCTRL register.
- */
-static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
-{
- void __iomem *ioaddr = ipg_ioaddr(dev);
- /*
- * The GMII management frame structure for a write is as follows:
- *
- * |Preamble|st|op|phyad|regad|ta| data |idle|
- * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z |
- *
- * <32 1s> = 32 consecutive logic 1 values
- * A = bit of Physical Layer device address (MSB first)
- * R = bit of register address (MSB first)
- * z = High impedance state
- * D = bit of write data (MSB first)
- *
- * Transmission order is 'Preamble' field first, bits transmitted
- * left to right (first to last).
- */
- struct {
- u32 field;
- unsigned int len;
- } p[] = {
- { GMII_PREAMBLE, 32 }, /* Preamble */
- { GMII_ST, 2 }, /* ST */
- { GMII_WRITE, 2 }, /* OP */
- { phy_id, 5 }, /* PHYAD */
- { phy_reg, 5 }, /* REGAD */
- { 0x0002, 2 }, /* TA */
- { val & 0xffff, 16 }, /* DATA */
- { 0x0000, 1 } /* IDLE */
- };
- unsigned int i, j;
- u8 polarity, data;
-
- polarity = ipg_r8(PHY_CTRL);
- polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
-
- /* Create the Preamble, ST, OP, PHYAD, REGAD, TA, and DATA fields. */
- for (j = 0; j < 7; j++) {
- for (i = 0; i < p[j].len; i++) {
- /* For each variable length field, the MSB must be
- * transmitted first. Rotate through the field bits,
- * starting with the MSB, and move each bit into
- * the 1st (2^1) bit position (this is the bit position
- * corresponding to the MgmtData bit of the PhyCtrl
- * register for the IPG).
- *
- * Example: ST = 01;
- *
- * First write a '0' to bit 1 of the PhyCtrl
- * register, then write a '1' to bit 1 of the
- * PhyCtrl register.
- *
- * To do this, right shift the MSB of ST by the value:
- * [field length - 1 - #ST bits already written]
- * then left shift this result by 1.
- */
- data = (p[j].field >> (p[j].len - 1 - i)) << 1;
- data &= IPG_PC_MGMTDATA;
- data |= polarity | IPG_PC_MGMTDIR;
-
- ipg_drive_phy_ctl_low_high(ioaddr, data);
- }
- }
-
- /* The last cycle is a tri-state, so read from the PHY. */
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
- ipg_r8(PHY_CTRL);
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
-}
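-
-/* Illustrative sketch, not part of the original driver: mdio_read()
- * and mdio_write() combine in the usual way to drive standard MII
- * registers through the bit-banged interface. Restarting
- * auto-negotiation, for instance, might look like this (MII_BMCR,
- * BMCR_ANENABLE and BMCR_ANRESTART are the generic definitions from
- * <linux/mii.h>).
- */
-static void __maybe_unused ipg_mdio_restart_aneg(struct net_device *dev, int phy_id)
-{
- int bmcr = mdio_read(dev, phy_id, MII_BMCR);
-
- bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
- mdio_write(dev, phy_id, MII_BMCR, bmcr);
-}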
-
-static void ipg_set_led_mode(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- u32 mode;
-
- mode = ipg_r32(ASIC_CTRL);
- mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
-
- if ((sp->led_mode & 0x03) > 1)
- mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
-
- if ((sp->led_mode & 0x01) == 1)
- mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
-
- if ((sp->led_mode & 0x08) == 8)
- mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
-
- ipg_w32(mode, ASIC_CTRL);
-}
-
-static void ipg_set_phy_set(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- int physet;
-
- physet = ipg_r8(PHY_SET);
- physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
- physet |= ((sp->led_mode & 0x70) >> 4);
- ipg_w8(physet, PHY_SET);
-}
-
-static int ipg_reset(struct net_device *dev, u32 resetflags)
-{
- /* Assert functional resets via the IPG AsicCtrl
- * register as specified by the 'resetflags' input
- * parameter.
- */
- void __iomem *ioaddr = ipg_ioaddr(dev);
- unsigned int timeout_count = 0;
-
- IPG_DEBUG_MSG("_reset\n");
-
- ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
-
- /* Delay added to account for problem with 10Mbps reset. */
- mdelay(IPG_AC_RESETWAIT);
-
- while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
- mdelay(IPG_AC_RESETWAIT);
- if (++timeout_count > IPG_AC_RESET_TIMEOUT)
- return -ETIME;
- }
- /* Set LED Mode in Asic Control */
- ipg_set_led_mode(dev);
-
- /* Set PHYSet Register Value */
- ipg_set_phy_set(dev);
- return 0;
-}
-
-/* Find the GMII PHY address. */
-static int ipg_find_phyaddr(struct net_device *dev)
-{
- unsigned int phyaddr, i;
-
- for (i = 0; i < 32; i++) {
- u32 status;
-
- /* Search for the correct PHY address among 32 possible. */
- phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
-
- /* Probe the Basic Mode Status Register: a PHY is present at
- * this address if it responds with a value that is neither
- * all zeroes nor all ones.
- */
-
- status = mdio_read(dev, phyaddr, MII_BMSR);
-
- if ((status != 0xFFFF) && (status != 0))
- return phyaddr;
- }
-
- return 0x1f;
-}
-
-/*
- * Configure IPG based on result of IEEE 802.3 PHY
- * auto-negotiation.
- */
-static int ipg_config_autoneg(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int txflowcontrol;
- unsigned int rxflowcontrol;
- unsigned int fullduplex;
- u32 mac_ctrl_val;
- u32 asicctrl;
- u8 phyctrl;
- const char *speed;
- const char *duplex;
- const char *tx_desc;
- const char *rx_desc;
-
- IPG_DEBUG_MSG("_config_autoneg\n");
-
- asicctrl = ipg_r32(ASIC_CTRL);
- phyctrl = ipg_r8(PHY_CTRL);
- mac_ctrl_val = ipg_r32(MAC_CTRL);
-
- /* Set flags for use in resolving auto-negotiation, assuming
- * non-1000Mbps, half duplex, no flow control.
- */
- fullduplex = 0;
- txflowcontrol = 0;
- rxflowcontrol = 0;
-
- /* To accommodate a problem in 10Mbps operation,
- * set a global flag if the PHY is running in 10Mbps mode.
- */
- sp->tenmbpsmode = 0;
-
- /* Determine actual speed of operation. */
- switch (phyctrl & IPG_PC_LINK_SPEED) {
- case IPG_PC_LINK_SPEED_10MBPS:
- speed = "10Mbps";
- sp->tenmbpsmode = 1;
- break;
- case IPG_PC_LINK_SPEED_100MBPS:
- speed = "100Mbps";
- break;
- case IPG_PC_LINK_SPEED_1000MBPS:
- speed = "1000Mbps";
- break;
- default:
- speed = "undefined!";
- return 0;
- }
-
- netdev_info(dev, "Link speed = %s\n", speed);
- if (sp->tenmbpsmode == 1)
- netdev_info(dev, "10Mbps operational mode enabled\n");
-
- if (phyctrl & IPG_PC_DUPLEX_STATUS) {
- fullduplex = 1;
- txflowcontrol = 1;
- rxflowcontrol = 1;
- }
-
- /* Configure full duplex, and flow control. */
- if (fullduplex == 1) {
-
- /* Configure IPG for full duplex operation. */
-
- duplex = "full";
-
- mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
-
- if (txflowcontrol == 1) {
- tx_desc = "";
- mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
- } else {
- tx_desc = "no ";
- mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
- }
-
- if (rxflowcontrol == 1) {
- rx_desc = "";
- mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
- } else {
- rx_desc = "no ";
- mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
- }
- } else {
- duplex = "half";
- tx_desc = "no ";
- rx_desc = "no ";
- mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
- ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
- ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
- }
-
- netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
- duplex, tx_desc, rx_desc);
- ipg_w32(mac_ctrl_val, MAC_CTRL);
-
- return 0;
-}
-
-/* Determine and configure multicast operation and set
- * receive mode for IPG.
- */
-static void ipg_nic_set_multicast_list(struct net_device *dev)
-{
- void __iomem *ioaddr = ipg_ioaddr(dev);
- struct netdev_hw_addr *ha;
- unsigned int hashindex;
- u32 hashtable[2];
- u8 receivemode;
-
- IPG_DEBUG_MSG("_nic_set_multicast_list\n");
-
- receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
-
- if (dev->flags & IFF_PROMISC) {
- /* NIC to be configured in promiscuous mode. */
- receivemode = IPG_RM_RECEIVEALLFRAMES;
- } else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->flags & IFF_MULTICAST) &&
- (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
- /* NIC to be configured to receive all multicast
- * frames. */
- receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
- /* NIC to be configured to receive selected
- * multicast addresses. */
- receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
- }
-
- /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
- * The IPG applies a cyclic-redundancy-check (the same CRC
- * used to calculate the frame data FCS) to the destination
- * address of all incoming frames whose destination
- * address has the multicast bit set. The least significant
- * 6 bits of the CRC result are used as an addressing index
- * into the hash table. If the value of the bit addressed by
- * this index is a 1, the frame is passed to the host system.
- */
-
- /* Clear hashtable. */
- hashtable[0] = 0x00000000;
- hashtable[1] = 0x00000000;
-
- /* Cycle through all multicast addresses to filter. */
- netdev_for_each_mc_addr(ha, dev) {
- /* Calculate CRC result for each multicast address. */
- hashindex = crc32_le(0xffffffff, ha->addr,
- ETH_ALEN);
-
- /* Use only the least significant 6 bits. */
- hashindex = hashindex & 0x3F;
-
- /* Within "hashtable", set bit number "hashindex"
- * to a logic 1.
- */
- set_bit(hashindex, (void *)hashtable);
- }
-
- /* Write the value of the hash table to the HASHTABLE
- * registers as two 32-bit accesses.
- */
- ipg_w32(hashtable[0], HASHTABLE_0);
- ipg_w32(hashtable[1], HASHTABLE_1);
-
- ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
-
- IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
-}
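-
-/* Illustrative sketch, not part of the original driver: the hash-bit
- * computation performed in the loop above, written out as a standalone
- * helper (assuming little-endian bit numbering across the two words,
- * which is what the set_bit() call above yields on little-endian
- * machines). crc32_le() is from <linux/crc32.h>.
- */
-static void __maybe_unused ipg_hash_addr(u32 hashtable[2], const u8 *addr)
-{
- /* Same CRC as the frame FCS; keep only the low 6 bits. */
- unsigned int index = crc32_le(0xffffffff, addr, ETH_ALEN) & 0x3f;
-
- /* Set bit 'index' of the 64-bit table, split across two words. */
- hashtable[index >> 5] |= 1u << (index & 0x1f);
-}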
-
-static int ipg_io_config(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = ipg_ioaddr(dev);
- u32 origmacctrl;
- u32 restoremacctrl;
-
- IPG_DEBUG_MSG("_io_config\n");
-
- origmacctrl = ipg_r32(MAC_CTRL);
-
- restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
-
- /* Based on compilation option, determine if FCS is to be
- * stripped on receive frames by IPG.
- */
- if (!IPG_STRIP_FCS_ON_RX)
- restoremacctrl |= IPG_MC_RCV_FCS;
-
- /* Determine if transmitter and/or receiver are
- * enabled so we may restore MACCTRL correctly.
- */
- if (origmacctrl & IPG_MC_TX_ENABLED)
- restoremacctrl |= IPG_MC_TX_ENABLE;
-
- if (origmacctrl & IPG_MC_RX_ENABLED)
- restoremacctrl |= IPG_MC_RX_ENABLE;
-
- /* Transmitter and receiver must be disabled before setting
- * IFSSelect.
- */
- ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
- IPG_MC_RSVD_MASK, MAC_CTRL);
-
- /* Now that transmitter and receiver are disabled, write
- * to IFSSelect.
- */
- ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
-
- /* Set RECEIVEMODE register. */
- ipg_nic_set_multicast_list(dev);
-
- ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
-
- ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
- ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
- ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
- ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
- ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
- ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
- ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
- IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
- IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
- IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
- ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
- ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
-
- /* IPG multi-frag frame bug workaround.
- * Per silicon revision B3 errata.
- */
- ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
-
- /* IPG TX poll now bug workaround.
- * Per silicon revision B3 errata.
- */
- ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
-
- /* IPG RX poll now bug workaround.
- * Per silicon revision B3 errata.
- */
- ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
-
- /* Now restore MACCTRL to original setting. */
- ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
-
- /* Disable unused RMON statistics. */
- ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
-
- /* Disable unused MIB statistics. */
- ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
- IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
- IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
- IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
- IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
- IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
-
- return 0;
-}
-
-/*
- * Create a receive buffer within system memory and update
- * NIC private structure appropriately.
- */
-static int ipg_get_rxbuff(struct net_device *dev, int entry)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- struct ipg_rx *rxfd = sp->rxd + entry;
- struct sk_buff *skb;
- u64 rxfragsize;
-
- IPG_DEBUG_MSG("_get_rxbuff\n");
-
- skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
- if (!skb) {
- sp->rx_buff[entry] = NULL;
- return -ENOMEM;
- }
-
- /* Save the address of the sk_buff structure. */
- sp->rx_buff[entry] = skb;
-
- rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
-
- /* Set the RFD fragment length. */
- rxfragsize = sp->rxfrag_size;
- rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
-
- return 0;
-}
-
-static int init_rfdlist(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
-
- IPG_DEBUG_MSG("_init_rfdlist\n");
-
- for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
- struct ipg_rx *rxfd = sp->rxd + i;
-
- if (sp->rx_buff[i]) {
- pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_irq(sp->rx_buff[i]);
- sp->rx_buff[i] = NULL;
- }
-
- /* Clear out the RFS field. */
- rxfd->rfs = 0x0000000000000000;
-
- if (ipg_get_rxbuff(dev, i) < 0) {
- /*
- * A receive buffer was not ready, break the
- * RFD list here.
- */
- IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
-
- /* Just in case we cannot allocate a single RFD.
- * Should not occur.
- */
- if (i == 0) {
- netdev_err(dev, "No memory available for RFD list\n");
- return -ENOMEM;
- }
- }
-
- rxfd->next_desc = cpu_to_le64(sp->rxd_map +
- sizeof(struct ipg_rx)*(i + 1));
- }
- sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
-
- sp->rx_current = 0;
- sp->rx_dirty = 0;
-
- /* Write the location of the RFDList to the IPG. */
- ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
- ipg_w32(0x00000000, RFD_LIST_PTR_1);
-
- return 0;
-}
-
-static void init_tfdlist(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
-
- IPG_DEBUG_MSG("_init_tfdlist\n");
-
- for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
- struct ipg_tx *txfd = sp->txd + i;
-
- txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
-
- if (sp->tx_buff[i]) {
- dev_kfree_skb_irq(sp->tx_buff[i]);
- sp->tx_buff[i] = NULL;
- }
-
- txfd->next_desc = cpu_to_le64(sp->txd_map +
- sizeof(struct ipg_tx)*(i + 1));
- }
- sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
-
- sp->tx_current = 0;
- sp->tx_dirty = 0;
-
- /* Write the location of the TFDList to the IPG. */
- IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
- (u32) sp->txd_map);
- ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
- ipg_w32(0x00000000, TFD_LIST_PTR_1);
-
- sp->reset_current_tfd = 1;
-}
-
-/*
- * Free all transmit buffers which have already been transferred
- * via DMA to the IPG.
- */
-static void ipg_nic_txfree(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- unsigned int released, pending, dirty;
-
- IPG_DEBUG_MSG("_nic_txfree\n");
-
- pending = sp->tx_current - sp->tx_dirty;
- dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
-
- for (released = 0; released < pending; released++) {
- struct sk_buff *skb = sp->tx_buff[dirty];
- struct ipg_tx *txfd = sp->txd + dirty;
-
- IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
-
- /* Look at each TFD's TFC field beginning
- * at the last freed TFD up to the current TFD.
- * If the TFDDone bit is set, free the associated
- * buffer.
- */
- if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
- break;
-
- /* Free the transmit buffer. */
- if (skb) {
- pci_unmap_single(sp->pdev,
- le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
- skb->len, PCI_DMA_TODEVICE);
-
- dev_kfree_skb_irq(skb);
-
- sp->tx_buff[dirty] = NULL;
- }
- dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
- }
-
- sp->tx_dirty += released;
-
- if (netif_queue_stopped(dev) &&
- (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
- netif_wake_queue(dev);
- }
-}
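-
-/* A note on ring accounting: tx_current and tx_dirty (and their rx
- * counterparts) are free-running counters; users reduce them modulo
- * the ring length when indexing descriptors, so tx_current - tx_dirty
- * is the number of in-flight descriptors and the queue is stopped
- * once that difference reaches IPG_TFDLIST_LENGTH.
- */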
-
-static void ipg_tx_timeout(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
-
- ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
- IPG_AC_FIFO);
-
- spin_lock_irq(&sp->lock);
-
- /* Re-configure after DMA reset. */
- if (ipg_io_config(dev) < 0)
- netdev_info(dev, "Error during re-configuration\n");
-
- init_tfdlist(dev);
-
- spin_unlock_irq(&sp->lock);
-
- ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
- MAC_CTRL);
-}
-
-/*
- * For TxComplete interrupts, free all transmit
- * buffers which have already been transferred via DMA
- * to the IPG.
- */
-static void ipg_nic_txcleanup(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
-
- IPG_DEBUG_MSG("_nic_txcleanup\n");
-
- for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
- /* Reading the TXSTATUS register clears the
- * TX_COMPLETE interrupt.
- */
- u32 txstatusdword = ipg_r32(TX_STATUS);
-
- IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
-
- /* Check for Transmit errors. Error bits only valid if
- * TX_COMPLETE bit in the TXSTATUS register is a 1.
- */
- if (!(txstatusdword & IPG_TS_TX_COMPLETE))
- break;
-
- /* If in 10Mbps mode, indicate transmit is ready. */
- if (sp->tenmbpsmode) {
- netif_wake_queue(dev);
- }
-
- /* Transmit error, increment stat counters. */
- if (txstatusdword & IPG_TS_TX_ERROR) {
- IPG_DEBUG_MSG("Transmit error\n");
- sp->stats.tx_errors++;
- }
-
- /* Late collision, re-enable transmitter. */
- if (txstatusdword & IPG_TS_LATE_COLLISION) {
- IPG_DEBUG_MSG("Late collision on transmit\n");
- ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
- IPG_MC_RSVD_MASK, MAC_CTRL);
- }
-
- /* Maximum collisions, re-enable transmitter. */
- if (txstatusdword & IPG_TS_TX_MAX_COLL) {
- IPG_DEBUG_MSG("Maximum collisions on transmit\n");
- ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
- IPG_MC_RSVD_MASK, MAC_CTRL);
- }
-
- /* Transmit underrun, reset and re-enable
- * transmitter.
- */
- if (txstatusdword & IPG_TS_TX_UNDERRUN) {
- IPG_DEBUG_MSG("Transmitter underrun\n");
- sp->stats.tx_fifo_errors++;
- ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
- IPG_AC_NETWORK | IPG_AC_FIFO);
-
- /* Re-configure after DMA reset. */
- if (ipg_io_config(dev) < 0) {
- netdev_info(dev, "Error during re-configuration\n");
- }
- init_tfdlist(dev);
-
- ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
- IPG_MC_RSVD_MASK, MAC_CTRL);
- }
- }
-
- ipg_nic_txfree(dev);
-}
-
-/* Provides statistical information about the IPG NIC. */
-static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- u16 temp1;
- u16 temp2;
-
- IPG_DEBUG_MSG("_nic_get_stats\n");
-
- /* Check to see if the NIC has been initialized via nic_open,
- * before trying to read statistic registers.
- */
- if (!netif_running(dev))
- return &sp->stats;
-
- sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
- sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
- sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
- sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
- temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
- sp->stats.rx_errors += temp1;
- sp->stats.rx_missed_errors += temp1;
- temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
- ipg_r32(IPG_LATECOLLISIONS);
- temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
- sp->stats.collisions += temp1;
- sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
- sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
- ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
- sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
-
- /* detailed tx_errors */
- sp->stats.tx_carrier_errors += temp2;
-
- /* detailed rx_errors */
- sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
- ipg_r16(IPG_FRAMETOOLONGERRORS);
- sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
-
- /* Unutilized IPG statistic registers. */
- ipg_r32(IPG_MCSTFRAMESRCVDOK);
-
- return &sp->stats;
-}
-
-/* Restore used receive buffers. */
-static int ipg_nic_rxrestore(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- const unsigned int curr = sp->rx_current;
- unsigned int dirty = sp->rx_dirty;
-
- IPG_DEBUG_MSG("_nic_rxrestore\n");
-
- for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
- unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
-
- /* rx_copybreak may poke hole here and there. */
- if (sp->rx_buff[entry])
- continue;
-
- /* Generate a new receive buffer to replace the
- * current buffer (which will be released by the
- * Linux system).
- */
- if (ipg_get_rxbuff(dev, entry) < 0) {
- IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
-
- break;
- }
-
- /* Reset the RFS field. */
- sp->rxd[entry].rfs = 0x0000000000000000;
- }
- sp->rx_dirty = dirty;
-
- return 0;
-}
-
-/* Use jumboindex and jumbosize to track jumbo frame status.
- * The initial state is jumboindex = -1 and jumbosize = 0.
- * 1. jumboindex = -1, jumbosize = 0: the previous jumbo frame completed.
- * 2. jumboindex != -1, jumbosize != 0: a jumbo frame is being received
- *    and has not exceeded the supported size.
- * 3. jumboindex = -1, jumbosize != 0: the jumbo frame exceeded the
- *    supported size; what was received so far has been dropped and the
- *    remainder of the frame is still being discarded.
- */
-enum {
- NORMAL_PACKET,
- ERROR_PACKET
-};
-
-enum {
- FRAME_NO_START_NO_END = 0,
- FRAME_WITH_START = 1,
- FRAME_WITH_END = 10,
- FRAME_WITH_START_WITH_END = 11
-};
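-
-/* These values are chosen so the two flag tests in
- * ipg_nic_rx_check_frame_type() can simply be summed: a fragment
- * carrying only FRAMESTART scores 1, only FRAMEEND scores 10, and
- * both scores 11 (FRAME_WITH_START_WITH_END).
- */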
-
-static void ipg_nic_rx_free_skb(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
-
- if (sp->rx_buff[entry]) {
- struct ipg_rx *rxfd = sp->rxd + entry;
-
- pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_irq(sp->rx_buff[entry]);
- sp->rx_buff[entry] = NULL;
- }
-}
-
-static int ipg_nic_rx_check_frame_type(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
- int type = FRAME_NO_START_NO_END;
-
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
- type += FRAME_WITH_START;
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
- type += FRAME_WITH_END;
- return type;
-}
-
-static int ipg_nic_rx_check_error(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
- struct ipg_rx *rxfd = sp->rxd + entry;
-
- if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
- (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
- IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
- IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
- IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
- (unsigned long) rxfd->rfs);
-
- /* Increment general receive error statistic. */
- sp->stats.rx_errors++;
-
- /* Increment detailed receive error statistics. */
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
-
- sp->stats.rx_fifo_errors++;
- }
-
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occurred\n");
- sp->stats.rx_length_errors++;
- }
-
- /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
- * error count handled by an IPG statistic register.
- */
-
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occurred\n");
- sp->stats.rx_frame_errors++;
- }
-
- /* Do nothing for IPG_RFS_RXFCSERROR, error count
- * handled by an IPG statistic register.
- */
-
- /* Free the memory associated with the RX
- * buffer since it is erroneous and we will
- * not pass it to higher layer processes.
- */
- if (sp->rx_buff[entry]) {
- pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_irq(sp->rx_buff[entry]);
- sp->rx_buff[entry] = NULL;
- }
- return ERROR_PACKET;
- }
- return NORMAL_PACKET;
-}
-
-static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
- struct ipg_nic_private *sp,
- struct ipg_rx *rxfd, unsigned entry)
-{
- struct ipg_jumbo *jumbo = &sp->jumbo;
- struct sk_buff *skb;
- int framelen;
-
- if (jumbo->found_start) {
- dev_kfree_skb_irq(jumbo->skb);
- jumbo->found_start = 0;
- jumbo->current_size = 0;
- jumbo->skb = NULL;
- }
-
- /* Drop the fragment if ipg_nic_rx_check_error() found an error. */
- if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
- return;
-
- skb = sp->rx_buff[entry];
- if (!skb)
- return;
-
- /* accept this frame and send to upper layer */
- framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
- if (framelen > sp->rxfrag_size)
- framelen = sp->rxfrag_size;
-
- skb_put(skb, framelen);
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
- netif_rx(skb);
- sp->rx_buff[entry] = NULL;
-}
-
-static void ipg_nic_rx_with_start(struct net_device *dev,
- struct ipg_nic_private *sp,
- struct ipg_rx *rxfd, unsigned entry)
-{
- struct ipg_jumbo *jumbo = &sp->jumbo;
- struct pci_dev *pdev = sp->pdev;
- struct sk_buff *skb;
-
- /* Drop the fragment if ipg_nic_rx_check_error() found an error. */
- if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
- return;
-
- /* accept this frame and send to upper layer */
- skb = sp->rx_buff[entry];
- if (!skb)
- return;
-
- if (jumbo->found_start)
- dev_kfree_skb_irq(jumbo->skb);
-
- pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
- skb_put(skb, sp->rxfrag_size);
-
- jumbo->found_start = 1;
- jumbo->current_size = sp->rxfrag_size;
- jumbo->skb = skb;
-
- sp->rx_buff[entry] = NULL;
-}
-
-static void ipg_nic_rx_with_end(struct net_device *dev,
- struct ipg_nic_private *sp,
- struct ipg_rx *rxfd, unsigned entry)
-{
- struct ipg_jumbo *jumbo = &sp->jumbo;
-
- /* Process the fragment only if ipg_nic_rx_check_error() found no error. */
- if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
- struct sk_buff *skb = sp->rx_buff[entry];
-
- if (!skb)
- return;
-
- if (jumbo->found_start) {
- int framelen, endframelen;
-
- framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
-
- endframelen = framelen - jumbo->current_size;
- if (framelen > sp->rxsupport_size)
- dev_kfree_skb_irq(jumbo->skb);
- else {
- memcpy(skb_put(jumbo->skb, endframelen),
- skb->data, endframelen);
-
- jumbo->skb->protocol =
- eth_type_trans(jumbo->skb, dev);
-
- skb_checksum_none_assert(jumbo->skb);
- netif_rx(jumbo->skb);
- }
- }
-
- jumbo->found_start = 0;
- jumbo->current_size = 0;
- jumbo->skb = NULL;
-
- ipg_nic_rx_free_skb(dev);
- } else {
- dev_kfree_skb_irq(jumbo->skb);
- jumbo->found_start = 0;
- jumbo->current_size = 0;
- jumbo->skb = NULL;
- }
-}
-
-static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
- struct ipg_nic_private *sp,
- struct ipg_rx *rxfd, unsigned entry)
-{
- struct ipg_jumbo *jumbo = &sp->jumbo;
-
- /* Process the fragment only if ipg_nic_rx_check_error() found no error. */
- if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
- struct sk_buff *skb = sp->rx_buff[entry];
-
- if (skb) {
- if (jumbo->found_start) {
- jumbo->current_size += sp->rxfrag_size;
- if (jumbo->current_size <= sp->rxsupport_size) {
- memcpy(skb_put(jumbo->skb,
- sp->rxfrag_size),
- skb->data, sp->rxfrag_size);
- }
- }
- ipg_nic_rx_free_skb(dev);
- }
- } else {
- dev_kfree_skb_irq(jumbo->skb);
- jumbo->found_start = 0;
- jumbo->current_size = 0;
- jumbo->skb = NULL;
- }
-}
-
-static int ipg_nic_rx_jumbo(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- unsigned int curr = sp->rx_current;
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
-
- IPG_DEBUG_MSG("_nic_rx\n");
-
- for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
- unsigned int entry = curr % IPG_RFDLIST_LENGTH;
- struct ipg_rx *rxfd = sp->rxd + entry;
-
- if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
- break;
-
- switch (ipg_nic_rx_check_frame_type(dev)) {
- case FRAME_WITH_START_WITH_END:
- ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
- break;
- case FRAME_WITH_START:
- ipg_nic_rx_with_start(dev, sp, rxfd, entry);
- break;
- case FRAME_WITH_END:
- ipg_nic_rx_with_end(dev, sp, rxfd, entry);
- break;
- case FRAME_NO_START_NO_END:
- ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
- break;
- }
- }
-
- sp->rx_current = curr;
-
- if (i == IPG_MAXRFDPROCESS_COUNT) {
- /* There are more RFDs to process, however the
- * allocated amount of RFD processing time has
- * expired. Assert Interrupt Requested to make
- * sure we come back to process the remaining RFDs.
- */
- ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
- }
-
- ipg_nic_rxrestore(dev);
-
- return 0;
-}
-
-static int ipg_nic_rx(struct net_device *dev)
-{
- /* Transfer received Ethernet frames to higher network layers. */
- struct ipg_nic_private *sp = netdev_priv(dev);
- unsigned int curr = sp->rx_current;
- void __iomem *ioaddr = sp->ioaddr;
- struct ipg_rx *rxfd;
- unsigned int i;
-
- IPG_DEBUG_MSG("_nic_rx\n");
-
-#define __RFS_MASK \
- cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
-
- for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
- unsigned int entry = curr % IPG_RFDLIST_LENGTH;
- struct sk_buff *skb = sp->rx_buff[entry];
- unsigned int framelen;
-
- rxfd = sp->rxd + entry;
-
- if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
- break;
-
- /* Get received frame length. */
- framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
-
- /* Check for jumbo frame arrival with too small
- * RXFRAG_SIZE.
- */
- if (framelen > sp->rxfrag_size) {
- IPG_DEBUG_MSG
- ("RFS FrameLen > allocated fragment size\n");
-
- framelen = sp->rxfrag_size;
- }
-
- if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
- (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
- IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
- IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
-
- IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
- (unsigned long int) rxfd->rfs);
-
- /* Increment general receive error statistic. */
- sp->stats.rx_errors++;
-
- /* Increment detailed receive error statistics. */
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
- sp->stats.rx_fifo_errors++;
- }
-
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occurred\n");
- sp->stats.rx_length_errors++;
- }
-
- /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, the error
- * count is handled by an IPG statistic register.
- */
-
- if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occurred\n");
- sp->stats.rx_frame_errors++;
- }
-
- /* Do nothing for IPG_RFS_RXFCSERROR, the error count
- * is handled by an IPG statistic register.
- */
-
- /* Free the memory associated with the RX
- * buffer since it is erroneous and we will
- * not pass it to higher layer processes.
- */
- if (skb) {
- __le64 info = rxfd->frag_info;
-
- pci_unmap_single(sp->pdev,
- le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_irq(skb);
- }
- } else {
-
- /* Adjust the new buffer length to accommodate the size
- * of the received frame.
- */
- skb_put(skb, framelen);
-
- /* Set the buffer's protocol field to Ethernet. */
- skb->protocol = eth_type_trans(skb, dev);
-
- /* The IPG encountered an error with (or
- * there were no) IP/TCP/UDP checksums.
- * This may or may not indicate an invalid
- * IP/TCP/UDP frame was received. Let the
- * upper layer decide.
- */
- skb_checksum_none_assert(skb);
-
- /* Hand off frame for higher layer processing.
- * The function netif_rx() releases the sk_buff
- * when processing completes.
- */
- netif_rx(skb);
- }
-
- /* Assure RX buffer is not reused by IPG. */
- sp->rx_buff[entry] = NULL;
- }
-
- /*
- * If there are more RFDs to process and the allocated amount of RFD
- * processing time has expired, assert Interrupt Requested to make
- * sure we come back to process the remaining RFDs.
- */
- if (i == IPG_MAXRFDPROCESS_COUNT)
- ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
-
-#ifdef IPG_DEBUG
- /* Check if the RFD list contained no receive frame data. */
- if (!i)
- sp->EmptyRFDListCount++;
-#endif
- while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
- !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
- (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
- unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
-
- rxfd = sp->rxd + entry;
-
- IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
-
- /* An unexpected event, additional code needed to handle
- * properly. So for the time being, just disregard the
- * frame.
- */
-
- /* Free the memory associated with the RX
- * buffer since it is erroneous and we will
- * not pass it to higher layer processes.
- */
- if (sp->rx_buff[entry]) {
- pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_irq(sp->rx_buff[entry]);
- }
-
- /* Assure RX buffer is not reused by IPG. */
- sp->rx_buff[entry] = NULL;
- }
-
- sp->rx_current = curr;
-
- /* Check to see if there are a minimum number of used
- * RFDs before restoring any (should improve performance.)
- */
- if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
- ipg_nic_rxrestore(dev);
-
- return 0;
-}
-
-static void ipg_reset_after_host_error(struct work_struct *work)
-{
- struct ipg_nic_private *sp =
- container_of(work, struct ipg_nic_private, task.work);
- struct net_device *dev = sp->dev;
-
- /*
- * Acknowledge HostError interrupt by resetting
- * IPG DMA and HOST.
- */
- ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
-
- init_rfdlist(dev);
- init_tfdlist(dev);
-
- if (ipg_io_config(dev) < 0) {
- netdev_info(dev, "Cannot recover from PCI error\n");
- schedule_delayed_work(&sp->task, HZ);
- }
-}
-
-static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
-{
- struct net_device *dev = dev_inst;
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int handled = 0;
- u16 status;
-
- IPG_DEBUG_MSG("_interrupt_handler\n");
-
- if (sp->is_jumbo)
- ipg_nic_rxrestore(dev);
-
- spin_lock(&sp->lock);
-
- /* Get interrupt source information, and acknowledge
- * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
- * IntRequested, MacControlFrame, LinkEvent) interrupts
- * if issued. Also, all IPG interrupts are disabled by
- * reading IntStatusAck.
- */
- status = ipg_r16(INT_STATUS_ACK);
-
- IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
-
- /* Not our interrupt: shared IRQ or device removal in progress. */
- if (!(status & IPG_IS_RSVD_MASK))
- goto out_enable;
-
- handled = 1;
-
- if (unlikely(!netif_running(dev)))
- goto out_unlock;
-
- /* If RFDListEnd interrupt, restore all used RFDs. */
- if (status & IPG_IS_RFD_LIST_END) {
- IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
-
- /* The RFD list end indicates an RFD was encountered
- * with a 0 NextPtr, or with an RFDDone bit set to 1
- * (indicating the RFD is not ready for use by the
- * IPG.) Try to restore all RFDs.
- */
- ipg_nic_rxrestore(dev);
-
-#ifdef IPG_DEBUG
- /* Increment the RFDlistendCount counter. */
- sp->RFDlistendCount++;
-#endif
- }
-
- /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
- * IntRequested interrupt, process received frames. */
- if ((status & IPG_IS_RX_DMA_PRIORITY) ||
- (status & IPG_IS_RFD_LIST_END) ||
- (status & IPG_IS_RX_DMA_COMPLETE) ||
- (status & IPG_IS_INT_REQUESTED)) {
-#ifdef IPG_DEBUG
- /* Increment the RFD list checked counter if interrupted
- * only to check the RFD list. */
- if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
- IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
- (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
- IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
- IPG_IS_UPDATE_STATS)))
- sp->RFDListCheckedCount++;
-#endif
-
- if (sp->is_jumbo)
- ipg_nic_rx_jumbo(dev);
- else
- ipg_nic_rx(dev);
- }
-
- /* If TxDMAComplete interrupt, free used TFDs. */
- if (status & IPG_IS_TX_DMA_COMPLETE)
- ipg_nic_txfree(dev);
-
- /* TxComplete interrupts indicate one of numerous actions.
- * Determine what action to take based on TXSTATUS register.
- */
- if (status & IPG_IS_TX_COMPLETE)
- ipg_nic_txcleanup(dev);
-
- /* If UpdateStats interrupt, update Linux Ethernet statistics */
- if (status & IPG_IS_UPDATE_STATS)
- ipg_nic_get_stats(dev);
-
- /* If HostError interrupt, reset IPG. */
- if (status & IPG_IS_HOST_ERROR) {
- IPG_DDEBUG_MSG("HostError Interrupt\n");
-
- schedule_delayed_work(&sp->task, 0);
- }
-
- /* If LinkEvent interrupt, resolve autonegotiation. */
- if (status & IPG_IS_LINK_EVENT) {
- if (ipg_config_autoneg(dev) < 0)
- netdev_info(dev, "Auto-negotiation error\n");
- }
-
- /* If MACCtrlFrame interrupt, do nothing. */
- if (status & IPG_IS_MAC_CTRL_FRAME)
- IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
-
- /* If RxComplete interrupt, do nothing. */
- if (status & IPG_IS_RX_COMPLETE)
- IPG_DEBUG_MSG("RxComplete interrupt\n");
-
- /* If RxEarly interrupt, do nothing. */
- if (status & IPG_IS_RX_EARLY)
- IPG_DEBUG_MSG("RxEarly interrupt\n");
-
-out_enable:
- /* Re-enable IPG interrupts. */
- ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
- IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
- IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
-out_unlock:
- spin_unlock(&sp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void ipg_rx_clear(struct ipg_nic_private *sp)
-{
- unsigned int i;
-
- for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
- if (sp->rx_buff[i]) {
- struct ipg_rx *rxfd = sp->rxd + i;
-
- dev_kfree_skb_irq(sp->rx_buff[i]);
- sp->rx_buff[i] = NULL;
- pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
- sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- }
- }
-}
-
-static void ipg_tx_clear(struct ipg_nic_private *sp)
-{
- unsigned int i;
-
- for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
- if (sp->tx_buff[i]) {
- struct ipg_tx *txfd = sp->txd + i;
-
- pci_unmap_single(sp->pdev,
- le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
- sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
-
- dev_kfree_skb_irq(sp->tx_buff[i]);
-
- sp->tx_buff[i] = NULL;
- }
- }
-}
-
-static int ipg_nic_open(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- struct pci_dev *pdev = sp->pdev;
- int rc;
-
- IPG_DEBUG_MSG("_nic_open\n");
-
- sp->rx_buf_sz = sp->rxsupport_size;
-
- /* Check for interrupt line conflicts, and request interrupt
- * line for IPG.
- *
- * IMPORTANT: Disable IPG interrupts prior to registering
- * IRQ.
- */
- ipg_w16(0x0000, INT_ENABLE);
-
- /* Register the interrupt line to be used by the IPG within
- * the Linux system.
- */
- rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
- dev->name, dev);
- if (rc < 0) {
- netdev_info(dev, "Error when requesting interrupt\n");
- goto out;
- }
-
- dev->irq = pdev->irq;
-
- rc = -ENOMEM;
-
- sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
- &sp->rxd_map, GFP_KERNEL);
- if (!sp->rxd)
- goto err_free_irq_0;
-
- sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
- &sp->txd_map, GFP_KERNEL);
- if (!sp->txd)
- goto err_free_rx_1;
-
- rc = init_rfdlist(dev);
- if (rc < 0) {
- netdev_info(dev, "Error during configuration\n");
- goto err_free_tx_2;
- }
-
- init_tfdlist(dev);
-
- rc = ipg_io_config(dev);
- if (rc < 0) {
- netdev_info(dev, "Error during configuration\n");
- goto err_release_tfdlist_3;
- }
-
- /* Resolve autonegotiation. */
- if (ipg_config_autoneg(dev) < 0)
- netdev_info(dev, "Auto-negotiation error\n");
-
- /* initialize JUMBO Frame control variable */
- sp->jumbo.found_start = 0;
- sp->jumbo.current_size = 0;
- sp->jumbo.skb = NULL;
-
- /* Enable transmit and receive operation of the IPG. */
- ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
- IPG_MC_RSVD_MASK, MAC_CTRL);
-
- netif_start_queue(dev);
-out:
- return rc;
-
-err_release_tfdlist_3:
- ipg_tx_clear(sp);
- ipg_rx_clear(sp);
-err_free_tx_2:
- dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
-err_free_rx_1:
- dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
-err_free_irq_0:
- free_irq(pdev->irq, dev);
- goto out;
-}
-
-static int ipg_nic_stop(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- struct pci_dev *pdev = sp->pdev;
-
- IPG_DEBUG_MSG("_nic_stop\n");
-
- netif_stop_queue(dev);
-
- IPG_DUMPTFDLIST(dev);
-
- do {
- (void) ipg_r16(INT_STATUS_ACK);
-
- ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
-
- synchronize_irq(pdev->irq);
- } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
-
- ipg_rx_clear(sp);
-
- ipg_tx_clear(sp);
-
- pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
- pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
-
- free_irq(pdev->irq, dev);
-
- return 0;
-}
-
-static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
- unsigned long flags;
- struct ipg_tx *txfd;
-
- IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
-
- /* If in 10Mbps mode, stop the transmit queue so
- * no more transmit frames are accepted.
- */
- if (sp->tenmbpsmode)
- netif_stop_queue(dev);
-
- if (sp->reset_current_tfd) {
- sp->reset_current_tfd = 0;
- entry = 0;
- }
-
- txfd = sp->txd + entry;
-
- sp->tx_buff[entry] = skb;
-
- /* Clear all TFC fields, except TFDDONE. */
- txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
-
- /* Specify the TFC field within the TFD. */
- txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
- (IPG_TFC_FRAMEID & sp->tx_current) |
- (IPG_TFC_FRAGCOUNT & (1 << 24)));
- /*
- * 16--17 (WordAlign) <- 3 (disable),
- * 0--15 (FrameId) <- sp->tx_current,
- * 24--27 (FragCount) <- 1
- */
-
- /* Request TxComplete interrupts at an interval defined by
- * the constant IPG_FRAMESBETWEENTXCOMPLETES, and for every
- * frame when in 10Mbps mode to accommodate a problem with
- * 10Mbps processing.
- */
- if (sp->tenmbpsmode)
- txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
- txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
- /* Based on compilation option, determine if FCS is to be
- * appended to transmit frame by IPG.
- */
- if (!(IPG_APPEND_FCS_ON_TX))
- txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
-
- /* Based on compilation option, determine if IP, TCP and/or
- * UDP checksums are to be added to transmit frame by IPG.
- */
- if (IPG_ADD_IPCHECKSUM_ON_TX)
- txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
-
- if (IPG_ADD_TCPCHECKSUM_ON_TX)
- txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
-
- if (IPG_ADD_UDPCHECKSUM_ON_TX)
- txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
-
- /* Based on compilation option, determine if VLAN tag info is to be
- * inserted into transmit frame by IPG.
- */
- if (IPG_INSERT_MANUAL_VLAN_TAG) {
- txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
- ((u64) IPG_MANUAL_VLAN_VID << 32) |
- ((u64) IPG_MANUAL_VLAN_CFI << 44) |
- ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
- }
-
- /* The fragment start location within system memory is defined
- * by the sk_buff structure's data field. The DMA bus
- * address of this location is obtained via pci_map_single().
- */
- txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE));
-
- /* The length of the fragment within system memory is defined by
- * the sk_buff structure's len field.
- */
- txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
- ((u64) (skb->len & 0xffff) << 48));
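- /* Together these two assignments pack the fragment descriptor:
- * bits 0-39 carry the DMA bus address (hence the 40-bit DMA
- * mask attempted first in ipg_probe()) and bits 48-63 the
- * fragment length.
- */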
-
- /* Clear the TFDDone bit last to indicate the TFD is ready
- * for transfer to the IPG.
- */
- txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
-
- spin_lock_irqsave(&sp->lock, flags);
-
- sp->tx_current++;
-
- mmiowb();
-
- ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
-
- if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
- netif_stop_queue(dev);
-
- spin_unlock_irqrestore(&sp->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-static void ipg_set_phy_default_param(unsigned char rev,
- struct net_device *dev, int phy_address)
-{
- unsigned short length;
- unsigned char revision;
- const unsigned short *phy_param;
- unsigned short address, value;
-
- phy_param = &DefaultPhyParam[0];
- length = *phy_param & 0x00FF;
- revision = (unsigned char)((*phy_param) >> 8);
- phy_param++;
- while (length != 0) {
- if (rev == revision) {
- while (length > 1) {
- address = *phy_param;
- value = *(phy_param + 1);
- phy_param += 2;
- mdio_write(dev, phy_address, address, value);
- length -= 4;
- }
- break;
- } else {
- phy_param += length / 2;
- length = *phy_param & 0x00FF;
- revision = (unsigned char)((*phy_param) >> 8);
- phy_param++;
- }
- }
-}
-
-static int read_eeprom(struct net_device *dev, int eep_addr)
-{
- void __iomem *ioaddr = ipg_ioaddr(dev);
- unsigned int i;
- int ret = 0;
- u16 value;
-
- value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
- ipg_w16(value, EEPROM_CTRL);
-
- for (i = 0; i < 1000; i++) {
- u16 data;
-
- mdelay(10);
- data = ipg_r16(EEPROM_CTRL);
- if (!(data & IPG_EC_EEPROM_BUSY)) {
- ret = ipg_r16(EEPROM_DATA);
- break;
- }
- }
- return ret;
-}
-
-static void ipg_init_mii(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- struct mii_if_info *mii_if = &sp->mii_if;
- int phyaddr;
-
- mii_if->dev = dev;
- mii_if->mdio_read = mdio_read;
- mii_if->mdio_write = mdio_write;
- mii_if->phy_id_mask = 0x1f;
- mii_if->reg_num_mask = 0x1f;
-
- mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
-
- if (phyaddr != 0x1f) {
- u16 mii_phyctrl, mii_1000cr;
-
- mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
- mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
- GMII_PHY_1000BASETCONTROL_PreferMaster;
- mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
-
- mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
-
- /* Set default phyparam */
- ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
-
- /* Reset PHY */
- mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
- mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
-
- }
-}
-
-static int ipg_hw_init(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- void __iomem *ioaddr = sp->ioaddr;
- unsigned int i;
- int rc;
-
- /* Read the LED mode configuration from the EEPROM. */
- sp->led_mode = read_eeprom(dev, 6);
-
- /* Reset all functions within the IPG. Do not assert
- * RST_OUT, as it is not compatible with some PHYs.
- */
- rc = ipg_reset(dev, IPG_RESET_MASK);
- if (rc < 0)
- goto out;
-
- ipg_init_mii(dev);
-
- /* Read MAC Address from EEPROM */
- for (i = 0; i < 3; i++)
- sp->station_addr[i] = read_eeprom(dev, 16 + i);
-
- for (i = 0; i < 3; i++)
- ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
-
- /* Set station address in ethernet_device structure. */
- dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
- dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
- dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
- dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
- dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
- dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
-out:
- return rc;
-}
-
-static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- int rc;
-
- mutex_lock(&sp->mii_mutex);
- rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
- mutex_unlock(&sp->mii_mutex);
-
- return rc;
-}
-
-static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- int err;
-
- /* Function to accommodate changes to the Maximum Transfer Unit
- * (MTU) of the IPG NIC. The default handler cannot be used since
- * it does not allow an MTU greater than 1500 bytes.
- */
-
- IPG_DEBUG_MSG("_nic_change_mtu\n");
-
- /*
- * Check that the new MTU value is between 68, the minimum MTU
- * required by IPv4, and 10240 bytes, the largest MTU the IPG supports.
- */
- if (new_mtu < 68 || new_mtu > 10240)
- return -EINVAL;
-
- err = ipg_nic_stop(dev);
- if (err)
- return err;
-
- dev->mtu = new_mtu;
-
- sp->max_rxframe_size = new_mtu;
-
- sp->rxfrag_size = new_mtu;
- if (sp->rxfrag_size > 4088)
- sp->rxfrag_size = 4088;
-
- sp->rxsupport_size = sp->max_rxframe_size;
-
- if (new_mtu > 0x0600)
- sp->is_jumbo = true;
- else
- sp->is_jumbo = false;
-
- return ipg_nic_open(dev);
-}
-
-static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- int rc;
-
- mutex_lock(&sp->mii_mutex);
- rc = mii_ethtool_gset(&sp->mii_if, cmd);
- mutex_unlock(&sp->mii_mutex);
-
- return rc;
-}
-
-static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- int rc;
-
- mutex_lock(&sp->mii_mutex);
- rc = mii_ethtool_sset(&sp->mii_if, cmd);
- mutex_unlock(&sp->mii_mutex);
-
- return rc;
-}
-
-static int ipg_nway_reset(struct net_device *dev)
-{
- struct ipg_nic_private *sp = netdev_priv(dev);
- int rc;
-
- mutex_lock(&sp->mii_mutex);
- rc = mii_nway_restart(&sp->mii_if);
- mutex_unlock(&sp->mii_mutex);
-
- return rc;
-}
-
-static const struct ethtool_ops ipg_ethtool_ops = {
- .get_settings = ipg_get_settings,
- .set_settings = ipg_set_settings,
- .nway_reset = ipg_nway_reset,
-};
-
-static void ipg_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct ipg_nic_private *sp = netdev_priv(dev);
-
- IPG_DEBUG_MSG("_remove\n");
-
- /* Un-register Ethernet device. */
- unregister_netdev(dev);
-
- pci_iounmap(pdev, sp->ioaddr);
-
- pci_release_regions(pdev);
-
- free_netdev(dev);
- pci_disable_device(pdev);
-}
-
-static const struct net_device_ops ipg_netdev_ops = {
- .ndo_open = ipg_nic_open,
- .ndo_stop = ipg_nic_stop,
- .ndo_start_xmit = ipg_nic_hard_start_xmit,
- .ndo_get_stats = ipg_nic_get_stats,
- .ndo_set_rx_mode = ipg_nic_set_multicast_list,
- .ndo_do_ioctl = ipg_ioctl,
- .ndo_tx_timeout = ipg_tx_timeout,
- .ndo_change_mtu = ipg_nic_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- unsigned int i = id->driver_data;
- struct ipg_nic_private *sp;
- struct net_device *dev;
- void __iomem *ioaddr;
- int rc;
-
- rc = pci_enable_device(pdev);
- if (rc < 0)
- goto out;
-
- pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
-
- pci_set_master(pdev);
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (rc < 0) {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- pr_err("%s: DMA config failed\n", pci_name(pdev));
- goto err_disable_0;
- }
- }
-
- /*
- * Initialize net device.
- */
- dev = alloc_etherdev(sizeof(struct ipg_nic_private));
- if (!dev) {
- rc = -ENOMEM;
- goto err_disable_0;
- }
-
- sp = netdev_priv(dev);
- spin_lock_init(&sp->lock);
- mutex_init(&sp->mii_mutex);
-
- sp->is_jumbo = IPG_IS_JUMBO;
- sp->rxfrag_size = IPG_RXFRAG_SIZE;
- sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
- sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
-
- /* Declare IPG NIC functions for Ethernet device methods.
- */
- dev->netdev_ops = &ipg_netdev_ops;
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->ethtool_ops = &ipg_ethtool_ops;
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_free_dev_1;
-
- ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
- if (!ioaddr) {
- pr_err("%s: cannot map MMIO\n", pci_name(pdev));
- rc = -EIO;
- goto err_release_regions_2;
- }
-
- /* Save the pointer to the PCI device information. */
- sp->ioaddr = ioaddr;
- sp->pdev = pdev;
- sp->dev = dev;
-
- INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
-
- pci_set_drvdata(pdev, dev);
-
- rc = ipg_hw_init(dev);
- if (rc < 0)
- goto err_unmap_3;
-
- rc = register_netdev(dev);
- if (rc < 0)
- goto err_unmap_3;
-
- netdev_info(dev, "Ethernet device registered\n");
-out:
- return rc;
-
-err_unmap_3:
- pci_iounmap(pdev, ioaddr);
-err_release_regions_2:
- pci_release_regions(pdev);
-err_free_dev_1:
- free_netdev(dev);
-err_disable_0:
- pci_disable_device(pdev);
- goto out;
-}
-
-static struct pci_driver ipg_pci_driver = {
- .name = IPG_DRIVER_NAME,
- .id_table = ipg_pci_tbl,
- .probe = ipg_probe,
- .remove = ipg_remove,
-};
-
-module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de606281f97b..000000000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
-/*
- * Include file for Gigabit Ethernet device driver for Network
- * Interface Cards (NICs) utilizing the Tamarack Microelectronics
- * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
- * Controller.
- */
-#ifndef __LINUX_IPG_H
-#define __LINUX_IPG_H
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <asm/bitops.h>
-
-/*
- * Constants
- */
-
-/* GMII based PHY IDs */
-#define NS 0x2000
-#define MARVELL 0x0141
-#define ICPLUS_PHY 0x243
-
-/* NIC Physical Layer Device MII register fields. */
-#define MII_PHY_SELECTOR_IEEE8023 0x0001
-#define MII_PHY_TECHABILITYFIELD 0x1FE0
-
-/* The 1000BASE-T control register needs to be set to prefer master */
-#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
-
-/* NIC Physical Layer Device GMII constants. */
-#define GMII_PREAMBLE 0xFFFFFFFF
-#define GMII_ST 0x1
-#define GMII_READ 0x2
-#define GMII_WRITE 0x1
-#define GMII_TA_READ_MASK 0x1
-#define GMII_TA_WRITE 0x2
-
-/* I/O register offsets. */
-enum ipg_regs {
- DMA_CTRL = 0x00,
- RX_DMA_STATUS = 0x08, /* Unused + reserved */
- TFD_LIST_PTR_0 = 0x10,
- TFD_LIST_PTR_1 = 0x14,
- TX_DMA_BURST_THRESH = 0x18,
- TX_DMA_URGENT_THRESH = 0x19,
- TX_DMA_POLL_PERIOD = 0x1a,
- RFD_LIST_PTR_0 = 0x1c,
- RFD_LIST_PTR_1 = 0x20,
- RX_DMA_BURST_THRESH = 0x24,
- RX_DMA_URGENT_THRESH = 0x25,
- RX_DMA_POLL_PERIOD = 0x26,
- DEBUG_CTRL = 0x2c,
- ASIC_CTRL = 0x30,
- FIFO_CTRL = 0x38, /* Unused */
- FLOW_OFF_THRESH = 0x3c,
- FLOW_ON_THRESH = 0x3e,
- EEPROM_DATA = 0x48,
- EEPROM_CTRL = 0x4a,
- EXPROM_ADDR = 0x4c, /* Unused */
- EXPROM_DATA = 0x50, /* Unused */
- WAKE_EVENT = 0x51, /* Unused */
- COUNTDOWN = 0x54, /* Unused */
- INT_STATUS_ACK = 0x5a,
- INT_ENABLE = 0x5c,
- INT_STATUS = 0x5e, /* Unused */
- TX_STATUS = 0x60,
- MAC_CTRL = 0x6c,
- VLAN_TAG = 0x70, /* Unused */
- PHY_SET = 0x75,
- PHY_CTRL = 0x76,
- STATION_ADDRESS_0 = 0x78,
- STATION_ADDRESS_1 = 0x7a,
- STATION_ADDRESS_2 = 0x7c,
- MAX_FRAME_SIZE = 0x86,
- RECEIVE_MODE = 0x88,
- HASHTABLE_0 = 0x8c,
- HASHTABLE_1 = 0x90,
- RMON_STATISTICS_MASK = 0x98,
- STATISTICS_MASK = 0x9c,
- RX_JUMBO_FRAMES = 0xbc, /* Unused */
- TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
- IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
- UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
- TX_JUMBO_FRAMES = 0xf4 /* Unused */
-};
-
-/* Ethernet MIB statistic register offsets. */
-#define IPG_OCTETRCVOK 0xA8
-#define IPG_MCSTOCTETRCVDOK 0xAC
-#define IPG_BCSTOCTETRCVOK 0xB0
-#define IPG_FRAMESRCVDOK 0xB4
-#define IPG_MCSTFRAMESRCVDOK 0xB8
-#define IPG_BCSTFRAMESRCVDOK 0xBE
-#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRORS 0xC8
-#define IPG_INRANGELENGTHERRORS 0xCA
-#define IPG_FRAMECHECKSEQERRORS 0xCC
-#define IPG_FRAMESLOSTRXERRORS 0xCE
-#define IPG_OCTETXMTOK 0xD0
-#define IPG_MCSTOCTETXMTOK 0xD4
-#define IPG_BCSTOCTETXMTOK 0xD8
-#define IPG_FRAMESXMTDOK 0xDC
-#define IPG_MCSTFRAMESXMTDOK 0xE0
-#define IPG_FRAMESWDEFERREDXMT 0xE4
-#define IPG_LATECOLLISIONS 0xE8
-#define IPG_MULTICOLFRAMES 0xEC
-#define IPG_SINGLECOLFRAMES 0xF0
-#define IPG_BCSTFRAMESXMTDOK 0xF6
-#define IPG_CARRIERSENSEERRORS 0xF8
-#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
-#define IPG_FRAMESABORTXSCOLLS 0xFC
-#define IPG_FRAMESWEXDEFERRAL 0xFE
-
-/* RMON statistic register offsets. */
-#define IPG_ETHERSTATSCOLLISIONS 0x100
-#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
-#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
-#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
-#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
-#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
-#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
-#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
-#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
-#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
-#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
-#define IPG_ETHERSTATSFRAGMENTS 0x12C
-#define IPG_ETHERSTATSJABBERS 0x130
-#define IPG_ETHERSTATSOCTETS 0x134
-#define IPG_ETHERSTATSPKTS 0x138
-#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
-#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
-#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
-#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
-#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
-#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
-
-/* RMON statistic register equivalents. */
-#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
-#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
-#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
-#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
-#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
-#define IPG_ETHERSTATSDROPEVENTS 0xCE
-
-/* Serial EEPROM offsets */
-#define IPG_EEPROM_CONFIGPARAM 0x00
-#define IPG_EEPROM_ASICCTRL 0x01
-#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
-#define IPG_EEPROM_SUBSYSTEMID 0x03
-#define IPG_EEPROM_STATIONADDRESS0 0x10
-#define IPG_EEPROM_STATIONADDRESS1 0x11
-#define IPG_EEPROM_STATIONADDRESS2 0x12
-
-/* Register & data structure bit masks */
-
-/* PCI register masks. */
-
-/* IOBaseAddress */
-#define IPG_PIB_RSVD_MASK 0xFFFFFE01
-#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
-#define IPG_PIB_IOBASEADDRIND 0x00000001
-
-/* MemBaseAddress */
-#define IPG_PMB_RSVD_MASK 0xFFFFFE07
-#define IPG_PMB_MEMBASEADDRIND 0x00000001
-#define IPG_PMB_MEMMAPTYPE 0x00000006
-#define IPG_PMB_MEMMAPTYPE0 0x00000002
-#define IPG_PMB_MEMMAPTYPE1 0x00000004
-#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
-
-/* ConfigStatus */
-#define IPG_CS_RSVD_MASK 0xFFB0
-#define IPG_CS_CAPABILITIES 0x0010
-#define IPG_CS_66MHZCAPABLE 0x0020
-#define IPG_CS_FASTBACK2BACK 0x0080
-#define IPG_CS_DATAPARITYREPORTED 0x0100
-#define IPG_CS_DEVSELTIMING 0x0600
-#define IPG_CS_SIGNALEDTARGETABORT 0x0800
-#define IPG_CS_RECEIVEDTARGETABORT 0x1000
-#define IPG_CS_RECEIVEDMASTERABORT 0x2000
-#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
-#define IPG_CS_DETECTEDPARITYERROR 0x8000
-
-/* TFD data structure masks. */
-
-/* TFDList, TFC */
-#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
-#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
-#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
-#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
-#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
-#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
-#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
-#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
-#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
-#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
-#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
-#define IPG_TFC_TFDDONE 0x0000000080000000ULL
-#define IPG_TFC_VID 0x00000FFF00000000ULL
-#define IPG_TFC_CFI 0x0000100000000000ULL
-#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
-
-/* TFDList, FragInfo */
-#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
-#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
-
-/* RFD data structure masks. */
-
-/* RFDList, RFS */
-#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
-#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
-#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
-#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
-#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
-#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
-#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
-#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
-#define IPG_RFS_TCPERROR 0x0000000001000000ULL
-#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
-#define IPG_RFS_UDPERROR 0x0000000004000000ULL
-#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
-#define IPG_RFS_IPERROR 0x0000000010000000ULL
-#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
-#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
-#define IPG_RFS_RFDDONE 0x0000000080000000ULL
-#define IPG_RFS_TCI 0x0000FFFF00000000ULL
-
-/* RFDList, FragInfo */
-#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
-#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
-
-/* I/O Register masks. */
-
-/* RMON Statistics Mask */
-#define IPG_RZ_ALL 0x0FFFFFFF
-
-/* Statistics Mask */
-#define IPG_SM_ALL 0x0FFFFFFF
-#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
-#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
-#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
-#define IPG_SM_RXJUMBOFRAMES 0x00000008
-#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
-#define IPG_SM_IPCHECKSUMERRORS 0x00000020
-#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
-#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
-#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
-#define IPG_SM_INRANGELENGTHERRORS 0x00000200
-#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
-#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
-#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
-#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
-#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
-#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
-#define IPG_SM_LATECOLLISIONS 0x00010000
-#define IPG_SM_MULTICOLFRAMES 0x00020000
-#define IPG_SM_SINGLECOLFRAMES 0x00040000
-#define IPG_SM_TXJUMBOFRAMES 0x00080000
-#define IPG_SM_CARRIERSENSEERRORS 0x00100000
-#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
-#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
-#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
-
-/* Countdown */
-#define IPG_CD_RSVD_MASK 0x0700FFFF
-#define IPG_CD_COUNT 0x0000FFFF
-#define IPG_CD_COUNTDOWNSPEED 0x01000000
-#define IPG_CD_COUNTDOWNMODE 0x02000000
-#define IPG_CD_COUNTINTENABLED 0x04000000
-
-/* TxDMABurstThresh */
-#define IPG_TB_RSVD_MASK 0xFF
-
-/* TxDMAUrgentThresh */
-#define IPG_TU_RSVD_MASK 0xFF
-
-/* TxDMAPollPeriod */
-#define IPG_TP_RSVD_MASK 0xFF
-
-/* RxDMAUrgentThresh */
-#define IPG_RU_RSVD_MASK 0xFF
-
-/* RxDMAPollPeriod */
-#define IPG_RP_RSVD_MASK 0xFF
-
-/* ReceiveMode */
-#define IPG_RM_RSVD_MASK 0x3F
-#define IPG_RM_RECEIVEUNICAST 0x01
-#define IPG_RM_RECEIVEMULTICAST 0x02
-#define IPG_RM_RECEIVEBROADCAST 0x04
-#define IPG_RM_RECEIVEALLFRAMES 0x08
-#define IPG_RM_RECEIVEMULTICASTHASH 0x10
-#define IPG_RM_RECEIVEIPMULTICAST 0x20
-
-/* PhySet */
-#define IPG_PS_MEM_LENB9B 0x01
-#define IPG_PS_MEM_LEN9 0x02
-#define IPG_PS_NON_COMPDET 0x04
-
-/* PhyCtrl */
-#define IPG_PC_RSVD_MASK 0xFF
-#define IPG_PC_MGMTCLK_LO 0x00
-#define IPG_PC_MGMTCLK_HI 0x01
-#define IPG_PC_MGMTCLK 0x01
-#define IPG_PC_MGMTDATA 0x02
-#define IPG_PC_MGMTDIR 0x04
-#define IPG_PC_DUPLEX_POLARITY 0x08
-#define IPG_PC_DUPLEX_STATUS 0x10
-#define IPG_PC_LINK_POLARITY 0x20
-#define IPG_PC_LINK_SPEED 0xC0
-#define IPG_PC_LINK_SPEED_10MBPS 0x40
-#define IPG_PC_LINK_SPEED_100MBPS 0x80
-#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
-
-/* DMACtrl */
-#define IPG_DC_RSVD_MASK 0xC07D9818
-#define IPG_DC_RX_DMA_COMPLETE 0x00000008
-#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
-#define IPG_DC_TX_DMA_COMPLETE 0x00000800
-#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
-#define IPG_DC_TX_DMA_IN_PROG 0x00008000
-#define IPG_DC_RX_EARLY_DISABLE 0x00010000
-#define IPG_DC_MWI_DISABLE 0x00040000
-#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
-#define IPG_DC_TX_BURST_LIMIT 0x00700000
-#define IPG_DC_TARGET_ABORT 0x40000000
-#define IPG_DC_MASTER_ABORT 0x80000000
-
-/* ASICCtrl */
-#define IPG_AC_RSVD_MASK 0x07FFEFF2
-#define IPG_AC_EXP_ROM_SIZE 0x00000002
-#define IPG_AC_PHY_SPEED10 0x00000010
-#define IPG_AC_PHY_SPEED100 0x00000020
-#define IPG_AC_PHY_SPEED1000 0x00000040
-#define IPG_AC_PHY_MEDIA 0x00000080
-#define IPG_AC_FORCED_CFG 0x00000700
-#define IPG_AC_D3RESETDISABLE 0x00000800
-#define IPG_AC_SPEED_UP_MODE 0x00002000
-#define IPG_AC_LED_MODE 0x00004000
-#define IPG_AC_RST_OUT_POLARITY 0x00008000
-#define IPG_AC_GLOBAL_RESET 0x00010000
-#define IPG_AC_RX_RESET 0x00020000
-#define IPG_AC_TX_RESET 0x00040000
-#define IPG_AC_DMA 0x00080000
-#define IPG_AC_FIFO 0x00100000
-#define IPG_AC_NETWORK 0x00200000
-#define IPG_AC_HOST 0x00400000
-#define IPG_AC_AUTO_INIT 0x00800000
-#define IPG_AC_RST_OUT 0x01000000
-#define IPG_AC_INT_REQUEST 0x02000000
-#define IPG_AC_RESET_BUSY 0x04000000
-#define IPG_AC_LED_SPEED 0x08000000
-#define IPG_AC_LED_MODE_BIT_1 0x20000000
-
-/* EepromCtrl */
-#define IPG_EC_RSVD_MASK 0x83FF
-#define IPG_EC_EEPROM_ADDR 0x00FF
-#define IPG_EC_EEPROM_OPCODE 0x0300
-#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
-#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
-#define IPG_EC_EEPROM_READOPCODE 0x0200
-#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
-#define IPG_EC_EEPROM_BUSY 0x8000
-
-/* FIFOCtrl */
-#define IPG_FC_RSVD_MASK 0xC001
-#define IPG_FC_RAM_TEST_MODE 0x0001
-#define IPG_FC_TRANSMITTING 0x4000
-#define IPG_FC_RECEIVING 0x8000
-
-/* TxStatus */
-#define IPG_TS_RSVD_MASK 0xFFFF00DD
-#define IPG_TS_TX_ERROR 0x00000001
-#define IPG_TS_LATE_COLLISION 0x00000004
-#define IPG_TS_TX_MAX_COLL 0x00000008
-#define IPG_TS_TX_UNDERRUN 0x00000010
-#define IPG_TS_TX_IND_REQD 0x00000040
-#define IPG_TS_TX_COMPLETE 0x00000080
-#define IPG_TS_TX_FRAMEID 0xFFFF0000
-
-/* WakeEvent */
-#define IPG_WE_WAKE_PKT_ENABLE 0x01
-#define IPG_WE_MAGIC_PKT_ENABLE 0x02
-#define IPG_WE_LINK_EVT_ENABLE 0x04
-#define IPG_WE_WAKE_POLARITY 0x08
-#define IPG_WE_WAKE_PKT_EVT 0x10
-#define IPG_WE_MAGIC_PKT_EVT 0x20
-#define IPG_WE_LINK_EVT 0x40
-#define IPG_WE_WOL_ENABLE 0x80
-
-/* IntEnable */
-#define IPG_IE_RSVD_MASK 0x1FFE
-#define IPG_IE_HOST_ERROR 0x0002
-#define IPG_IE_TX_COMPLETE 0x0004
-#define IPG_IE_MAC_CTRL_FRAME 0x0008
-#define IPG_IE_RX_COMPLETE 0x0010
-#define IPG_IE_RX_EARLY 0x0020
-#define IPG_IE_INT_REQUESTED 0x0040
-#define IPG_IE_UPDATE_STATS 0x0080
-#define IPG_IE_LINK_EVENT 0x0100
-#define IPG_IE_TX_DMA_COMPLETE 0x0200
-#define IPG_IE_RX_DMA_COMPLETE 0x0400
-#define IPG_IE_RFD_LIST_END 0x0800
-#define IPG_IE_RX_DMA_PRIORITY 0x1000
-
-/* IntStatus */
-#define IPG_IS_RSVD_MASK 0x1FFF
-#define IPG_IS_INTERRUPT_STATUS 0x0001
-#define IPG_IS_HOST_ERROR 0x0002
-#define IPG_IS_TX_COMPLETE 0x0004
-#define IPG_IS_MAC_CTRL_FRAME 0x0008
-#define IPG_IS_RX_COMPLETE 0x0010
-#define IPG_IS_RX_EARLY 0x0020
-#define IPG_IS_INT_REQUESTED 0x0040
-#define IPG_IS_UPDATE_STATS 0x0080
-#define IPG_IS_LINK_EVENT 0x0100
-#define IPG_IS_TX_DMA_COMPLETE 0x0200
-#define IPG_IS_RX_DMA_COMPLETE 0x0400
-#define IPG_IS_RFD_LIST_END 0x0800
-#define IPG_IS_RX_DMA_PRIORITY 0x1000
-
-/* MACCtrl */
-#define IPG_MC_RSVD_MASK 0x7FE33FA3
-#define IPG_MC_IFS_SELECT 0x00000003
-#define IPG_MC_IFS_4352BIT 0x00000003
-#define IPG_MC_IFS_1792BIT 0x00000002
-#define IPG_MC_IFS_1024BIT 0x00000001
-#define IPG_MC_IFS_96BIT 0x00000000
-#define IPG_MC_DUPLEX_SELECT 0x00000020
-#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
-#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
-#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
-#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
-#define IPG_MC_RCV_FCS 0x00000200
-#define IPG_MC_FIFO_LOOPBACK 0x00000400
-#define IPG_MC_MAC_LOOPBACK 0x00000800
-#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
-#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
-#define IPG_MC_COLLISION_DETECT 0x00010000
-#define IPG_MC_CARRIER_SENSE 0x00020000
-#define IPG_MC_STATISTICS_ENABLE 0x00200000
-#define IPG_MC_STATISTICS_DISABLE 0x00400000
-#define IPG_MC_STATISTICS_ENABLED 0x00800000
-#define IPG_MC_TX_ENABLE 0x01000000
-#define IPG_MC_TX_DISABLE 0x02000000
-#define IPG_MC_TX_ENABLED 0x04000000
-#define IPG_MC_RX_ENABLE 0x08000000
-#define IPG_MC_RX_DISABLE 0x10000000
-#define IPG_MC_RX_ENABLED 0x20000000
-#define IPG_MC_PAUSED 0x40000000
-
-/*
- * Tune
- */
-
-/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
-#define IPG_APPEND_FCS_ON_TX 1
-
-/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
-#define IPG_STRIP_FCS_ON_RX 1
-
-/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
- * Ethernet errors.
- */
-#define IPG_DROP_ON_RX_ETH_ERRORS 1
-
-/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
- * (via TFC).
- */
-#define IPG_INSERT_MANUAL_VLAN_TAG 0
-
-/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
-#define IPG_ADD_IPCHECKSUM_ON_TX 0
-
-/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
- * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
- */
-#define IPG_ADD_TCPCHECKSUM_ON_TX 0
-
-/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
- * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
- */
-#define IPG_ADD_UDPCHECKSUM_ON_TX 0
-
-/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
- * constants as desired.
- */
-#define IPG_MANUAL_VLAN_VID 0xABC
-#define IPG_MANUAL_VLAN_CFI 0x1
-#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
-
-#define IPG_IO_REG_RANGE 0xFF
-#define IPG_MEM_REG_RANGE 0x154
-#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
-#define IPG_NIC_PHY_ADDRESS 0x01
-#define IPG_DMALIST_ALIGN_PAD 0x07
-#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
-
-/* Number of milliseconds to wait after issuing a software reset.
- * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
- */
-#define IPG_AC_RESETWAIT 0x05
-
-/* Number of IPG_AC_RESETWAIT time periods before declaring timeout. */
-#define IPG_AC_RESET_TIMEOUT 0x0A
-
-/* Minimum number of nanoseconds used to toggle MDC clock during
- * MII/GMII register access.
- */
-#define IPG_PC_PHYCTRLWAIT_NS 200
-
-#define IPG_TFDLIST_LENGTH 0x100
-
-/* Number of frames between TxDMAComplete interrupts.
- * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
- */
-#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
-
-#define IPG_RFDLIST_LENGTH 0x100
-
-/* Maximum number of RFDs to process per interrupt.
- * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
- */
-#define IPG_MAXRFDPROCESS_COUNT 0x80
-
-/* Minimum margin between last freed RFD and current RFD.
- * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
- */
-#define IPG_MINUSEDRFDSTOFREE 0x80
-
-/* Specify the jumbo frame maximum size.
- * Each unit is 0x600 bytes (the rx buffer size that one RFD can carry).
- */
-#define MAX_JUMBOSIZE 0x8 /* max is 12K */
-
-/* Key register values loaded at driver start up. */
-
-/* TxDMAPollPeriod is specified in 320ns increments.
- *
- * Value Time
- * ---------------------
- * 0x00-0x01 320ns
- * 0x03 ~1us
- * 0x1F ~10us
- * 0xFF ~82us
- */
-#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
-
-/* TxDMAUrgentThresh specifies the minimum amount of
- * data in the transmit FIFO before asserting an
- * urgent transmit DMA request.
- *
- * Value Min TxFIFO occupied space before urgent TX request
- * ---------------------------------------------------------------
- * 0x00-0x04 128 bytes (1024 bits)
- * 0x27 1248 bytes (~10000 bits)
- * 0x30 1536 bytes (12288 bits)
- * 0xFF 8192 bytes (65536 bits)
- */
-#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
-
-/* TxDMABurstThresh specifies the minimum amount of
- * free space in the transmit FIFO before asserting a
- * transmit DMA request.
- *
- * Value Min TxFIFO free space before TX request
- * ----------------------------------------------------
- * 0x00-0x08 256 bytes
- * 0x30 1536 bytes
- * 0xFF 8192 bytes
- */
-#define IPG_TXDMABURSTTHRESH_VALUE 0x30
-
-/* RxDMAPollPeriod is specified in 320ns increments.
- *
- * Value Time
- * ---------------------
- * 0x00-0x01 320ns
- * 0x03 ~1us
- * 0x1F ~10us
- * 0xFF ~82us
- */
-#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
-
-/* RxDMAUrgentThresh specifies the minimum amount of
- * free space within the receive FIFO before asserting
- * an urgent receive DMA request.
- *
- * Value Min RxFIFO free space before urgent RX request
- * ---------------------------------------------------------------
- * 0x00-0x04 128 bytes (1024 bits)
- * 0x27 1248 bytes (~10000 bits)
- * 0x30 1536 bytes (12288 bits)
- * 0xFF 8192 bytes (65536 bits)
- */
-#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
-
-/* RxDMABurstThresh specifies the minimum amount of
- * occupied space within the receive FIFO before asserting
- * a receive DMA request.
- *
- * Value Min RxFIFO occupied space before RX request
- * ----------------------------------------------------
- * 0x00-0x08 256 bytes
- * 0x30 1536 bytes
- * 0xFF 8192 bytes
- */
-#define IPG_RXDMABURSTTHRESH_VALUE 0x30
-
-/* FlowOnThresh specifies the maximum amount of occupied
- * space in the receive FIFO before a PAUSE frame with
- * maximum pause time is transmitted.
- *
- * Value Max RxFIFO occupied space before PAUSE
- * ---------------------------------------------------
- * 0x0000 0 bytes
- * 0x0740 29,696 bytes
- * 0x07FF 32,752 bytes
- */
-#define IPG_FLOWONTHRESH_VALUE 0x0740
-
-/* FlowOffThresh specifies the minimum amount of occupied
- * space in the receive FIFO before a PAUSE frame with
- * zero pause time is transmitted.
- *
- * Value Min RxFIFO occupied space before PAUSE
- * ---------------------------------------------------
- * 0x0000 0 bytes
- * 0x00BF 3056 bytes
- * 0x07FF 32,752 bytes
- */
-#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
-
-/*
- * Miscellaneous macros.
- */
-
-/* Macros for printing debug statements. */
-#ifdef IPG_DEBUG
-# define IPG_DEBUG_MSG(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG "IPG: " fmt, ##args); \
-} while (0)
-# define IPG_DDEBUG_MSG(fmt, args...) \
- printk(KERN_DEBUG "IPG: " fmt, ##args)
-# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
-# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
-#else
-# define IPG_DEBUG_MSG(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG "IPG: " fmt, ##args); \
-} while (0)
-# define IPG_DDEBUG_MSG(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG "IPG: " fmt, ##args); \
-} while (0)
-# define IPG_DUMPRFDLIST(args)
-# define IPG_DUMPTFDLIST(args)
-#endif
-
-/*
- * End miscellaneous macros.
- */
-
-/* Transmit Frame Descriptor. The IPG supports 15 fragments;
- * however, Linux requires only a single fragment. Note, each
- * TFD field is 64 bits wide.
- */
-struct ipg_tx {
- __le64 next_desc;
- __le64 tfc;
- __le64 frag_info;
-};
-
-/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
- */
-struct ipg_rx {
- __le64 next_desc;
- __le64 rfs;
- __le64 frag_info;
-};
-
-struct ipg_jumbo {
- int found_start;
- int current_size;
- struct sk_buff *skb;
-};
-
-/* Structure of IPG NIC specific data. */
-struct ipg_nic_private {
- void __iomem *ioaddr;
- struct ipg_tx *txd;
- struct ipg_rx *rxd;
- dma_addr_t txd_map;
- dma_addr_t rxd_map;
- struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
- struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
- unsigned int tx_current;
- unsigned int tx_dirty;
- unsigned int rx_current;
- unsigned int rx_dirty;
- bool is_jumbo;
- struct ipg_jumbo jumbo;
- unsigned long rxfrag_size;
- unsigned long rxsupport_size;
- unsigned long max_rxframe_size;
- unsigned int rx_buf_sz;
- struct pci_dev *pdev;
- struct net_device *dev;
- struct net_device_stats stats;
- spinlock_t lock;
- int tenmbpsmode;
-
- u16 led_mode;
- u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
-
- struct mutex mii_mutex;
- struct mii_if_info mii_if;
- int reset_current_tfd;
-#ifdef IPG_DEBUG
- int RFDlistendCount;
- int RFDListCheckedCount;
- int EmptyRFDListCount;
-#endif
- struct delayed_work task;
-};
-
-#endif /* __LINUX_IPG_H */
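The TFC masks in the header removed above pack frame ID, fragment count, alignment mode, and completion flags into one 64-bit descriptor word. As a hedged sketch of how such a word composes (the helper and the field shifts are inferred from the mask values, not taken from the deleted driver):

static __le64 ipg_build_tfc(u16 frame_id, u8 frag_count)
{
	u64 tfc = 0;

	tfc |= (u64)frame_id & IPG_TFC_FRAMEID;             /* bits 15:0 */
	tfc |= ((u64)frag_count << 24) & IPG_TFC_FRAGCOUNT; /* bits 27:24 */
	tfc |= IPG_TFC_WORDALIGNDISABLED;   /* no buffer realignment */
	tfc |= IPG_TFC_TXDMAINDICATE;       /* raise TxDMAComplete when done */
	return cpu_to_le64(tfc);
}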
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e7e505..31c491e02e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
dev->caps.phys_port_id[i] = func_cap.phys_port_id;
- if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
- &dev->caps.gid_table_len[i],
- &dev->caps.pkey_table_len[i]))
+ err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+ &dev->caps.gid_table_len[i],
+ &dev->caps.pkey_table_len[i]);
+ if (err)
goto err_mem;
}
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long)
pci_resource_len(dev->persist->pdev, 2));
+ err = -ENOMEM;
goto err_mem;
}
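Both hunks above enforce the same rule: every failure branch must leave a meaningful error code in err before jumping to the shared unwind label, otherwise a stale zero is returned as success. A hedged sketch of the pattern (names are illustrative):

	err = query_capabilities(dev);
	if (err)
		goto err_mem;		/* err was set by the callee */

	if (!resources_fit(dev)) {
		err = -ENOMEM;		/* no callee set it, so set it here */
		goto err_mem;
	}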
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..6fec3e993d02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
struct res_counter *counter;
struct res_counter *tmp;
int err;
- int index;
+ int *counters_arr = NULL;
+ int i, j;
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
slave);
- spin_lock_irq(mlx4_tlock(dev));
- list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
- if (counter->com.owner == slave) {
- index = counter->com.res_id;
- rb_erase(&counter->com.node,
- &tracker->res_tree[RES_COUNTER]);
- list_del(&counter->com.list);
- kfree(counter);
- __mlx4_counter_free(dev, index);
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return;
+
+ do {
+ i = 0;
+ j = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+ if (counter->com.owner == slave) {
+ counters_arr[i++] = counter->com.res_id;
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
+ list_del(&counter->com.list);
+ kfree(counter);
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ while (j < i) {
+ __mlx4_counter_free(dev, counters_arr[j++]);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
- }
- spin_unlock_irq(mlx4_tlock(dev));
+ } while (i);
+
+ kfree(counters_arr);
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
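The rewrite above gathers the counter IDs under the spinlock and calls __mlx4_counter_free() only after dropping it, since the free path may block or take other locks. A hedged sketch of the collect-then-release idiom (names are illustrative):

	spin_lock_irq(&tracker_lock);
	list_for_each_entry_safe(res, tmp, &owned, list)
		if (res->owner == slave)
			ids[n++] = res->id;	/* bookkeeping only while locked */
	spin_unlock_irq(&tracker_lock);

	while (n--)
		release_resource(ids[n]);	/* heavy work with the lock dropped */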
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c09..22e72bf1ae48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+enum mlx5e_dma_map_type {
+ MLX5E_DMA_MAP_SINGLE,
+ MLX5E_DMA_MAP_PAGE
+};
+
struct mlx5e_sq_dma {
- dma_addr_t addr;
- u32 size;
+ dma_addr_t addr;
+ u32 size;
+ enum mlx5e_dma_map_type type;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d78cdf..1e52db32c73d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
return err;
}
+static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
+ u32 tirn)
+{
+ void *in;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
+ priv->tirn[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
+ err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
mlx5e_update_carrier(priv);
mlx5e_redirect_rqts(priv);
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
return 0;
+err_close_channels:
+ mlx5e_close_channels(priv);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
if (new_mtu > max_mtu) {
netdev_err(netdev,
"%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
"Not creating net device, some required device capabilities are missing\n");
return -ENOTSUPP;
}
+ if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+ mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+
return 0;
}
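The MTU hunk converts the firmware-reported maximum before comparing, because the hardware value includes link-layer overhead while new_mtu is the software MTU. A hedged sketch of the conversion (the exact overhead in MLX5E_HW2SW_MTU is an assumption here, not quoted from the driver):

/* hw MTU counts the Ethernet header, VLAN tag and FCS; sw MTU does not */
#define EXAMPLE_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	max_mtu = EXAMPLE_HW2SW_MTU(max_mtu);	/* now comparable to new_mtu */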
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a251d7..1341b1d3c421 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
}
}
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
- u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+ struct mlx5e_sq_dma *dma)
{
- sq->dma_fifo_pc--;
- *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
- *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
- dma_addr_t addr;
- u32 size;
- int i;
-
- for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
- mlx5e_dma_pop_last_pushed(sq, &addr, &size);
- dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ switch (dma->type) {
+ case MLX5E_DMA_MAP_SINGLE:
+ dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ break;
+ case MLX5E_DMA_MAP_PAGE:
+ dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ break;
+ default:
+ WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
}
}
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
- u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+ dma_addr_t addr,
+ u32 size,
+ enum mlx5e_dma_map_type map_type)
{
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
sq->dma_fifo_pc++;
}
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
- u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
- *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
- *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+ return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ struct mlx5e_sq_dma *last_pushed_dma =
+ mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+ mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+ }
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
*/
#define MLX5E_MIN_INLINE ETH_HLEN
- if (bf && (skb_headlen(skb) <= sq->max_inline))
- return skb_headlen(skb);
+ if (bf) {
+ u16 ihs = skb_headlen(skb);
+
+ if (skb_vlan_tag_present(skb))
+ ihs += VLAN_HLEN;
+
+ if (ihs <= sq->max_inline)
+ return skb_headlen(skb);
+ }
return MLX5E_MIN_INLINE;
}
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
- mlx5e_dma_push(sq, dma_addr, headlen);
+ mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
MLX5E_TX_SKB_CB(skb)->num_dma++;
dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz);
+ mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
MLX5E_TX_SKB_CB(skb)->num_dma++;
dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
}
for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
- dma_addr_t addr;
- u32 size;
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, dma_fifo_cc++);
- mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
- dma_fifo_cc++;
- dma_unmap_single(sq->pdev, addr, size,
- DMA_TO_DEVICE);
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
}
npkts++;
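The en_tx.c rework records how each buffer was mapped so the completion path can issue the matching unmap; calling dma_unmap_single() on a page mapping (or the reverse) is invalid under some IOMMU configurations. A hedged sketch of the typed push/unmap pair, using the names introduced above:

	/* map side: store the mapping type next to addr and size */
	dma_addr_t addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE);
	mlx5e_dma_push(sq, addr, fsz, MLX5E_DMA_MAP_PAGE);

	/* completion side: dispatch on the recorded type */
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	}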
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f21232019a..79ef799f88ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@ process_pkt:
rtl8169_rx_vlan_tag(desc, skb);
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
+
napi_gro_receive(&tp->napi, skb);
u64_stats_update_begin(&tp->rx_stats.syncp);
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
-
- if (skb->pkt_type == PACKET_MULTICAST)
- dev->stats.multicast++;
}
release_descriptor:
desc->opts2 = 0;
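The reordering above matters because napi_gro_receive() hands the skb to the stack, which may free it at once; reading skb->pkt_type afterwards was a potential use-after-free. A hedged restatement of the rule:

	if (skb->pkt_type == PACKET_MULTICAST)	/* driver still owns the skb */
		dev->stats.multicast++;

	napi_gro_receive(&tp->napi, skb);	/* ownership transferred here */
	/* no skb dereference is allowed past this point */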
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b2083cb53..ee8d1ec61fab 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
/* Interrupt enable: */
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
- /* Receive FIFO full warning */
- ravb_write(ndev, RIC1_RFWE, RIC1);
/* Receive FIFO full error, descriptor empty */
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
- ravb_write(ndev, ric0 & ~BIT(q), RIC0);
- ravb_write(ndev, tic & ~BIT(q), TIC);
+ ric0 &= ~BIT(q);
+ tic &= ~BIT(q);
+ ravb_write(ndev, ric0, RIC0);
+ ravb_write(ndev, tic, TIC);
__napi_schedule(&priv->napi[q]);
} else {
netdev_warn(ndev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c928de..a3c42a376741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@ out:
* with our request for slot reset the mmio_enabled callback will never be
* called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
-static struct pci_error_handlers efx_err_handlers = {
+static const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
.slot_reset = efx_io_slot_reset,
.resume = efx_io_resume,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c9007e49..219a99b7a631 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
static int smsc911x_phy_reset(struct smsc911x_data *pdata)
{
- struct phy_device *phy_dev = pdata->phy_dev;
unsigned int temp;
unsigned int i = 100000;
- BUG_ON(!phy_dev);
- BUG_ON(!phy_dev->bus);
-
- SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
- smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
+ temp = smsc911x_reg_read(pdata, PMT_CTRL);
+ smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
do {
msleep(1);
- temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
- MII_BMCR);
- } while ((i--) && (temp & BMCR_RESET));
+ temp = smsc911x_reg_read(pdata, PMT_CTRL);
+ } while ((i--) && (temp & PMT_CTRL_PHY_RST_));
- if (temp & BMCR_RESET) {
+ if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
SMSC_WARN(pdata, hw, "PHY reset failed to complete");
return -EIO;
}
@@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
}
/* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata))
+ if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
return -ENODEV;
dev->flags |= IFF_MULTICAST;
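The new reset path sets the self-clearing PMT_CTRL_PHY_RST_ bit and polls until hardware clears it, with a bounded iteration count so a dead PHY cannot hang the probe forever. A hedged sketch of the bounded-poll idiom (the register helpers are illustrative):

	reg_write(PMT_CTRL, reg_read(PMT_CTRL) | PMT_CTRL_PHY_RST_);
	for (i = 100000; i; i--) {
		msleep(1);			/* let the reset complete */
		if (!(reg_read(PMT_CTRL) & PMT_CTRL_PHY_RST_))
			return 0;		/* bit self-cleared: done */
	}
	return -EIO;				/* bounded: never spins forever */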
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdbf029f..82de68b1a452 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
QSGMII_PHY_RX_SIGNAL_DETECT_EN |
QSGMII_PHY_TX_DRIVER_EN |
QSGMII_PHY_QSGMII_EN |
- 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
- 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
- 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
- 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
- 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+ 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+ 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+ 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+ 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
}
plat_dat->has_gmac = true;
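The ul suffixes avoid undefined behaviour: assuming QSGMII_PHY_TX_DRV_AMP_OFFSET is 28, shifting the plain int literal 0xC left by 28 overflows a signed 32-bit int. Forcing unsigned long keeps the arithmetic well defined. A hedged illustration:

	unsigned long bad  = 0xC   << 28;	/* 12 << 28 overflows int: UB */
	unsigned long good = 0xCul << 28;	/* unsigned long math: 0xC0000000ul */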
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd50a15..f38696ceee74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
-#define VAL_PKT_LEN_DEF 0
-/* ValPktLen[] is used for setting the checksum offload ability of NIC.
- 0: Receive frame with invalid layer 2 length (Default)
- 1: Drop frame with invalid layer 2 length
-*/
-VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
-
#define WOL_OPT_DEF 0
#define WOL_OPT_MIN 0
#define WOL_OPT_MAX 7
@@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
- velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
- if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
- VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
+ if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
+ if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
+ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
stats->rx_length_errors++;
return -EINVAL;
}
@@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
vptr->rx.buf_sz, DMA_FROM_DEVICE);
- /*
- * Drop frame not meeting IEEE 802.3
- */
-
- if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
- if (rd->rdesc0.RSR & RSR_RL) {
- stats->rx_length_errors++;
- return -EINVAL;
- }
- }
-
velocity_rx_csum(rd, skb);
if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b5304d851..b103adb8d62e 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
FJES_CMD_REQ_RES_CODE_BUSY) &&
(timeout > 0)) {
msleep(200 + hw->my_epid * 20);
- timeout -= (200 + hw->my_epid * 20);
+ timeout -= (200 + hw->my_epid * 20);
res_buf->unshare_buffer.length = 0;
res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e3df6d..8c48bb2a94ea 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@ acct:
}
}
-static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
bool local)
{
struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
unsigned int len;
rx_handler_result_t ret = RX_HANDLER_CONSUMED;
bool success = false;
+ struct sk_buff *skb = *pskb;
len = skb->len + ETH_HLEN;
if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
if (!skb)
goto out;
+ *pskb = skb;
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
@@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr)
- return ipvlan_rcv_frame(addr, skb, true);
+ return ipvlan_rcv_frame(addr, &skb, true);
out:
skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr)
- return ipvlan_rcv_frame(addr, skb, true);
+ return ipvlan_rcv_frame(addr, &skb, true);
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
@@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
if (addr)
- ret = ipvlan_rcv_frame(addr, skb, false);
+ ret = ipvlan_rcv_frame(addr, pskb, false);
out:
return ret;
@@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
if (addr)
- ret = ipvlan_rcv_frame(addr, skb, false);
+ ret = ipvlan_rcv_frame(addr, pskb, false);
}
return ret;
@@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
port->mode);
kfree_skb(skb);
- return NET_RX_DROP;
+ return RX_HANDLER_CONSUMED;
}
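Threading struct sk_buff **pskb through ipvlan_rcv_frame() matters because helpers such as skb_share_check() can return a different skb and free the original; unless the new pointer is written back, the caller keeps a dangling reference. A hedged sketch of the rx_handler convention:

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	skb = skb_share_check(skb, GFP_ATOMIC);	/* may clone and free the old skb */
	if (!skb)
		return RX_HANDLER_CONSUMED;
	*pskb = skb;				/* refresh the caller's pointer */

	/* ... continue with the (possibly new) skb ... */
	return RX_HANDLER_ANOTHER;
}

The macvlan hunks below apply the same *pskb update after ip_check_defrag() for the same reason.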
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c6292c27..06c8bfeaccd6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
if (!skb)
return RX_HANDLER_CONSUMED;
+ *pskb = skb;
eth = eth_hdr(skb);
macvlan_forward_source(skb, port, eth->h_source);
src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
goto out;
}
+ *pskb = skb;
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d32d27..2d020a3ec0b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = at803x_ack_interrupt,
+ .config_intr = at803x_config_intr,
.driver = {
.owner = THIS_MODULE,
},
@@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = at803x_ack_interrupt,
+ .config_intr = at803x_config_intr,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d5827536..0240552b50f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = MARVELL_PHY_ID_88E1540,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1540",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &m88e1510_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .driver = { .owner = THIS_MODULE },
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E3016,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48abafc87..48ce6ef400fe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -863,6 +863,9 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
break;
case PHY_NOLINK:
+ if (phy_interrupt_is_valid(phydev))
+ break;
+
err = phy_read_status(phydev);
if (err)
break;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad712ddb2..dd295dbaa074 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
#define PHY_ID_VSC8211 0x000fc4b0
@@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
phydev->drv->phy_id == PHY_ID_VSC8514 ||
- phydev->drv->phy_id == PHY_ID_VSC8574) ?
+ phydev->drv->phy_id == PHY_ID_VSC8574 ||
+ phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
MII_VSC8221_IMASK_MASK);
else {
@@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = {
.config_intr = &vsc82xx_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_VSC8601,
+ .name = "Vitesse VSC8601",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &genphy_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb1b464..3da70bf9936a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t) &wwan_info,
}, {
+ /* Dell DW5580 modules */
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (kernel_ulong_t)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4caddccbe..899ea4288197 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2157,12 +2157,13 @@ vmxnet3_set_mc(struct net_device *netdev)
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
- rxConf->mfTableLen = cpu_to_le16(
- netdev_mc_count(netdev) * ETH_ALEN);
+ size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
+
+ rxConf->mfTableLen = cpu_to_le16(sz);
new_table_pa = dma_map_single(
&adapter->pdev->dev,
new_table,
- rxConf->mfTableLen,
+ sz,
PCI_DMA_TODEVICE);
}
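The fix caches the byte count in host order before storing the little-endian copy for the device: mfTableLen is a __le16 field, so reusing it as the dma_map_single() length would be wrong on big-endian hosts. A hedged restatement of the host-order/device-order split:

	size_t sz = netdev_mc_count(netdev) * ETH_ALEN;	/* host byte order */

	rxConf->mfTableLen = cpu_to_le16(sz);		/* device-visible copy */
	new_table_pa = dma_map_single(&adapter->pdev->dev, new_table,
				      sz,		/* host order, not __le16 */
				      PCI_DMA_TODEVICE);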
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a55c035..4c58c83dc225 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040300
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
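The version word packs one component per byte, which is how 1.4.4.0 becomes 0x01040400. A hedged helper showing the encoding:

#define EXAMPLE_VERSION_NUM(a, b, c, d) \
	(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

/* EXAMPLE_VERSION_NUM(1, 4, 4, 0) == 0x01040400 */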
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a18916a31..a831d18596a5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void)
free_page((unsigned long)sei_page);
}
-int chsc_enable_facility(int operation_code)
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
{
- unsigned long flags;
int ret;
- struct {
- struct chsc_header request;
- u8 reserved1:4;
- u8 format:4;
- u8 reserved2;
- u16 operation_code;
- u32 reserved3;
- u32 reserved4;
- u32 operation_data_area[252];
- struct chsc_header response;
- u32 reserved5:4;
- u32 format2:4;
- u32 reserved6:24;
- } __attribute__ ((packed)) *sda_area;
- spin_lock_irqsave(&chsc_page_lock, flags);
- memset(chsc_page, 0, PAGE_SIZE);
- sda_area = chsc_page;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code)
default:
ret = chsc_error_from_response(sda_area->response.code);
}
+out:
+ return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+ struct chsc_sda_area *sda_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
+
+ ret = __chsc_enable_facility(sda_area, operation_code);
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
-out:
+
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50700b2..0de134c3a204 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@ struct chsc_scpd {
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed));
+struct chsc_sda_area {
+ struct chsc_header request;
+ u8 :4;
+ u8 format:4;
+ u8 :8;
+ u16 operation_code;
+ u32 :32;
+ u32 :32;
+ u32 operation_data_area[252];
+ struct chsc_header response;
+ u32 :4;
+ u32 format2:4;
+ u32 :24;
+} __packed __aligned(PAGE_SIZE);
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void);
extern int chsc_init(void);
extern void chsc_init_cleanup(void);
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e818d6b..690b8547e828 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
+ static struct chsc_sda_area sda_area __initdata;
struct subchannel_id schid;
struct schib schib;
schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
+
+ if (schid.ssid) {
+ /*
+ * Firmware should have already enabled MSS but whoever started
+ * the kernel might have initiated a channel subsystem reset.
+ * Ensure that MSS is enabled.
+ */
+ memset(&sda_area, 0, sizeof(sda_area));
+ if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
+ return -ENODEV;
+ }
if (stsch_err(schid, &schib))
return -ENODEV;
if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
return -ENODEV;
if (!schib.pmcw.dnv)
return -ENODEV;
+
+ iplinfo->ssid = schid.ssid;
iplinfo->devno = schib.pmcw.dev;
iplinfo->is_qdio = schib.pmcw.qf;
return 0;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053bdc12..489e703dc82d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
-#ifdef CONFIG_SMP
css->global_pgid.pgid_high.cpu_addr = stap();
-#else
- css->global_pgid.pgid_high.cpu_addr = 0;
-#endif
}
get_cpu_id(&cpu_id);
css->global_pgid.cpu_id = cpu_id.ident;
css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
-
}
static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b3c8a4..b8ab18676e69 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
#
ap-objs := ap_bus.o
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o
+# zcrypt_api depends on ap
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
+# msgtype* depend on zcrypt_api
obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+# adapter drivers depend on ap, zcrypt_api and msgtype*
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfbcaddb..61f768518a34 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);
+static bool initialised;
/*
* Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
{
struct device_driver *drv = &ap_drv->driver;
+ if (!initialised)
+ return -ENODEV;
+
drv->bus = &ap_bus_type;
drv->probe = ap_device_probe;
drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@ int __init ap_module_init(void)
goto out_pm;
queue_work(system_long_wq, &ap_scan_work);
+ initialised = true;
return 0;
@@ -1837,6 +1842,7 @@ void ap_module_exit(void)
{
int i;
+ initialised = false;
ap_reset_domain();
ap_poll_thread_stop();
del_timer_sync(&ap_config_timer);
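The initialised flag closes the window in which a driver could register against the AP bus before ap_module_init() completed (or after ap_module_exit() began tearing it down). A hedged sketch of the gate:

static bool initialised;	/* true only between module init and exit */

int example_driver_register(struct ap_driver *ap_drv)
{
	if (!initialised)
		return -ENODEV;	/* bus not up (or already going away) */
	/* ... normal registration ... */
	return 0;
}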
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603ebbc1f8..9f8fa42c062c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister);
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
- if (zops->owner) {
- spin_lock_bh(&zcrypt_ops_list_lock);
- list_add_tail(&zops->list, &zcrypt_ops_list);
- spin_unlock_bh(&zcrypt_ops_list_lock);
- }
+ spin_lock_bh(&zcrypt_ops_list_lock);
+ list_add_tail(&zops->list, &zcrypt_ops_list);
+ spin_unlock_bh(&zcrypt_ops_list_lock);
}
EXPORT_SYMBOL(zcrypt_msgtype_register);
@@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
spin_lock_bh(&zcrypt_ops_list_lock);
list_for_each_entry(zops, &zcrypt_ops_list, list) {
if ((zops->variant == variant) &&
- (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
+ (!strncmp(zops->name, name, sizeof(zops->name)))) {
found = 1;
break;
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 750876891931..38618f05ad92 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@ struct zcrypt_ops {
struct list_head list; /* zcrypt ops list. */
struct module *owner;
int variant;
+ char name[128];
};
struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9137a8..74edf2934e7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
.rsa_modexpo = zcrypt_cex2a_modexpo,
.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
.owner = THIS_MODULE,
+ .name = MSGTYPE50_NAME,
.variant = MSGTYPE50_VARIANT_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 74762214193b..9a2dd472c1cc 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
.owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
.variant = MSGTYPE06_VARIANT_NORNG,
.rsa_modexpo = zcrypt_msgtype6_modexpo,
.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
static struct zcrypt_ops zcrypt_msgtype6_ops = {
.owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
.variant = MSGTYPE06_VARIANT_DEFAULT,
.rsa_modexpo = zcrypt_msgtype6_modexpo,
.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
.owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
.variant = MSGTYPE06_VARIANT_EP11,
.rsa_modexpo = NULL,
.rsa_modexpo_crt = NULL,