author     Rodrigo Vivi <rodrigo.vivi@intel.com>  2019-08-21 22:47:35 -0700
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>  2019-08-22 00:10:36 -0700
commit     829e8def7bd7b1e58028113ee5c2877da89d8f27 (patch)
tree       3958810e8e17c508172889c02e75d9dc2ec6080c /net/openvswitch
parent     8e40983dec63300013ff5755c36e3ac455e351a5 (diff)
parent     ae4530062620561d24683b1bd3438b8397693429 (diff)
Merge drm/drm-next into drm-intel-next-queued
We need the rename of reservation_object to dma_resv.
The solution on this merge came from linux-next:

From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 14 Aug 2019 12:48:39 +1000
Subject: [PATCH] drm: fix up fallout from "dma-buf: rename reservation_object to dma_resv"

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a..4cd54c569911 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -43,12 +43,12 @@ static int pool_active(struct i915_active *ref)
 {
 	struct intel_engine_pool_node *node =
 		container_of(ref, typeof(*node), active);
-	struct reservation_object *resv = node->obj->base.resv;
+	struct dma_resv *resv = node->obj->base.resv;
 	int err;

-	if (reservation_object_trylock(resv)) {
-		reservation_object_add_excl_fence(resv, NULL);
-		reservation_object_unlock(resv);
+	if (dma_resv_trylock(resv)) {
+		dma_resv_add_excl_fence(resv, NULL);
+		dma_resv_unlock(resv);
 	}

 	err = i915_gem_object_pin_pages(node->obj);

which is a simplified version from a previous one which had:
Reviewed-by: Christian König <christian.koenig@amd.com>

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
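For reference, the core of the fixup above is the renamed dma_resv locking API. The following is a minimal standalone sketch of that pattern; the helper name pool_clear_excl_fence and its surrounding context are illustrative only and not part of the i915 patch:

#include <linux/dma-resv.h>

/* Illustrative sketch: drop the exclusive fence if the reservation
 * object can be taken without blocking, mirroring the pattern used in
 * intel_engine_pool.c after the reservation_object -> dma_resv rename.
 */
static void pool_clear_excl_fence(struct dma_resv *resv)
{
	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}
}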
Diffstat (limited to 'net/openvswitch')
-rw-r--r--  net/openvswitch/flow.c        8
-rw-r--r--  net/openvswitch/flow.h        4
-rw-r--r--  net/openvswitch/flow_table.c  8
3 files changed, 10 insertions, 10 deletions
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..bc89e16e0505 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
const struct sk_buff *skb)
{
- struct flow_stats *stats;
+ struct sw_flow_stats *stats;
unsigned int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(flow->stats_last_writer != -1) &&
likely(!rcu_access_pointer(flow->stats[cpu]))) {
/* Try to allocate CPU-specific stats. */
- struct flow_stats *new_stats;
+ struct sw_flow_stats *new_stats;
new_stats =
kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
- struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+ struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
- struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+ struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 3e2cc2202d66..a5506e2d4b7a 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -194,7 +194,7 @@ struct sw_flow_actions {
struct nlattr actions[];
};
-struct flow_stats {
+struct sw_flow_stats {
u64 packet_count; /* Number of packets matched. */
u64 byte_count; /* Number of bytes matched. */
unsigned long used; /* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
struct cpumask cpu_used_mask;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
- struct flow_stats __rcu *stats[]; /* One for each CPU. First one
+ struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
* is allocated at flow creation time,
* the rest are allocated on demand
* while holding the 'stats[0].lock'.
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 988fd8a94e43..cf3582c5ed70 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
- struct flow_stats *stats;
+ struct sw_flow_stats *stats;
flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
- (struct flow_stats __force *)flow->stats[cpu]);
+ (struct sw_flow_stats __force *)flow->stats[cpu]);
kmem_cache_free(flow_cache, flow);
}
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
- * sizeof(struct flow_stats *)),
+ * sizeof(struct sw_flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
flow_stats_cache
- = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+ = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
0, SLAB_HWCACHE_ALIGN, NULL);
if (flow_stats_cache == NULL) {
kmem_cache_destroy(flow_cache);