/*
 * drivers/video/tegra/nvmap/nvmap.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/atomic.h>
#include <mach/nvmap.h>
#include "nvmap_heap.h"

#define nvmap_err(_client, _fmt, ...)				\
	dev_err(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)				\
	dev_warn(nvmap_client_to_device(_client),		\
		 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)				\
	dev_dbg(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
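
/*
 * Illustrative use of the logging wrappers above (hypothetical call
 * site): the macros prepend the calling function's name to the format
 * string, so a call such as
 *
 *	nvmap_err(client, "allocation of %zu bytes failed\n", size);
 *
 * logs "<caller>: allocation of ... failed" against the nvmap device.
 */
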
struct nvmap_device;
struct page;
struct tegra_iovmm_area;

/* handles allocated using shared system memory (either IOVMM- or high-order
 * page allocations) */
struct nvmap_pgalloc {
	struct page **pages;
	struct tegra_iovmm_area *area;
	struct list_head mru_list;	/* MRU entry for IOVMM reclamation */
	bool contig;			/* contiguous system memory */
	bool dirty;			/* area is invalid and needs mapping */
};

struct nvmap_handle {
	struct rb_node node;	/* entry on global handle tree */
	atomic_t ref;		/* reference count (i.e., # of duplications) */
	atomic_t pin;		/* pin count */
	unsigned int usecount;	/* how many times the memory is in use */
	unsigned long flags;
	size_t size;		/* padded (as-allocated) size */
	size_t orig_size;	/* original (as-requested) size */
	struct nvmap_client *owner;
	struct nvmap_device *dev;
	union {
		struct nvmap_pgalloc pgalloc;
		struct nvmap_heap_block *carveout;
	};
	bool global;		/* handle may be duplicated by other clients */
	bool secure;		/* zap IOVMM area on unpin */
	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
	bool alloc;		/* handle has memory allocated */
	struct mutex lock;
};
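
/*
 * The anonymous union above is discriminated by heap_pgalloc; a sketch
 * of how a consumer picks the active member (illustrative only):
 *
 *	if (h->heap_pgalloc)
 *		pages = h->pgalloc.pages;	(sysmem / IOVMM backing)
 *	else
 *		block = h->carveout;		(carveout heap backing)
 */
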
struct nvmap_share {
	struct tegra_iovmm_client *iovmm;
	wait_queue_head_t pin_wait;
	struct mutex pin_lock;
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
	spinlock_t mru_lock;
	struct list_head *mru_lists;
	int nr_mru;
#endif
};

struct nvmap_carveout_commit {
	size_t commit;
	struct list_head list;
};

struct nvmap_client {
	const char			*name;
	struct nvmap_device		*dev;
	struct nvmap_share		*share;
	struct rb_root			handle_refs;
	atomic_t			iovm_commit;
	size_t				iovm_limit;
	struct mutex			ref_lock;
	bool				super;
	atomic_t			count;
	struct task_struct		*task;
	struct nvmap_carveout_commit	carveout_commit[0];
};
/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
	struct nvmap_handle *handle;
	struct rb_node	node;
	atomic_t	dupes;	/* number of times to free on file close */
	atomic_t	pin;	/* number of times to unpin on free */
};
struct nvmap_vma_priv {
	struct nvmap_handle *handle;
	size_t		offs;
	atomic_t	count;	/* number of processes cloning the VMA */
};

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
	mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
	mutex_unlock(&priv->ref_lock);
}
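
/*
 * Usage sketch: per the kernel's "_locked" naming convention, lookups
 * such as _nvmap_validate_id_locked() below are assumed to require
 * ref_lock to be held across the call (illustrative only):
 *
 *	nvmap_ref_lock(client);
 *	ref = _nvmap_validate_id_locked(client, id);
 *	nvmap_ref_unlock(client);
 */
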
struct device *nvmap_client_to_device(struct nvmap_client *client);
pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
void nvmap_usecount_inc(struct nvmap_handle *h);
void nvmap_usecount_dec(struct nvmap_handle *h);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
					      size_t len, size_t align,
					      unsigned long usage,
					      unsigned int prot,
					      struct nvmap_handle *handle);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b);

struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len);

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);

struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
					unsigned long handle);

struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
						   unsigned long id);

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
					 unsigned long id);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id);

int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags);

void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);

int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
		     unsigned int nr, const unsigned long *ids);
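
/*
 * Pin/unpin sketch (illustrative): a successful nvmap_pin_ids() call
 * is assumed to be balanced by nvmap_unpin_ids() on the same id array
 * once the hardware work referencing the memory has completed:
 *
 *	err = nvmap_pin_ids(client, nr, ids);
 *	if (!err) {
 *		... submit work using the pinned addresses ...
 *		nvmap_unpin_ids(client, nr, ids);
 *	}
 */
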
void _nvmap_handle_free(struct nvmap_handle *h);
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
		pr_err("%s: %s getting a freed handle\n",
		       __func__, current->group_leader->comm);
		if (atomic_read(&h->ref) <= 0)
			return NULL;
	}
	return h;
}

static inline void nvmap_handle_put(struct nvmap_handle *h)
{
	int cnt = atomic_dec_return(&h->ref);

	if (WARN_ON(cnt < 0)) {
		pr_err("%s: %s put to negative references\n",
		       __func__, current->comm);
	} else if (cnt == 0)
		_nvmap_handle_free(h);
}
static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
	if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
		return pgprot_dmacoherent(prot);
	else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		return pgprot_inner_writeback(prot);
	return prot;
}
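
/*
 * Typical application (a sketch, assuming use from an mmap() handler):
 * the handle's cache flags select the page protection applied when the
 * memory is mapped into userspace:
 *
 *	vma->vm_page_prot = nvmap_pgprot(priv->handle, vma->vm_page_prot);
 */
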
int is_nvmap_vma(struct vm_area_struct *vma);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */