| File: | dev/pci/drm/dma-resv.c |
| Warning: | line 529, column 26: Array access (from variable 'shared') results in a null pointer dereference |
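Editor's note: the flagged access is at source line 529, in dma_resv_get_fences_rcu() below. On an editorial reading of the control flow (not a verified analyzer trace), reaching line 529 requires fence_excl != NULL with pfence_excl == NULL, which forces sz > 0 inside the retry loop and hence a successful allocation of the 'shared' array before the loop can exit with ret == 0, so the warning appears to be a false positive caused by the analyzer not correlating sz with fence_excl across branches. Separately, a reviewer may want to check the #else fallback there: it memcpy()s sz bytes out of the old array, which can be smaller than sz on a later iteration.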
| 1 | /* | |||
| 2 | * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) | |||
| 3 | * | |||
| 4 | * Based on bo.c which bears the following copyright notice, | |||
| 5 | * but is dual licensed: | |||
| 6 | * | |||
| 7 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | |||
| 8 | * All Rights Reserved. | |||
| 9 | * | |||
| 10 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 11 | * copy of this software and associated documentation files (the | |||
| 12 | * "Software"), to deal in the Software without restriction, including | |||
| 13 | * without limitation the rights to use, copy, modify, merge, publish, | |||
| 14 | * distribute, sub license, and/or sell copies of the Software, and to | |||
| 15 | * permit persons to whom the Software is furnished to do so, subject to | |||
| 16 | * the following conditions: | |||
| 17 | * | |||
| 18 | * The above copyright notice and this permission notice (including the | |||
| 19 | * next paragraph) shall be included in all copies or substantial portions | |||
| 20 | * of the Software. | |||
| 21 | * | |||
| 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |||
| 25 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |||
| 26 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |||
| 27 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |||
| 28 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| 29 | * | |||
| 30 | **************************************************************************/ | |||
| 31 | /* | |||
| 32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | |||
| 33 | */ | |||
| 34 | ||||
| 35 | #include <linux/dma-resv.h> | |||
| 36 | #include <linux/export.h> | |||
| 37 | #include <linux/sched/mm.h> | |||
| 38 | ||||
| 39 | /** | |||
| 40 | * DOC: Reservation Object Overview | |||
| 41 | * | |||
| 42 | * The reservation object provides a mechanism to manage shared and | |||
| 43 | * exclusive fences associated with a buffer. A reservation object | |||
| 44 | * can have one exclusive fence attached (normally associated with | |||
| 45 | * write operations) or N shared fences (read operations). The RCU | |||
| 46 | * mechanism is used to protect read access to fences from locked | |||
| 47 | * write-side updates. | |||
| 48 | */ | |||
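As a quick orientation for the API documented above, here is a minimal, hypothetical usage sketch (not part of this file): it assumes the caller already holds references to a valid `obj` and `fence`, and it elides the -EDEADLK backoff path that real callers must handle via dma_resv_lock_slow().

```c
/* Hypothetical sketch: publish one shared (read) fence on a buffer.
 * Assumes 'obj' and 'fence' are valid; -EDEADLK handling elided. */
struct ww_acquire_ctx ctx;
int err;

ww_acquire_init(&ctx, &reservation_ww_class);
err = dma_resv_lock(obj, &ctx);                /* take obj->lock */
if (!err) {
	err = dma_resv_reserve_shared(obj, 1);  /* reserve a slot first */
	if (!err)
		dma_resv_add_shared_fence(obj, fence);
	dma_resv_unlock(obj);
}
ww_acquire_fini(&ctx);
```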
| 49 | ||||
| 50 | DEFINE_WD_CLASS(reservation_ww_class); | |||
| 51 | EXPORT_SYMBOL(reservation_ww_class); | |||
| 52 | ||||
| 53 | struct lock_class_key reservation_seqcount_class; | |||
| 54 | EXPORT_SYMBOL(reservation_seqcount_class); | |||
| 55 | ||||
| 56 | const char reservation_seqcount_string[] = "reservation_seqcount"; | |||
| 57 | EXPORT_SYMBOL(reservation_seqcount_string); | |||
| 58 | ||||
| 59 | /** | |||
| 60 | * dma_resv_list_alloc - allocate fence list | |||
| 61 | * @shared_max: number of fences we need space for | |||
| 62 | * | |||
| 63 | * Allocate a new dma_resv_list and make sure to correctly initialize | |||
| 64 | * shared_max. | |||
| 65 | */ | |||
| 66 | static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) | |||
| 67 | { | |||
| 68 | struct dma_resv_list *list; | |||
| 69 | ||||
| 70 | list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); | |||
| 71 | if (!list) | |||
| 72 | return NULL; | |||
| 73 | ||||
| 74 | #ifdef __linux__ | |||
| 75 | list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) / | |||
| 76 | sizeof(*list->shared); | |||
| 77 | #else | |||
| 78 | list->shared_max = (offsetof(typeof(*list), shared[shared_max]) - | |||
| 79 | offsetof(typeof(*list), shared)) / sizeof(*list->shared); | |||
| 80 | #endif | |||
| 81 | ||||
| 82 | return list; | |||
| 83 | } | |||
| 84 | ||||
| 85 | /** | |||
| 86 | * dma_resv_list_free - free fence list | |||
| 87 | * @list: list to free | |||
| 88 | * | |||
| 89 | * Free a dma_resv_list and make sure to drop all references. | |||
| 90 | */ | |||
| 91 | static void dma_resv_list_free(struct dma_resv_list *list) | |||
| 92 | { | |||
| 93 | unsigned int i; | |||
| 94 | ||||
| 95 | if (!list) | |||
| 96 | return; | |||
| 97 | ||||
| 98 | for (i = 0; i < list->shared_count; ++i) | |||
| 99 | dma_fence_put(rcu_dereference_protected(list->shared[i], true)); | |||
| 100 | ||||
| 101 | kfree_rcu(list, rcu); | |||
| 102 | } | |||
| 103 | ||||
| 104 | #if IS_ENABLED(CONFIG_LOCKDEP) | |||
| 105 | static int __init dma_resv_lockdep(void) | |||
| 106 | { | |||
| 107 | struct mm_struct *mm = mm_alloc(); | |||
| 108 | struct ww_acquire_ctx ctx; | |||
| 109 | struct dma_resv obj; | |||
| 110 | int ret; | |||
| 111 | ||||
| 112 | if (!mm) | |||
| 113 | return -ENOMEM; | |||
| 114 | ||||
| 115 | dma_resv_init(&obj); | |||
| 116 | ||||
| 117 | down_read(&mm->mmap_sem); | |||
| 118 | ww_acquire_init(&ctx, &reservation_ww_class); | |||
| 119 | ret = dma_resv_lock(&obj, &ctx); | |||
| 120 | if (ret == -EDEADLK) | |||
| 121 | dma_resv_lock_slow(&obj, &ctx); | |||
| 122 | fs_reclaim_acquire(GFP_KERNEL); | |||
| 123 | fs_reclaim_release(GFP_KERNEL); | |||
| 124 | ww_mutex_unlock(&obj.lock); | |||
| 125 | ww_acquire_fini(&ctx); | |||
| 126 | up_read(&mm->mmap_sem); | |||
| 127 | ||||
| 128 | mmput(mm); | |||
| 129 | ||||
| 130 | return 0; | |||
| 131 | } | |||
| 132 | subsys_initcall(dma_resv_lockdep); | |||
| 133 | #endif | |||
| 134 | ||||
| 135 | /** | |||
| 136 | * dma_resv_init - initialize a reservation object | |||
| 137 | * @obj: the reservation object | |||
| 138 | */ | |||
| 139 | void dma_resv_init(struct dma_resv *obj) | |||
| 140 | { | |||
| 141 | ww_mutex_init(&obj->lock, &reservation_ww_class); | |||
| 142 | ||||
| 143 | __seqcount_init(&obj->seq, reservation_seqcount_string, | |||
| 144 | &reservation_seqcount_class); | |||
| 145 | RCU_INIT_POINTER(obj->fence, NULL); | |||
| 146 | RCU_INIT_POINTER(obj->fence_excl, NULL); | |||
| 147 | } | |||
| 148 | EXPORT_SYMBOL(dma_resv_init); | |||
| 149 | ||||
| 150 | /** | |||
| 151 | * dma_resv_fini - destroys a reservation object | |||
| 152 | * @obj: the reservation object | |||
| 153 | */ | |||
| 154 | void dma_resv_fini(struct dma_resv *obj) | |||
| 155 | { | |||
| 156 | struct dma_resv_list *fobj; | |||
| 157 | struct dma_fence *excl; | |||
| 158 | ||||
| 159 | /* | |||
| 160 | * This object should be dead and all references must have | |||
| 161 | * been released to it, so no need to be protected with rcu. | |||
| 162 | */ | |||
| 163 | excl = rcu_dereference_protected(obj->fence_excl, 1); | |||
| 164 | if (excl) | |||
| 165 | dma_fence_put(excl); | |||
| 166 | ||||
| 167 | fobj = rcu_dereference_protected(obj->fence, 1); | |||
| 168 | dma_resv_list_free(fobj); | |||
| 169 | ww_mutex_destroy(&obj->lock); | |||
| 170 | } | |||
| 171 | EXPORT_SYMBOL(dma_resv_fini); | |||
| 172 | ||||
| 173 | /** | |||
| 174 | * dma_resv_reserve_shared - Reserve space to add shared fences to | |||
| 175 | * a dma_resv. | |||
| 176 | * @obj: reservation object | |||
| 177 | * @num_fences: number of fences we want to add | |||
| 178 | * | |||
| 179 | * Should be called before dma_resv_add_shared_fence(). Must | |||
| 180 | * be called with obj->lock held. | |||
| 181 | * | |||
| 182 | * RETURNS | |||
| 183 | * Zero for success, or -errno | |||
| 184 | */ | |||
| 185 | int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) | |||
| 186 | { | |||
| 187 | struct dma_resv_list *old, *new; | |||
| 188 | unsigned int i, j, k, max; | |||
| 189 | ||||
| 190 | dma_resv_assert_held(obj); | |||
| 191 | ||||
| 192 | old = dma_resv_get_list(obj); | |||
| 193 | ||||
| 194 | if (old && old->shared_max) { | |||
| 195 | if ((old->shared_count + num_fences) <= old->shared_max) | |||
| 196 | return 0; | |||
| 197 | else | |||
| 198 | max = max(old->shared_count + num_fences, | |||
| 199 | old->shared_max * 2); | |||
| 200 | } else { | |||
| 201 | max = 4; | |||
| 202 | } | |||
| 203 | ||||
| 204 | new = dma_resv_list_alloc(max); | |||
| 205 | if (!new) | |||
| 206 | return -ENOMEM; | |||
| 207 | ||||
| 208 | /* | |||
| 209 | * no need to bump fence refcounts, rcu_read access | |||
| 210 | * requires the use of kref_get_unless_zero, and the | |||
| 211 | * references from the old struct are carried over to | |||
| 212 | * the new. | |||
| 213 | */ | |||
| 214 | for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) { | |||
| 215 | struct dma_fence *fence; | |||
| 216 | ||||
| 217 | fence = rcu_dereference_protected(old->shared[i], | |||
| 218 | dma_resv_held(obj)); | |||
| 219 | if (dma_fence_is_signaled(fence)) | |||
| 220 | RCU_INIT_POINTER(new->shared[--k], fence); | |||
| 221 | else | |||
| 222 | RCU_INIT_POINTER(new->shared[j++], fence); | |||
| 223 | } | |||
| 224 | new->shared_count = j; | |||
| 225 | ||||
| 226 | /* | |||
| 227 | * We are not changing the effective set of fences here so can | |||
| 228 | * merely update the pointer to the new array; both existing | |||
| 229 | * readers and new readers will see exactly the same set of | |||
| 230 | * active (unsignaled) shared fences. Individual fences and the | |||
| 231 | * old array are protected by RCU and so will not vanish under | |||
| 232 | * the gaze of the rcu_read_lock() readers. | |||
| 233 | */ | |||
| 234 | rcu_assign_pointer(obj->fence, new); | |||
| 235 | ||||
| 236 | if (!old) | |||
| 237 | return 0; | |||
| 238 | ||||
| 239 | /* Drop the references to the signaled fences */ | |||
| 240 | for (i = k; i < max; ++i) { | |||
| 241 | struct dma_fence *fence; | |||
| 242 | ||||
| 243 | fence = rcu_dereference_protected(new->shared[i], | |||
| 244 | dma_resv_held(obj)); | |||
| 245 | dma_fence_put(fence); | |||
| 246 | } | |||
| 247 | kfree_rcu(old, rcu); | |||
| 248 | ||||
| 249 | return 0; | |||
| 250 | } | |||
| 251 | EXPORT_SYMBOL(dma_resv_reserve_shared); | |||
| 252 | ||||
| 253 | /** | |||
| 254 | * dma_resv_add_shared_fence - Add a fence to a shared slot | |||
| 255 | * @obj: the reservation object | |||
| 256 | * @fence: the shared fence to add | |||
| 257 | * | |||
| 258 | * Add a fence to a shared slot, obj->lock must be held, and | |||
| 259 | * dma_resv_reserve_shared() has been called. | |||
| 260 | */ | |||
| 261 | void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) | |||
| 262 | { | |||
| 263 | struct dma_resv_list *fobj; | |||
| 264 | struct dma_fence *old; | |||
| 265 | unsigned int i, count; | |||
| 266 | ||||
| 267 | dma_fence_get(fence); | |||
| 268 | ||||
| 269 | dma_resv_assert_held(obj); | |||
| 270 | ||||
| 271 | fobj = dma_resv_get_list(obj); | |||
| 272 | count = fobj->shared_count; | |||
| 273 | ||||
| 274 | preempt_disable(); | |||
| 275 | write_seqcount_begin(&obj->seq); | |||
| 276 | ||||
| 277 | for (i = 0; i < count; ++i) { | |||
| 278 | ||||
| 279 | old = rcu_dereference_protected(fobj->shared[i], | |||
| 280 | dma_resv_held(obj)); | |||
| 281 | if (old->context == fence->context || | |||
| 282 | dma_fence_is_signaled(old)) | |||
| 283 | goto replace; | |||
| 284 | } | |||
| 285 | ||||
| 286 | BUG_ON(fobj->shared_count >= fobj->shared_max); | |||
| 287 | old = NULL; | |||
| 288 | count++; | |||
| 289 | ||||
| 290 | replace: | |||
| 291 | RCU_INIT_POINTER(fobj->shared[i], fence); | |||
| 292 | /* pointer update must be visible before we extend the shared_count */ | |||
| 293 | smp_store_mb(fobj->shared_count, count); | |||
| 294 | ||||
| 295 | write_seqcount_end(&obj->seq); | |||
| 296 | preempt_enable(); | |||
| 297 | dma_fence_put(old); | |||
| 298 | } | |||
| 299 | EXPORT_SYMBOL(dma_resv_add_shared_fence); | |||
| 300 | ||||
| 301 | /** | |||
| 302 | * dma_resv_add_excl_fence - Add an exclusive fence. | |||
| 303 | * @obj: the reservation object | |||
| 304 | * @fence: the shared fence to add | |||
| 305 | * | |||
| 306 | * Add a fence to the exclusive slot. The obj->lock must be held. | |||
| 307 | */ | |||
| 308 | void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) | |||
| 309 | { | |||
| 310 | struct dma_fence *old_fence = dma_resv_get_excl(obj); | |||
| 311 | struct dma_resv_list *old; | |||
| 312 | u32 i = 0; | |||
| 313 | ||||
| 314 | dma_resv_assert_held(obj); | |||
| 315 | ||||
| 316 | old = dma_resv_get_list(obj); | |||
| 317 | if (old) | |||
| 318 | i = old->shared_count; | |||
| 319 | ||||
| 320 | if (fence) | |||
| 321 | dma_fence_get(fence); | |||
| 322 | ||||
| 323 | preempt_disable(); | |||
| 324 | write_seqcount_begin(&obj->seq); | |||
| 325 | /* write_seqcount_begin provides the necessary memory barrier */ | |||
| 326 | RCU_INIT_POINTER(obj->fence_excl, fence); | |||
| 327 | if (old) | |||
| 328 | old->shared_count = 0; | |||
| 329 | write_seqcount_end(&obj->seq); | |||
| 330 | preempt_enable(); | |||
| 331 | ||||
| 332 | /* inplace update, no shared fences */ | |||
| 333 | while (i--) | |||
| 334 | dma_fence_put(rcu_dereference_protected(old->shared[i], | |||
| 335 | dma_resv_held(obj))); | |||
| 336 | ||||
| 337 | dma_fence_put(old_fence); | |||
| 338 | } | |||
| 339 | EXPORT_SYMBOL(dma_resv_add_excl_fence); | |||
| 340 | ||||
| 341 | /** | |||
| 342 | * dma_resv_copy_fences - Copy all fences from src to dst. | |||
| 343 | * @dst: the destination reservation object | |||
| 344 | * @src: the source reservation object | |||
| 345 | * | |||
| 346 | * Copy all fences from src to dst. The dst lock must be held. | |||
| 347 | */ | |||
| 348 | int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) | |||
| 349 | { | |||
| 350 | struct dma_resv_list *src_list, *dst_list; | |||
| 351 | struct dma_fence *old, *new; | |||
| 352 | unsigned i; | |||
| 353 | ||||
| 354 | dma_resv_assert_held(dst); | |||
| 355 | ||||
| 356 | rcu_read_lock(); | |||
| 357 | src_list = rcu_dereference(src->fence); | |||
| 358 | ||||
| 359 | retry: | |||
| 360 | if (src_list) { | |||
| 361 | unsigned shared_count = src_list->shared_count; | |||
| 362 | ||||
| 363 | rcu_read_unlock(); | |||
| 364 | ||||
| 365 | dst_list = dma_resv_list_alloc(shared_count); | |||
| 366 | if (!dst_list) | |||
| 367 | return -ENOMEM; | |||
| 368 | ||||
| 369 | rcu_read_lock(); | |||
| 370 | src_list = rcu_dereference(src->fence); | |||
| 371 | if (!src_list || src_list->shared_count > shared_count) { | |||
| 372 | kfree(dst_list); | |||
| 373 | goto retry; | |||
| 374 | } | |||
| 375 | ||||
| 376 | dst_list->shared_count = 0; | |||
| 377 | for (i = 0; i < src_list->shared_count; ++i) { | |||
| 378 | struct dma_fence *fence; | |||
| 379 | ||||
| 380 | fence = rcu_dereference(src_list->shared[i]); | |||
| 381 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, | |||
| 382 | &fence->flags)) | |||
| 383 | continue; | |||
| 384 | ||||
| 385 | if (!dma_fence_get_rcu(fence)) { | |||
| 386 | dma_resv_list_free(dst_list); | |||
| 387 | src_list = rcu_dereference(src->fence); | |||
| 388 | goto retry; | |||
| 389 | } | |||
| 390 | ||||
| 391 | if (dma_fence_is_signaled(fence)) { | |||
| 392 | dma_fence_put(fence); | |||
| 393 | continue; | |||
| 394 | } | |||
| 395 | ||||
| 396 | rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence); | |||
| 397 | } | |||
| 398 | } else { | |||
| 399 | dst_list = NULL; | |||
| 400 | } | |||
| 401 | ||||
| 402 | new = dma_fence_get_rcu_safe(&src->fence_excl); | |||
| 403 | rcu_read_unlock(); | |||
| 404 | ||||
| 405 | src_list = dma_resv_get_list(dst); | |||
| 406 | old = dma_resv_get_excl(dst); | |||
| 407 | ||||
| 408 | preempt_disable(); | |||
| 409 | write_seqcount_begin(&dst->seq); | |||
| 410 | /* write_seqcount_begin provides the necessary memory barrier */ | |||
| 411 | RCU_INIT_POINTER(dst->fence_excl, new); | |||
| 412 | RCU_INIT_POINTER(dst->fence, dst_list); | |||
| 413 | write_seqcount_end(&dst->seq); | |||
| 414 | preempt_enable(); | |||
| 415 | ||||
| 416 | dma_resv_list_free(src_list); | |||
| 417 | dma_fence_put(old); | |||
| 418 | ||||
| 419 | return 0; | |||
| 420 | } | |||
| 421 | EXPORT_SYMBOL(dma_resv_copy_fences); | |||
| 422 | ||||
| 423 | /** | |||
| 424 | * dma_resv_get_fences_rcu - Get an object's shared and exclusive | |||
| 425 | * fences without update side lock held | |||
| 426 | * @obj: the reservation object | |||
| 427 | * @pfence_excl: the returned exclusive fence (or NULL) | |||
| 428 | * @pshared_count: the number of shared fences returned | |||
| 429 | * @pshared: the array of shared fence ptrs returned (array is krealloc'd to | |||
| 430 | * the required size, and must be freed by caller) | |||
| 431 | * | |||
| 432 | * Retrieve all fences from the reservation object. If the pointer for the | |||
| 433 | * exclusive fence is not specified the fence is put into the array of the | |||
| 434 | * shared fences as well. Returns either zero or -ENOMEM. | |||
| 435 | */ | |||
| 436 | int dma_resv_get_fences_rcu(struct dma_resv *obj, | |||
| 437 | struct dma_fence **pfence_excl, | |||
| 438 | unsigned *pshared_count, | |||
| 439 | struct dma_fence ***pshared) | |||
| 440 | { | |||
| 441 | struct dma_fence **shared = NULL; | |||
| 442 | struct dma_fence *fence_excl; | |||
| 443 | unsigned int shared_count; | |||
| 444 | int ret = 1; | |||
| 445 | ||||
| 446 | do { | |||
| 447 | struct dma_resv_list *fobj; | |||
| 448 | unsigned int i, seq; | |||
| 449 | size_t sz = 0; | |||
| 450 | ||||
| 451 | shared_count = i = 0; | |||
| 452 | ||||
| 453 | rcu_read_lock(); | |||
| 454 | seq = read_seqcount_begin(&obj->seq); | |||
| 455 | ||||
| 456 | fence_excl = rcu_dereference(obj->fence_excl); | |||
| 457 | if (fence_excl && !dma_fence_get_rcu(fence_excl)) | |||
| 458 | goto unlock; | |||
| 459 | ||||
| 460 | fobj = rcu_dereference(obj->fence); | |||
| 461 | if (fobj) | |||
| 462 | sz += sizeof(*shared) * fobj->shared_max; | |||
| 463 | ||||
| 464 | if (!pfence_excl && fence_excl) | |||
| 465 | sz += sizeof(*shared); | |||
| 466 | ||||
| 467 | if (sz) { | |||
| 468 | struct dma_fence **nshared; | |||
| 469 | ||||
| 470 | #ifdef __linux__ | |||
| 471 | nshared = krealloc(shared, sz, | |||
| 472 | GFP_NOWAIT | __GFP_NOWARN); | |||
| 473 | #else | |||
| 474 | nshared = kmalloc(sz, GFP_NOWAIT | __GFP_NOWARN); | |||
| 475 | if (nshared != NULL && shared != NULL) | |||
| 476 | memcpy(nshared, shared, sz); | |||
| 477 | if (nshared) { | |||
| 478 | kfree(shared); | |||
| 479 | shared = NULL; | |||
| 480 | } | |||
| 481 | #endif | |||
| 482 | if (!nshared) { | |||
| 483 | rcu_read_unlock(); | |||
| 484 | ||||
| 485 | dma_fence_put(fence_excl); | |||
| 486 | fence_excl = NULL; | |||
| 487 | ||||
| 488 | #ifdef __linux__ | |||
| 489 | nshared = krealloc(shared, sz, GFP_KERNEL); | |||
| 490 | #else | |||
| 491 | nshared = kmalloc(sz, GFP_KERNEL); | |||
| 492 | if (nshared != NULL && shared != NULL) | |||
| 493 | memcpy(nshared, shared, sz); | |||
| 494 | kfree(shared); | |||
| 495 | shared = NULL; | |||
| 496 | #endif | |||
| 497 | if (nshared) { | |||
| 498 | shared = nshared; | |||
| 499 | continue; | |||
| 500 | } | |||
| 501 | ||||
| 502 | ret = -ENOMEM; | |||
| 503 | break; | |||
| 504 | } | |||
| 505 | shared = nshared; | |||
| 506 | shared_count = fobj ? fobj->shared_count : 0; | |||
| 507 | for (i = 0; i < shared_count; ++i) { | |||
| 508 | shared[i] = rcu_dereference(fobj->shared[i]); | |||
| 509 | if (!dma_fence_get_rcu(shared[i])) | |||
| 510 | break; | |||
| 511 | } | |||
| 512 | } | |||
| 513 | ||||
| 514 | if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { | |||
| 515 | while (i--) | |||
| 516 | dma_fence_put(shared[i]); | |||
| 517 | dma_fence_put(fence_excl); | |||
| 518 | goto unlock; | |||
| 519 | } | |||
| 520 | ||||
| 521 | ret = 0; | |||
| 522 | unlock: | |||
| 523 | rcu_read_unlock(); | |||
| 524 | } while (ret); | |||
| 525 | ||||
| 526 | if (pfence_excl) | |||
| 527 | *pfence_excl = fence_excl; | |||
| 528 | else if (fence_excl) | |||
| 529 | shared[shared_count++] = fence_excl; | |||
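| | ^ column 26: the array access reported in the Warning header; see the note under the warning above for why 'shared' appears to be non-NULL on every path that reaches this line. | |||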
| 530 | ||||
| 531 | if (!shared_count) { | |||
| 532 | kfree(shared); | |||
| 533 | shared = NULL; | |||
| 534 | } | |||
| 535 | ||||
| 536 | *pshared_count = shared_count; | |||
| 537 | *pshared = shared; | |||
| 538 | return ret; | |||
| 539 | } | |||
| 540 | EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); | |||
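A hypothetical caller of dma_resv_get_fences_rcu(), shown only to illustrate the ownership contract documented above (the caller receives a reference on every returned fence and owns the kmalloc'd array):

```c
/* Hypothetical sketch: snapshot and release all fences of 'obj'.
 * dma_fence_put() tolerates NULL, so excl needs no separate check. */
struct dma_fence *excl, **shared;
unsigned int count, i;
int err;

err = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
if (!err) {
	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);   /* drop snapshot references */
	kfree(shared);                      /* array belongs to the caller */
	dma_fence_put(excl);                /* may be NULL */
}
```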
| 541 | ||||
| 542 | /** | |||
| 543 | * dma_resv_wait_timeout_rcu - Wait on reservation's objects | |||
| 544 | * shared and/or exclusive fences. | |||
| 545 | * @obj: the reservation object | |||
| 546 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence | |||
| 547 | * @intr: if true, do interruptible wait | |||
| 548 | * @timeout: timeout value in jiffies or zero to return immediately | |||
| 549 | * | |||
| 550 | * RETURNS | |||
| 551 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | |||
| 552 | * greater than zero on success. | |||
| 553 | */ | |||
| 554 | long dma_resv_wait_timeout_rcu(struct dma_resv *obj, | |||
| 555 | bool wait_all, bool intr, | |||
| 556 | unsigned long timeout) | |||
| 557 | { | |||
| 558 | struct dma_fence *fence; | |||
| 559 | unsigned seq, shared_count; | |||
| 560 | long ret = timeout ? timeout : 1; | |||
| 561 | int i; | |||
| 562 | ||||
| 563 | retry: | |||
| 564 | shared_count = 0; | |||
| 565 | seq = read_seqcount_begin(&obj->seq); | |||
| 566 | rcu_read_lock(); | |||
| 567 | i = -1; | |||
| 568 | ||||
| 569 | fence = rcu_dereference(obj->fence_excl); | |||
| 570 | if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | |||
| 571 | if (!dma_fence_get_rcu(fence)) | |||
| 572 | goto unlock_retry; | |||
| 573 | ||||
| 574 | if (dma_fence_is_signaled(fence)) { | |||
| 575 | dma_fence_put(fence); | |||
| 576 | fence = NULL; | |||
| 577 | } | |||
| 578 | ||||
| 579 | } else { | |||
| 580 | fence = NULL; | |||
| 581 | } | |||
| 582 | ||||
| 583 | if (wait_all) { | |||
| 584 | struct dma_resv_list *fobj = rcu_dereference(obj->fence); | |||
| 585 | ||||
| 586 | if (fobj) | |||
| 587 | shared_count = fobj->shared_count; | |||
| 588 | ||||
| 589 | for (i = 0; !fence && i < shared_count; ++i) { | |||
| 590 | struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); | |||
| 591 | ||||
| 592 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, | |||
| 593 | &lfence->flags)) | |||
| 594 | continue; | |||
| 595 | ||||
| 596 | if (!dma_fence_get_rcu(lfence)) | |||
| 597 | goto unlock_retry; | |||
| 598 | ||||
| 599 | if (dma_fence_is_signaled(lfence)) { | |||
| 600 | dma_fence_put(lfence); | |||
| 601 | continue; | |||
| 602 | } | |||
| 603 | ||||
| 604 | fence = lfence; | |||
| 605 | break; | |||
| 606 | } | |||
| 607 | } | |||
| 608 | ||||
| 609 | rcu_read_unlock(); | |||
| 610 | if (fence) { | |||
| 611 | if (read_seqcount_retry(&obj->seq, seq)) { | |||
| 612 | dma_fence_put(fence); | |||
| 613 | goto retry; | |||
| 614 | } | |||
| 615 | ||||
| 616 | ret = dma_fence_wait_timeout(fence, intr, ret); | |||
| 617 | dma_fence_put(fence); | |||
| 618 | if (ret > 0 && wait_all && (i + 1 < shared_count)) | |||
| 619 | goto retry; | |||
| 620 | } | |||
| 621 | return ret; | |||
| 622 | ||||
| 623 | unlock_retry: | |||
| 624 | rcu_read_unlock(); | |||
| 625 | goto retry; | |||
| 626 | } | |||
| 627 | EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); | |||
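A hypothetical caller illustrating the return-value convention documented above (the one-second budget and the -ETIMEDOUT mapping are this sketch's own choices, not part of the API):

```c
/* Hypothetical sketch: interruptible wait for all fences on 'obj'. */
long left = dma_resv_wait_timeout_rcu(obj, true, true,
				      msecs_to_jiffies(1000));
if (left < 0)
	return left;            /* -ERESTARTSYS: interrupted by a signal */
if (left == 0)
	return -ETIMEDOUT;      /* 0 means the wait timed out */
/* left > 0: all fences signaled; 'left' jiffies of budget remain */
```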
| 628 | ||||
| 629 | ||||
| 630 | static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) | |||
| 631 | { | |||
| 632 | struct dma_fence *fence, *lfence = passed_fence; | |||
| 633 | int ret = 1; | |||
| 634 | ||||
| 635 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | |||
| 636 | fence = dma_fence_get_rcu(lfence); | |||
| 637 | if (!fence) | |||
| 638 | return -1; | |||
| 639 | ||||
| 640 | ret = !!dma_fence_is_signaled(fence); | |||
| 641 | dma_fence_put(fence); | |||
| 642 | } | |||
| 643 | return ret; | |||
| 644 | } | |||
| 645 | ||||
| 646 | /** | |||
| 647 | * dma_resv_test_signaled_rcu - Test if a reservation object's | |||
| 648 | * fences have been signaled. | |||
| 649 | * @obj: the reservation object | |||
| 650 | * @test_all: if true, test all fences, otherwise only test the exclusive | |||
| 651 | * fence | |||
| 652 | * | |||
| 653 | * RETURNS | |||
| 654 | * true if all fences signaled, else false | |||
| 655 | */ | |||
| 656 | bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) | |||
| 657 | { | |||
| 658 | unsigned seq, shared_count; | |||
| 659 | int ret; | |||
| 660 | ||||
| 661 | rcu_read_lock(); | |||
| 662 | retry: | |||
| 663 | ret = true; | |||
| 664 | shared_count = 0; | |||
| 665 | seq = read_seqcount_begin(&obj->seq); | |||
| 666 | ||||
| 667 | if (test_all) { | |||
| 668 | unsigned i; | |||
| 669 | ||||
| 670 | struct dma_resv_list *fobj = rcu_dereference(obj->fence); | |||
| 671 | ||||
| 672 | if (fobj) | |||
| 673 | shared_count = fobj->shared_count; | |||
| 674 | ||||
| 675 | for (i = 0; i < shared_count; ++i) { | |||
| 676 | struct dma_fence *fence = rcu_dereference(fobj->shared[i]); | |||
| 677 | ||||
| 678 | ret = dma_resv_test_signaled_single(fence); | |||
| 679 | if (ret < 0) | |||
| 680 | goto retry; | |||
| 681 | else if (!ret) | |||
| 682 | break; | |||
| 683 | } | |||
| 684 | ||||
| 685 | if (read_seqcount_retry(&obj->seq, seq)) | |||
| 686 | goto retry; | |||
| 687 | } | |||
| 688 | ||||
| 689 | if (!shared_count) { | |||
| 690 | struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); | |||
| 691 | ||||
| 692 | if (fence_excl) { | |||
| 693 | ret = dma_resv_test_signaled_single(fence_excl); | |||
| 694 | if (ret < 0) | |||
| 695 | goto retry; | |||
| 696 | ||||
| 697 | if (read_seqcount_retry(&obj->seq, seq)) | |||
| 698 | goto retry; | |||
| 699 | } | |||
| 700 | } | |||
| 701 | ||||
| 702 | rcu_read_unlock(); | |||
| 703 | return ret; | |||
| 704 | } | |||
| 705 | EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); |
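Finally, a hypothetical polling use of dma_resv_test_signaled_rcu(), which never blocks:

```c
/* Hypothetical sketch: non-blocking check for pending work on 'obj'. */
bool busy = !dma_resv_test_signaled_rcu(obj, true /* test_all */);
```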