/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

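/*
 * Move a buffer between placements that the TTM itself can satisfy
 * (system <-> TT): release the old node, update the caching attributes
 * and, for a non-system target, bind the TTM to the new memory region.
 * No data copy is needed since the backing pages do not move.
 */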
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

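/*
 * ttm_mem_io_lock/unlock serialize io_mem_reserve/io_mem_free against
 * eviction of io reservations. Memory types that never run out of io
 * space set io_reserve_fastpath and skip the mutex entirely.
 */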
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}

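/*
 * Evict the least recently used io reservation: unmap its CPU mappings
 * so the underlying io space can be handed to another buffer object.
 */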
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        /* io_mem_reserve was checked above; only refcount the slow path. */
        if (mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}

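/*
 * The _vm variants track io reservations made on behalf of CPU mappings
 * (mem->bus.io_reserved_vm) and keep the buffer object on the manager's
 * io_reserve_lru so the reservation can later be evicted again.
 */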
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

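/*
 * Map a whole memory region for kernel CPU access. System memory returns
 * *virtual == NULL with a 0 return; io memory is either pre-mapped by the
 * driver (bus.addr) or ioremapped here, with the caching mode derived
 * from the placement flags.
 */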
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

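/*
 * Per-page copy helpers for the memcpy fallback below. ttm_copy_io_page
 * copies io -> io; the two ttm variants copy between io memory and a TTM
 * page mapped with a page protection matching the caching attributes.
 */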
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

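/*
 * Fallback move path: map both the old and the new memory region and
 * copy the contents page by page with the CPU. Used when no accelerated
 * (GPU) copy is available for the combination of placements.
 */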
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /* TTM-to-TTM move: the same pages back both placements. */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        /* No source data present; nothing to copy. */
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        /* ttm may be NULL for io-to-io moves; only populate when present. */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        /*
         * Copy backwards if the regions have the same type and overlap,
         * so source pages are read before they are overwritten.
         */
        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /* On error the bo still owns the old node, so don't release it. */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, -ENOMEM if the ghost object could not be allocated.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        *new_obj = fbo;
        return 0;
}

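/*
 * Translate TTM caching flags into a page protection value for CPU
 * mappings. How write-combined and uncached mappings are expressed is
 * architecture specific, hence the ifdef ladder below.
 */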
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

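/*
 * Map part of a buffer object into kernel address space, choosing the
 * cheapest mapping type that satisfies the request (premapped, ioremap,
 * kmap or vmap). Pair every successful call with ttm_bo_kunmap().
 */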
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

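/*
 * Example (sketch, driver-side): temporarily map the first page of a
 * buffer object and fill it from the CPU. The bo is assumed to be
 * reserved by the caller; "data" and "len" are hypothetical.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *
 *	if (!ret) {
 *		void *va = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		if (is_iomem)
 *			memcpy_toio((void __iomem *)va, data, len);
 *		else
 *			memcpy(va, data, len);
 *		ttm_bo_kunmap(&map);
 *	}
 */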
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

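/*
 * Finish an accelerated (GPU) move that has been fenced with @sync_obj.
 * For evictions the bo is simply waited on; for ordinary moves the old
 * placement is handed to a "ghost" buffer object so it can be released
 * asynchronously once the copy fence signals, keeping the pipeline busy.
 */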
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

                /* ttm_buffer_object_transfer accesses bo->sync_obj */
                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                if (ret)
                        return ret;

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);