/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an rbtree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
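
/*
 * Example usage (an illustrative sketch, not part of the original file): a
 * caller that must allocate under a spinlock is expected to top up the node
 * cache beforehand, from sleepable context, via drm_mm_pre_get(); the
 * atomic == 1 path of drm_mm_kmalloc() can then fall back to the
 * unused_nodes cache instead of GFP_KERNEL. The wrapper name, the
 * caller-supplied lock and the caller-found hole are hypothetical.
 */
static __maybe_unused struct drm_mm_node *
example_get_block_atomic(struct drm_mm *mm, struct drm_mm_node *hole,
                         spinlock_t *lock, unsigned long size)
{
        struct drm_mm_node *node;

        /* Refill the per-manager node cache while we may still sleep. */
        if (drm_mm_pre_get(mm))
                return NULL;

        /* Under the lock only GFP_ATOMIC or cached nodes are used. */
        spin_lock(lock);
        node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
        spin_unlock(lock);

        return node;
}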

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (adj_start < start)
                adj_start = start;

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
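
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * range-restricted insertion keeps an allocation inside an addressable
 * window, e.g. the part of an aperture a hardware unit can reach. The
 * page-aligned 256 MiB window below is an arbitrary, hypothetical limit.
 */
static __maybe_unused int
example_insert_mappable(struct drm_mm *mm, struct drm_mm_node *node,
                        unsigned long size)
{
        /* node must be cleared, exactly as for drm_mm_insert_node(). */
        return drm_mm_insert_node_in_range(mm, node, size, PAGE_SIZE,
                                           0, 256 * 1024 * 1024);
}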

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
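
/*
 * Example usage (an illustrative sketch, not part of the original file): the
 * preferred pattern embeds struct drm_mm_node in a driver object so that no
 * separate node allocation is needed; drm_mm_remove_node() then returns the
 * range without freeing anything. "struct example_object" is hypothetical.
 */
struct example_object {
        struct drm_mm_node node;
        /* ...driver-specific state... */
};

static __maybe_unused int
example_bind(struct drm_mm *mm, struct example_object *obj,
             unsigned long size, unsigned alignment)
{
        /* The embedded node must be cleared before insertion. */
        memset(&obj->node, 0, sizeof(obj->node));
        return drm_mm_insert_node(mm, &obj->node, size, alignment);
}

static __maybe_unused void
example_unbind(struct example_object *obj)
{
        /* The range becomes a hole again; obj itself stays allocated. */
        drm_mm_remove_node(&obj->node);
}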

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
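
/*
 * Example usage (an illustrative sketch, not part of the original file): the
 * older two-step API first searches for a suitable hole and then carves a
 * block out of it with drm_mm_get_block_generic(); the result is handed back
 * with drm_mm_put_block(). The wrapper name is hypothetical.
 */
static __maybe_unused struct drm_mm_node *
example_alloc_block(struct drm_mm *mm, unsigned long size,
                    unsigned alignment)
{
        struct drm_mm_node *hole;

        /* best_match trades a full list walk for less fragmentation. */
        hole = drm_mm_search_free_generic(mm, size, alignment, 0, true);
        if (!hole)
                return NULL;

        return drm_mm_get_block_generic(hole, size, alignment, 0, 0);
}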

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
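
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * transferring a live allocation when the containing object is reallocated,
 * reusing the hypothetical example_object from above.
 */
static __maybe_unused void
example_transfer(struct example_object *old_obj,
                 struct example_object *new_obj)
{
        /* new_obj->node takes over start/size/color; old_obj->node frees up. */
        drm_mm_replace_node(&old_obj->node, &new_obj->node);
}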

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);

        adj_start = hole_start;
        adj_end = hole_end;

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                /* Store the size of the complete hole: the containment check
                 * in drm_mm_scan_remove_block() expects start + size. */
                mm->scan_hit_size = hole_end - hole_start;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order in
 * which they have been added (the roll-back relies on the saved list
 * pointers), otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size for the
         * complete resulting free block (not just the desired part) is
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                        <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
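
/*
 * Example eviction loop (an illustrative sketch, not part of the original
 * file), roughly the pattern i915 uses: feed LRU candidates to the scanner
 * until a hole is found, then roll everything back in reverse order and
 * evict only the blocks the scanner marked. The buffer struct, its list
 * members and the lru argument are hypothetical; the LIFO roll-back and the
 * rule that nothing else may touch the manager while the scan list is
 * non-empty come from the API above.
 */
struct example_buffer {
        struct drm_mm_node node;
        struct list_head lru;           /* driver-wide eviction order */
        struct list_head scan_link;     /* local unwind/evict bookkeeping */
};

static __maybe_unused int
example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
                       unsigned long size, unsigned alignment)
{
        struct example_buffer *buf, *next;
        LIST_HEAD(unwind);
        LIST_HEAD(evict);
        bool found = false;

        drm_mm_init_scan(mm, size, alignment, 0);

        /* Feed candidates, least recently used first, until a hole appears. */
        list_for_each_entry(buf, lru, lru) {
                /* Prepending makes a forward walk of "unwind" LIFO. */
                list_add(&buf->scan_link, &unwind);
                if (drm_mm_scan_add_block(&buf->node)) {
                        found = true;
                        break;
                }
        }

        /* Roll back in exactly the reverse order of addition. */
        list_for_each_entry_safe(buf, next, &unwind, scan_link) {
                if (drm_mm_scan_remove_block(&buf->node))
                        list_move(&buf->scan_link, &evict);
                else
                        list_del_init(&buf->scan_link);
        }

        /* Only now, with the scan list empty, may nodes be removed. */
        list_for_each_entry_safe(buf, next, &evict, scan_link) {
                drm_mm_remove_node(&buf->node);
                list_del_init(&buf->scan_link);
                /* ...driver-specific unbind/free of buf would go here... */
        }

        return found ? 0 : -ENOSPC;
}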

int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);
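
/*
 * Example color_adjust callback (an illustrative sketch, not part of the
 * original file): colors let a driver keep incompatible neighbours apart by
 * shrinking the usable part of a hole, e.g. i915 keeps a guard page between
 * GTT ranges with different cacheability. The one-page guard policy below is
 * hypothetical; a driver would install it after drm_mm_init() with
 * "mm->color_adjust = example_color_adjust;".
 */
static __maybe_unused void
example_color_adjust(struct drm_mm_node *node, unsigned long color,
                     unsigned long *start, unsigned long *end)
{
        struct drm_mm_node *next =
                list_entry(node->node_list.next, struct drm_mm_node,
                           node_list);

        /* The hole follows "node": pad after it on a color mismatch. */
        if (node->allocated && node->color != color)
                *start += PAGE_SIZE;

        /* And pad before the next allocated node on a color mismatch. */
        if (next->allocated && next->color != color)
                *end -= PAGE_SIZE;
}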

void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                        hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif