/*
 *  scst_mem.c
 *
 *  Copyright (C) 2006 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2007 Krzysztof Blaszkowski <kb@sysmikro.com.pl>
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#define PURGE_INTERVAL          (60 * HZ)
#define PURGE_TIME_AFTER        PURGE_INTERVAL
#define SHRINK_TIME_AFTER       (1 * HZ)

static struct scst_sgv_pools_manager sgv_pools_mgr;

void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
{
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm;
}

void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use clustering");
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm_clust;
}

void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use ISA DMA memory");
        tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
        tgt_dev->pool = &sgv_pools_mgr.default_set.dma;
}

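/*
 * Try to merge SG entry sg[cur] with an already filled entry whose pages
 * are physically contiguous with it, either at its head or at its tail.
 * 'hint' is the index of the entry the previous merge happened into and
 * is checked first. Returns the index of the entry merged into, or -1 if
 * no merge was possible.
 */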
static int scst_check_clustering(struct scatterlist *sg, int cur, int hint)
{
        int res = -1;
        int i = hint;
        unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
        int len_cur = sg[cur].length;
        unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
        int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
        unsigned long pfn, pfn_next, full_page;

#if 0
        TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
                pfn_cur, pfn_cur_next, len_cur, full_page_cur);
#endif

        /* Check the hint first */
        if (i >= 0) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

        /* ToDo: implement more intelligent search */
        for (i = cur - 1; i >= 0; i--) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

out:
        return res;

out_tail:
        TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;

out_head:
        TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
        sg_assign_page(&sg[i], sg_page(&sg[cur]));
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;
}

static void scst_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        int i;

        TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);

        for (i = 0; i < sg_count; i++) {
                struct page *p = sg_page(&sg[i]);
                int len = sg[i].length;
                int pages =
                        (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);

                TRACE_MEM("page %lx, len %d, pages %d",
                        (unsigned long)p, len, pages);

                while (pages > 0) {
                        int order = 0;

/*
 * __free_pages() doesn't like freeing pages with an order different from
 * the one they were allocated with, so this small optimization is disabled.
 */
#if 0
                        if (len > 0) {
                                while (((1 << order) << PAGE_SHIFT) < len)
                                        order++;
                                len = 0;
                        }
#endif
                        TRACE_MEM("free_pages(): order %d, page %lx",
                                order, (unsigned long)p);

                        __free_pages(p, order);

                        pages -= 1 << order;
                        p += 1 << order;
                }
        }
}

static struct page *scst_alloc_sys_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct page *page = alloc_pages(gfp_mask, 0);

        sg_set_page(sg, page, PAGE_SIZE, 0);
        TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
        if (page == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
                        "sg page failed");
        }
        return page;
}

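/*
 * Allocate 'pages' single pages into the SG vector 'sg', optionally merging
 * physically contiguous pages into single SG entries when 'clustered' is
 * set. If 'trans_tbl' is supplied, it is filled so that each page index can
 * later be translated back to its SG entry. Returns the resulting number of
 * SG entries, or 0 on allocation failure.
 */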
static int scst_alloc_sg_entries(struct scatterlist *sg, int pages,
        gfp_t gfp_mask, int clustered, struct trans_tbl_ent *trans_tbl,
        const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
{
        int sg_count = 0;
        int pg, i, j;
        int merged = -1;

        TRACE_MEM("pages=%d, clustered=%d", pages, clustered);

#if 0
        gfp_mask |= __GFP_COLD;
#endif
#ifdef CONFIG_SCST_STRICT_SECURITY
        gfp_mask |= __GFP_ZERO;
#endif

        for (pg = 0; pg < pages; pg++) {
                void *rc;
#ifdef CONFIG_SCST_DEBUG_OOM
                if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
                    ((scst_random() % 10000) == 55))
                        rc = NULL;
                else
#endif
                        rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
                                priv);
                if (rc == NULL)
                        goto out_no_mem;
                if (clustered) {
                        merged = scst_check_clustering(sg, sg_count, merged);
                        if (merged == -1)
                                sg_count++;
                } else
                        sg_count++;
                TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
                        sg_count);
        }

        if (clustered && (trans_tbl != NULL)) {
                pg = 0;
                for (i = 0; i < pages; i++) {
                        int n = (sg[i].length >> PAGE_SHIFT) +
                                ((sg[i].length & ~PAGE_MASK) != 0);
                        trans_tbl[i].pg_count = pg;
                        for (j = 0; j < n; j++)
                                trans_tbl[pg++].sg_num = i+1;
                        TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
                                trans_tbl[i].pg_count);
                }
        }

out:
        TRACE_MEM("sg_count=%d", sg_count);
        return sg_count;

out_no_mem:
        alloc_fns->free_pages_fn(sg, sg_count, priv);
        sg_count = 0;
        goto out;
}

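/*
 * Allocate the SG vector for 'obj' and, for clustered pools, its page to
 * SG entry translation table. For orders up to sgv_max_trans_order the
 * translation table fits into the object's embedded data area; above that
 * it has to be allocated separately.
 */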
static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
        int pages_to_alloc, int order, gfp_t gfp_mask)
{
        int sz, tsz = 0;
        int res = 0;

        TRACE_ENTRY();

        sz = pages_to_alloc * sizeof(obj->sg_entries[0]);

        obj->sg_entries = kmalloc(sz, gfp_mask);
        if (unlikely(obj->sg_entries == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
                        "SG vector failed (size %d)", sz);
                res = -ENOMEM;
                goto out;
        }

        sg_init_table(obj->sg_entries, pages_to_alloc);

        if (obj->owner_pool->clustered) {
                if (order <= sgv_pools_mgr.sgv_max_trans_order) {
                        obj->trans_tbl = (struct trans_tbl_ent *)obj->sg_entries_data;
                        /*
                         * No need to clear trans_tbl, if needed, it will be
                         * fully rewritten in scst_alloc_sg_entries()
                         */
                } else {
                        tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
                        obj->trans_tbl = kzalloc(tsz, gfp_mask);
                        if (unlikely(obj->trans_tbl == NULL)) {
                                TRACE(TRACE_OUT_OF_MEM, "Allocation of trans_tbl "
                                        "failed (size %d)", tsz);
                                res = -ENOMEM;
                                goto out_free;
                        }
                }
        }

        TRACE_MEM("pages_to_alloc %d, order %d, sz %d, tsz %d, obj %p, "
                "sg_entries %p, trans_tbl %p", pages_to_alloc, order,
                sz, tsz, obj, obj->sg_entries, obj->trans_tbl);

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(obj->sg_entries);
        obj->sg_entries = NULL;
        goto out;
}

static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
{
        if (obj->sg_count != 0) {
                obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
                        obj->sg_count, obj->allocator_priv);
        }
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
        }

        kmem_cache_free(obj->owner_pool->caches[obj->order_or_pages], obj);
        return;
}

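/*
 * Get a cached object of the given order from the pool, reusing one from
 * the recycling list if possible and otherwise allocating a fresh one from
 * the corresponding kmem cache. Returns NULL if the cache allocation fails.
 */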
static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
        int order, gfp_t gfp_mask)
{
        struct sgv_pool_obj *obj;
        int pages = 1 << order;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        if (likely(!list_empty(&pool->recycling_lists[order]))) {
                obj = list_entry(pool->recycling_lists[order].next,
                         struct sgv_pool_obj,
                        recycle_entry.recycling_list_entry);

                list_del(&obj->recycle_entry.sorted_recycling_list_entry);
                list_del(&obj->recycle_entry.recycling_list_entry);

                sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;
                sgv_pools_mgr.mgr.throttle.active_pages_total += pages;

                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                goto out;
        }

        pool->acc.cached_entries++;
        pool->acc.cached_pages += pages;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        obj = kmem_cache_alloc(pool->caches[order],
                gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
        if (likely(obj)) {
                memset(obj, 0, sizeof(*obj));
                obj->order_or_pages = order;
                obj->owner_pool = pool;
        } else {
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                pool->acc.cached_entries--;
                pool->acc.cached_pages -= pages;
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

out:
        return obj;
}

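/*
 * Return a cached object to its pool's recycling list and to the global
 * time-sorted recycling list, then schedule the delayed purge work if it
 * isn't running already.
 */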
static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
{
        struct sgv_pool *owner = sgv->owner_pool;
        struct list_head *entry;
        struct list_head *list = &owner->recycling_lists[sgv->order_or_pages];
        int sched = 0;
        int pages = 1 << sgv->order_or_pages;

        EXTRACHECKS_BUG_ON(sgv->order_or_pages < 0);

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order_or_pages,
                sgv->sg_count);

        if (owner->clustered) {
                /* Make objects with fewer entries more preferred */
                __list_for_each(entry, list) {
                        struct sgv_pool_obj *tmp = list_entry(entry,
                                struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);
                        TRACE_DBG("tmp %p, order %d, sg_count %d", tmp,
                                tmp->order_or_pages, tmp->sg_count);
                        if (sgv->sg_count <= tmp->sg_count)
                                break;
                }
                entry = entry->prev;
        } else
                entry = list;

        TRACE_DBG("Adding in %p (list %p)", entry, list);
        list_add(&sgv->recycle_entry.recycling_list_entry, entry);

        list_add_tail(&sgv->recycle_entry.sorted_recycling_list_entry,
                &sgv_pools_mgr.mgr.sorted_recycling_list);

        sgv->recycle_entry.time_stamp = jiffies;

        sgv_pools_mgr.mgr.throttle.inactive_pages_total += pages;
        sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;

        if (!sgv_pools_mgr.mgr.pitbool_running) {
                sgv_pools_mgr.mgr.pitbool_running = 1;
                sched = 1;
        }

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (sched)
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
}

/* Must be called with pool_mgr_lock held */
static void __sgv_pool_cached_purge(struct sgv_pool_obj *e)
{
        int pages = 1 << e->order_or_pages;

        list_del(&e->recycle_entry.sorted_recycling_list_entry);
        list_del(&e->recycle_entry.recycling_list_entry);
        e->owner_pool->acc.cached_entries--;
        e->owner_pool->acc.cached_pages -= pages;
        sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;

        return;
}

/* Must be called with pool_mgr_lock held */
static int sgv_pool_cached_purge(struct sgv_pool_obj *e, int t,
        unsigned long rt)
{
        EXTRACHECKS_BUG_ON(t == 0);

        if (time_after(rt, (e->recycle_entry.time_stamp + t))) {
                __sgv_pool_cached_purge(e);
                return 0;
        }
        return 1;
}

/* Called with pool_mgr_lock held, but drops/reacquires it inside */
static int sgv_pool_oom_free_objs(int pgs)
{
        TRACE_MEM("Shrinking pools by about %d pages", pgs);
        while ((sgv_pools_mgr.mgr.throttle.inactive_pages_total >
                        sgv_pools_mgr.mgr.throttle.lo_wmk) &&
              (pgs > 0)) {
                struct sgv_pool_obj *e;

                sBUG_ON(list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list));

                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                               struct sgv_pool_obj,
                               recycle_entry.sorted_recycling_list_entry);

                __sgv_pool_cached_purge(e);
                pgs -= 1 << e->order_or_pages;

                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                sgv_dtor_and_free(e);
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

        TRACE_MEM("Pages remaining %d", pgs);
        return pgs;
}

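/*
 * Check that allocating 'pages_to_alloc' more pages would not push the
 * total above the high watermark; if it would, try to free enough cached
 * objects to make room. Returns -ENOMEM if the limit can't be met.
 */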
static int sgv_pool_hiwmk_check(int pages_to_alloc)
{
        int res = 0;
        int pages = pages_to_alloc;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        pages += sgv_pools_mgr.mgr.throttle.active_pages_total;
        pages += sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        if (unlikely((u32)pages > sgv_pools_mgr.mgr.throttle.hi_wmk)) {
                pages -= sgv_pools_mgr.mgr.throttle.hi_wmk;
                sgv_pools_mgr.mgr.throttle.releases_on_hiwmk++;

                pages = sgv_pool_oom_free_objs(pages);
                if (pages > 0) {
                        TRACE(TRACE_OUT_OF_MEM, "The requested amount of "
                            "memory (%d pages) for commands being "
                            "executed, together with the already "
                            "allocated memory, exceeds the allowed "
                            "maximum %d. Should you increase "
                            "scst_max_cmd_mem?", pages_to_alloc,
                            sgv_pools_mgr.mgr.throttle.hi_wmk);
                        sgv_pools_mgr.mgr.throttle.releases_failed++;
                        res = -ENOMEM;
                        goto out_unlock;
                }
        }

        sgv_pools_mgr.mgr.throttle.active_pages_total += pages_to_alloc;

out_unlock:
        TRACE_MEM("pages_to_alloc %d, new active %d", pages_to_alloc,
                sgv_pools_mgr.mgr.throttle.active_pages_total);

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        return res;
}

static void sgv_pool_hiwmk_uncheck(int pages)
{
        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;
        TRACE_MEM("pages %d, new active %d", pages,
                sgv_pools_mgr.mgr.throttle.active_pages_total);
        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        return;
}

static bool scst_check_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
        int alloced;
        bool res = true;

        alloced = atomic_add_return(pages, &mem_lim->alloced_pages);
        if (unlikely(alloced > mem_lim->max_allowed_pages)) {
                TRACE(TRACE_OUT_OF_MEM, "The requested amount of memory "
                        "(%d pages) for commands being executed on a device, "
                        "together with the already allocated memory, exceeds "
                        "the allowed maximum %d. Should you increase "
                        "scst_max_dev_cmd_mem?", pages,
                        mem_lim->max_allowed_pages);
                atomic_sub(pages, &mem_lim->alloced_pages);
                res = false;
        }

        TRACE_MEM("mem_lim %p, pages %d, res %d, new alloced %d", mem_lim,
                pages, res, atomic_read(&mem_lim->alloced_pages));

        return res;
}

static void scst_uncheck_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
        atomic_sub(pages, &mem_lim->alloced_pages);

        TRACE_MEM("mem_lim %p, pages %d, new alloced %d", mem_lim,
                pages, atomic_read(&mem_lim->alloced_pages));
        return;
}

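/*
 * sgv_pool_alloc() - allocate an SG vector of at least 'size' bytes.
 *
 * Three cases are handled below: reuse of a previously returned object
 * (*sgv != NULL), a cacheable allocation served from the pool's kmem
 * caches, and a "big" or explicitly non-cached allocation done directly
 * with kmalloc(). On success the number of SG entries is returned in
 * *count, the object handle in *sgv, and the last entry's length is
 * trimmed so the vector covers exactly 'size' bytes.
 */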
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
        gfp_t gfp_mask, int flags, int *count,
        struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
{
        struct sgv_pool_obj *obj;
        int order, pages, cnt;
        struct scatterlist *res = NULL;
        int pages_to_alloc;
        struct kmem_cache *cache;
        int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
        bool allowed_mem_checked = false, hiwmk_checked = false;

        TRACE_ENTRY();

        if (unlikely(size == 0))
                goto out;

        sBUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        order = get_order(size);

        TRACE_MEM("size=%d, pages=%d, order=%d, flags=%x, *sgv %p", size, pages,
                order, flags, *sgv);

        if (*sgv != NULL) {
                obj = *sgv;
                pages_to_alloc = (1 << order);
                cache = pool->caches[obj->order_or_pages];

                TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj,
                        obj->order_or_pages);

                EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                EXTRACHECKS_BUG_ON(obj->sg_count != 0);

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail_free_sg_entries;
                allowed_mem_checked = true;

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;
        } else if ((order < SGV_POOL_ELEMENTS) && !no_cached) {
                pages_to_alloc = (1 << order);
                cache = pool->caches[order];

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail;
                allowed_mem_checked = true;

                obj = sgv_pool_cached_get(pool, order, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }

                if (obj->sg_count != 0) {
                        TRACE_MEM("Cached sgv_obj %p", obj);
                        EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                        atomic_inc(&pool->cache_acc[order].hit_alloc);
                        goto success;
                }

                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) {
                        if (!(flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                                goto out_fail_free;
                }

                TRACE_MEM("Brand new sgv_obj %p", obj);

                if (order <= sgv_pools_mgr.sgv_max_local_order) {
                        obj->sg_entries = obj->sg_entries_data;
                        sg_init_table(obj->sg_entries, pages_to_alloc);
                        TRACE_MEM("sg_entries %p", obj->sg_entries);
                        if (pool->clustered) {
                                obj->trans_tbl = (struct trans_tbl_ent *)
                                        (obj->sg_entries + pages_to_alloc);
                                TRACE_MEM("trans_tbl %p", obj->trans_tbl);
                                /*
                                 * No need to clear trans_tbl, if needed, it will
                                 * be fully rewritten in scst_alloc_sg_entries()
                                 */
                        }
                } else {
                        if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
                                        order, gfp_mask) != 0))
                                goto out_fail_free;
                }

                if ((flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) &&
                    (flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                        goto out_return;

                obj->allocator_priv = priv;

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;
        } else {
                int sz;

                pages_to_alloc = pages;

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail;
                allowed_mem_checked = true;

                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS)
                        goto out_return2;

                cache = NULL;
                sz = sizeof(*obj) + pages*sizeof(obj->sg_entries[0]);

                obj = kmalloc(sz, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }
                memset(obj, 0, sizeof(*obj));

                obj->owner_pool = pool;
                obj->order_or_pages = -pages_to_alloc;
                obj->allocator_priv = priv;

                obj->sg_entries = obj->sg_entries_data;
                sg_init_table(obj->sg_entries, pages);

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;

                TRACE_MEM("Big or no_cached sgv_obj %p (size %d)", obj, sz);
        }

        obj->sg_count = scst_alloc_sg_entries(obj->sg_entries,
                pages_to_alloc, gfp_mask, pool->clustered, obj->trans_tbl,
                &pool->alloc_fns, priv);
        if (unlikely(obj->sg_count <= 0)) {
                obj->sg_count = 0;
                if ((flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL) && cache)
                        goto out_return1;
                else
                        goto out_fail_free_sg_entries;
        }

        if (cache) {
                atomic_add(pages_to_alloc - obj->sg_count,
                        &pool->cache_acc[order].merged);
        } else {
                if (no_cached) {
                        atomic_add(pages_to_alloc,
                                &pool->acc.other_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.other_merged);
                } else {
                        atomic_add(pages_to_alloc,
                                &pool->acc.big_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.big_merged);
                }
        }

success:
        if (cache) {
                int sg;
                atomic_inc(&pool->cache_acc[order].total_alloc);
                if (pool->clustered)
                        cnt = obj->trans_tbl[pages-1].sg_num;
                else
                        cnt = pages;
                sg = cnt-1;
                obj->orig_sg = sg;
                obj->orig_length = obj->sg_entries[sg].length;
                if (pool->clustered) {
                        obj->sg_entries[sg].length =
                                (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
                }
        } else {
                cnt = obj->sg_count;
                if (no_cached)
                        atomic_inc(&pool->acc.other_alloc);
                else
                        atomic_inc(&pool->acc.big_alloc);
        }

        *count = cnt;
        res = obj->sg_entries;
        *sgv = obj;

        if (size & ~PAGE_MASK)
                obj->sg_entries[cnt-1].length -= PAGE_SIZE - (size & ~PAGE_MASK);

        TRACE_MEM("sgv_obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
                "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
                obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);

out:
        TRACE_EXIT_HRES(res);
        return res;

out_return:
        obj->allocator_priv = priv;
        obj->owner_pool = pool;

out_return1:
        *sgv = obj;
        TRACE_MEM("Returning failed sgv_obj %p (count %d)", obj, *count);

out_return2:
        *count = pages_to_alloc;
        res = NULL;
        goto out_uncheck;

out_fail_free_sg_entries:
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
                obj->sg_entries = NULL;
        }

out_fail_free:
        if (cache)
                sgv_pool_cached_put(obj);
        else
                kfree(obj);

out_fail:
        res = NULL;
        *count = 0;
        *sgv = NULL;
        TRACE_MEM("%s", "Allocation failed");

out_uncheck:
        if (hiwmk_checked)
                sgv_pool_hiwmk_uncheck(pages_to_alloc);
        if (allowed_mem_checked)
                scst_uncheck_allowed_mem(mem_lim, pages_to_alloc);
        goto out;
}
EXPORT_SYMBOL(sgv_pool_alloc);

void *sgv_get_priv(struct sgv_pool_obj *sgv)
{
        return sgv->allocator_priv;
}
EXPORT_SYMBOL(sgv_get_priv);

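/*
 * Free an SG vector previously returned by sgv_pool_alloc(). Cached
 * objects (order_or_pages >= 0) keep their pages and go back to the
 * recycling lists; big or non-cached objects are freed immediately.
 */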
void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim)
{
        int pages;

        TRACE_MEM("Freeing sgv_obj %p, order %d, sg_entries %p, "
                "sg_count %d, allocator_priv %p", sgv, sgv->order_or_pages,
                sgv->sg_entries, sgv->sg_count, sgv->allocator_priv);

        if (sgv->order_or_pages >= 0) {
                sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;
                pages = (sgv->sg_count != 0) ? 1 << sgv->order_or_pages : 0;
                sgv_pool_cached_put(sgv);
        } else {
                sgv->owner_pool->alloc_fns.free_pages_fn(sgv->sg_entries,
                        sgv->sg_count, sgv->allocator_priv);
                pages = (sgv->sg_count != 0) ? -sgv->order_or_pages : 0;
                kfree(sgv);
                sgv_pool_hiwmk_uncheck(pages);
        }

        scst_uncheck_allowed_mem(mem_lim, pages);

        return;
}
EXPORT_SYMBOL(sgv_pool_free);

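/*
 * Allocate a plain, non-pooled SG vector backed by the system page
 * allocator. Pair with scst_free().
 */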
struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count)
{
        struct scatterlist *res;
        int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
        struct sgv_pool_alloc_fns sys_alloc_fns = {
                scst_alloc_sys_pages, scst_free_sys_sg_entries };
        int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        TRACE_ENTRY();

        atomic_inc(&sgv_pools_mgr.sgv_other_total_alloc);

        if (unlikely(!no_fail)) {
                if (unlikely(sgv_pool_hiwmk_check(pages) != 0)) {
                        res = NULL;
                        goto out;
                }
        }

        res = kmalloc(pages*sizeof(*res), gfp_mask);
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate sg for %d pages",
                        pages);
                goto out_uncheck;
        }

        sg_init_table(res, pages);

        /*
         * If clustering were allowed here, scst_free() would have trouble
         * figuring out how many pages are in the SG vector. So, never use
         * clustering.
         */
        *count = scst_alloc_sg_entries(res, pages, gfp_mask, 0, NULL,
                        &sys_alloc_fns, NULL);
        if (*count <= 0)
                goto out_free;

out:
        TRACE_MEM("Alloced sg %p (count %d)", res, *count);

        TRACE_EXIT_HRES(res);
        return res;

out_free:
        kfree(res);
        res = NULL;

out_uncheck:
        if (!no_fail)
                sgv_pool_hiwmk_uncheck(pages);
        goto out;
}
EXPORT_SYMBOL(scst_alloc);

void scst_free(struct scatterlist *sg, int count)
{
        TRACE_MEM("Freeing sg=%p", sg);

        sgv_pool_hiwmk_uncheck(count);

        scst_free_sys_sg_entries(sg, count, NULL);
        kfree(sg);
        return;
}
EXPORT_SYMBOL(scst_free);

static void sgv_pool_cached_init(struct sgv_pool *pool)
{
        int i;
        for (i = 0; i < SGV_POOL_ELEMENTS; i++)
                INIT_LIST_HEAD(&pool->recycling_lists[i]);
}

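/*
 * Initialize a pool: one kmem cache per supported allocation order. The
 * per-order object size depends on whether the SG vector and, for
 * clustered pools, the translation table fit into the embedded data area.
 */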
int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
{
        int res = -ENOMEM;
        int i;
        struct sgv_pool_obj *obj;

        TRACE_ENTRY();

        memset(pool, 0, sizeof(*pool));

        atomic_set(&pool->acc.other_alloc, 0);
        atomic_set(&pool->acc.big_alloc, 0);
        atomic_set(&pool->acc.other_pages, 0);
        atomic_set(&pool->acc.big_pages, 0);
        atomic_set(&pool->acc.other_merged, 0);
        atomic_set(&pool->acc.big_merged, 0);

        pool->clustered = clustered;
        pool->alloc_fns.alloc_pages_fn = scst_alloc_sys_pages;
        pool->alloc_fns.free_pages_fn = scst_free_sys_sg_entries;

        TRACE_MEM("name %s, sizeof(*obj)=%zd, clustered=%d", name, sizeof(*obj),
                clustered);

        strncpy(pool->name, name, sizeof(pool->name)-1);
        pool->name[sizeof(pool->name)-1] = '\0';

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int size;

                atomic_set(&pool->cache_acc[i].total_alloc, 0);
                atomic_set(&pool->cache_acc[i].hit_alloc, 0);
                atomic_set(&pool->cache_acc[i].merged, 0);

                if (i <= sgv_pools_mgr.sgv_max_local_order) {
                        size = sizeof(*obj) + (1 << i) *
                                (sizeof(obj->sg_entries[0]) +
                                 (clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else if (i <= sgv_pools_mgr.sgv_max_trans_order) {
                        /*
                         * The SG vector (sg_entries) is allocated outside the
                         * object, but the trans_tbl is still embedded.
                         */
                        size = sizeof(*obj) + (1 << i) *
                                ((clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else {
                        size = sizeof(*obj);

                        /* Both sg_entries and trans_tbl are kmalloc()'ed */
                }

                TRACE_MEM("pages=%d, size=%d", 1 << i, size);

                scnprintf(pool->cache_names[i], sizeof(pool->cache_names[i]),
                        "%s-%luK", name, (PAGE_SIZE >> 10) << i);
                pool->caches[i] = kmem_cache_create(pool->cache_names[i],
                        size, 0, SCST_SLAB_FLAGS, NULL
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
                        , NULL);
#else
                        );
#endif
                if (pool->caches[i] == NULL) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool cache "
                                "%s(%d) failed", name, i);
                        goto out_free;
                }
        }

        sgv_pool_cached_init(pool);

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_add_tail(&pool->sgv_pool_list_entry,
                &sgv_pools_mgr.scst_sgv_pool_list);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        res = 0;

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                if (pool->caches[i]) {
                        kmem_cache_destroy(pool->caches[i]);
                        pool->caches[i] = NULL;
                } else
                        break;
        }
        goto out;
}

static void sgv_pool_evaluate_local_order(struct scst_sgv_pools_manager *pmgr)
{
        int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);

        pmgr->sgv_max_local_order = get_order(
                (((space4sgv_ttbl /
                  (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))) *
                        PAGE_SIZE) & PAGE_MASK)) - 1;

        pmgr->sgv_max_trans_order = get_order(
                (((space4sgv_ttbl /
                  (sizeof(struct trans_tbl_ent))) * PAGE_SIZE) & PAGE_MASK)) - 1;

        TRACE_MEM("sgv_max_local_order %d, sgv_max_trans_order %d",
                pmgr->sgv_max_local_order, pmgr->sgv_max_trans_order);
        TRACE_MEM("max object size with embedded sgv & ttbl %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded sgv (!clustered) %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded ttbl %zd",
                (1 << pmgr->sgv_max_trans_order) * sizeof(struct trans_tbl_ent) +
                sizeof(struct sgv_pool_obj));
}

void sgv_pool_deinit(struct sgv_pool *pool)
{
        int i;

        TRACE_ENTRY();

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_del(&pool->sgv_pool_list_entry);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                struct sgv_pool_obj *e;

                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                while (!list_empty(&pool->recycling_lists[i])) {
                        e = list_entry(pool->recycling_lists[i].next,
                                 struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);

                        __sgv_pool_cached_purge(e);
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                        EXTRACHECKS_BUG_ON(e->owner_pool != pool);
                        sgv_dtor_and_free(e);

                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                }
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                if (pool->caches[i])
                        kmem_cache_destroy(pool->caches[i]);
                pool->caches[i] = NULL;
        }

        TRACE_EXIT();
}

void sgv_pool_set_allocator(struct sgv_pool *pool,
        struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
        void (*free_pages_fn)(struct scatterlist *, int, void *))
{
        pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
        pool->alloc_fns.free_pages_fn = free_pages_fn;
        return;
}
EXPORT_SYMBOL(sgv_pool_set_allocator);

struct sgv_pool *sgv_pool_create(const char *name, int clustered)
{
        struct sgv_pool *pool;
        int rc;

        TRACE_ENTRY();

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (pool == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
                goto out;
        }

        rc = sgv_pool_init(pool, name, clustered);
        if (rc != 0)
                goto out_free;

out:
        TRACE_EXIT_RES(pool != NULL);
        return pool;

out_free:
        kfree(pool);
        pool = NULL;
        goto out;
}
EXPORT_SYMBOL(sgv_pool_create);

void sgv_pool_destroy(struct sgv_pool *pool)
{
        TRACE_ENTRY();

        sgv_pool_deinit(pool);
        kfree(pool);

        TRACE_EXIT();
}
EXPORT_SYMBOL(sgv_pool_destroy);

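/*
 * Memory shrinker callback: free up to 'nr' pages worth of cached objects
 * that have been inactive for at least SHRINK_TIME_AFTER, and return the
 * number of inactive pages remaining.
 */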
static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
{
        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (nr > 0) {
                struct sgv_pool_obj *e;
                unsigned long rt = jiffies;

                while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                        e = list_entry(
                                sgv_pools_mgr.mgr.sorted_recycling_list.next,
                                struct sgv_pool_obj,
                                recycle_entry.sorted_recycling_list_entry);

                        if (sgv_pool_cached_purge(e, SHRINK_TIME_AFTER, rt) == 0) {
                                nr -= 1 << e->order_or_pages;
                                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                                sgv_dtor_and_free(e);
                                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        } else
                                break;

                        if (nr <= 0)
                                break;
                }
        }

        nr = sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_EXIT();
        return nr;
}

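/*
 * Delayed work that periodically purges cached objects older than
 * PURGE_TIME_AFTER, rescheduling itself while inactive pages remain.
 */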
static void sgv_pool_cached_pitbool(void *p)
{
        u32 total_pages;
        struct sgv_pool_obj *e;
        unsigned long rt = jiffies;

        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        sgv_pools_mgr.mgr.pitbool_running = 0;

        while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                        struct sgv_pool_obj,
                        recycle_entry.sorted_recycling_list_entry);

                if (sgv_pool_cached_purge(e, PURGE_TIME_AFTER, rt) == 0) {
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        sgv_dtor_and_free(e);
                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                } else
                        break;
        }

        total_pages = sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (total_pages) {
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
        }

        TRACE_EXIT();
        return;
}

/* Both parameters in pages */
int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
{
        int res;
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

        memset(pools, 0, sizeof(*pools));

        sgv_pools_mgr.mgr.throttle.hi_wmk = mem_hwmark;
        sgv_pools_mgr.mgr.throttle.lo_wmk = mem_lwmark;

        sgv_pool_evaluate_local_order(&sgv_pools_mgr);

        atomic_set(&pools->sgv_other_total_alloc, 0);
        INIT_LIST_HEAD(&pools->scst_sgv_pool_list);
        mutex_init(&pools->scst_sgv_pool_mutex);

        INIT_LIST_HEAD(&pools->mgr.sorted_recycling_list);
        spin_lock_init(&pools->mgr.pool_mgr_lock);

        res = sgv_pool_init(&pools->default_set.norm_clust, "sgv-clust", 1);
        if (res != 0)
                goto out;

        res = sgv_pool_init(&pools->default_set.norm, "sgv", 0);
        if (res != 0)
                goto out_free_clust;

        res = sgv_pool_init(&pools->default_set.dma, "sgv-dma", 0);
        if (res != 0)
                goto out_free_norm;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
        INIT_DELAYED_WORK(&pools->mgr.apit_pool,
                (void (*)(struct work_struct *))sgv_pool_cached_pitbool);
#else
        INIT_WORK(&pools->mgr.apit_pool, sgv_pool_cached_pitbool, NULL);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        pools->mgr.sgv_shrinker = set_shrinker(DEFAULT_SEEKS,
                sgv_pool_cached_shrinker);
#else
        pools->mgr.sgv_shrinker.shrink = sgv_pool_cached_shrinker;
        pools->mgr.sgv_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&pools->mgr.sgv_shrinker);
#endif

out:
        TRACE_EXIT_RES(res);
        return res;

out_free_norm:
        sgv_pool_deinit(&pools->default_set.norm);

out_free_clust:
        sgv_pool_deinit(&pools->default_set.norm_clust);
        goto out;
}

void scst_sgv_pools_deinit(void)
{
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        remove_shrinker(pools->mgr.sgv_shrinker);
#else
        unregister_shrinker(&pools->mgr.sgv_shrinker);
#endif

        cancel_delayed_work(&pools->mgr.apit_pool);

        sgv_pool_deinit(&pools->default_set.dma);
        sgv_pool_deinit(&pools->default_set.norm);
        sgv_pool_deinit(&pools->default_set.norm_clust);

        flush_scheduled_work();

        TRACE_EXIT();
        return;
}

static void scst_do_sgv_read(struct seq_file *seq, const struct sgv_pool *pool)
{
        int i, total = 0, hit = 0, merged = 0, allocated = 0;
        int oa, om;

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t;

                hit += atomic_read(&pool->cache_acc[i].hit_alloc);
                total += atomic_read(&pool->cache_acc[i].total_alloc);

                t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated += t * (1 << i);
                merged += atomic_read(&pool->cache_acc[i].merged);
        }

        seq_printf(seq, "\n%-30s %-11d %-11d %-11d %d/%d (P/O)\n", pool->name,
                hit, total, (allocated != 0) ? merged*100/allocated : 0,
                pool->acc.cached_pages, pool->acc.cached_entries);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated = t * (1 << i);
                merged = atomic_read(&pool->cache_acc[i].merged);

                seq_printf(seq, "  %-28s %-11d %-11d %d\n",
                        pool->cache_names[i],
                        atomic_read(&pool->cache_acc[i].hit_alloc),
                        atomic_read(&pool->cache_acc[i].total_alloc),
                        (allocated != 0) ? merged*100/allocated : 0);
        }

        allocated = atomic_read(&pool->acc.big_pages);
        merged = atomic_read(&pool->acc.big_merged);
        oa = atomic_read(&pool->acc.other_pages);
        om = atomic_read(&pool->acc.other_merged);

        seq_printf(seq, "  %-40s %d/%-9d %d/%d\n", "big/other",
                   atomic_read(&pool->acc.big_alloc),
                   atomic_read(&pool->acc.other_alloc),
                   (allocated != 0) ? merged*100/allocated : 0,
                   (oa != 0) ? om*100/oa : 0);

        return;
}

int sgv_pool_procinfo_show(struct seq_file *seq, void *v)
{
        struct sgv_pool *pool;

        TRACE_ENTRY();

        seq_printf(seq, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n\n",
                "Inactive/active pages",
                sgv_pools_mgr.mgr.throttle.inactive_pages_total,
                sgv_pools_mgr.mgr.throttle.active_pages_total,
                "Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.throttle.hi_wmk,
                sgv_pools_mgr.mgr.throttle.lo_wmk, "Hi watermark releases/failures",
                sgv_pools_mgr.mgr.throttle.releases_on_hiwmk,
                sgv_pools_mgr.mgr.throttle.releases_failed);

        seq_printf(seq, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
                "% merged", "Cached");

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_for_each_entry(pool, &sgv_pools_mgr.scst_sgv_pool_list,
                        sgv_pool_list_entry) {
                scst_do_sgv_read(seq, pool);
        }
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        seq_printf(seq, "\n%-42s %-11d\n", "other",
                atomic_read(&sgv_pools_mgr.sgv_other_total_alloc));

        TRACE_EXIT();
        return 0;
}