/*
 *  scst_mem.c
 *
 *  Copyright (C) 2006 - 2008 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2007 Krzysztof Blaszkowski <kb@sysmikro.com.pl>
 *  Copyright (C) 2007 - 2008 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/string.h>

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#define PURGE_INTERVAL          (60 * HZ)
#define PURGE_TIME_AFTER        PURGE_INTERVAL
#define SHRINK_TIME_AFTER       (1 * HZ)

static struct scst_sgv_pools_manager sgv_pools_mgr;

void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
{
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm;
}

void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use clustering");
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm_clust;
}

void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use ISA DMA memory");
        tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
        tgt_dev->pool = &sgv_pools_mgr.default_set.dma;
}

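/*
 * Try to merge SG entry @cur into a physically adjacent entry, checking
 * @hint first and then scanning backwards through the vector. Returns the
 * index of the entry @cur was merged into, or -1 if no merge was possible.
 */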
static int scst_check_clustering(struct scatterlist *sg, int cur, int hint)
{
        int res = -1;
        int i = hint;
        unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
        int len_cur = sg[cur].length;
        unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
        int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
        unsigned long pfn, pfn_next, full_page;

#if 0
        TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
                pfn_cur, pfn_cur_next, len_cur, full_page_cur);
#endif

        /* check the hint first */
        if (i >= 0) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

        /* ToDo: implement more intelligent search */
        for (i = cur - 1; i >= 0; i--) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

out:
        return res;

out_tail:
        TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;

out_head:
        TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
        sg_assign_page(&sg[i], sg_page(&sg[cur]));
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;
}

static void scst_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        int i;

        TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);

        for (i = 0; i < sg_count; i++) {
                struct page *p = sg_page(&sg[i]);
                int len = sg[i].length;
                int pages =
                        (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);

                TRACE_MEM("page %lx, len %d, pages %d",
                        (unsigned long)p, len, pages);

                while (pages > 0) {
                        int order = 0;

/*
 * __free_pages() doesn't like freeing pages with an order different from
 * the one they were allocated with, so disable this small optimization.
 */
#if 0
                        if (len > 0) {
                                while (((1 << order) << PAGE_SHIFT) < len)
                                        order++;
                                len = 0;
                        }
#endif
                        TRACE_MEM("free_pages(): order %d, page %lx",
                                order, (unsigned long)p);

                        __free_pages(p, order);

                        pages -= 1 << order;
                        p += 1 << order;
                }
        }
}

static struct page *scst_alloc_sys_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct page *page = alloc_pages(gfp_mask, 0);

        sg_set_page(sg, page, PAGE_SIZE, 0);
        TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
        if (page == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
                        "sg page failed");
        }
        return page;
}

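/*
 * Allocate @pages pages into SG vector @sg using the pool's page allocator,
 * optionally merging physically contiguous pages (clustering) and filling
 * @trans_tbl with the page -> SG entry mapping. Returns the resulting number
 * of SG entries, or 0 on allocation failure.
 */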
static int scst_alloc_sg_entries(struct scatterlist *sg, int pages,
        gfp_t gfp_mask, int clustered, struct trans_tbl_ent *trans_tbl,
        const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
{
        int sg_count = 0;
        int pg, i, j;
        int merged = -1;

        TRACE_MEM("pages=%d, clustered=%d", pages, clustered);

#if 0
        gfp_mask |= __GFP_COLD;
#endif
#ifdef CONFIG_SCST_STRICT_SECURITY
        gfp_mask |= __GFP_ZERO;
#endif

        for (pg = 0; pg < pages; pg++) {
                void *rc;
#ifdef CONFIG_SCST_DEBUG_OOM
                if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
                    ((scst_random() % 10000) == 55))
                        rc = NULL;
                else
#endif
                        rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
                                priv);
                if (rc == NULL)
                        goto out_no_mem;
                if (clustered) {
                        merged = scst_check_clustering(sg, sg_count, merged);
                        if (merged == -1)
                                sg_count++;
                } else
                        sg_count++;
                TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
                        sg_count);
        }

        if (clustered && (trans_tbl != NULL)) {
                pg = 0;
                for (i = 0; i < pages; i++) {
                        int n = (sg[i].length >> PAGE_SHIFT) +
                                ((sg[i].length & ~PAGE_MASK) != 0);
                        trans_tbl[i].pg_count = pg;
                        for (j = 0; j < n; j++)
                                trans_tbl[pg++].sg_num = i+1;
                        TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
                                trans_tbl[i].pg_count);
                }
        }

out:
        TRACE_MEM("sg_count=%d", sg_count);
        return sg_count;

out_no_mem:
        alloc_fns->free_pages_fn(sg, sg_count, priv);
        sg_count = 0;
        goto out;
}

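/*
 * Allocate the external SG vector for @obj and, for clustered pools, the
 * translation table when it does not fit into the object itself. Returns 0
 * on success or -ENOMEM.
 */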
static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
        int pages_to_alloc, int order, gfp_t gfp_mask)
{
        int sz, tsz = 0;
        int res = 0;

        TRACE_ENTRY();

        sz = pages_to_alloc * sizeof(obj->sg_entries[0]);

        obj->sg_entries = kmalloc(sz, gfp_mask);
        if (unlikely(obj->sg_entries == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
                        "SG vector failed (size %d)", sz);
                res = -ENOMEM;
                goto out;
        }

        sg_init_table(obj->sg_entries, pages_to_alloc);

        if (obj->owner_pool->clustered) {
                if (order <= sgv_pools_mgr.sgv_max_trans_order) {
                        obj->trans_tbl =
                                (struct trans_tbl_ent *)obj->sg_entries_data;
                        /*
                         * No need to clear trans_tbl; if needed, it will be
                         * fully rewritten in scst_alloc_sg_entries().
                         */
                } else {
                        tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
                        obj->trans_tbl = kzalloc(tsz, gfp_mask);
                        if (unlikely(obj->trans_tbl == NULL)) {
                                TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                        "trans_tbl failed (size %d)", tsz);
                                res = -ENOMEM;
                                goto out_free;
                        }
                }
        }

        TRACE_MEM("pages_to_alloc %d, order %d, sz %d, tsz %d, obj %p, "
                "sg_entries %p, trans_tbl %p", pages_to_alloc, order,
                sz, tsz, obj, obj->sg_entries, obj->trans_tbl);

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(obj->sg_entries);
        obj->sg_entries = NULL;
        goto out;
}

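/* Free the object's pages, its external arrays and the cache object itself */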
static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
{
        if (obj->sg_count != 0) {
                obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
                        obj->sg_count, obj->allocator_priv);
        }
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl !=
                    (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
        }

        kmem_cache_free(obj->owner_pool->caches[obj->order_or_pages], obj);
        return;
}

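/*
 * Take a cached object of the given order from the pool's recycling list,
 * or allocate a fresh one from the corresponding kmem cache if the list
 * is empty.
 */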
static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
        int order, gfp_t gfp_mask)
{
        struct sgv_pool_obj *obj;
        int pages = 1 << order;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        if (likely(!list_empty(&pool->recycling_lists[order]))) {
                obj = list_entry(pool->recycling_lists[order].next,
                        struct sgv_pool_obj,
                        recycle_entry.recycling_list_entry);

                list_del(&obj->recycle_entry.sorted_recycling_list_entry);
                list_del(&obj->recycle_entry.recycling_list_entry);

                sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;
                sgv_pools_mgr.mgr.throttle.active_pages_total += pages;

                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                goto out;
        }

        pool->acc.cached_entries++;
        pool->acc.cached_pages += pages;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        obj = kmem_cache_alloc(pool->caches[order],
                gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
        if (likely(obj)) {
                memset(obj, 0, sizeof(*obj));
                obj->order_or_pages = order;
                obj->owner_pool = pool;
        } else {
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                pool->acc.cached_entries--;
                pool->acc.cached_pages -= pages;
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

out:
        return obj;
}

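/*
 * Return an object to its pool's recycling list (kept sorted by SG entry
 * count for clustered pools) and schedule the periodic purge work if it
 * is not already pending.
 */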
static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
{
        struct sgv_pool *owner = sgv->owner_pool;
        struct list_head *entry;
        struct list_head *list = &owner->recycling_lists[sgv->order_or_pages];
        int sched = 0;
        int pages = 1 << sgv->order_or_pages;

        EXTRACHECKS_BUG_ON(sgv->order_or_pages < 0);

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order_or_pages,
                sgv->sg_count);

        if (owner->clustered) {
                /* Make objects with fewer entries more preferred */
                __list_for_each(entry, list) {
                        struct sgv_pool_obj *tmp = list_entry(entry,
                                struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);
                        TRACE_DBG("tmp %p, order %d, sg_count %d", tmp,
                                tmp->order_or_pages, tmp->sg_count);
                        if (sgv->sg_count <= tmp->sg_count)
                                break;
                }
                entry = entry->prev;
        } else
                entry = list;

        TRACE_DBG("Adding in %p (list %p)", entry, list);
        list_add(&sgv->recycle_entry.recycling_list_entry, entry);

        list_add_tail(&sgv->recycle_entry.sorted_recycling_list_entry,
                &sgv_pools_mgr.mgr.sorted_recycling_list);

        sgv->recycle_entry.time_stamp = jiffies;

        sgv_pools_mgr.mgr.throttle.inactive_pages_total += pages;
        sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;

        if (!sgv_pools_mgr.mgr.pitbool_running) {
                sgv_pools_mgr.mgr.pitbool_running = 1;
                sched = 1;
        }

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (sched)
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
}

/* Must be called with pool_mgr_lock held */
static void __sgv_pool_cached_purge(struct sgv_pool_obj *e)
{
        int pages = 1 << e->order_or_pages;

        list_del(&e->recycle_entry.sorted_recycling_list_entry);
        list_del(&e->recycle_entry.recycling_list_entry);
        e->owner_pool->acc.cached_entries--;
        e->owner_pool->acc.cached_pages -= pages;
        sgv_pools_mgr.mgr.throttle.inactive_pages_total -= pages;

        return;
}

/* Must be called with pool_mgr_lock held */
static int sgv_pool_cached_purge(struct sgv_pool_obj *e, int t,
        unsigned long rt)
{
        EXTRACHECKS_BUG_ON(t == 0);

        if (time_after(rt, (e->recycle_entry.time_stamp + t))) {
                __sgv_pool_cached_purge(e);
                return 0;
        }
        return 1;
}

/* Called with pool_mgr_lock held, but drops/reacquires it inside */
static int sgv_pool_oom_free_objs(int pgs)
        __releases(sgv_pools_mgr.mgr.pool_mgr_lock)
        __acquires(sgv_pools_mgr.mgr.pool_mgr_lock)
{
        TRACE_MEM("Shrinking pools about %d pages", pgs);
        while ((sgv_pools_mgr.mgr.throttle.inactive_pages_total >
                        sgv_pools_mgr.mgr.throttle.lo_wmk) &&
              (pgs > 0)) {
                struct sgv_pool_obj *e;

                sBUG_ON(list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list));

                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                               struct sgv_pool_obj,
                               recycle_entry.sorted_recycling_list_entry);

                __sgv_pool_cached_purge(e);
                pgs -= 1 << e->order_or_pages;

                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                sgv_dtor_and_free(e);
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

        TRACE_MEM("Pages remaining %d ", pgs);
        return pgs;
}

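/*
 * Account @pages_to_alloc against the global memory throttle. If the high
 * watermark would be exceeded, try to free cached objects down to the low
 * watermark; returns -ENOMEM if not enough memory could be released.
 */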
static int sgv_pool_hiwmk_check(int pages_to_alloc)
{
        int res = 0;
        int pages = pages_to_alloc;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        pages += sgv_pools_mgr.mgr.throttle.active_pages_total;
        pages += sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        if (unlikely((u32)pages > sgv_pools_mgr.mgr.throttle.hi_wmk)) {
                pages -= sgv_pools_mgr.mgr.throttle.hi_wmk;
                sgv_pools_mgr.mgr.throttle.releases_on_hiwmk++;

                pages = sgv_pool_oom_free_objs(pages);
                if (pages > 0) {
                        TRACE(TRACE_OUT_OF_MEM, "Requested amount of "
                            "memory (%d pages) for being executed "
                            "commands together with the already "
                            "allocated memory exceeds the allowed "
                            "maximum %d. Should you increase "
                            "scst_max_cmd_mem?", pages_to_alloc,
                           sgv_pools_mgr.mgr.throttle.hi_wmk);
                        sgv_pools_mgr.mgr.throttle.releases_failed++;
                        res = -ENOMEM;
                        goto out_unlock;
                }
        }

        sgv_pools_mgr.mgr.throttle.active_pages_total += pages_to_alloc;

out_unlock:
        TRACE_MEM("pages_to_alloc %d, new active %d", pages_to_alloc,
                sgv_pools_mgr.mgr.throttle.active_pages_total);

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        return res;
}

static void sgv_pool_hiwmk_uncheck(int pages)
{
        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        sgv_pools_mgr.mgr.throttle.active_pages_total -= pages;
        TRACE_MEM("pages %d, new active %d", pages,
                sgv_pools_mgr.mgr.throttle.active_pages_total);
        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        return;
}

static bool scst_check_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
        int alloced;
        bool res = true;

        alloced = atomic_add_return(pages, &mem_lim->alloced_pages);
        if (unlikely(alloced > mem_lim->max_allowed_pages)) {
                TRACE(TRACE_OUT_OF_MEM, "Requested amount of memory "
                        "(%d pages) for being executed commands on a device "
                        "together with the already allocated memory exceeds "
                        "the allowed maximum %d. Should you increase "
                        "scst_max_dev_cmd_mem?", pages,
                        mem_lim->max_allowed_pages);
                atomic_sub(pages, &mem_lim->alloced_pages);
                res = false;
        }

        TRACE_MEM("mem_lim %p, pages %d, res %d, new alloced %d", mem_lim,
                pages, res, atomic_read(&mem_lim->alloced_pages));

        return res;
}

static void scst_uncheck_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
{
        atomic_sub(pages, &mem_lim->alloced_pages);

        TRACE_MEM("mem_lim %p, pages %d, new alloced %d", mem_lim,
                pages, atomic_read(&mem_lim->alloced_pages));
        return;
}

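/*
 * Allocate an SG vector of at least @size bytes from @pool. Small requests
 * are served from the pool's per-order object caches; bigger requests and
 * SCST_POOL_ALLOC_NO_CACHED ones get a one-off object. On success, *@count
 * is set to the number of SG entries and *@sgv to the object that must
 * later be passed to sgv_pool_free().
 */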
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
        gfp_t gfp_mask, int flags, int *count,
        struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
{
        struct sgv_pool_obj *obj;
        int order, pages, cnt;
        struct scatterlist *res = NULL;
        int pages_to_alloc;
        struct kmem_cache *cache;
        int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
        bool allowed_mem_checked = false, hiwmk_checked = false;

        TRACE_ENTRY();

        if (unlikely(size == 0))
                goto out;

        sBUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        order = get_order(size);

        TRACE_MEM("size=%d, pages=%d, order=%d, flags=%x, *sgv %p", size, pages,
                order, flags, *sgv);

        if (*sgv != NULL) {
                obj = *sgv;
                pages_to_alloc = (1 << order);
                cache = pool->caches[obj->order_or_pages];

                TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj,
                        obj->order_or_pages);

                EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                EXTRACHECKS_BUG_ON(obj->sg_count != 0);

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail_free_sg_entries;
                allowed_mem_checked = true;

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;
        } else if ((order < SGV_POOL_ELEMENTS) && !no_cached) {
                pages_to_alloc = (1 << order);
                cache = pool->caches[order];

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail;
                allowed_mem_checked = true;

                obj = sgv_pool_cached_get(pool, order, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }

                if (obj->sg_count != 0) {
                        TRACE_MEM("Cached sgv_obj %p", obj);
                        EXTRACHECKS_BUG_ON(obj->order_or_pages != order);
                        atomic_inc(&pool->cache_acc[order].hit_alloc);
                        goto success;
                }

                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) {
                        if (!(flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                                goto out_fail_free;
                }

                TRACE_MEM("Brand new sgv_obj %p", obj);

                if (order <= sgv_pools_mgr.sgv_max_local_order) {
                        obj->sg_entries = obj->sg_entries_data;
                        sg_init_table(obj->sg_entries, pages_to_alloc);
                        TRACE_MEM("sg_entries %p", obj->sg_entries);
                        if (pool->clustered) {
                                obj->trans_tbl = (struct trans_tbl_ent *)
                                        (obj->sg_entries + pages_to_alloc);
                                TRACE_MEM("trans_tbl %p", obj->trans_tbl);
                                /*
                                 * No need to clear trans_tbl; if needed, it
                                 * will be fully rewritten in
                                 * scst_alloc_sg_entries().
                                 */
                        }
                } else {
                        if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
                                        order, gfp_mask) != 0))
                                goto out_fail_free;
                }

                if ((flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) &&
                    (flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                        goto out_return;

                obj->allocator_priv = priv;

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;
        } else {
                int sz;

                pages_to_alloc = pages;

                if (unlikely(!scst_check_allowed_mem(mem_lim, pages_to_alloc)))
                        goto out_fail;
                allowed_mem_checked = true;

                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS)
                        goto out_return2;

                cache = NULL;
                sz = sizeof(*obj) + pages*sizeof(obj->sg_entries[0]);

                obj = kmalloc(sz, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }
                memset(obj, 0, sizeof(*obj));

                obj->owner_pool = pool;
                obj->order_or_pages = -pages_to_alloc;
                obj->allocator_priv = priv;

                obj->sg_entries = obj->sg_entries_data;
                sg_init_table(obj->sg_entries, pages);

                if (unlikely(sgv_pool_hiwmk_check(pages_to_alloc) != 0))
                        goto out_fail_free_sg_entries;
                hiwmk_checked = true;

                TRACE_MEM("Big or no_cached sgv_obj %p (size %d)", obj, sz);
        }

        obj->sg_count = scst_alloc_sg_entries(obj->sg_entries,
                pages_to_alloc, gfp_mask, pool->clustered, obj->trans_tbl,
                &pool->alloc_fns, priv);
        if (unlikely(obj->sg_count <= 0)) {
                obj->sg_count = 0;
                if ((flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL) && cache)
                        goto out_return1;
                else
                        goto out_fail_free_sg_entries;
        }

        if (cache) {
                atomic_add(pages_to_alloc - obj->sg_count,
                        &pool->cache_acc[order].merged);
        } else {
                if (no_cached) {
                        atomic_add(pages_to_alloc,
                                &pool->acc.other_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.other_merged);
                } else {
                        atomic_add(pages_to_alloc,
                                &pool->acc.big_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.big_merged);
                }
        }

success:
        if (cache) {
                int sg;
                atomic_inc(&pool->cache_acc[order].total_alloc);
                if (pool->clustered)
                        cnt = obj->trans_tbl[pages-1].sg_num;
                else
                        cnt = pages;
                sg = cnt-1;
                obj->orig_sg = sg;
                obj->orig_length = obj->sg_entries[sg].length;
                if (pool->clustered) {
                        obj->sg_entries[sg].length =
                                (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
                }
        } else {
                cnt = obj->sg_count;
                if (no_cached)
                        atomic_inc(&pool->acc.other_alloc);
                else
                        atomic_inc(&pool->acc.big_alloc);
        }

        *count = cnt;
        res = obj->sg_entries;
        *sgv = obj;

        if (size & ~PAGE_MASK)
                obj->sg_entries[cnt-1].length -=
                        PAGE_SIZE - (size & ~PAGE_MASK);

        TRACE_MEM("sgv_obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
                "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
                obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);

out:
        TRACE_EXIT_HRES(res);
        return res;

out_return:
        obj->allocator_priv = priv;
        obj->owner_pool = pool;

out_return1:
        *sgv = obj;
        TRACE_MEM("Returning failed sgv_obj %p (count %d)", obj, *count);

out_return2:
        *count = pages_to_alloc;
        res = NULL;
        goto out_uncheck;

out_fail_free_sg_entries:
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl !=
                        (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
                obj->sg_entries = NULL;
        }

out_fail_free:
        if (cache)
                sgv_pool_cached_put(obj);
        else
                kfree(obj);

out_fail:
        res = NULL;
        *count = 0;
        *sgv = NULL;
        TRACE_MEM("%s", "Allocation failed");

out_uncheck:
        if (hiwmk_checked)
                sgv_pool_hiwmk_uncheck(pages_to_alloc);
        if (allowed_mem_checked)
                scst_uncheck_allowed_mem(mem_lim, pages_to_alloc);
        goto out;
}
EXPORT_SYMBOL(sgv_pool_alloc);

void *sgv_get_priv(struct sgv_pool_obj *sgv)
{
        return sgv->allocator_priv;
}
EXPORT_SYMBOL(sgv_get_priv);

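/*
 * Free an SG vector previously allocated with sgv_pool_alloc(): cached
 * objects go back to the recycling lists, one-off objects are freed
 * immediately.
 */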
void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim)
{
        int pages;

        TRACE_MEM("Freeing sgv_obj %p, order %d, sg_entries %p, "
                "sg_count %d, allocator_priv %p", sgv, sgv->order_or_pages,
                sgv->sg_entries, sgv->sg_count, sgv->allocator_priv);

        if (sgv->order_or_pages >= 0) {
                sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;
                pages = (sgv->sg_count != 0) ? 1 << sgv->order_or_pages : 0;
                sgv_pool_cached_put(sgv);
        } else {
                sgv->owner_pool->alloc_fns.free_pages_fn(sgv->sg_entries,
                        sgv->sg_count, sgv->allocator_priv);
                pages = (sgv->sg_count != 0) ? -sgv->order_or_pages : 0;
                kfree(sgv);
                sgv_pool_hiwmk_uncheck(pages);
        }

        scst_uncheck_allowed_mem(mem_lim, pages);

        return;
}
EXPORT_SYMBOL(sgv_pool_free);

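/*
 * Allocate a plain, non-pooled SG vector of @size bytes, without clustering.
 * Returns the vector and sets *@count to the number of entries.
 */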
struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count)
{
        struct scatterlist *res;
        int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
        struct sgv_pool_alloc_fns sys_alloc_fns = {
                scst_alloc_sys_pages, scst_free_sys_sg_entries };
        int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        TRACE_ENTRY();

        atomic_inc(&sgv_pools_mgr.sgv_other_total_alloc);

        if (unlikely(!no_fail)) {
                if (unlikely(sgv_pool_hiwmk_check(pages) != 0)) {
                        res = NULL;
                        goto out;
                }
        }

        res = kmalloc(pages*sizeof(*res), gfp_mask);
        if (res == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "Unable to allocate sg for %d pages",
                        pages);
                goto out_uncheck;
        }

        sg_init_table(res, pages);

        /*
         * If we allowed clustering here, we would have trouble in scst_free()
         * figuring out how many pages are in the SG vector. So never use
         * clustering here.
         */
        *count = scst_alloc_sg_entries(res, pages, gfp_mask, 0, NULL,
                        &sys_alloc_fns, NULL);
        if (*count <= 0)
                goto out_free;

out:
        TRACE_MEM("Alloced sg %p (count %d)", res, *count);

        TRACE_EXIT_HRES(res);
        return res;

out_free:
        kfree(res);
        res = NULL;

out_uncheck:
        if (!no_fail)
                sgv_pool_hiwmk_uncheck(pages);
        goto out;
}
EXPORT_SYMBOL(scst_alloc);

void scst_free(struct scatterlist *sg, int count)
{
        TRACE_MEM("Freeing sg=%p", sg);

        sgv_pool_hiwmk_uncheck(count);

        scst_free_sys_sg_entries(sg, count, NULL);
        kfree(sg);
        return;
}
EXPORT_SYMBOL(scst_free);

static void sgv_pool_cached_init(struct sgv_pool *pool)
{
        int i;
        for (i = 0; i < SGV_POOL_ELEMENTS; i++)
                INIT_LIST_HEAD(&pool->recycling_lists[i]);
}

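/*
 * Initialize @pool: set up its accounting, create a kmem cache for every
 * supported allocation order and add the pool to the global pool list.
 */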
int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
{
        int res = -ENOMEM;
        int i;
        struct sgv_pool_obj *obj;

        TRACE_ENTRY();

        memset(pool, 0, sizeof(*pool));

        atomic_set(&pool->acc.other_alloc, 0);
        atomic_set(&pool->acc.big_alloc, 0);
        atomic_set(&pool->acc.other_pages, 0);
        atomic_set(&pool->acc.big_pages, 0);
        atomic_set(&pool->acc.other_merged, 0);
        atomic_set(&pool->acc.big_merged, 0);

        pool->clustered = clustered;
        pool->alloc_fns.alloc_pages_fn = scst_alloc_sys_pages;
        pool->alloc_fns.free_pages_fn = scst_free_sys_sg_entries;

        TRACE_MEM("name %s, sizeof(*obj)=%zd, clustered=%d", name, sizeof(*obj),
                clustered);

        strncpy(pool->name, name, sizeof(pool->name)-1);
        pool->name[sizeof(pool->name)-1] = '\0';

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int size;

                atomic_set(&pool->cache_acc[i].total_alloc, 0);
                atomic_set(&pool->cache_acc[i].hit_alloc, 0);
                atomic_set(&pool->cache_acc[i].merged, 0);

                if (i <= sgv_pools_mgr.sgv_max_local_order) {
                        size = sizeof(*obj) + (1 << i) *
                                (sizeof(obj->sg_entries[0]) +
                                 (clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else if (i <= sgv_pools_mgr.sgv_max_trans_order) {
                        /*
                         * The SG vector (sg_entries) is allocated outside the
                         * object, but the translation table is still embedded.
                         */
                        size = sizeof(*obj) + (1 << i) *
                                ((clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else {
                        size = sizeof(*obj);

                        /*
                         * Both sg_entries and trans_tbl are allocated
                         * separately in sgv_alloc_arrays().
                         */
                }

                TRACE_MEM("pages=%d, size=%d", 1 << i, size);

                scnprintf(pool->cache_names[i], sizeof(pool->cache_names[i]),
                        "%s-%luK", name, (PAGE_SIZE >> 10) << i);
                pool->caches[i] = kmem_cache_create(pool->cache_names[i],
                        size, 0, SCST_SLAB_FLAGS, NULL
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
                        , NULL);
#else
                        );
#endif
                if (pool->caches[i] == NULL) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool cache "
                                "%s(%d) failed", name, i);
                        goto out_free;
                }
        }

        sgv_pool_cached_init(pool);

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_add_tail(&pool->sgv_pool_list_entry,
                &sgv_pools_mgr.scst_sgv_pool_list);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        res = 0;

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                if (pool->caches[i]) {
                        kmem_cache_destroy(pool->caches[i]);
                        pool->caches[i] = NULL;
                } else
                        break;
        }
        goto out;
}

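/*
 * Compute the maximum allocation orders for which the SG vector and the
 * translation table (or the translation table alone) still fit into the
 * remainder of a page after the sgv_pool_obj itself.
 */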
static void sgv_pool_evaluate_local_order(struct scst_sgv_pools_manager *pmgr)
{
        int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);

        pmgr->sgv_max_local_order = get_order(
                (((space4sgv_ttbl /
                  (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))) *
                        PAGE_SIZE) & PAGE_MASK)) - 1;

        pmgr->sgv_max_trans_order = get_order(
                (((space4sgv_ttbl / sizeof(struct trans_tbl_ent)) * PAGE_SIZE)
                 & PAGE_MASK)) - 1;

        TRACE_MEM("sgv_max_local_order %d, sgv_max_trans_order %d",
                pmgr->sgv_max_local_order, pmgr->sgv_max_trans_order);
        TRACE_MEM("max object size with embedded sgv & ttbl %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded sgv (!clustered) %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded ttbl %zd",
                (1 << pmgr->sgv_max_trans_order) * sizeof(struct trans_tbl_ent)
                + sizeof(struct sgv_pool_obj));
}

void sgv_pool_deinit(struct sgv_pool *pool)
{
        int i;

        TRACE_ENTRY();

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_del(&pool->sgv_pool_list_entry);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                struct sgv_pool_obj *e;

                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                while (!list_empty(&pool->recycling_lists[i])) {
                        e = list_entry(pool->recycling_lists[i].next,
                                struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);

                        __sgv_pool_cached_purge(e);
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                        EXTRACHECKS_BUG_ON(e->owner_pool != pool);
                        sgv_dtor_and_free(e);

                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                }
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                if (pool->caches[i])
                        kmem_cache_destroy(pool->caches[i]);
                pool->caches[i] = NULL;
        }

        TRACE_EXIT();
}

void sgv_pool_set_allocator(struct sgv_pool *pool,
        struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
        void (*free_pages_fn)(struct scatterlist *, int, void *))
{
        pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
        pool->alloc_fns.free_pages_fn = free_pages_fn;
        return;
}
EXPORT_SYMBOL(sgv_pool_set_allocator);

struct sgv_pool *sgv_pool_create(const char *name, int clustered)
{
        struct sgv_pool *pool;
        int rc;

        TRACE_ENTRY();

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (pool == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
                goto out;
        }

        rc = sgv_pool_init(pool, name, clustered);
        if (rc != 0)
                goto out_free;

out:
        TRACE_EXIT_RES(pool != NULL);
        return pool;

out_free:
        kfree(pool);
        pool = NULL;
        goto out;
}
EXPORT_SYMBOL(sgv_pool_create);

void sgv_pool_destroy(struct sgv_pool *pool)
{
        TRACE_ENTRY();

        sgv_pool_deinit(pool);
        kfree(pool);

        TRACE_EXIT();
}
EXPORT_SYMBOL(sgv_pool_destroy);

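/*
 * Memory shrinker callback: purge up to @nr pages worth of cached objects
 * that have been inactive for at least SHRINK_TIME_AFTER, and report how
 * many inactive pages remain.
 */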
static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
{
        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (nr > 0) {
                struct sgv_pool_obj *e;
                unsigned long rt = jiffies;

                while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                        e = list_entry(
                                sgv_pools_mgr.mgr.sorted_recycling_list.next,
                                struct sgv_pool_obj,
                                recycle_entry.sorted_recycling_list_entry);

                        if (sgv_pool_cached_purge(e, SHRINK_TIME_AFTER, rt) == 0) {
                                nr -= 1 << e->order_or_pages;
                                spin_unlock_bh(
                                        &sgv_pools_mgr.mgr.pool_mgr_lock);
                                sgv_dtor_and_free(e);
                                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        } else
                                break;

                        if (nr <= 0)
                                break;
                }
        }

        nr = sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_EXIT();
        return nr;
}

static void sgv_pool_cached_pitbool(void *p)
{
        u32 total_pages;
        struct sgv_pool_obj *e;
        unsigned long rt = jiffies;

        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        sgv_pools_mgr.mgr.pitbool_running = 0;

        while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                        struct sgv_pool_obj,
                        recycle_entry.sorted_recycling_list_entry);

                if (sgv_pool_cached_purge(e, PURGE_TIME_AFTER, rt) == 0) {
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        sgv_dtor_and_free(e);
                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                } else
                        break;
        }

        total_pages = sgv_pools_mgr.mgr.throttle.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (total_pages) {
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
        }

        TRACE_EXIT();
        return;
}

/* Both parameters in pages */
int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
{
        int res;
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

        memset(pools, 0, sizeof(*pools));

        sgv_pools_mgr.mgr.throttle.hi_wmk = mem_hwmark;
        sgv_pools_mgr.mgr.throttle.lo_wmk = mem_lwmark;

        sgv_pool_evaluate_local_order(&sgv_pools_mgr);

        atomic_set(&pools->sgv_other_total_alloc, 0);
        INIT_LIST_HEAD(&pools->scst_sgv_pool_list);
        mutex_init(&pools->scst_sgv_pool_mutex);

        INIT_LIST_HEAD(&pools->mgr.sorted_recycling_list);
        spin_lock_init(&pools->mgr.pool_mgr_lock);

        res = sgv_pool_init(&pools->default_set.norm, "sgv", 0);
        if (res != 0)
                goto out;

        res = sgv_pool_init(&pools->default_set.norm_clust, "sgv-clust", 1);
        if (res != 0)
                goto out_free_norm;

        res = sgv_pool_init(&pools->default_set.dma, "sgv-dma", 0);
        if (res != 0)
                goto out_free_clust;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
        INIT_DELAYED_WORK(&pools->mgr.apit_pool,
                (void (*)(struct work_struct *))sgv_pool_cached_pitbool);
#else
        INIT_WORK(&pools->mgr.apit_pool, sgv_pool_cached_pitbool, NULL);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        pools->mgr.sgv_shrinker = set_shrinker(DEFAULT_SEEKS,
                sgv_pool_cached_shrinker);
#else
        pools->mgr.sgv_shrinker.shrink = sgv_pool_cached_shrinker;
        pools->mgr.sgv_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&pools->mgr.sgv_shrinker);
#endif

out:
        TRACE_EXIT_RES(res);
        return res;

out_free_clust:
        sgv_pool_deinit(&pools->default_set.norm_clust);

out_free_norm:
        sgv_pool_deinit(&pools->default_set.norm);
        goto out;
}

void scst_sgv_pools_deinit(void)
{
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        remove_shrinker(pools->mgr.sgv_shrinker);
#else
        unregister_shrinker(&pools->mgr.sgv_shrinker);
#endif

        cancel_delayed_work(&pools->mgr.apit_pool);

        sgv_pool_deinit(&pools->default_set.dma);
        sgv_pool_deinit(&pools->default_set.norm);
        sgv_pool_deinit(&pools->default_set.norm_clust);

        flush_scheduled_work();

        TRACE_EXIT();
        return;
}

static void scst_do_sgv_read(struct seq_file *seq, const struct sgv_pool *pool)
{
        int i, total = 0, hit = 0, merged = 0, allocated = 0;
        int oa, om;

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t;

                hit += atomic_read(&pool->cache_acc[i].hit_alloc);
                total += atomic_read(&pool->cache_acc[i].total_alloc);

                t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated += t * (1 << i);
                merged += atomic_read(&pool->cache_acc[i].merged);
        }

        seq_printf(seq, "\n%-30s %-11d %-11d %-11d %d/%d (P/O)\n", pool->name,
                hit, total, (allocated != 0) ? merged*100/allocated : 0,
                pool->acc.cached_pages, pool->acc.cached_entries);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated = t * (1 << i);
                merged = atomic_read(&pool->cache_acc[i].merged);

                seq_printf(seq, "  %-28s %-11d %-11d %d\n",
                        pool->cache_names[i],
                        atomic_read(&pool->cache_acc[i].hit_alloc),
                        atomic_read(&pool->cache_acc[i].total_alloc),
                        (allocated != 0) ? merged*100/allocated : 0);
        }

        allocated = atomic_read(&pool->acc.big_pages);
        merged = atomic_read(&pool->acc.big_merged);
        oa = atomic_read(&pool->acc.other_pages);
        om = atomic_read(&pool->acc.other_merged);

        seq_printf(seq, "  %-40s %d/%-9d %d/%d\n", "big/other",
                   atomic_read(&pool->acc.big_alloc),
                   atomic_read(&pool->acc.other_alloc),
                   (allocated != 0) ? merged*100/allocated : 0,
                   (oa != 0) ? om/oa : 0);

        return;
}

int sgv_pool_procinfo_show(struct seq_file *seq, void *v)
{
        struct sgv_pool *pool;

        TRACE_ENTRY();

        seq_printf(seq, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n\n",
                "Inactive/active pages",
                sgv_pools_mgr.mgr.throttle.inactive_pages_total,
                sgv_pools_mgr.mgr.throttle.active_pages_total,
                "Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.throttle.hi_wmk,
                sgv_pools_mgr.mgr.throttle.lo_wmk,
                "Hi watermark releases/failures",
                sgv_pools_mgr.mgr.throttle.releases_on_hiwmk,
                sgv_pools_mgr.mgr.throttle.releases_failed);

        seq_printf(seq, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
                "% merged", "Cached");

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_for_each_entry(pool, &sgv_pools_mgr.scst_sgv_pool_list,
                        sgv_pool_list_entry) {
                scst_do_sgv_read(seq, pool);
        }
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        seq_printf(seq, "\n%-42s %-11d\n", "other",
                atomic_read(&sgv_pools_mgr.sgv_other_total_alloc));

        TRACE_EXIT();
        return 0;
}