Patch from Bart Van Assche <bart.vanassche@gmail.com>: scst/src/scst_mem.c
1 /*
2  *  scst_mem.c
3  *
4  *  Copyright (C) 2006-2007 Vladislav Bolkhovitin <vst@vlnb.net>
5  *  Copyright (C) 2007 Krzysztof Blaszkowski <kb@sysmikro.com.pl>
6  *  Copyright (C) 2007 CMS Distribution Limited
7  *
8  *  This program is free software; you can redistribute it and/or
9  *  modify it under the terms of the GNU General Public License
10  *  as published by the Free Software Foundation, version 2
11  *  of the License.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  *  GNU General Public License for more details.
17  */
18
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/sched.h>
26 #include <linux/mm.h>
27 #include <linux/unistd.h>
28 #include <linux/string.h>
29
30 #ifdef SCST_HIGHMEM
31 #include <linux/highmem.h>
32 #endif
33
34 #include "scst.h"
35 #include "scst_priv.h"
36 #include "scst_mem.h"
37
38 #define PURGE_INTERVAL          (60 * HZ)
39 #define PURGE_TIME_AFTER        PURGE_INTERVAL
40 #define SHRINK_TIME_AFTER       (1 * HZ)
41
42 static struct scst_sgv_pools_manager sgv_pools_mgr;
43
44 void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
45 {
46         tgt_dev->gfp_mask = __GFP_NOWARN;
47         tgt_dev->pool = &sgv_pools_mgr.default_set.norm;
48 }
49
50 void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
51 {
52         TRACE_MEM("%s", "Use clustering");
53         tgt_dev->gfp_mask = __GFP_NOWARN;
54         tgt_dev->pool = &sgv_pools_mgr.default_set.norm_clust;
55 }
56
57 void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
58 {
59         TRACE_MEM("%s", "Use ISA DMA memory");
60         tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
61         tgt_dev->pool = &sgv_pools_mgr.default_set.dma;
62 }
63
64 #ifdef SCST_HIGHMEM
65 void scst_sgv_pool_use_highmem(struct scst_tgt_dev *tgt_dev)
66 {
67         TRACE_MEM("%s", "Use HIGHMEM");
68         tgt_dev->gfp_mask = __GFP_NOWARN | __GFP_HIGHMEM;
69         tgt_dev->pool = &sgv_pools_mgr.default_set.highmem;
70 }
71 #endif
72
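/*
 * Try to merge SG entry "cur" with an already filled entry whose pages are
 * physically contiguous with it.  The "hint" entry is checked first, then
 * the entries before "cur" are scanned backwards.  On success the absorbing
 * entry's length is grown, sg[cur] is cleared and the absorbing entry's
 * index is returned; -1 is returned if no merge was possible.
 */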
73 static int scst_check_clustering(struct scatterlist *sg, int cur, int hint)
74 {
75         int res = -1;
76         int i = hint;
77         unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
78         int len_cur = sg[cur].length;
79         unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
80         int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
81         unsigned long pfn, pfn_next, full_page;
82
83 #ifdef SCST_HIGHMEM
 84         if (sg_page(&sg[cur]) >= highmem_start_page) {
 85                 TRACE_MEM("%s", "HIGHMEM page allocated, no clustering");
86                 goto out;
87         }
88 #endif
89
90 #if 0
91         TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
92                 pfn_cur, pfn_cur_next, len_cur, full_page_cur);
93 #endif
94
95         /* check the hint first */
96         if (i >= 0) {
97                 pfn = page_to_pfn(sg_page(&sg[i]));
98                 pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
99                 full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
100
101                 if ((pfn == pfn_cur_next) && full_page_cur)
102                         goto out_head;
103
104                 if ((pfn_next == pfn_cur) && full_page)
105                         goto out_tail;
106         }
107
108         /* ToDo: implement more intelligent search */
109         for (i = cur - 1; i >= 0; i--) {
110                 pfn = page_to_pfn(sg_page(&sg[i]));
111                 pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
112                 full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
113
114                 if ((pfn == pfn_cur_next) && full_page_cur)
115                         goto out_head;
116
117                 if ((pfn_next == pfn_cur) && full_page)
118                         goto out_tail;
119         }
120
121 out:
122         return res;
123
124 out_tail:
125         TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
126         sg[i].length += len_cur;
127         sg_clear(&sg[cur]);
128         res = i;
129         goto out;
130
131 out_head:
132         TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
133         sg_assign_page(&sg[i], sg_page(&sg[cur]));
134         sg[i].length += len_cur;
135         sg_clear(&sg[cur]);
136         res = i;
137         goto out;
138 }
139
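/*
 * Free the pages backing the first sg_count entries of an SG vector filled
 * by scst_alloc_sys_pages(), releasing them one order-0 page at a time.
 */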
140 static void scst_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
141         void *priv)
142 {
143         int i;
144
145         TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);
146
147         for (i = 0; i < sg_count; i++) {
148                 struct page *p = sg_page(&sg[i]);
149                 int len = sg[i].length;
150                 int pages =
151                         (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
152
153                 TRACE_MEM("page %lx, len %d, pages %d",
154                         (unsigned long)p, len, pages);
155
156                 while (pages > 0) {
157                         int order = 0;
158
159 /*
 160  * __free_pages() must be called with the same order that was used for
 161  * the allocation, so this small optimization is disabled.
162  */
163 #if 0
164                         if (len > 0) {
165                                 while (((1 << order) << PAGE_SHIFT) < len)
166                                         order++;
167                                 len = 0;
168                         }
169 #endif
170                         TRACE_MEM("free_pages(): order %d, page %lx",
171                                 order, (unsigned long)p);
172
173                         __free_pages(p, order);
174
175                         pages -= 1 << order;
176                         p += 1 << order;
177                 }
178         }
179 }
180
181 static struct page *scst_alloc_sys_pages(struct scatterlist *sg,
182         gfp_t gfp_mask, void *priv)
183 {
184         struct page *page = alloc_pages(gfp_mask, 0);
185
186         sg_set_page(sg, page, PAGE_SIZE, 0);
187         TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
188         if (page == NULL) {
189                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
190                         "sg page failed");
191         }
192         return page;
193 }
194
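/*
 * Allocate "pages" pages through alloc_fns->alloc_pages_fn() and fill the SG
 * vector with them.  When "clustered" is set, physically contiguous pages
 * are merged into existing entries and, if trans_tbl is not NULL, a
 * translation table is built mapping each page index to the 1-based number
 * of the SG entry holding it.  Returns the number of SG entries used, or 0
 * if an allocation failed (everything already allocated is freed).
 */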
195 static int scst_alloc_sg_entries(struct scatterlist *sg, int pages,
196         gfp_t gfp_mask, int clustered, struct trans_tbl_ent *trans_tbl,
197         const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
198 {
199         int sg_count = 0;
200         int pg, i, j;
201         int merged = -1;
202
203         TRACE_MEM("pages=%d, clustered=%d", pages, clustered);
204
205 #if 0
206         gfp_mask |= __GFP_COLD;
207 #endif
208 #ifdef SCST_STRICT_SECURITY
209         gfp_mask |= __GFP_ZERO;
210 #endif
211
212         for (pg = 0; pg < pages; pg++) {
213                 void *rc;
214 #ifdef DEBUG_OOM
215                 if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
216                     ((scst_random() % 10000) == 55))
217                         rc = NULL;
218                 else
219 #endif
220                         rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
221                                 priv);
222                 if (rc == NULL)
223                         goto out_no_mem;
224                 if (clustered) {
225                         merged = scst_check_clustering(sg, sg_count, merged);
226                         if (merged == -1)
227                                 sg_count++;
228                 } else
229                         sg_count++;
230                 TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
231                         sg_count);
232         }
233
234         if (clustered && (trans_tbl != NULL)) {
235                 pg = 0;
236                 for (i = 0; i < pages; i++) {
237                         int n = (sg[i].length >> PAGE_SHIFT) +
238                                 ((sg[i].length & ~PAGE_MASK) != 0);
239                         trans_tbl[i].pg_count = pg;
240                         for (j = 0; j < n; j++)
241                                 trans_tbl[pg++].sg_num = i+1;
242                         TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
243                                 trans_tbl[i].pg_count);
244                 }
245         }
246
247 out:
248         TRACE_MEM("sg_count=%d", sg_count);
249         return sg_count;
250
251 out_no_mem:
252         alloc_fns->free_pages_fn(sg, sg_count, priv);
253         sg_count = 0;
254         goto out;
255 }
256
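/*
 * Allocate the SG vector for an object too big to keep it in the embedded
 * sg_entries_data area.  For clustered pools the translation table is placed
 * in sg_entries_data while it still fits (order <= sgv_max_trans_order),
 * otherwise it is kzalloc()ed as well.
 */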
257 static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
258         int pages_to_alloc, int order, unsigned long gfp_mask)
259 {
260         int sz, tsz = 0;
261         int res = 0;
262
263         TRACE_ENTRY();
264
265         sz = pages_to_alloc * sizeof(obj->sg_entries[0]);
266
267         obj->sg_entries = (struct scatterlist *)kmalloc(sz, gfp_mask);
268         if (unlikely(obj->sg_entries == NULL)) {
269                 TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
270                         "SG vector failed (size %d)", sz);
271                 res = -ENOMEM;
272                 goto out;
273         }
274
275         sg_init_table(obj->sg_entries, pages_to_alloc);
276
277         if (obj->owner_pool->clustered) {
278                 if (order <= sgv_pools_mgr.sgv_max_trans_order) {
279                         obj->trans_tbl = (struct trans_tbl_ent *)obj->sg_entries_data;
280                         /*
 281                          * There is no need to clear trans_tbl: if it is needed,
 282                          * it will be fully rewritten in scst_alloc_sg_entries()
283                          */
284                 } else {
285                         tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
286                         obj->trans_tbl = (struct trans_tbl_ent *)kzalloc(tsz, gfp_mask);
287                         if (unlikely(obj->trans_tbl == NULL)) {
288                                 TRACE(TRACE_OUT_OF_MEM, "Allocation of trans_tbl "
289                                         "failed (size %d)", tsz);
290                                 res = -ENOMEM;
291                                 goto out_free;
292                         }
293                 }
294         }
295
296         TRACE_MEM("pages_to_alloc %d, order %d, sz %d, tsz %d, obj %p, "
297                 "sg_entries %p, trans_tbl %p", pages_to_alloc, order,
298                 sz, tsz, obj, obj->sg_entries, obj->trans_tbl);
299
300 out:
301         TRACE_EXIT_RES(res);
302         return res;
303
304 out_free:
305         kfree(obj->sg_entries);
306         obj->sg_entries = NULL;
307         goto out;
308 }
309
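/*
 * Destructor: release the pages owned by the object, free any externally
 * allocated SG vector and translation table, then return the object itself
 * to its pool's kmem cache.
 */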
310 static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
311 {
312         if (obj->sg_count != 0) {
313                 obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
314                         obj->sg_count, obj->allocator_priv);
315         }
316         if (obj->sg_entries != obj->sg_entries_data) {
317                 if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
318                         /* kfree() handles NULL parameter */
319                         kfree(obj->trans_tbl);
320                         obj->trans_tbl = NULL;
321                 }
322                 kfree(obj->sg_entries);
323         }
324
325         kmem_cache_free(obj->owner_pool->caches[obj->order], obj);
326         return;
327 }
328
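/*
 * Get a cached object of the given order: reuse one from the pool's
 * recycling list if possible, otherwise account for a new cached entry and
 * allocate a fresh, zeroed object from the corresponding kmem cache.
 */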
329 static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
330         int order, unsigned long gfp_mask)
331 {
332         struct sgv_pool_obj *obj;
333
334         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
335         if (likely(!list_empty(&pool->recycling_lists[order]))) {
336                 obj = list_entry(pool->recycling_lists[order].next,
337                          struct sgv_pool_obj,
338                         recycle_entry.recycling_list_entry);
339                 list_del(&obj->recycle_entry.sorted_recycling_list_entry);
340                 list_del(&obj->recycle_entry.recycling_list_entry);
341                 sgv_pools_mgr.mgr.thr.inactive_pages_total -= 1 << order;
342                 spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
343                 EXTRACHECKS_BUG_ON(obj->order != order);
344                 goto out;
345         }
346
347         pool->acc.cached_entries++;
348         pool->acc.cached_pages += (1 << order);
349
350         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
351
352         obj = kmem_cache_alloc(pool->caches[order],
353                 gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
354         if (likely(obj)) {
355                 memset(obj, 0, sizeof(*obj));
356                 obj->order = order;
357                 obj->owner_pool = pool;
358         } else {
359                 spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
360                 pool->acc.cached_entries--;
361                 pool->acc.cached_pages -= (1 << order);
362                 spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
363         }
364
365 out:
366         return obj;
367 }
368
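/*
 * Return an object to its pool's recycling list.  For clustered pools the
 * list is kept ordered so that objects with fewer SG entries are reused
 * first.  The object is also appended to the global, age-sorted recycling
 * list and the periodic purge work is scheduled if it is not already
 * pending.
 */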
369 static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
370 {
371         struct sgv_pool *owner = sgv->owner_pool;
372         struct list_head *entry;
373         struct list_head *list = &owner->recycling_lists[sgv->order];
374         int sched = 0;
375
376         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
377
378         TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order,
379                 sgv->sg_count);
380
381         if (owner->clustered) {
 382         /* Prefer reusing objects with fewer entries */
383                 __list_for_each(entry, list) {
384                         struct sgv_pool_obj *tmp = list_entry(entry,
385                                 struct sgv_pool_obj,
386                                 recycle_entry.recycling_list_entry);
387                         TRACE_DBG("tmp %p, order %d, sg_count %d", tmp,
388                                 tmp->order, tmp->sg_count);
389                         if (sgv->sg_count <= tmp->sg_count)
390                                 break;
391                 }
392                 entry = entry->prev;
393         } else
394                 entry = list;
395
396         TRACE_DBG("Adding in %p (list %p)", entry, list);
397         list_add(&sgv->recycle_entry.recycling_list_entry, entry);
398
399         list_add_tail(&sgv->recycle_entry.sorted_recycling_list_entry,
400                 &sgv_pools_mgr.mgr.sorted_recycling_list);
401
402         sgv->recycle_entry.time_stamp = jiffies;
403
404         sgv_pools_mgr.mgr.thr.inactive_pages_total += 1 << sgv->order;
405         if (!sgv_pools_mgr.mgr.pitbool_running) {
406                 sgv_pools_mgr.mgr.pitbool_running = 1;
407                 sched = 1;
408         }
409
410         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
411
412         if (sched)
413                 schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
414                         PURGE_INTERVAL);
415 }
416
 417 /* Must be called with pool_mgr_lock held */
418 static void __sgv_pool_cached_purge(struct sgv_pool_obj *e)
419 {
420         int pages = 1 << e->order;
421
422         list_del(&e->recycle_entry.sorted_recycling_list_entry);
423         list_del(&e->recycle_entry.recycling_list_entry);
424         e->owner_pool->acc.cached_entries--;
425         e->owner_pool->acc.cached_pages -= pages;
426         sgv_pools_mgr.mgr.thr.inactive_pages_total -= pages;
427
428         return;
429 }
430
 431 /* Must be called with pool_mgr_lock held */
432 static int sgv_pool_cached_purge(struct sgv_pool_obj *e, int t,
433         unsigned long rt)
434 {
435         EXTRACHECKS_BUG_ON(t == 0);
436
437         if (time_after(rt, (e->recycle_entry.time_stamp + t))) {
438                 __sgv_pool_cached_purge(e);
439                 return 0;
440         }
441         return 1;
442 }
443
 444 /* Called with pool_mgr_lock held, but drops and reacquires it inside */
445 static int sgv_pool_oom_free_objs(int pgs)
446 {
 447         TRACE_MEM("Shrinking pools by about %d pages", pgs);
448         while ((sgv_pools_mgr.mgr.thr.inactive_pages_total >
449                         sgv_pools_mgr.mgr.thr.lo_wmk) &&
450               (pgs > 0)) {
451                 struct sgv_pool_obj *e;
452
453                 sBUG_ON(list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list));
454
455                 e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
456                                struct sgv_pool_obj,
457                                recycle_entry.sorted_recycling_list_entry);
458
459                 __sgv_pool_cached_purge(e);
460                 pgs -= 1 << e->order;
461
462                 spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
463                 sgv_dtor_and_free(e);
464                 spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
465         }
466
467         TRACE_MEM("Pages remaining %d ", pgs);
468         return pgs;
469 }
470
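/*
 * High watermark check: unless the allocation may not fail (__GFP_NOFAIL),
 * verify that pages_to_alloc plus the currently active and inactive pages
 * stays below hi_wmk.  If the limit would be exceeded, inactive cached
 * objects are purged first; -ENOMEM is returned if that is not enough.
 */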
471 static int sgv_pool_hiwmk_check(int pages_to_alloc, int no_fail)
472 {
473         int res = 0;
474         int pages = pages_to_alloc;
475
476         if (unlikely(no_fail))
477                 goto out;
478
479         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
480
481         pages += atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total);
482         pages += sgv_pools_mgr.mgr.thr.inactive_pages_total;
483
484         if (unlikely((u32)pages > sgv_pools_mgr.mgr.thr.hi_wmk)) {
485                 pages -= sgv_pools_mgr.mgr.thr.hi_wmk;
486                 sgv_pools_mgr.mgr.thr.releases_on_hiwmk++;
487
488                 pages = sgv_pool_oom_free_objs(pages);
489                 if (pages > 0) {
 490                         TRACE(TRACE_OUT_OF_MEM, "Requested amount of "
 491                             "memory (%d pages) for the commands being "
 492                             "executed, together with the already "
 493                             "allocated memory, exceeds the allowed "
 494                             "maximum of %dMB. Consider increasing "
 495                             "scst_max_cmd_mem.", pages_to_alloc,
496                            sgv_pools_mgr.mgr.thr.hi_wmk >>
497                                 (20-PAGE_SHIFT));
498                         sgv_pools_mgr.mgr.thr.releases_failed++;
499                         res = -ENOMEM;
500                         goto out_unlock;
501                 }
502         }
503
504 out_unlock:
505         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
506
507 out:
508         return res;
509 }
510
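/*
 * Allocate an SG vector of at least "size" bytes from the pool.  There are
 * three paths: reuse a caller-supplied object (*sgv != NULL), take a cached
 * object of the matching order (possibly already populated with pages), or,
 * for big or explicitly uncached requests, kmalloc() a one-off object.  On
 * success the SG vector is returned, *count is set to the number of entries
 * and *sgv to the owning object; the last entry's length is trimmed so the
 * vector covers exactly "size" bytes.
 */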
511 struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
512         unsigned long gfp_mask, int flags, int *count,
513         struct sgv_pool_obj **sgv, void *priv)
514 {
515         struct sgv_pool_obj *obj;
516         int order, pages, cnt;
517         struct scatterlist *res = NULL;
518         int pages_to_alloc;
519         struct kmem_cache *cache;
520         int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
521         bool no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
522
523         TRACE_ENTRY();
524
525         if (unlikely(size == 0))
526                 goto out;
527
528         pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
529         order = get_order(size);
530
531         TRACE_MEM("size=%d, pages=%d, order=%d, flags=%x, *sgv %p", size, pages,
532                 order, flags, *sgv);
533
534         if (*sgv != NULL) {
535                 obj = *sgv;
536
537                 TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj, obj->order);
538                 EXTRACHECKS_BUG_ON(obj->order != order);
539                 EXTRACHECKS_BUG_ON(obj->sg_count != 0);
540                 pages_to_alloc = (1 << order);
541                 cache = pool->caches[obj->order];
542                 if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
543                         goto out_fail_free_sg_entries;
544         } else if ((order < SGV_POOL_ELEMENTS) && !no_cached) {
545                 cache = pool->caches[order];
546                 obj = sgv_pool_cached_get(pool, order, gfp_mask);
547                 if (unlikely(obj == NULL)) {
548                         TRACE(TRACE_OUT_OF_MEM, "Allocation of "
549                                 "sgv_pool_obj failed (size %d)", size);
550                         goto out_fail;
551                 }
552                 if (obj->sg_count != 0) {
553                         TRACE_MEM("Cached sgv_obj %p", obj);
554                         EXTRACHECKS_BUG_ON(obj->order != order);
555                         atomic_inc(&pool->cache_acc[order].hit_alloc);
556                         goto success;
557                 }
558                 pages_to_alloc = (1 << order);
559                 if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) {
560                         if (!(flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
561                                 goto out_fail_free;
562                 }
563                 TRACE_MEM("Brand new sgv_obj %p", obj);
564                 if (order <= sgv_pools_mgr.sgv_max_local_order) {
565                         obj->sg_entries = obj->sg_entries_data;
566                         sg_init_table(obj->sg_entries, pages_to_alloc);
567                         TRACE_MEM("sg_entries %p", obj->sg_entries);
568                         if (pool->clustered) {
569                                 obj->trans_tbl = (struct trans_tbl_ent *)
570                                         (obj->sg_entries + pages_to_alloc);
571                                 TRACE_MEM("trans_tbl %p", obj->trans_tbl);
572                                 /*
 573                                  * There is no need to clear trans_tbl: if needed,
 574                                  * it will be fully rewritten in scst_alloc_sg_entries()
575                                  */
576                         }
577                 } else {
578                         if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
579                                         order, gfp_mask) != 0))
580                                 goto out_fail_free;
581                 }
582
583                 if ((flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) &&
584                     (flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
585                         goto out_return;
586
587                 obj->allocator_priv = priv;
588                 if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
589                         goto out_fail_free_sg_entries;
590         } else {
591                 int sz;
592                 pages_to_alloc = pages;
593                 if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS)
594                         goto out_return2;
595                 cache = NULL;
596                 sz = sizeof(*obj) + pages*sizeof(obj->sg_entries[0]);
597                 obj = kmalloc(sz, gfp_mask);
598                 if (unlikely(obj == NULL)) {
599                         TRACE(TRACE_OUT_OF_MEM, "Allocation of "
600                                 "sgv_pool_obj failed (size %d)", size);
601                         goto out_fail;
602                 }
603                 memset(obj, 0, sizeof(*obj));
604                 obj->owner_pool = pool;
605                 obj->order = -1 - order;
606                 obj->allocator_priv = priv;
607
608                 obj->sg_entries = obj->sg_entries_data;
609                 sg_init_table(obj->sg_entries, pages);
610
611                 if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
612                         goto out_fail_free_sg_entries;
613                 TRACE_MEM("Big or no_cached sgv_obj %p (size %d)", obj, sz);
614         }
615
616         obj->sg_count = scst_alloc_sg_entries(obj->sg_entries,
617                 pages_to_alloc, gfp_mask, pool->clustered, obj->trans_tbl,
618                 &pool->alloc_fns, priv);
619         if (unlikely(obj->sg_count <= 0)) {
620                 obj->sg_count = 0;
621                 if ((flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL) && cache)
622                         goto out_return1;
623                 else
624                         goto out_fail_free_sg_entries;
625         }
626
627         if (cache) {
628                 atomic_add(pages_to_alloc - obj->sg_count,
629                         &pool->cache_acc[order].merged);
630         } else {
631                 if (no_cached) {
632                         atomic_add(pages_to_alloc,
633                                 &pool->acc.other_pages);
634                         atomic_add(pages_to_alloc - obj->sg_count,
635                                 &pool->acc.other_merged);
636                 } else {
637                         atomic_add(pages_to_alloc,
638                                 &pool->acc.big_pages);
639                         atomic_add(pages_to_alloc - obj->sg_count,
640                                 &pool->acc.big_merged);
641                 }
642         }
643
644 success:
645         atomic_add(1 << order, &sgv_pools_mgr.mgr.thr.active_pages_total);
646
647         if (cache) {
648                 int sg;
649                 atomic_inc(&pool->cache_acc[order].total_alloc);
650                 if (pool->clustered)
651                         cnt = obj->trans_tbl[pages-1].sg_num;
652                 else
653                         cnt = pages;
654                 sg = cnt-1;
655                 obj->orig_sg = sg;
656                 obj->orig_length = obj->sg_entries[sg].length;
657                 if (pool->clustered) {
658                         obj->sg_entries[sg].length =
659                                 (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
660                 }
661         } else {
662                 cnt = obj->sg_count;
663                 if (no_cached)
664                         atomic_inc(&pool->acc.other_alloc);
665                 else
666                         atomic_inc(&pool->acc.big_alloc);
667         }
668
669         *count = cnt;
670         res = obj->sg_entries;
671         *sgv = obj;
672
673         if (size & ~PAGE_MASK)
674                 obj->sg_entries[cnt-1].length -= PAGE_SIZE - (size & ~PAGE_MASK);
675
676         TRACE_MEM("sgv_obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
677                 "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
678                 obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);
679
680 out:
681         TRACE_EXIT_HRES(res);
682         return res;
683
684 out_return:
685         obj->allocator_priv = priv;
686         obj->owner_pool = pool;
687
688 out_return1:
689         *sgv = obj;
690         TRACE_MEM("Returning failed sgv_obj %p (count %d)", obj, *count);
691
692 out_return2:
693         *count = pages_to_alloc;
694         res = NULL;
695         goto out;
696
697 out_fail_free_sg_entries:
698         if (obj->sg_entries != obj->sg_entries_data) {
699                 if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
700                         /* kfree() handles NULL parameter */
701                         kfree(obj->trans_tbl);
702                         obj->trans_tbl = NULL;
703                 }
704                 kfree(obj->sg_entries);
705                 obj->sg_entries = NULL;
706         }
707
708 out_fail_free:
709         if (cache)
710                 sgv_pool_cached_put(obj);
711         else
712                 kfree(obj);
713
714 out_fail:
715         res = NULL;
716         *count = 0;
717         *sgv = NULL;
718         TRACE_MEM("%s", "Allocation failed");
719         goto out;
720 }
721
722 void *sgv_get_priv(struct sgv_pool_obj *sgv)
723 {
724         return sgv->allocator_priv;
725 }
726
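/*
 * Free an SG vector obtained from sgv_pool_alloc().  Cached objects
 * (order >= 0) get their trimmed last entry restored and are put back on
 * the recycling list; big/uncached objects release their pages and are
 * kfree()d.  The active pages counter is updated accordingly.
 */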
727 void sgv_pool_free(struct sgv_pool_obj *sgv)
728 {
729         int order = sgv->order, pages;
730
731         TRACE_MEM("Freeing sgv_obj %p, order %d, sg_entries %p, "
732                 "sg_count %d, allocator_priv %p", sgv, order,
733                 sgv->sg_entries, sgv->sg_count, sgv->allocator_priv);
734         if (order >= 0) {
735                 sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;
736
737                 pages = (sgv->sg_count) ? 1 << order : 0;
738                 sgv_pool_cached_put(sgv);
739         } else {
740                 sgv->owner_pool->alloc_fns.free_pages_fn(sgv->sg_entries,
741                         sgv->sg_count, sgv->allocator_priv);
742
743                 pages = (sgv->sg_count) ? 1 << (-order - 1) : 0;
744                 kfree(sgv);
745         }
746
747         atomic_sub(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);
748         return;
749 }
750
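/*
 * Allocate a plain SG vector of "size" bytes directly from the system page
 * allocator, bypassing the pools but still subject to the high watermark
 * check.
 */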
751 struct scatterlist *scst_alloc(int size, unsigned long gfp_mask, int *count)
752 {
753         struct scatterlist *res;
754         int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
755         struct sgv_pool_alloc_fns sys_alloc_fns = {
756                 scst_alloc_sys_pages, scst_free_sys_sg_entries };
757         int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
758
759         TRACE_ENTRY();
760
761         atomic_inc(&sgv_pools_mgr.sgv_other_total_alloc);
762
763         if (sgv_pool_hiwmk_check(pages, no_fail) != 0) {
764                 res = NULL;
765                 goto out;
766         }
767
768         res = kmalloc(pages*sizeof(*res), gfp_mask);
769         if (res == NULL)
770                 goto out;
771
772         sg_init_table(res, pages);
773
774         /*
 775          * If we allowed clustering here, scst_free() would have trouble
 776          * figuring out how many pages are in the SG vector, so clustering
 777          * is never used.
778          */
779         *count = scst_alloc_sg_entries(res, pages, gfp_mask, 0, NULL,
780                         &sys_alloc_fns, NULL);
781         if (*count <= 0)
782                 goto out_free;
783
784         atomic_add(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);
785
786 out:
787         TRACE_MEM("Alloced sg %p (count %d)", res, *count);
788
789         TRACE_EXIT_HRES(res);
790         return res;
791
792 out_free:
793         kfree(res);
794         res = NULL;
795         goto out;
796 }
797
798 void scst_free(struct scatterlist *sg, int count)
799 {
800         TRACE_MEM("Freeing sg=%p", sg);
801
802         atomic_sub(count, &sgv_pools_mgr.mgr.thr.active_pages_total);
803
804         scst_free_sys_sg_entries(sg, count, NULL);
805         kfree(sg);
806         return;
807 }
808
809 static void sgv_pool_cached_init(struct sgv_pool *pool)
810 {
811         int i;
812         for (i = 0; i < SGV_POOL_ELEMENTS; i++)
813                 INIT_LIST_HEAD(&pool->recycling_lists[i]);
814 }
815
816 int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
817 {
818         int res = -ENOMEM;
819         int i;
820         struct sgv_pool_obj *obj;
821
822         TRACE_ENTRY();
823
824         memset(pool, 0, sizeof(*pool));
825
826         atomic_set(&pool->acc.other_alloc, 0);
827         atomic_set(&pool->acc.big_alloc, 0);
828         atomic_set(&pool->acc.other_pages, 0);
829         atomic_set(&pool->acc.big_pages, 0);
830         atomic_set(&pool->acc.other_merged, 0);
831         atomic_set(&pool->acc.big_merged, 0);
832
833         pool->clustered = clustered;
834         pool->alloc_fns.alloc_pages_fn = scst_alloc_sys_pages;
835         pool->alloc_fns.free_pages_fn = scst_free_sys_sg_entries;
836
837         TRACE_MEM("name %s, sizeof(*obj)=%zd, clustered=%d", name, sizeof(*obj),
838                 clustered);
839
840         strncpy(pool->name, name, sizeof(pool->name)-1);
841         pool->name[sizeof(pool->name)-1] = '\0';
842
843         for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
844                 int size;
845
846                 atomic_set(&pool->cache_acc[i].total_alloc, 0);
847                 atomic_set(&pool->cache_acc[i].hit_alloc, 0);
848                 atomic_set(&pool->cache_acc[i].merged, 0);
849
850                 if (i <= sgv_pools_mgr.sgv_max_local_order) {
851                         size = sizeof(*obj) + (1 << i) *
852                                 (sizeof(obj->sg_entries[0]) +
853                                  (clustered ? sizeof(obj->trans_tbl[0]) : 0));
854                 } else if (i <= sgv_pools_mgr.sgv_max_trans_order) {
 855                         /* The SG vector (sg_entries) is allocated outside the
 856                            object, but the translation table is still embedded. */
857                         size = sizeof(*obj) + (1 << i) *
858                                 ((clustered ? sizeof(obj->trans_tbl[0]) : 0));
859                 } else {
860                         size = sizeof(*obj);
861
 862                         /* Both sg_entries and trans_tbl are allocated separately
                                with kmalloc()/kzalloc() */
863                 }
864
865                 TRACE_MEM("pages=%d, size=%d", 1 << i, size);
866
867                 scnprintf(pool->cache_names[i], sizeof(pool->cache_names[i]),
868                         "%s-%luK", name, (PAGE_SIZE >> 10) << i);
869                 pool->caches[i] = kmem_cache_create(pool->cache_names[i],
870                         size, 0, SCST_SLAB_FLAGS, NULL
871 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
872                         , NULL);
873 #else
874                         );
875 #endif
876                 if (pool->caches[i] == NULL) {
877                         TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool cache "
878                                 "%s(%d) failed", name, i);
879                         goto out_free;
880                 }
881         }
882
883         sgv_pool_cached_init(pool);
884
885         mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
886         list_add_tail(&pool->sgv_pool_list_entry,
887                 &sgv_pools_mgr.scst_sgv_pool_list);
888         mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);
889
890         res = 0;
891
892 out:
893         TRACE_EXIT_RES(res);
894         return res;
895
896 out_free:
897         for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
898                 if (pool->caches[i]) {
899                         kmem_cache_destroy(pool->caches[i]);
900                         pool->caches[i] = NULL;
901                 } else
902                         break;
903         }
904         goto out;
905 }
906
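/*
 * Compute sgv_max_local_order and sgv_max_trans_order: the largest orders
 * for which both the SG vector and the translation table, respectively the
 * translation table alone, still fit into the space left in a page after
 * the sgv_pool_obj header.
 */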
907 static void sgv_pool_evaluate_local_order(struct scst_sgv_pools_manager *pmgr)
908 {
909         int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);
910
911         pmgr->sgv_max_local_order = get_order(
912                 (((space4sgv_ttbl /
913                   (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))) *
914                         PAGE_SIZE) & PAGE_MASK)) - 1;
915
916         pmgr->sgv_max_trans_order = get_order(
917                 (((space4sgv_ttbl /
918                   (sizeof(struct trans_tbl_ent))) * PAGE_SIZE) & PAGE_MASK)) - 1;
919
920         TRACE_MEM("sgv_max_local_order %d, sgv_max_trans_order %d",
921                 pmgr->sgv_max_local_order, pmgr->sgv_max_trans_order);
922         TRACE_MEM("max object size with embedded sgv & ttbl %zd",
923                 (1 << pmgr->sgv_max_local_order) *
924                 (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))
925                 + sizeof(struct sgv_pool_obj));
926         TRACE_MEM("max object size with embedded sgv (!clustered) %zd",
927                 (1 << pmgr->sgv_max_local_order) *
928                 (sizeof(struct scatterlist))
929                 + sizeof(struct sgv_pool_obj));
930         TRACE_MEM("max object size with embedded ttbl %zd",
931                 (1 << pmgr->sgv_max_trans_order) * sizeof(struct trans_tbl_ent) +
932                 sizeof(struct sgv_pool_obj));
933 }
934
935 void sgv_pool_deinit(struct sgv_pool *pool)
936 {
937         int i;
938
939         TRACE_ENTRY();
940
941         mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
942         list_del(&pool->sgv_pool_list_entry);
943         mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);
944
945         for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
946                 struct sgv_pool_obj *e;
947
948                 spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
949                 while (!list_empty(&pool->recycling_lists[i])) {
950                         e = list_entry(pool->recycling_lists[i].next,
951                                  struct sgv_pool_obj,
952                                 recycle_entry.recycling_list_entry);
953
954                         __sgv_pool_cached_purge(e);
955                         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
956
957                         EXTRACHECKS_BUG_ON(e->owner_pool != pool);
958                         sgv_dtor_and_free(e);
959
960                         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
961                 }
962                 spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
963
964                 if (pool->caches[i])
965                         kmem_cache_destroy(pool->caches[i]);
966                 pool->caches[i] = NULL;
967         }
968
969         TRACE_EXIT();
970 }
971
972 void sgv_pool_set_allocator(struct sgv_pool *pool,
973         struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
974         void (*free_pages_fn)(struct scatterlist *, int, void *))
975 {
976         pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
977         pool->alloc_fns.free_pages_fn = free_pages_fn;
978         return;
979 }
980
981 struct sgv_pool *sgv_pool_create(const char *name, int clustered)
982 {
983         struct sgv_pool *pool;
984         int rc;
985
986         TRACE_ENTRY();
987
988         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
989         if (pool == NULL) {
990                 TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
991                 goto out;
992         }
993
994         rc = sgv_pool_init(pool, name, clustered);
995         if (rc != 0)
996                 goto out_free;
997
998 out:
999         TRACE_EXIT_RES(pool != NULL);
1000         return pool;
1001
1002 out_free:
1003         kfree(pool);
1004         pool = NULL;
1005         goto out;
1006 }
1007
1008 void sgv_pool_destroy(struct sgv_pool *pool)
1009 {
1010         TRACE_ENTRY();
1011
1012         sgv_pool_deinit(pool);
1013         kfree(pool);
1014
1015         TRACE_EXIT();
1016 }
1017
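/*
 * Memory shrinker callback.  When nr > 0, cached objects that have been
 * inactive for longer than SHRINK_TIME_AFTER are purged from the age-sorted
 * recycling list until about nr pages have been released or no sufficiently
 * old object remains.  Returns the number of inactive cached pages still
 * held.
 */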
1018 static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
1019 {
1020         TRACE_ENTRY();
1021
1022         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1023
1024         if (nr > 0) {
1025                 struct sgv_pool_obj *e;
1026                 unsigned long rt = jiffies;
1027
1028                 while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
1029                         e = list_entry(
1030                                 sgv_pools_mgr.mgr.sorted_recycling_list.next,
1031                                 struct sgv_pool_obj,
1032                                 recycle_entry.sorted_recycling_list_entry);
1033
1034                         if (sgv_pool_cached_purge(e, SHRINK_TIME_AFTER, rt) == 0) {
1035                                 nr -= 1 << e->order;
1036                                 spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1037                                 sgv_dtor_and_free(e);
1038                                 spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1039                         } else
1040                                 break;
1041
1042                         if (nr <= 0)
1043                                 break;
1044                 }
1045         }
1046
1047         nr = sgv_pools_mgr.mgr.thr.inactive_pages_total;
1048
1049         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1050
1051         TRACE_EXIT();
1052         return nr;
1053 }
1054
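/*
 * Delayed work handler: purge cached objects that have been inactive for
 * longer than PURGE_TIME_AFTER and reschedule itself as long as inactive
 * pages remain.
 */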
1055 static void sgv_pool_cached_pitbool(void *p)
1056 {
1057         u32 total_pages;
1058         struct sgv_pool_obj *e;
1059         unsigned long rt = jiffies;
1060
1061         TRACE_ENTRY();
1062
1063         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1064
1065         sgv_pools_mgr.mgr.pitbool_running = 0;
1066
1067         while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
1068                 e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
1069                         struct sgv_pool_obj,
1070                         recycle_entry.sorted_recycling_list_entry);
1071
1072                 if (sgv_pool_cached_purge(e, PURGE_TIME_AFTER, rt) == 0) {
1073                         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1074                         sgv_dtor_and_free(e);
1075                         spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1076                 } else
1077                         break;
1078         }
1079
1080         total_pages = sgv_pools_mgr.mgr.thr.inactive_pages_total;
1081
1082         spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
1083
1084         if (total_pages) {
1085                 schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
1086                         PURGE_INTERVAL);
1087         }
1088
1089         TRACE_EXIT();
1090         return;
1091 }
1092
1093 int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
1094 {
1095         int res;
1096         struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;
1097
1098         TRACE_ENTRY();
1099
1100         memset(pools, 0, sizeof(*pools));
1101
1102         atomic_set(&sgv_pools_mgr.mgr.thr.active_pages_total, 0);
1103
1104         sgv_pools_mgr.mgr.thr.hi_wmk = mem_hwmark >> PAGE_SHIFT;
1105         sgv_pools_mgr.mgr.thr.lo_wmk = mem_lwmark >> PAGE_SHIFT;
1106
1107         sgv_pool_evaluate_local_order(&sgv_pools_mgr);
1108
1109         atomic_set(&pools->sgv_other_total_alloc, 0);
1110         INIT_LIST_HEAD(&pools->scst_sgv_pool_list);
1111         mutex_init(&pools->scst_sgv_pool_mutex);
1112
1113         INIT_LIST_HEAD(&pools->mgr.sorted_recycling_list);
1114         spin_lock_init(&pools->mgr.pool_mgr_lock);
1115
1116         res = sgv_pool_init(&pools->default_set.norm, "sgv", 0);
1117         if (res != 0)
1118                 goto out;
1119
1120         res = sgv_pool_init(&pools->default_set.norm_clust, "sgv-clust", 1);
1121         if (res != 0)
1122                 goto out_free_clust;
1123
1124         res = sgv_pool_init(&pools->default_set.dma, "sgv-dma", 0);
1125         if (res != 0)
1126                 goto out_free_norm;
1127
1128 #ifdef SCST_HIGHMEM
1129         res = sgv_pool_init(&pools->default_set.highmem, "sgv-high", 0);
1130         if (res != 0)
1131                 goto out_free_dma;
1132 #endif
1133
1134 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
1135         INIT_DELAYED_WORK(&pools->mgr.apit_pool,
1136                 (void (*)(struct work_struct *))sgv_pool_cached_pitbool);
1137 #else
1138         INIT_WORK(&pools->mgr.apit_pool, sgv_pool_cached_pitbool, NULL);
1139 #endif
1140
1141 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
1142         pools->mgr.sgv_shrinker = set_shrinker(DEFAULT_SEEKS,
1143                 sgv_pool_cached_shrinker);
1144 #else
1145         pools->mgr.sgv_shrinker.shrink = sgv_pool_cached_shrinker;
1146         pools->mgr.sgv_shrinker.seeks = DEFAULT_SEEKS;
1147         register_shrinker(&pools->mgr.sgv_shrinker);
1148 #endif
1149
1150 out:
1151         TRACE_EXIT_RES(res);
1152         return res;
1153
1154 #ifdef SCST_HIGHMEM
1155 out_free_dma:
1156         sgv_pool_deinit(&pools->default_set.dma);
1157 #endif
1158
1159 out_free_norm:
1160         sgv_pool_deinit(&pools->default_set.norm);
1161
1162 out_free_clust:
1163         sgv_pool_deinit(&pools->default_set.norm_clust);
1164         goto out;
1165 }
1166
1167 void scst_sgv_pools_deinit(void)
1168 {
1169         struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;
1170
1171         TRACE_ENTRY();
1172
1173 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
1174         remove_shrinker(pools->mgr.sgv_shrinker);
1175 #else
1176         unregister_shrinker(&pools->mgr.sgv_shrinker);
1177 #endif
1178
1179         cancel_delayed_work(&pools->mgr.apit_pool);
1180
1181 #ifdef SCST_HIGHMEM
1182         sgv_pool_deinit(&pools->default_set.highmem);
1183 #endif
1184         sgv_pool_deinit(&pools->default_set.dma);
1185         sgv_pool_deinit(&pools->default_set.norm);
1186         sgv_pool_deinit(&pools->default_set.norm_clust);
1187
1188         flush_scheduled_work();
1189
1190         TRACE_EXIT();
1191         return;
1192 }
1193
1194 static void scst_do_sgv_read(struct seq_file *seq, const struct sgv_pool *pool)
1195 {
1196         int i, total = 0, hit = 0, merged = 0, allocated = 0;
1197         int oa, om;
1198
1199         for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
1200                 int t;
1201
1202                 hit += atomic_read(&pool->cache_acc[i].hit_alloc);
1203                 total += atomic_read(&pool->cache_acc[i].total_alloc);
1204
1205                 t = atomic_read(&pool->cache_acc[i].total_alloc) -
1206                         atomic_read(&pool->cache_acc[i].hit_alloc);
1207                 allocated += t * (1 << i);
1208                 merged += atomic_read(&pool->cache_acc[i].merged);
1209         }
1210
1211         seq_printf(seq, "\n%-30s %-11d %-11d %-11d %d/%d (P/O)\n", pool->name,
1212                 hit, total, (allocated != 0) ? merged*100/allocated : 0,
1213                 pool->acc.cached_pages, pool->acc.cached_entries);
1214
1215         for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
1216                 int t = atomic_read(&pool->cache_acc[i].total_alloc) -
1217                         atomic_read(&pool->cache_acc[i].hit_alloc);
1218                 allocated = t * (1 << i);
1219                 merged = atomic_read(&pool->cache_acc[i].merged);
1220
1221                 seq_printf(seq, "  %-28s %-11d %-11d %d\n",
1222                         pool->cache_names[i],
1223                         atomic_read(&pool->cache_acc[i].hit_alloc),
1224                         atomic_read(&pool->cache_acc[i].total_alloc),
1225                         (allocated != 0) ? merged*100/allocated : 0);
1226         }
1227
1228         allocated = atomic_read(&pool->acc.big_pages);
1229         merged = atomic_read(&pool->acc.big_merged);
1230         oa = atomic_read(&pool->acc.other_pages);
1231         om = atomic_read(&pool->acc.other_merged);
1232
1233         seq_printf(seq, "  %-40s %d/%-9d %d/%d\n", "big/other",
1234                    atomic_read(&pool->acc.big_alloc),
1235                    atomic_read(&pool->acc.other_alloc),
1236                    (allocated != 0) ? merged*100/allocated : 0,
 1237                    (oa != 0) ? om*100/oa : 0);
1238
1239         return;
1240 }
1241
1242 int sgv_pool_procinfo_show(struct seq_file *seq, void *v)
1243 {
1244         struct sgv_pool *pool;
1245
1246         TRACE_ENTRY();
1247
1248         seq_printf(seq, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n\n",
1249                 "Inactive/active pages",
1250                 sgv_pools_mgr.mgr.thr.inactive_pages_total,
1251                 atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total),
1252                 "Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.thr.hi_wmk,
1253                 sgv_pools_mgr.mgr.thr.lo_wmk, "Hi watermark releases/failures",
1254                 sgv_pools_mgr.mgr.thr.releases_on_hiwmk,
1255                 sgv_pools_mgr.mgr.thr.releases_failed);
1256
1257         seq_printf(seq, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
1258                 "% merged", "Cached");
1259
1260         mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
1261         list_for_each_entry(pool, &sgv_pools_mgr.scst_sgv_pool_list,
1262                         sgv_pool_list_entry) {
1263                 scst_do_sgv_read(seq, pool);
1264         }
1265         mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);
1266
1267         seq_printf(seq, "\n%-42s %-11d\n", "other",
1268                 atomic_read(&sgv_pools_mgr.sgv_other_total_alloc));
1269
1270         TRACE_EXIT();
1271         return 0;
1272 }