Patch from Bart Van Assche <bart.vanassche@gmail.com>:
scst/src/scst_mem.c
/*
 *  scst_mem.c
 *
 *  Copyright (C) 2006-2007 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2007 Krzysztof Blaszkowski <kb@sysmikro.com.pl>
 *  Copyright (C) 2007 CMS Distribution Limited
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/unistd.h>
#include <asm/string.h>

#ifdef SCST_HIGHMEM
#include <linux/highmem.h>
#endif

#include "scst.h"
#include "scst_priv.h"
#include "scst_mem.h"

#define PURGE_INTERVAL          (60 * HZ)
#define PURGE_TIME_AFTER        PURGE_INTERVAL
#define SHRINK_TIME_AFTER       (1 * HZ)

static struct scst_sgv_pools_manager sgv_pools_mgr;

void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
{
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm;
}

void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use clustering");
        tgt_dev->gfp_mask = __GFP_NOWARN;
        tgt_dev->pool = &sgv_pools_mgr.default_set.norm_clust;
}

void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use ISA DMA memory");
        tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
        tgt_dev->pool = &sgv_pools_mgr.default_set.dma;
}

#ifdef SCST_HIGHMEM
void scst_sgv_pool_use_highmem(struct scst_tgt_dev *tgt_dev)
{
        TRACE_MEM("%s", "Use HIGHMEM");
        tgt_dev->gfp_mask = __GFP_NOWARN | __GFP_HIGHMEM;
        tgt_dev->pool = &sgv_pools_mgr.default_set.highmem;
}
#endif

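/*
 * Illustrative sketch (not part of the original file): a device handler
 * attaching a tgt_dev selects one of the default pools with the helpers
 * above; a host limited to ISA DMA would pick the DMA pool.  The
 * scsi_dev/host field names here are assumptions for illustration only:
 *
 *      if (tgt_dev->dev->scsi_dev->host->unchecked_isa_dma)
 *              scst_sgv_pool_use_dma(tgt_dev);
 *      else
 *              scst_sgv_pool_use_norm(tgt_dev);
 */
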
static int scst_check_clustering(struct scatterlist *sg, int cur, int hint)
{
        int res = -1;
        int i = hint;
        unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
        int len_cur = sg[cur].length;
        unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
        int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
        unsigned long pfn, pfn_next, full_page;

#ifdef SCST_HIGHMEM
        if (PageHighMem(sg_page(&sg[cur]))) {
                TRACE_MEM("%s", "HIGHMEM page allocated, no clustering");
                goto out;
        }
#endif

#if 0
        TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
                pfn_cur, pfn_cur_next, len_cur, full_page_cur);
#endif

        /* Check the hint first */
        if (i >= 0) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

        /* ToDo: implement a more intelligent search */
        for (i = cur - 1; i >= 0; i--) {
                pfn = page_to_pfn(sg_page(&sg[i]));
                pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
                full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;

                if ((pfn == pfn_cur_next) && full_page_cur)
                        goto out_head;

                if ((pfn_next == pfn_cur) && full_page)
                        goto out_tail;
        }

out:
        return res;

out_tail:
        TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;

out_head:
        TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
        sg_assign_page(&sg[i], sg_page(&sg[cur]));
        sg[i].length += len_cur;
        sg_clear(&sg[cur]);
        res = i;
        goto out;
}

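/*
 * Worked example (illustrative): assume sg[0] covers pfn 100 with a full
 * page (length == PAGE_SIZE) and the page just allocated into sg[1] is
 * pfn 101.  Then pfn_cur == 101 and, checking i == 0, pfn_next == 101 ==
 * pfn_cur, so the tail-merge case fires: sg[0].length grows to
 * 2 * PAGE_SIZE, sg[1] is cleared and 0 is returned as the new hint.
 * Had the new page been pfn 99 instead, pfn_cur_next == 100 == pfn of
 * sg[0], so the head-merge case would re-point sg[0] at the new page and
 * grow it by the same amount.
 */
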
static void scst_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
        void *priv)
{
        int i;

        TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);

        for (i = 0; i < sg_count; i++) {
                struct page *p = sg_page(&sg[i]);
                int len = sg[i].length;
                int pages =
                        (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);

                TRACE_MEM("page %lx, len %d, pages %d",
                        (unsigned long)p, len, pages);

                while (pages > 0) {
                        int order = 0;

/*
 * __free_pages() must be called with the same order that was used at
 * allocation time, so this small optimization is disabled.
 */
#if 0
                        if (len > 0) {
                                while (((1 << order) << PAGE_SHIFT) < len)
                                        order++;
                                len = 0;
                        }
#endif
                        TRACE_MEM("free_pages(): order %d, page %lx",
                                order, (unsigned long)p);

                        __free_pages(p, order);

                        pages -= 1 << order;
                        p += 1 << order;
                }
        }
}

static struct page *scst_alloc_sys_pages(struct scatterlist *sg,
        gfp_t gfp_mask, void *priv)
{
        struct page *page = alloc_pages(gfp_mask, 0);

        sg_set_page(sg, page, PAGE_SIZE, 0);
        TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
        if (page == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
                        "sg page failed");
        }
        return page;
}

static int scst_alloc_sg_entries(struct scatterlist *sg, int pages,
        gfp_t gfp_mask, int clustered, struct trans_tbl_ent *trans_tbl,
        const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
{
        int sg_count = 0;
        int pg, i, j;
        int merged = -1;

        TRACE_MEM("pages=%d, clustered=%d", pages, clustered);

#if 0
        gfp_mask |= __GFP_COLD;
#endif
#ifdef SCST_STRICT_SECURITY
        gfp_mask |= __GFP_ZERO;
#endif

        for (pg = 0; pg < pages; pg++) {
                void *rc;
#ifdef DEBUG_OOM
                if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
                    ((scst_random() % 10000) == 55))
                        rc = NULL;
                else
#endif
                        rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
                                priv);
                if (rc == NULL)
                        goto out_no_mem;
                if (clustered) {
                        merged = scst_check_clustering(sg, sg_count, merged);
                        if (merged == -1)
                                sg_count++;
                } else
                        sg_count++;
                TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
                        sg_count);
        }

        if (clustered && (trans_tbl != NULL)) {
                pg = 0;
                for (i = 0; i < pages; i++) {
                        int n = (sg[i].length >> PAGE_SHIFT) +
                                ((sg[i].length & ~PAGE_MASK) != 0);
                        trans_tbl[i].pg_count = pg;
                        for (j = 0; j < n; j++)
                                trans_tbl[pg++].sg_num = i+1;
                        TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
                                trans_tbl[i].pg_count);
                }
        }

out:
        TRACE_MEM("sg_count=%d", sg_count);
        return sg_count;

out_no_mem:
        alloc_fns->free_pages_fn(sg, sg_count, priv);
        sg_count = 0;
        goto out;
}

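/*
 * Illustrative trans_tbl layout: if 4 pages were requested and clustering
 * merged them into 2 SG entries of 3 pages and 1 page, the loop above
 * yields
 *
 *      trans_tbl[0].sg_num = 1;        trans_tbl[0].pg_count = 0;
 *      trans_tbl[1].sg_num = 1;        trans_tbl[1].pg_count = 3;
 *      trans_tbl[2].sg_num = 1;
 *      trans_tbl[3].sg_num = 2;
 *
 * i.e. trans_tbl[pg].sg_num is the 1-based index of the SG entry holding
 * page pg, and trans_tbl[i].pg_count is the index of the first page
 * covered by SG entry i.  sgv_pool_alloc() relies on both properties.
 */
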
static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
        int pages_to_alloc, int order, unsigned long gfp_mask)
{
        int sz, tsz = 0;
        int res = 0;

        TRACE_ENTRY();

        sz = pages_to_alloc * sizeof(obj->sg_entries[0]);

        obj->sg_entries = kmalloc(sz, gfp_mask);
        if (unlikely(obj->sg_entries == NULL)) {
                TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
                        "SG vector failed (size %d)", sz);
                res = -ENOMEM;
                goto out;
        }

        sg_init_table(obj->sg_entries, pages_to_alloc);

        if (obj->owner_pool->clustered) {
                if (order <= sgv_pools_mgr.sgv_max_trans_order) {
                        obj->trans_tbl = (struct trans_tbl_ent *)obj->sg_entries_data;
                        /*
                         * No need to clear trans_tbl; if needed, it will be
                         * fully rewritten in scst_alloc_sg_entries().
                         */
                } else {
                        tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
                        obj->trans_tbl = kzalloc(tsz, gfp_mask);
                        if (unlikely(obj->trans_tbl == NULL)) {
                                TRACE(TRACE_OUT_OF_MEM, "Allocation of trans_tbl "
                                        "failed (size %d)", tsz);
                                res = -ENOMEM;
                                goto out_free;
                        }
                }
        }

        TRACE_MEM("pages_to_alloc %d, order %d, sz %d, tsz %d, obj %p, "
                "sg_entries %p, trans_tbl %p", pages_to_alloc, order,
                sz, tsz, obj, obj->sg_entries, obj->trans_tbl);

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        kfree(obj->sg_entries);
        obj->sg_entries = NULL;
        goto out;
}

static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
{
        if (obj->sg_count != 0) {
                obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
                        obj->sg_count, obj->allocator_priv);
        }
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles a NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
        }

        kmem_cache_free(obj->owner_pool->caches[obj->order], obj);
        return;
}

static struct sgv_pool_obj *sgv_pool_cached_get(struct sgv_pool *pool,
        int order, unsigned long gfp_mask)
{
        struct sgv_pool_obj *obj;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        if (likely(!list_empty(&pool->recycling_lists[order]))) {
                obj = list_entry(pool->recycling_lists[order].next,
                         struct sgv_pool_obj,
                        recycle_entry.recycling_list_entry);
                list_del(&obj->recycle_entry.sorted_recycling_list_entry);
                list_del(&obj->recycle_entry.recycling_list_entry);
                sgv_pools_mgr.mgr.thr.inactive_pages_total -= 1 << order;
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                EXTRACHECKS_BUG_ON(obj->order != order);
                goto out;
        }

        pool->acc.cached_entries++;
        pool->acc.cached_pages += (1 << order);

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        obj = kmem_cache_alloc(pool->caches[order],
                gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
        if (likely(obj)) {
                memset(obj, 0, sizeof(*obj));
                obj->order = order;
                obj->owner_pool = pool;
        } else {
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                pool->acc.cached_entries--;
                pool->acc.cached_pages -= (1 << order);
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

out:
        return obj;
}

static void sgv_pool_cached_put(struct sgv_pool_obj *sgv)
{
        struct sgv_pool *owner = sgv->owner_pool;
        struct list_head *entry;
        struct list_head *list = &owner->recycling_lists[sgv->order];
        int sched = 0;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_MEM("sgv %p, order %d, sg_count %d", sgv, sgv->order,
                sgv->sg_count);

        if (owner->clustered) {
                /* Make objects with fewer entries more preferred */
                __list_for_each(entry, list) {
                        struct sgv_pool_obj *tmp = list_entry(entry,
                                struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);
                        TRACE_DBG("tmp %p, order %d, sg_count %d", tmp,
                                tmp->order, tmp->sg_count);
                        if (sgv->sg_count <= tmp->sg_count)
                                break;
                }
                entry = entry->prev;
        } else
                entry = list;

        TRACE_DBG("Adding in %p (list %p)", entry, list);
        list_add(&sgv->recycle_entry.recycling_list_entry, entry);

        list_add_tail(&sgv->recycle_entry.sorted_recycling_list_entry,
                &sgv_pools_mgr.mgr.sorted_recycling_list);

        sgv->recycle_entry.time_stamp = jiffies;

        sgv_pools_mgr.mgr.thr.inactive_pages_total += 1 << sgv->order;
        if (!sgv_pools_mgr.mgr.pitbool_running) {
                sgv_pools_mgr.mgr.pitbool_running = 1;
                sched = 1;
        }

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (sched)
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
}

/* Must be called with pool_mgr_lock held */
static void __sgv_pool_cached_purge(struct sgv_pool_obj *e)
{
        int pages = 1 << e->order;

        list_del(&e->recycle_entry.sorted_recycling_list_entry);
        list_del(&e->recycle_entry.recycling_list_entry);
        e->owner_pool->acc.cached_entries--;
        e->owner_pool->acc.cached_pages -= pages;
        sgv_pools_mgr.mgr.thr.inactive_pages_total -= pages;

        return;
}

/* Must be called with pool_mgr_lock held */
static int sgv_pool_cached_purge(struct sgv_pool_obj *e, int t,
        unsigned long rt)
{
        EXTRACHECKS_BUG_ON(t == 0);

        if (time_after(rt, (e->recycle_entry.time_stamp + t))) {
                __sgv_pool_cached_purge(e);
                return 0;
        }
        return 1;
}

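/*
 * Illustrative timing: an object recycled at jiffies == J becomes
 * eligible for purging once the current jiffies value is after J + t;
 * with t == PURGE_TIME_AFTER (60 * HZ) that is one minute after it was
 * put back.  A return value of 0 means "purged", 1 means the entry is
 * still too young, which also stops the sorted-list scans in the callers
 * below, since that list is ordered oldest first.
 */
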
/* Called with pool_mgr_lock held, but drops/reacquires it inside */
static int sgv_pool_oom_free_objs(int pgs)
{
        TRACE_MEM("Shrinking pools by about %d pages", pgs);
        while ((sgv_pools_mgr.mgr.thr.inactive_pages_total >
                        sgv_pools_mgr.mgr.thr.lo_wmk) &&
              (pgs > 0)) {
                struct sgv_pool_obj *e;

                sBUG_ON(list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list));

                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                               struct sgv_pool_obj,
                               recycle_entry.sorted_recycling_list_entry);

                __sgv_pool_cached_purge(e);
                pgs -= 1 << e->order;

                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                sgv_dtor_and_free(e);
                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
        }

        TRACE_MEM("Pages remaining %d", pgs);
        return pgs;
}

static int sgv_pool_hiwmk_check(int pages_to_alloc, int no_fail)
{
        int res = 0;
        int pages = pages_to_alloc;

        if (unlikely(no_fail))
                goto out;

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        pages += atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total);
        pages += sgv_pools_mgr.mgr.thr.inactive_pages_total;

        if (unlikely((u32)pages > sgv_pools_mgr.mgr.thr.hi_wmk)) {
                pages -= sgv_pools_mgr.mgr.thr.hi_wmk;
                sgv_pools_mgr.mgr.thr.releases_on_hiwmk++;

                pages = sgv_pool_oom_free_objs(pages);
                if (pages > 0) {
                        TRACE(TRACE_OUT_OF_MEM, "The requested amount of "
                            "memory (%d pages) for the commands being "
                            "executed, together with the already "
                            "allocated memory, exceeds the allowed "
                            "maximum %dMB. Consider increasing "
                            "scst_max_cmd_mem.", pages_to_alloc,
                           sgv_pools_mgr.mgr.thr.hi_wmk >>
                                (20-PAGE_SHIFT));
                        sgv_pools_mgr.mgr.thr.releases_failed++;
                        res = -ENOMEM;
                        goto out_unlock;
                }
        }

out_unlock:
        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

out:
        return res;
}

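/*
 * Unit note (illustrative): the watermarks are kept in pages, so the
 * "%dMB" above is computed as hi_wmk >> (20 - PAGE_SHIFT).  With 4 KiB
 * pages that divides by 256, the number of pages per MiB; e.g. a hi_wmk
 * of 65536 pages reports 256 MB.
 */
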
struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
        unsigned long gfp_mask, int flags, int *count,
        struct sgv_pool_obj **sgv, void *priv)
{
        struct sgv_pool_obj *obj;
        int order, pages, cnt;
        struct scatterlist *res = NULL;
        int pages_to_alloc;
        struct kmem_cache *cache;
        int no_cached = flags & SCST_POOL_ALLOC_NO_CACHED;
        bool no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        TRACE_ENTRY();

        if (unlikely(size == 0))
                goto out;

        pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        order = get_order(size);

        TRACE_MEM("size=%d, pages=%d, order=%d, flags=%x, *sgv %p", size, pages,
                order, flags, *sgv);

        if (*sgv != NULL) {
                obj = *sgv;

                TRACE_MEM("Supplied sgv_obj %p, sgv_order %d", obj, obj->order);
                EXTRACHECKS_BUG_ON(obj->order != order);
                EXTRACHECKS_BUG_ON(obj->sg_count != 0);
                pages_to_alloc = (1 << order);
                cache = pool->caches[obj->order];
                if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
                        goto out_fail_free_sg_entries;
        } else if ((order < SGV_POOL_ELEMENTS) && !no_cached) {
                cache = pool->caches[order];
                obj = sgv_pool_cached_get(pool, order, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }
                if (obj->sg_count != 0) {
                        TRACE_MEM("Cached sgv_obj %p", obj);
                        EXTRACHECKS_BUG_ON(obj->order != order);
                        atomic_inc(&pool->cache_acc[order].hit_alloc);
                        goto success;
                }
                pages_to_alloc = (1 << order);
                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) {
                        if (!(flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                                goto out_fail_free;
                }
                TRACE_MEM("Brand new sgv_obj %p", obj);
                if (order <= sgv_pools_mgr.sgv_max_local_order) {
                        obj->sg_entries = obj->sg_entries_data;
                        sg_init_table(obj->sg_entries, pages_to_alloc);
                        TRACE_MEM("sg_entries %p", obj->sg_entries);
                        if (pool->clustered) {
                                obj->trans_tbl = (struct trans_tbl_ent *)
                                        (obj->sg_entries + pages_to_alloc);
                                TRACE_MEM("trans_tbl %p", obj->trans_tbl);
                                /*
                                 * No need to clear trans_tbl; if needed, it
                                 * will be fully rewritten in
                                 * scst_alloc_sg_entries().
                                 */
                        }
                } else {
                        if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
                                        order, gfp_mask) != 0))
                                goto out_fail_free;
                }

                if ((flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS) &&
                    (flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
                        goto out_return;

                obj->allocator_priv = priv;
                if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
                        goto out_fail_free_sg_entries;
        } else {
                int sz;
                pages_to_alloc = pages;
                if (flags & SCST_POOL_NO_ALLOC_ON_CACHE_MISS)
                        goto out_return2;
                cache = NULL;
                sz = sizeof(*obj) + pages*sizeof(obj->sg_entries[0]);
                obj = kmalloc(sz, gfp_mask);
                if (unlikely(obj == NULL)) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of "
                                "sgv_pool_obj failed (size %d)", size);
                        goto out_fail;
                }
                memset(obj, 0, sizeof(*obj));
                obj->owner_pool = pool;
                obj->order = -1 - order;
                obj->allocator_priv = priv;

                obj->sg_entries = obj->sg_entries_data;
                sg_init_table(obj->sg_entries, pages);

                if (sgv_pool_hiwmk_check(pages_to_alloc, no_fail) != 0)
                        goto out_fail_free_sg_entries;
                TRACE_MEM("Big or no_cached sgv_obj %p (size %d)", obj, sz);
        }

        obj->sg_count = scst_alloc_sg_entries(obj->sg_entries,
                pages_to_alloc, gfp_mask, pool->clustered, obj->trans_tbl,
                &pool->alloc_fns, priv);
        if (unlikely(obj->sg_count <= 0)) {
                obj->sg_count = 0;
                if ((flags & SCST_POOL_RETURN_OBJ_ON_ALLOC_FAIL) && cache)
                        goto out_return1;
                else
                        goto out_fail_free_sg_entries;
        }

        if (cache) {
                atomic_add(pages_to_alloc - obj->sg_count,
                        &pool->cache_acc[order].merged);
        } else {
                if (no_cached) {
                        atomic_add(pages_to_alloc,
                                &pool->acc.other_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.other_merged);
                } else {
                        atomic_add(pages_to_alloc,
                                &pool->acc.big_pages);
                        atomic_add(pages_to_alloc - obj->sg_count,
                                &pool->acc.big_merged);
                }
        }

success:
        atomic_add(1 << order, &sgv_pools_mgr.mgr.thr.active_pages_total);

        if (cache) {
                int sg;
                atomic_inc(&pool->cache_acc[order].total_alloc);
                if (pool->clustered)
                        cnt = obj->trans_tbl[pages-1].sg_num;
                else
                        cnt = pages;
                sg = cnt-1;
                obj->orig_sg = sg;
                obj->orig_length = obj->sg_entries[sg].length;
                if (pool->clustered) {
                        obj->sg_entries[sg].length =
                                (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
                }
        } else {
                cnt = obj->sg_count;
                if (no_cached)
                        atomic_inc(&pool->acc.other_alloc);
                else
                        atomic_inc(&pool->acc.big_alloc);
        }

        *count = cnt;
        res = obj->sg_entries;
        *sgv = obj;

        if (size & ~PAGE_MASK)
                obj->sg_entries[cnt-1].length -= PAGE_SIZE - (size & ~PAGE_MASK);

        TRACE_MEM("sgv_obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
                "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
                obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);

out:
        TRACE_EXIT_HRES(res);
        return res;

out_return:
        obj->allocator_priv = priv;
        obj->owner_pool = pool;

out_return1:
        *sgv = obj;
        TRACE_MEM("Returning failed sgv_obj %p (count %d)", obj, *count);

out_return2:
        *count = pages_to_alloc;
        res = NULL;
        goto out;

out_fail_free_sg_entries:
        if (obj->sg_entries != obj->sg_entries_data) {
                if (obj->trans_tbl != (struct trans_tbl_ent *)obj->sg_entries_data) {
                        /* kfree() handles a NULL parameter */
                        kfree(obj->trans_tbl);
                        obj->trans_tbl = NULL;
                }
                kfree(obj->sg_entries);
                obj->sg_entries = NULL;
        }

out_fail_free:
        if (cache)
                sgv_pool_cached_put(obj);
        else
                kfree(obj);

out_fail:
        res = NULL;
        *count = 0;
        *sgv = NULL;
        TRACE_MEM("%s", "Allocation failed");
        goto out;
}

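/*
 * Typical call sequence (illustrative sketch only; error handling and
 * the surrounding command setup are omitted, and "bufflen" is just a
 * stand-in for the transfer length):
 *
 *      struct sgv_pool_obj *sgv = NULL;
 *      int count;
 *      struct scatterlist *sg;
 *
 *      sg = sgv_pool_alloc(tgt_dev->pool, bufflen, tgt_dev->gfp_mask,
 *              0, &count, &sgv, NULL);
 *      if (sg == NULL)
 *              return -ENOMEM;
 *      ...
 *      sgv_pool_free(sgv);
 */
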
void *sgv_get_priv(struct sgv_pool_obj *sgv)
{
        return sgv->allocator_priv;
}

void sgv_pool_free(struct sgv_pool_obj *sgv)
{
        int order = sgv->order, pages;

        TRACE_MEM("Freeing sgv_obj %p, order %d, sg_entries %p, "
                "sg_count %d, allocator_priv %p", sgv, order,
                sgv->sg_entries, sgv->sg_count, sgv->allocator_priv);
        if (order >= 0) {
                sgv->sg_entries[sgv->orig_sg].length = sgv->orig_length;

                pages = (sgv->sg_count) ? 1 << order : 0;
                sgv_pool_cached_put(sgv);
        } else {
                sgv->owner_pool->alloc_fns.free_pages_fn(sgv->sg_entries,
                        sgv->sg_count, sgv->allocator_priv);

                pages = (sgv->sg_count) ? 1 << (-order - 1) : 0;
                kfree(sgv);
        }

        atomic_sub(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);
        return;
}

struct scatterlist *scst_alloc(int size, unsigned long gfp_mask, int *count)
{
        struct scatterlist *res;
        int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
        struct sgv_pool_alloc_fns sys_alloc_fns = {
                scst_alloc_sys_pages, scst_free_sys_sg_entries };
        int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);

        TRACE_ENTRY();

        atomic_inc(&sgv_pools_mgr.sgv_other_total_alloc);

        if (sgv_pool_hiwmk_check(pages, no_fail) != 0) {
                res = NULL;
                goto out;
        }

        res = kmalloc(pages*sizeof(*res), gfp_mask);
        if (res == NULL)
                goto out;

        sg_init_table(res, pages);

        /*
         * If clustering were allowed here, scst_free() would have trouble
         * figuring out how many pages are in the SG vector, so clustering
         * is never used.
         */
        *count = scst_alloc_sg_entries(res, pages, gfp_mask, 0, NULL,
                        &sys_alloc_fns, NULL);
        if (*count <= 0)
                goto out_free;

        atomic_add(pages, &sgv_pools_mgr.mgr.thr.active_pages_total);

out:
        TRACE_MEM("Allocated sg %p (count %d)", res, *count);

        TRACE_EXIT_HRES(res);
        return res;

out_free:
        kfree(res);
        res = NULL;
        goto out;
}

void scst_free(struct scatterlist *sg, int count)
{
        TRACE_MEM("Freeing sg=%p", sg);

        atomic_sub(count, &sgv_pools_mgr.mgr.thr.active_pages_total);

        scst_free_sys_sg_entries(sg, count, NULL);
        kfree(sg);
        return;
}

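/*
 * Illustrative pairing: scst_alloc() hands back both the vector and its
 * entry count, and scst_free() must receive that same count; with no
 * clustering there is exactly one page per SG entry:
 *
 *      int count;
 *      struct scatterlist *sg = scst_alloc(size, GFP_KERNEL, &count);
 *
 *      if (sg != NULL) {
 *              ...
 *              scst_free(sg, count);
 *      }
 */
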
static void sgv_pool_cached_init(struct sgv_pool *pool)
{
        int i;
        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                INIT_LIST_HEAD(&pool->recycling_lists[i]);
        }
}

int sgv_pool_init(struct sgv_pool *pool, const char *name, int clustered)
{
        int res = -ENOMEM;
        int i;
        struct sgv_pool_obj *obj;

        TRACE_ENTRY();

        memset(pool, 0, sizeof(*pool));

        atomic_set(&pool->acc.other_alloc, 0);
        atomic_set(&pool->acc.big_alloc, 0);
        atomic_set(&pool->acc.other_pages, 0);
        atomic_set(&pool->acc.big_pages, 0);
        atomic_set(&pool->acc.other_merged, 0);
        atomic_set(&pool->acc.big_merged, 0);

        pool->clustered = clustered;
        pool->alloc_fns.alloc_pages_fn = scst_alloc_sys_pages;
        pool->alloc_fns.free_pages_fn = scst_free_sys_sg_entries;

        TRACE_MEM("name %s, sizeof(*obj)=%zd, clustered=%d", name, sizeof(*obj),
                clustered);

        strncpy(pool->name, name, sizeof(pool->name)-1);
        pool->name[sizeof(pool->name)-1] = '\0';

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int size;

                atomic_set(&pool->cache_acc[i].total_alloc, 0);
                atomic_set(&pool->cache_acc[i].hit_alloc, 0);
                atomic_set(&pool->cache_acc[i].merged, 0);

                if (i <= sgv_pools_mgr.sgv_max_local_order) {
                        size = sizeof(*obj) + (1 << i) *
                                (sizeof(obj->sg_entries[0]) +
                                 (clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else if (i <= sgv_pools_mgr.sgv_max_trans_order) {
                        /*
                         * The sg_entries array is allocated outside the
                         * object, but the trans_tbl is still embedded.
                         */
                        size = sizeof(*obj) + (1 << i) *
                                ((clustered ? sizeof(obj->trans_tbl[0]) : 0));
                } else {
                        size = sizeof(*obj);

                        /* Both sg_entries and trans_tbl are kmalloc()'ed */
                }

                TRACE_MEM("pages=%d, size=%d", 1 << i, size);

                scnprintf(pool->cache_names[i], sizeof(pool->cache_names[i]),
                        "%s-%luK", name, (PAGE_SIZE >> 10) << i);
                pool->caches[i] = kmem_cache_create(pool->cache_names[i],
                        size, 0, SCST_SLAB_FLAGS, NULL
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
                        , NULL);
#else
                        );
#endif
                if (pool->caches[i] == NULL) {
                        TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool cache "
                                "%s(%d) failed", name, i);
                        goto out_free;
                }
        }

        sgv_pool_cached_init(pool);

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_add_tail(&pool->sgv_pool_list_entry,
                &sgv_pools_mgr.scst_sgv_pool_list);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        res = 0;

out:
        TRACE_EXIT_RES(res);
        return res;

out_free:
        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                if (pool->caches[i]) {
                        kmem_cache_destroy(pool->caches[i]);
                        pool->caches[i] = NULL;
                } else
                        break;
        }
        goto out;
}

static void sgv_pool_evaluate_local_order(struct scst_sgv_pools_manager *pmgr)
{
        int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);

        pmgr->sgv_max_local_order = get_order(
                (((space4sgv_ttbl /
                  (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))) *
                        PAGE_SIZE) & PAGE_MASK)) - 1;

        pmgr->sgv_max_trans_order = get_order(
                (((space4sgv_ttbl /
                  (sizeof(struct trans_tbl_ent))) * PAGE_SIZE) & PAGE_MASK)) - 1;

        TRACE_MEM("sgv_max_local_order %d, sgv_max_trans_order %d",
                pmgr->sgv_max_local_order, pmgr->sgv_max_trans_order);
        TRACE_MEM("max object size with embedded sgv & ttbl %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded sgv (!clustered) %zd",
                (1 << pmgr->sgv_max_local_order) *
                (sizeof(struct scatterlist))
                + sizeof(struct sgv_pool_obj));
        TRACE_MEM("max object size with embedded ttbl %zd",
                (1 << pmgr->sgv_max_trans_order) * sizeof(struct trans_tbl_ent) +
                sizeof(struct sgv_pool_obj));
}

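/*
 * Worked example (illustrative; the struct sizes are made up, as the
 * real ones are architecture-dependent): with 4 KiB pages,
 * sizeof(struct sgv_pool_obj) == 96, sizeof(struct scatterlist) == 20
 * and sizeof(struct trans_tbl_ent) == 4, space4sgv_ttbl == 4000, so
 * 4000 / 24 == 166 combined entries fit beside the object;
 * get_order(166 * PAGE_SIZE) == 8, hence sgv_max_local_order == 7,
 * i.e. objects of up to 128 pages keep both arrays embedded.
 */
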
void sgv_pool_deinit(struct sgv_pool *pool)
{
        int i;

        TRACE_ENTRY();

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_del(&pool->sgv_pool_list_entry);
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                struct sgv_pool_obj *e;

                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                while (!list_empty(&pool->recycling_lists[i])) {
                        e = list_entry(pool->recycling_lists[i].next,
                                 struct sgv_pool_obj,
                                recycle_entry.recycling_list_entry);

                        __sgv_pool_cached_purge(e);
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                        EXTRACHECKS_BUG_ON(e->owner_pool != pool);
                        sgv_dtor_and_free(e);

                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                }
                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

                if (pool->caches[i])
                        kmem_cache_destroy(pool->caches[i]);
                pool->caches[i] = NULL;
        }

        TRACE_EXIT();
}

void sgv_pool_set_allocator(struct sgv_pool *pool,
        struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
        void (*free_pages_fn)(struct scatterlist *, int, void *))
{
        pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
        pool->alloc_fns.free_pages_fn = free_pages_fn;
        return;
}

struct sgv_pool *sgv_pool_create(const char *name, int clustered)
{
        struct sgv_pool *pool;
        int rc;

        TRACE_ENTRY();

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (pool == NULL) {
                TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
                goto out;
        }

        rc = sgv_pool_init(pool, name, clustered);
        if (rc != 0)
                goto out_free;

out:
        TRACE_EXIT_RES(pool != NULL);
        return pool;

out_free:
        kfree(pool);
        pool = NULL;
        goto out;
}

void sgv_pool_destroy(struct sgv_pool *pool)
{
        TRACE_ENTRY();

        sgv_pool_deinit(pool);
        kfree(pool);

        TRACE_EXIT();
}

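/*
 * Illustrative lifetime (not from the original source): a target driver
 * wanting its own clustered pool would typically do
 *
 *      struct sgv_pool *pool = sgv_pool_create("mydrv", 1);
 *
 *      if (pool == NULL)
 *              return -ENOMEM;
 *      ...
 *      sgv_pool_destroy(pool);
 *
 * optionally installing custom page-allocation hooks in between with
 * sgv_pool_set_allocator().  "mydrv" is a placeholder name.
 */
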
static int sgv_pool_cached_shrinker(int nr, gfp_t gfpm)
{
        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (nr > 0) {
                struct sgv_pool_obj *e;
                unsigned long rt = jiffies;

                while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                        e = list_entry(
                                sgv_pools_mgr.mgr.sorted_recycling_list.next,
                                struct sgv_pool_obj,
                                recycle_entry.sorted_recycling_list_entry);

                        if (sgv_pool_cached_purge(e, SHRINK_TIME_AFTER, rt) == 0) {
                                nr -= 1 << e->order;
                                spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                                sgv_dtor_and_free(e);
                                spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        } else
                                break;

                        if (nr <= 0)
                                break;
                }
        }

        nr = sgv_pools_mgr.mgr.thr.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        TRACE_EXIT();
        return nr;
}

static void sgv_pool_cached_pitbool(void *p)
{
        u32 total_pages;
        struct sgv_pool_obj *e;
        unsigned long rt = jiffies;

        TRACE_ENTRY();

        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        sgv_pools_mgr.mgr.pitbool_running = 0;

        while (!list_empty(&sgv_pools_mgr.mgr.sorted_recycling_list)) {
                e = list_entry(sgv_pools_mgr.mgr.sorted_recycling_list.next,
                        struct sgv_pool_obj,
                        recycle_entry.sorted_recycling_list_entry);

                if (sgv_pool_cached_purge(e, PURGE_TIME_AFTER, rt) == 0) {
                        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                        sgv_dtor_and_free(e);
                        spin_lock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);
                } else
                        break;
        }

        total_pages = sgv_pools_mgr.mgr.thr.inactive_pages_total;

        spin_unlock_bh(&sgv_pools_mgr.mgr.pool_mgr_lock);

        if (total_pages) {
                schedule_delayed_work(&sgv_pools_mgr.mgr.apit_pool,
                        PURGE_INTERVAL);
        }

        TRACE_EXIT();
        return;
}

int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
{
        int res;
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

        memset(pools, 0, sizeof(*pools));

        atomic_set(&sgv_pools_mgr.mgr.thr.active_pages_total, 0);

        sgv_pools_mgr.mgr.thr.hi_wmk = mem_hwmark >> PAGE_SHIFT;
        sgv_pools_mgr.mgr.thr.lo_wmk = mem_lwmark >> PAGE_SHIFT;

        sgv_pool_evaluate_local_order(&sgv_pools_mgr);

        atomic_set(&pools->sgv_other_total_alloc, 0);
        INIT_LIST_HEAD(&pools->scst_sgv_pool_list);
        mutex_init(&pools->scst_sgv_pool_mutex);

        INIT_LIST_HEAD(&pools->mgr.sorted_recycling_list);
        spin_lock_init(&pools->mgr.pool_mgr_lock);

        res = sgv_pool_init(&pools->default_set.norm, "sgv", 0);
        if (res != 0)
                goto out;

        res = sgv_pool_init(&pools->default_set.norm_clust, "sgv-clust", 1);
        if (res != 0)
                goto out_free_norm;

        res = sgv_pool_init(&pools->default_set.dma, "sgv-dma", 0);
        if (res != 0)
                goto out_free_clust;

#ifdef SCST_HIGHMEM
        res = sgv_pool_init(&pools->default_set.highmem, "sgv-high", 0);
        if (res != 0)
                goto out_free_dma;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
        INIT_DELAYED_WORK(&pools->mgr.apit_pool,
                (void (*)(struct work_struct *))sgv_pool_cached_pitbool);
#else
        INIT_WORK(&pools->mgr.apit_pool, sgv_pool_cached_pitbool, NULL);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        pools->mgr.sgv_shrinker = set_shrinker(DEFAULT_SEEKS,
                sgv_pool_cached_shrinker);
#else
        pools->mgr.sgv_shrinker.shrink = sgv_pool_cached_shrinker;
        pools->mgr.sgv_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&pools->mgr.sgv_shrinker);
#endif

out:
        TRACE_EXIT_RES(res);
        return res;

        /*
         * Unwind in reverse order of initialization, so each label frees
         * only the pools that were successfully set up.
         */
#ifdef SCST_HIGHMEM
out_free_dma:
        sgv_pool_deinit(&pools->default_set.dma);
#endif

out_free_clust:
        sgv_pool_deinit(&pools->default_set.norm_clust);

out_free_norm:
        sgv_pool_deinit(&pools->default_set.norm);
        goto out;
}

void scst_sgv_pools_deinit(void)
{
        struct scst_sgv_pools_manager *pools = &sgv_pools_mgr;

        TRACE_ENTRY();

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
        remove_shrinker(pools->mgr.sgv_shrinker);
#else
        unregister_shrinker(&pools->mgr.sgv_shrinker);
#endif

        cancel_delayed_work(&pools->mgr.apit_pool);

#ifdef SCST_HIGHMEM
        sgv_pool_deinit(&pools->default_set.highmem);
#endif
        sgv_pool_deinit(&pools->default_set.dma);
        sgv_pool_deinit(&pools->default_set.norm);
        sgv_pool_deinit(&pools->default_set.norm_clust);

        flush_scheduled_work();

        TRACE_EXIT();
        return;
}

static void scst_do_sgv_read(struct seq_file *seq, const struct sgv_pool *pool)
{
        int i, total = 0, hit = 0, merged = 0, allocated = 0;
        int oa, om;

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t;

                hit += atomic_read(&pool->cache_acc[i].hit_alloc);
                total += atomic_read(&pool->cache_acc[i].total_alloc);

                t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated += t * (1 << i);
                merged += atomic_read(&pool->cache_acc[i].merged);
        }

        seq_printf(seq, "\n%-30s %-11d %-11d %-11d %d/%d (P/O)\n", pool->name,
                hit, total, (allocated != 0) ? merged*100/allocated : 0,
                pool->acc.cached_pages, pool->acc.cached_entries);

        for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
                int t = atomic_read(&pool->cache_acc[i].total_alloc) -
                        atomic_read(&pool->cache_acc[i].hit_alloc);
                allocated = t * (1 << i);
                merged = atomic_read(&pool->cache_acc[i].merged);

                seq_printf(seq, "  %-28s %-11d %-11d %d\n",
                        pool->cache_names[i],
                        atomic_read(&pool->cache_acc[i].hit_alloc),
                        atomic_read(&pool->cache_acc[i].total_alloc),
                        (allocated != 0) ? merged*100/allocated : 0);
        }

        allocated = atomic_read(&pool->acc.big_pages);
        merged = atomic_read(&pool->acc.big_merged);
        oa = atomic_read(&pool->acc.other_pages);
        om = atomic_read(&pool->acc.other_merged);

        seq_printf(seq, "  %-40s %d/%-9d %d/%d\n", "big/other",
                   atomic_read(&pool->acc.big_alloc),
                   atomic_read(&pool->acc.other_alloc),
                   (allocated != 0) ? merged*100/allocated : 0,
                   (oa != 0) ? om*100/oa : 0);

        return;
}

int sgv_pool_procinfo_show(struct seq_file *seq, void *v)
{
        struct sgv_pool *pool;

        TRACE_ENTRY();

        seq_printf(seq, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n\n",
                "Inactive/active pages",
                sgv_pools_mgr.mgr.thr.inactive_pages_total,
                atomic_read(&sgv_pools_mgr.mgr.thr.active_pages_total),
                "Hi/lo watermarks [pages]", sgv_pools_mgr.mgr.thr.hi_wmk,
                sgv_pools_mgr.mgr.thr.lo_wmk, "Hi watermark releases/failures",
                sgv_pools_mgr.mgr.thr.releases_on_hiwmk,
                sgv_pools_mgr.mgr.thr.releases_failed);

        seq_printf(seq, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
                "% merged", "Cached");

        mutex_lock(&sgv_pools_mgr.scst_sgv_pool_mutex);
        list_for_each_entry(pool, &sgv_pools_mgr.scst_sgv_pool_list,
                        sgv_pool_list_entry) {
                scst_do_sgv_read(seq, pool);
        }
        mutex_unlock(&sgv_pools_mgr.scst_sgv_pool_mutex);

        seq_printf(seq, "\n%-42s %-11d\n", "other",
                atomic_read(&sgv_pools_mgr.sgv_other_total_alloc));

        TRACE_EXIT();
        return 0;
}