list_for_each_entry(tgt_dev, shead,
sess_tgt_dev_list_entry) {
- spin_lock_nested(&tgt_dev->tgt_dev_lock,
- tgt_dev->lun);
+			/* Lockdep triggers a false positive here. */
+ spin_lock(&tgt_dev->tgt_dev_lock);
}
}
struct scst_tgt_dev *tgt_dev;
list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
sess_tgt_dev_list_entry) {
- spin_lock_nested(&tgt_dev->tgt_dev_lock,
- tgt_dev->lun);
+			/* Lockdep triggers a false positive here. */
+ spin_lock(&tgt_dev->tgt_dev_lock);
}
}
struct scst_tgt_dev *tgt_dev;
list_for_each_entry_reverse(tgt_dev, sess_tgt_dev_list_head,
sess_tgt_dev_list_entry) {
- spin_unlock_bh(&tgt_dev->tgt_dev_lock);
+ spin_unlock(&tgt_dev->tgt_dev_lock);
}
}
goto out_destroy_acg_cache;
}
+ /*
+	 * All mgmt stubs, UAs and sense buffers are bursty and losing them
+ * may have fatal consequences, so let's have big pools for them.
+ */
+
scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
mempool_free_slab, scst_mgmt_stub_cachep);
if (scst_mgmt_stub_mempool == NULL) {
goto out_destroy_mgmt_mempool;
}
- scst_ua_mempool = mempool_create(64, mempool_alloc_slab,
+ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
mempool_free_slab, scst_ua_cachep);
if (scst_ua_mempool == NULL) {
res = -ENOMEM;
goto out_destroy_mgmt_stub_mempool;
}
- /*
- * Loosing sense may have fatal consequences, so let's have a big pool
- */
- scst_sense_mempool = mempool_create(128, mempool_alloc_slab,
+ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
mempool_free_slab, scst_sense_cachep);
if (scst_sense_mempool == NULL) {
res = -ENOMEM;