This patch has been backported from kernel 2.6.18.

Per-beancounter slab usage is printed to the kernel log when a VE ID is
written to /proc/.ub_slabinfo.

Signed-off-by: Andrey Vagin <avagin@parallels.com>
---
 include/linux/slab.h |    5 ++++
 kernel/ub/ub_proc.c  |   43 ++++++++++++++++++++++++++++++++++++++++
 mm/slab.c            |   53 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 101 insertions(+), 0 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0e87a85..90a8de7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,11 @@ void pgd_dtor(void *, kmem_cache_t *, unsigned long);
 
 void show_slab_info(void);
 
+
+struct user_beancounter;
+extern void slab_walk_ub(struct user_beancounter *ub,
+		void (*show)(const char *name, int count));
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _LINUX_SLAB_H */
diff --git a/kernel/ub/ub_proc.c b/kernel/ub/ub_proc.c
index 891fbf4..ae387ba 100644
--- a/kernel/ub/ub_proc.c
+++ b/kernel/ub/ub_proc.c
@@ -237,6 +237,46 @@ static struct file_operations ub_file_operations = {
 	.open = &ub_proc_open
 };
 
+static void ub_count_slab_show_one(const char *name, int count)
+{
+       printk("%s: %u\n", name, count);
+}
+
+static int ub_count_slab_show(struct user_beancounter *ub)
+{
+	slab_walk_ub(ub, ub_count_slab_show_one);
+	return 0;
+}
+
+static ssize_t ub_count_slab_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	char *veid_str, *tmp;
+	unsigned long veid;
+	struct user_beancounter *ub;
+	if (count) {
+		veid_str = kmalloc(count, GFP_KERNEL);
+		if (!veid_str)
+			return -ENOMEM;
+		if (copy_from_user(veid_str, buf, count)) {
+			kfree(veid_str);
+			return -EFAULT;
+		}
+		veid = simple_strtoul(veid_str, &tmp, 10);
+		kfree(veid_str);
+		ub = get_beancounter_byuid(veid, 0);
+		if (!ub)
+			return -ENOENT;
+		ub_count_slab_show(ub);
+		put_beancounter(ub);
+	}
+	return count;
+}
+
+static struct file_operations ub_count_slab_entry = {
+	.write		= ub_count_slab_write,
+};
+
 #ifdef CONFIG_UBC_DEBUG_KMEM
 #include <linux/seq_file.h>
 #include <linux/kmem_cache.h>
@@ -377,4 +417,7 @@ void __init beancounter_proc_init(void)
 	else
 		panic("Can't create /proc/user_beancounters_debug entry!\n");
 #endif
+	entry = create_proc_entry(".ub_slabinfo", S_IWUSR, NULL);
+	if (entry)
+		entry->proc_fops = &ub_count_slab_entry;
 }
diff --git a/mm/slab.c b/mm/slab.c
index 25d3420..f15cba8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2949,6 +2949,59 @@ struct seq_operations slabinfo_op = {
 	.show	= s_show,
 };
 
+/* Show object, belonging to each beancounter */
+static int check_ubcs_on_slab(kmem_cache_t *c, struct slab *s,
+		struct user_beancounter *ub)
+{
+	int i, sum = 0;
+	struct user_beancounter **ubcs;
+
+	ubcs = slab_ubcs(c, s);
+	for (i = 0; i < c->num; i++)
+		if (ubcs[i] == ub)
+			sum++;
+
+	return sum;
+}
+
+static int check_ubcs_on_cache(kmem_cache_t *c,
+		struct user_beancounter *ub)
+{
+	int sum = 0;
+	struct kmem_list3 *l3;
+	unsigned long flags;
+	struct slab *slab;
+
+	l3 = list3_data(c);
+	if (l3 == NULL)
+		goto out;
+
+	spin_lock_irqsave(&c->spinlock, flags);
+	list_for_each_entry(slab, &l3->slabs_full, list)
+		sum += check_ubcs_on_slab(c, slab, ub);
+	list_for_each_entry(slab, &l3->slabs_partial, list)
+		sum += check_ubcs_on_slab(c, slab, ub);
+	spin_unlock_irqrestore(&c->spinlock, flags);
+out:
+	return sum;
+}
+
+void slab_walk_ub(struct user_beancounter *ub,
+		void (*show)(const char *name, int count))
+{
+	kmem_cache_t *c;
+	int cnt;
+
+	down(&cache_chain_sem);
+	list_for_each_entry(c, &cache_chain, next) {
+		if (c->flags & SLAB_UBC) {
+			cnt = check_ubcs_on_cache(c, ub);
+			show(c->name, cnt);
+		}
+	}
+	up(&cache_chain_sem);
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
-- 1.6.6 
