+	kmem_cache_data_t *kcd;
+
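+	/*
+	 * Drain the per-thread list.  The lock is dropped around each
+	 * kmem_cache_free() so it is never held while the cache code
+	 * may block.
+	 */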
+	spin_lock(&kct->kct_lock);
+	while (!list_empty(&kct->kct_list)) {
+		kcd = list_entry(kct->kct_list.next,
+		    kmem_cache_data_t, kcd_node);
+		list_del(&kcd->kcd_node);
+		spin_unlock(&kct->kct_lock);
+
+		kmem_cache_free(kcp->kcp_cache, kcd);
+
+		spin_lock(&kct->kct_lock);
+	}
+	spin_unlock(&kct->kct_lock);
+}
+
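+/*
+ * Allocate 'count' objects from the cache and queue each one on the
+ * per-thread list.  On allocation failure any objects already queued
+ * are released before returning -ENOMEM.
+ */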
+static int
+splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
+    kmem_cache_thread_t *kct, int count)
+{
+	kmem_cache_data_t *kcd;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
+		if (kcd == NULL) {
+			splat_kmem_cache_test_kcd_free(kcp, kct);
+			return -ENOMEM;
+		}
+
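+		/* Queue the object so the kcd_free path can reclaim it. */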
+		spin_lock(&kct->kct_lock);
+		list_add_tail(&kcd->kcd_node, &kct->kct_list);
+		spin_unlock(&kct->kct_lock);
+	}
+
+	return 0;
+}
+
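+/*
+ * Log the current state of the cache: its name and object count, plus
+ * slab/object totals and per-CPU magazine fill levels when available.
+ */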
+static void
+splat_kmem_cache_test_debug(struct file *file, char *name,
+    kmem_cache_priv_t *kcp)
+{
+	int j;
+
+	splat_vprint(file, name, "%s cache objects %d",
+	    kcp->kcp_cache->skc_name, kcp->kcp_count);
+
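+	/* Slab accounting only exists for kmem/vmem backed caches. */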
+	if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+		splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
+		    (unsigned)kcp->kcp_cache->skc_slab_alloc,
+		    (unsigned)kcp->kcp_cache->skc_slab_total,
+		    (unsigned)kcp->kcp_cache->skc_obj_alloc,
+		    (unsigned)kcp->kcp_cache->skc_obj_total);
+
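+		/* Per-CPU magazines exist unless KMC_NOMAGAZINE is set. */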
+		if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
+			splat_vprint(file, name, "%s", "mags");
+
+			for_each_online_cpu(j)
+				splat_print(file, "%u/%u ",
+				    kcp->kcp_cache->skc_mag[j]->skm_avail,
+				    kcp->kcp_cache->skc_mag[j]->skm_size);
+		}
+	}
+
+	splat_print(file, "%s\n", "");