[#28561] Ruby::DL vs Ruby::FFI — Aston <blackapache512-ticket@...>

Ruby.DL and FFI libraries are great for programmers like me who are not internet programmers, but are more interested in scientific and number processing etc.

11 messages 2010/03/08

[#28686] trunk (26947) build fail with msys/mingw/vista — Jon <jon.forums@...>

I get the following build failure when msysgit's "c:\git\cmd" dir is on PATH.

8 messages 2010/03/16

[#28687] [Bug #2973] rb_bug - Segmentation fault - error.c:213 — rudolf gavlas <redmine@...>

Bug #2973: rb_bug - Segmentation fault - error.c:213

10 messages 2010/03/16

[#28735] [Bug #2982] Ruby tries to link with both openssl and readline — Lucas Nussbaum <redmine@...>

Bug #2982: Ruby tries to link with both openssl and readline

16 messages 2010/03/18

[#28736] [Bug #2983] Ruby (GPLv2 only) tries to link with readline (now GPLv3) — Lucas Nussbaum <redmine@...>

Bug #2983: Ruby (GPLv2 only) tries to link with readline (now GPLv3)

10 messages 2010/03/18

[#28907] [Bug #3000] Open SSL Segfaults — Christian Höltje <redmine@...>

Bug #3000: Open SSL Segfaults

19 messages 2010/03/23

[#28924] [Bug #3005] Ruby core dump - [BUG] rb_sys_fail() - errno == 0 — Sebastian YEPES <redmine@...>

Bug #3005: Ruby core dump - [BUG] rb_sys_fail() - errno == 0

10 messages 2010/03/24

[#28954] [Feature #3010] slow require gems in ruby 1.9.1 — Miao Jiang <redmine@...>

Feature #3010: slow require gems in ruby 1.9.1

15 messages 2010/03/24

[#29179] [Bug #3071] Convert rubygems and rdoc to use psych — Aaron Patterson <redmine@...>

Bug #3071: Convert rubygems and rdoc to use psych

10 messages 2010/03/31

[ruby-core:28859] st.c: pool allocator for tables and entries

From: Eric Wong <normalperson@...>
Date: 2010-03-22 08:05:24 UTC
List: ruby-core #28859
Hi all,

I started playing around with pool allocator for Ruby 1.9 for hashes.  I
cooked this up for an app that generates lots of Hashes.  After getting
some performance improvements from tweaking constants inside gc.c for
the app and using bsdmalloc[1], I still got a ~10% runtime reduction
with this small change (for one particular app).

This assumes st.c is only called inside GVL, and GVL == GIL which is
true these days in 1.9.1 and trunk.

The only other downside is that memory used for each type of struct
cannot be used for anything else, nor released back to the OS (though some
malloc implementations don't do this anyway).

I've sized both of the pools conservatively (8K) for now.

Further testing, benchmark results on different apps, and comments
would be greatly appreciated.

This patch should apply cleanly to trunk, and works with 1.9.1, too.

[1] I used bsdmalloc from here: git://github.com/ice799/bsdmalloc
---
 st.c |   64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/st.c b/st.c
index ec518e9..cbc57d8 100644
--- a/st.c
+++ b/st.c
@@ -38,6 +38,50 @@ struct st_table_entry {
      *
      */
 
+#if __GNUC__ >= 3
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+#else /* __GNUC__ >= 3 */
+#define UNLIKELY(x) (x)
+#endif /* __GNUC__ >= 3 */
+
+#define DEFINE_ALLOC_POOL(name,type,size) \
+struct name##_alloc_ptr { \
+    union { \
+      type data; \
+      void *next; \
+    } as; \
+}; \
+static struct name##_alloc_ptr *name##_head; \
+static void more_##name(void) \
+{ \
+    long i; \
+    struct name##_alloc_ptr *cur, *tmp; \
+    tmp = ruby_xmalloc((size / sizeof(type)) * sizeof(type)); \
+    name##_head = tmp; \
+    for (i = size / sizeof(type); --i >= 0; ) { \
+	cur = tmp++; \
+	cur->as.next = tmp; \
+    } \
+    cur->as.next = NULL; \
+} \
+static type *alloc_##name(void) \
+{ \
+    struct name##_alloc_ptr *tmp; \
+    if (UNLIKELY(!name##_head)) more_##name(); \
+    tmp = name##_head; \
+    name##_head = name##_head->as.next; \
+    return (type *)tmp; \
+} \
+static void release_##name(void *ptr) \
+{ \
+    struct name##_alloc_ptr *tmp = ptr; \
+    tmp->as.next = name##_head ? name##_head : NULL; \
+    name##_head = tmp; \
+}
+
+DEFINE_ALLOC_POOL(entry, struct st_table_entry, 0x2000);
+DEFINE_ALLOC_POOL(table, struct st_table, 0x2000);
+
 static const struct st_hash_type type_numhash = {
     st_numcmp,
     st_numhash,
@@ -183,7 +227,7 @@ st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
 
     size = new_size(size);	/* round up to prime number */
 
-    tbl = alloc(st_table);
+    tbl = alloc_table();
     tbl->type = type;
     tbl->num_entries = 0;
     tbl->entries_packed = type == &type_numhash && size/2 <= MAX_PACKED_NUMHASH;
@@ -253,7 +297,7 @@ st_clear(st_table *table)
 	table->bins[i] = 0;
 	while (ptr != 0) {
 	    next = ptr->next;
-	    free(ptr);
+	    release_entry(ptr);
 	    ptr = next;
 	}
     }
@@ -267,7 +311,7 @@ st_free_table(st_table *table)
 {
     st_clear(table);
     free(table->bins);
-    free(table);
+    release_table(table);
 }
 
 size_t
@@ -394,7 +438,7 @@ do {\
         bin_pos = hash_val % table->num_bins;\
     }\
     \
-    entry = alloc(st_table_entry);\
+    entry = alloc_entry(); \
     \
     entry->hash = hash_val;\
     entry->key = key;\
@@ -562,7 +606,7 @@ st_copy(st_table *old_table)
     st_index_t num_bins = old_table->num_bins;
     st_index_t hash_val;
 
-    new_table = alloc(st_table);
+    new_table = alloc_table();
     if (new_table == 0) {
 	return 0;
     }
@@ -572,7 +616,7 @@ st_copy(st_table *old_table)
 	Calloc((unsigned)num_bins, sizeof(st_table_entry*));
 
     if (new_table->bins == 0) {
-	free(new_table);
+	release_table(new_table);
 	return 0;
     }
 
@@ -585,7 +629,7 @@ st_copy(st_table *old_table)
 	prev = 0;
 	tail = &new_table->head;
 	do {
-	    entry = alloc(st_table_entry);
+	    entry = alloc_entry();
 	    if (entry == 0) {
 		st_free_table(new_table);
 		return 0;
@@ -650,7 +694,7 @@ st_delete(register st_table *table, register st_data_t *key, st_data_t *value)
 	    REMOVE_ENTRY(table, ptr);
 	    if (value != 0) *value = ptr->record;
 	    *key = ptr->key;
-	    free(ptr);
+	    release_entry(ptr);
 	    return 1;
 	}
     }
@@ -722,7 +766,7 @@ st_cleanup_safe(st_table *table, st_data_t never)
 	    if (ptr->key == never) {
 		tmp = ptr;
 		*last = ptr = ptr->next;
-		free(tmp);
+		release_entry(tmp);
 	    }
 	    else {
 		ptr = *(last = &ptr->next);
@@ -798,7 +842,7 @@ st_foreach(st_table *table, int (*func)(ANYARGS), st_data_t arg)
 			tmp = ptr->fore;
 			*last = ptr->next;
 			REMOVE_ENTRY(table, ptr);
-			free(ptr);
+			release_entry(ptr);
 			if (ptr == tmp) return 0;
 			ptr = tmp;
 			break;
-- 
Eric Wong

In This Thread

Prev Next