[#15707] Schedule for the 1.8.7 release — "Akinori MUSHA" <knu@...> (21 messages, 2008/03/01)

[#15740] Copy-on-write friendly garbage collector — Hongli Lai <hongli@...99.net> (31 messages, 2008/03/03)
    [#15742] Re: Copy-on-write friendly garbage collector — Yukihiro Matsumoto <matz@...> (2008/03/03)
    [#15829] Re: Copy-on-write friendly garbage collector — Daniel DeLorme <dan-ml@...42.com> (2008/03/08)

[#15756] embedding Ruby 1.9.0 inside pthread — "Suraj Kurapati" <sunaku@...> (18 messages, 2008/03/03)
    [#15759] Re: embedding Ruby 1.9.0 inside pthread — Nobuyoshi Nakada <nobu@...> (2008/03/04)
    [#15760] Re: embedding Ruby 1.9.0 inside pthread — Yukihiro Matsumoto <matz@...> (2008/03/04)
    [#15762] Re: embedding Ruby 1.9.0 inside pthread — "Suraj N. Kurapati" <sunaku@...> (2008/03/04)

[#15783] Adding startup and shutdown to Test::Unit — Daniel Berger <Daniel.Berger@...> (15 messages, 2008/03/04)

[#15835] TimeoutError in core, timeouts for ConditionVariable#wait — MenTaLguY <mental@...> (10 messages, 2008/03/09)

[#15990] Recent changes in Range#step behavior — "Vladimir Sizikov" <vsizikov@...> (35 messages, 2008/03/23)
    [#15991] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/23)
    [#15993] Re: Recent changes in Range#step behavior — "Vladimir Sizikov" <vsizikov@...> (2008/03/23)
    [#15997] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/23)
    [#16024] Re: Recent changes in Range#step behavior — "Vladimir Sizikov" <vsizikov@...> (2008/03/26)
    [#16025] Re: Recent changes in Range#step behavior — Yukihiro Matsumoto <matz@...> (2008/03/26)
    [#16026] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/26)
    [#16027] Re: Recent changes in Range#step behavior — Yukihiro Matsumoto <matz@...> (2008/03/26)
    [#16029] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/26)
    [#16030] Re: Recent changes in Range#step behavior — Yukihiro Matsumoto <matz@...> (2008/03/26)
    [#16031] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/26)
    [#16032] Re: Recent changes in Range#step behavior — "Vladimir Sizikov" <vsizikov@...> (2008/03/26)
    [#16033] Re: Recent changes in Range#step behavior — Dave Thomas <dave@...> (2008/03/26)
    [#16041] Re: Recent changes in Range#step behavior — David Flanagan <david@...> (2008/03/26)

Re: Copy-on-write friendly garbage collector

From: Daniel DeLorme <dan-ml@...42.com>
Date: 2008-03-08 07:50:47 UTC
List: ruby-core #15830
I believe I managed to close the performance gap to only 6% slower than 
the current implementation (but I must admit I have trouble getting 
consistently reproducible benchmarks). The attached patch should be 
added on top of the one in Matz' email.

--
Daniel

Attachments (1)

cow-dan.patch (3.76 KB, text/x-patch)
--- gc.c.1	2008-03-08 16:18:18.000000000 +0900
+++ gc.c.2	2008-03-08 16:40:20.000000000 +0900
@@ -151,6 +151,7 @@
     void *membase;
     RVALUE *slot;
     int limit;
+    RVALUE *slotlimit;
     int *marks;
     int marks_size;
 } *heaps;
@@ -457,31 +458,26 @@
 find_heap_slot_for_object(RVALUE *object)
 {
     struct heaps_slot *heap;
-    register long hi, lo, mid;
+    register int i;
 
     /* Look in the cache first. */
     if (last_heap != NULL && object >= last_heap->slot
-	&& object < last_heap->slot + last_heap->limit) {
+	&& object < last_heap->slotlimit) {
 	return last_heap;
     }
-    /* find heap_slot for object using bsearch*/
-    lo = 0;
-    hi = heaps_used;
-    while (lo < hi) {
-	mid = (lo + hi) / 2;
-	heap = &heaps[mid];
+    /* find heap_slot for object using linear search
+     * (faster than bsearch because there are only a few heaps)
+     */
+    for(i=0; i<heaps_used; i++) {
+	heap = &heaps[i];
 	if (heap->slot <= object) {
-	    if (object < heap->slot + heap->limit) {
+	    if (object < heap->slotlimit) {
 		/* Cache this result. According to empirical evidence, the chance is
 		 * high that the next lookup will be for the same heap slot.
 		 */
 		last_heap = heap;
 		return heap;
 	    }
-	    lo = mid + 1;
-	}
-	else {
-	    hi = mid;
 	}
     }
     return NULL;
@@ -598,6 +594,7 @@
 	    p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
 	heaps[heaps_used].slot = p;
 	heaps[heaps_used].limit = heap_slots;
+	heaps[heaps_used].slotlimit = p + heap_slots;
         heaps[heaps_used].marks_size = (int) (ceil(heap_slots / (sizeof(int) * 8.0)));
         heaps[heaps_used].marks = (int *) calloc(heaps[heaps_used].marks_size, sizeof(int));
 	break;
@@ -837,13 +834,15 @@
 gc_mark_all(void)
 {
     RVALUE *p, *pend;
+    struct heaps_slot *heap;
     int i;
 
     init_mark_stack();
     for (i = 0; i < heaps_used; i++) {
-	p = heaps[i].slot; pend = p + heaps[i].limit;
+	heap = &heaps[i];
+	p = heap->slot; pend = heap->slotlimit;
 	while (p < pend) {
-	    if (rb_mark_table_contains(p) && (p->as.basic.flags != 0)) {
+	    if (rb_mark_table_heap_contains(heap, p) && (p->as.basic.flags != 0)) {
 		gc_mark_children((VALUE)p, 0);
 	    }
 	    p++;
@@ -1338,10 +1337,11 @@
 	int n = 0;
 	RVALUE *free = freelist;
 	RVALUE *final = final_list;
+	struct heaps_slot *heap = &heaps[i];
 
-	p = heaps[i].slot; pend = p + heaps[i].limit;
+	p = heap->slot; pend = heap->slotlimit;
 	while (p < pend) {
-	    if (!rb_mark_table_contains(p)) {
+	    if (!rb_mark_table_heap_contains(heap, p)) {
 		if (p->as.basic.flags) {
 		    obj_free((VALUE)p);
 		}
@@ -1368,7 +1368,7 @@
 		/* do nothing here */
 	    }
 	    else {
-		rb_mark_table_heap_remove(&heaps[i], p);
+		rb_mark_table_heap_remove(heap, p);
 		live++;
 	    }
 	    p++;
@@ -2161,6 +2161,7 @@
 rb_gc_call_finalizer_at_exit(void)
 {
     RVALUE *p, *pend;
+    struct heaps_slot *heap;
     int i;
 
     /* finalizers are part of garbage collection */
@@ -2184,13 +2185,14 @@
     }
     /* run data object's finalizers */
     for (i = 0; i < heaps_used; i++) {
-	p = heaps[i].slot; pend = p + heaps[i].limit;
+	heap = &heaps[i];
+	p = heap->slot; pend = heap->slotlimit;
 	while (p < pend) {
 	    if (BUILTIN_TYPE(p) == T_DATA &&
 		DATA_PTR(p) && RANY(p)->as.data.dfree &&
 		RANY(p)->as.basic.klass != rb_cThread) {
 		p->as.free.flags = 0;
-		rb_mark_table_remove(p);
+		rb_mark_table_heap_remove(heap, p);
 		if ((long)RANY(p)->as.data.dfree == -1) {
 		    RUBY_CRITICAL(free(DATA_PTR(p)));
 		}
@@ -2202,7 +2204,7 @@
 	    else if (BUILTIN_TYPE(p) == T_FILE) {
 		if (rb_io_fptr_finalize(RANY(p)->as.file.fptr)) {
 		    p->as.free.flags = 0;
-		    rb_mark_table_remove(p);
+		    rb_mark_table_heap_remove(heap, p);
                     VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
 		}
 	    }
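
For readers skimming the diff above: the core change is in find_heap_slot_for_object(), which drops the binary search in favor of a one-entry last_heap cache plus a linear scan that compares against a precomputed slotlimit pointer. What follows is a minimal, self-contained sketch of that lookup strategy, not the actual gc.c code; the object type, the heap sizes and the main() driver are simplified stand-ins.

/* Sketch of the lookup strategy used in cow-dan.patch: check a one-entry
 * cache of the last matching heap, then fall back to a linear scan over the
 * (small) array of heaps, comparing against a precomputed end pointer.
 * Names and types are simplified stand-ins for the real gc.c structures. */
#include <stddef.h>
#include <stdio.h>

typedef struct object { int flags; } object_t;   /* stand-in for RVALUE */

struct heap_slot {
    object_t *slot;        /* first object in this heap block           */
    int       limit;       /* number of objects in the block            */
    object_t *slotlimit;   /* == slot + limit, precomputed by the patch */
};

static struct heap_slot heaps[4];
static int heaps_used;
static struct heap_slot *last_heap;   /* one-entry cache */

static struct heap_slot *
find_heap_slot_for_object(object_t *object)
{
    int i;

    /* Look in the cache first: consecutive lookups tend to hit the same heap. */
    if (last_heap != NULL && object >= last_heap->slot
        && object < last_heap->slotlimit) {
        return last_heap;
    }
    /* Linear search; with only a handful of heaps this beats a binary search. */
    for (i = 0; i < heaps_used; i++) {
        struct heap_slot *heap = &heaps[i];
        if (heap->slot <= object && object < heap->slotlimit) {
            last_heap = heap;   /* cache the hit for the next lookup */
            return heap;
        }
    }
    return NULL;                /* object does not belong to any heap */
}

int main(void)
{
    static object_t block[8];

    heaps[0].slot      = block;
    heaps[0].limit     = 8;
    heaps[0].slotlimit = block + 8;
    heaps_used = 1;

    printf("first lookup:  %s\n", find_heap_slot_for_object(&block[3]) ? "found" : "not found");
    printf("second lookup: %s\n", find_heap_slot_for_object(&block[3]) ? "found (via cache)" : "not found");
    return 0;
}

The rationale given in the patch comments is that with only a few heaps a linear scan is cheaper than a bsearch, and that successive lookups usually land in the same heap slot, which is why the single-entry last_heap cache is consulted before scanning at all.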
