/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "Collector.h"

#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "MarkStack.h"
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>

#include <sys/procfs.h>

#define COLLECT_ON_EVERY_ALLOCATION 0

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
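
// NOTE (added commentary, not part of the original sources): GROWTH_FACTOR and
// LOW_WATER_FACTOR govern the m_heap.blocks array below. allocateBlock() grows the
// array by GROWTH_FACTOR when every slot is in use, and freeBlock() halves it once
// fewer than 1/LOW_WATER_FACTOR of the slots are occupied, so the array size loosely
// tracks the number of live blocks. ALLOCATIONS_PER_COLLECTION is the minimum headroom,
// in cells, that resizeBlocks() keeps available between collections.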

#if ENABLE(JSC_MULTIPLE_THREADS)
typedef mach_port_t PlatformThread;
typedef HANDLE PlatformThread;

    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)

    pthread_t posixThread;
    PlatformThread platformThread;

Heap::Heap(JSGlobalData* globalData)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
    , m_globalData(globalData)
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)

    memset(&m_heap, 0, sizeof(CollectorHeap));

    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);

    JSLock lock(SilenceAssertionsOnly);

    ASSERT(!m_globalData->dynamicGlobalObject);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;

    m_blockallocator.destroy();

NEVER_INLINE CollectorBlock* Heap::allocateBlock()
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);

    void* address = m_blockallocator.alloc();

    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
    static size_t pagesize = getpagesize();

    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

        munmap(reinterpret_cast<char*>(address), adjust);

        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);

    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);

    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    m_heap.blocks[m_heap.usedBlocks++] = block;

NEVER_INLINE void Heap::freeBlock(size_t block)
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));

NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);

    m_blockallocator.free(reinterpret_cast<void*>(block));

    VirtualFree(block, 0, MEM_RELEASE);

#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
    _aligned_free(block);
#elif HAVE(POSIX_MEMALIGN)
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);

void Heap::freeBlocks()
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));

void Heap::recordExtraCost(size_t cost)
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();

    m_heap.extraCost += cost;
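
// NOTE (added commentary, not part of the original sources): recordExtraCost() is how a
// cell that owns large out-of-line storage (for example a string or array backing buffer)
// tells the collector about memory the cell-size accounting cannot see, so a collection
// can be triggered earlier. A minimal illustrative sketch of a caller (hypothetical code,
// not from this file):
//
//     void* backingStore = fastMalloc(byteSize);      // large buffer owned by a cell
//     globalData->heap.recordExtraCost(byteSize);     // accounted for until the next GC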

void* Heap::allocate(size_t s)
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    ASSERT(m_heap.operationInProgress == NoOperation);

    // Fast case: find the next garbage cell and recycle it.

        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                m_heap.operationInProgress = NoOperation;
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.
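
// NOTE (added commentary, not part of the original sources): allocation doubles as the
// sweep. Cells left unmarked by the previous collection are reclaimed lazily in the fast
// path above (the old cell's destructor runs just before the cell is handed out again),
// so the normal allocation path needs no separate sweep pass; sweep() below exists for
// the cases where the whole heap must be cleaned eagerly.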

void Heap::resizeBlocks()
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);

void Heap::growBlocks(size_t neededBlocks)
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();

void Heap::shrinkBlocks(size_t neededBlocks)
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty()) {
            freeBlock(i);

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);

void* g_stackBase = 0;

inline bool isPageWritable(void* page)
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;

static void* getStackBase(void* previousFrame)
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;

            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;

struct hpux_get_stack_base_data
    _pthread_stack_info info;

static void *hpux_get_stack_base_internal(void *d)
    hpux_get_stack_base_data *data = static_cast<hpux_get_stack_base_data *>(d);

    // _pthread_stack_info_np requires the target thread to be suspended
    // in order to get information about it
    pthread_suspend(data->thread);

    // _pthread_stack_info_np returns an errno code in case of failure
    // or zero on success
    if (_pthread_stack_info_np(data->thread, &data->info)) {

    pthread_continue(data->thread);

static void *hpux_get_stack_base()
    hpux_get_stack_base_data data;
    data.thread = pthread_self();

    // We cannot get the stack information for the current thread
    // So we start a new thread to get that information and return it to us
    pthread_create(&other, 0, hpux_get_stack_base_internal, &data);

    pthread_join(other, &result);

    return data.info.stk_stack_base;

static inline void *currentThreadStackBaseQNX()
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        stackThread = thread;
    return static_cast<char*>(stackBase) + stackSize;

static inline void* currentThreadStackBase()
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64) && (COMPILER(MSVC) || COMPILER(GCC))
    // FIXME: why only for MSVC?
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    asm ( "movl %%fs:0x18, %0\n"
    return static_cast<void*>(pTib->StackBase);

    return hpux_get_stack_base();

    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    return currentThreadStackBaseQNX();

    pthread_t thread = pthread_self();
    struct __pthrdsinfo threadinfo;
    int regbufsize = sizeof regbuf;

    if (pthread_getthrds_np(&thread, PTHRDSINFO_QUERY_ALL,
                            &threadinfo, sizeof threadinfo,
                            &regbuf, &regbufsize) == 0)
        return threadinfo.__pi_stackaddr;

    pthread_t thread = pthread_self();
    pthread_stackseg_np(thread, &stack);

    TThreadStackInfo info;
    thread.StackInfo(info);
    return (void*)info.iBase;

    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;

    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    return static_cast<char*>(stackBase) + stackSize;

    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
        return getStackBase(&dummy);

#error Need a way to get the stack base on this platform
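
// NOTE (added commentary, not part of the original sources): on the platforms above the
// value returned is the high end of the current thread's stack (stacks are assumed to grow
// downward here). markCurrentThreadConservativelyInternal() pairs it with the address of a
// local variable, which approximates the current stack pointer, and passes both bounds to
// markConservatively() so that only the live portion of the stack is scanned.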

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
    return pthread_mach_thread_np(pthread_self());
    return pthread_getw32threadhandle_np(pthread_self());

void Heap::makeUsableFromMultipleThreads()
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);

void Heap::registerThread()
    ASSERT(!m_globalData->mainThreadOnly || isMainThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;

void Heap::unregisterThread(void* p)
    static_cast<Heap*>(p)->unregisterThread();

void Heap::unregisterThread()
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;

        Heap::Thread* last = m_registeredThreads;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
        ASSERT(t); // If t is NULL, we never found ourselves in the list.

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()

inline bool isPointerAligned(void* p)
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);

static bool isHalfCellAligned(void *p)
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);

static inline bool isPossibleCell(void* p)
    return isHalfCellAligned(p) && p;

static inline bool isCellAligned(void *p)
    return (((intptr_t)(p) & CELL_MASK) == 0);

static inline bool isPossibleCell(void* p)
    return isCellAligned(p) && p;
#endif // USE(JSVALUE32)
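
// NOTE (added commentary, not part of the original sources): isPossibleCell() is only a
// cheap alignment filter. Assuming, purely for illustration, a 64-byte CollectorCell (so
// CELL_MASK == 0x3F), a stack word such as 0x0807c040 passes because its low six bits are
// zero, while 0x0807c044 is rejected immediately; under USE(JSVALUE32) the weaker half-cell
// mask is used instead. Anything that passes still has to survive the offset-range and
// block-membership checks in markConservatively() before it is treated as a JSCell*.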

void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    CollectorBlock** blocks = m_heap.blocks;
        if (isPossibleCell(x)) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
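
// NOTE (added commentary, not part of the original sources): a candidate word becomes a
// conservative root only after three increasingly strict tests: the alignment filter
// above, the range check that its offset inside the would-be block does not lie past the
// last cell, and a linear scan confirming the derived block address really is one of
// m_heap.blocks. Only then is it pushed onto the mark stack, which keeps most stray
// integers on the stack from pinning heap cells.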

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);

#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#define REGISTER_BUFFER_ALIGNMENT

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#pragma warning(push)
#pragma warning(disable: 4611)

    markCurrentThreadConservativelyInternal(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
    thread_suspend(platformThread);
    SuspendThread(platformThread);
#error Need a way to suspend threads on this platform

static inline void resumeThread(const PlatformThread& platformThread)
    thread_resume(platformThread);
    ResumeThread(platformThread);
#error Need a way to resume threads on this platform

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

typedef i386_thread_state_t PlatformThreadRegisters;
typedef x86_thread_state64_t PlatformThreadRegisters;
typedef ppc_thread_state_t PlatformThreadRegisters;
typedef ppc_thread_state64_t PlatformThreadRegisters;
typedef arm_thread_state_t PlatformThreadRegisters;
#error Unknown Architecture

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#error Need a thread register struct for this platform

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#error Unknown Architecture

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
    return user_count * sizeof(usword_t);

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#error Need a way to get thread registers on this platform

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
    return reinterpret_cast<void*>(regs.__esp);
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
    return reinterpret_cast<void*>(regs.__sp);
#error Unknown Architecture

#else // !__DARWIN_UNIX03
    return reinterpret_cast<void*>(regs.esp);
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#error Unknown Architecture

#endif // __DARWIN_UNIX03

#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#error Need a way to get the stack pointer for another thread on this platform

void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
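
// NOTE (added commentary, not part of the original sources): the register snapshot is
// scanned in addition to the stack because a suspended thread may hold its only reference
// to a cell in a register that has not been spilled; getPlatformThreadRegisters() copies
// the register file into regs, and that struct's bytes are then treated like any other
// conservative root range.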

void Heap::markStackObjectsConservatively(MarkStack& markStack)
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.

        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);

void Heap::protect(JSValue k)
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    m_protectedValues.add(k.asCell());

void Heap::unprotect(JSValue k)
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    m_protectedValues.remove(k.asCell());
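
// NOTE (added commentary, not part of the original sources): m_protectedValues is a counted
// set, so protect()/unprotect() calls nest and a cell stays rooted until every protect has
// been balanced. An illustrative (hypothetical) caller that must keep a value alive across
// work that may trigger a collection:
//
//     Heap* heap = ...;                  // the Heap that owns the value
//     heap->protect(result);             // rooted even if nothing on any stack refers to it
//     doWorkThatMayTriggerGC();
//     heap->unprotect(result);           // balanced; the value is collectable again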

void Heap::markProtectedObjects(MarkStack& markStack)
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);

void Heap::clearMarkBits()
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);

void Heap::clearMarkBits(CollectorBlock* block)
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
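
// NOTE (added commentary, not part of the original sources): the permanently-set last bit
// is the sentinel allocate() depends on: because the final cell of a block always reads as
// marked it is never handed out, so m_heap.nextCell cannot be left pointing one past the end
// of a block between allocations. objectCount() compensates by subtracting one cell per block
// ("dummy sentinel"), and shrinkBlocks() clears and restores the bit around its isEmpty() test.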

size_t Heap::markedCells(size_t startBlock, size_t startCell) const
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);

    m_heap.operationInProgress = NoOperation;

void Heap::markRoots()
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

#if QT_BUILD_SCRIPT_LIB
    if (m_globalData->clientData)
        m_globalData->clientData->mark(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    m_globalData->smallStrings.markChildren(markStack);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    markStack.compact();

    m_heap.operationInProgress = NoOperation;

size_t Heap::objectCount() const
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
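
// NOTE (added commentary, not part of the original sources; the numbers below are made up
// purely for illustration): with 4096 cells per block, nextBlock == 2, nextCell == 100 and
// 50 marked cells in the unswept remainder of a 3-block heap, objectCount() reports
// 2 * 4096 + 100 + 50 - 3 = 8339. Blocks before nextBlock have been allocated all the way
// through, cells past the allocation point count only if they are still marked, and the
// final subtraction removes the always-set sentinel cell counted once per block.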

void Heap::addToStatistics(Heap::Statistics& statistics) const
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);

Heap::Statistics Heap::statistics() const
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);

size_t Heap::globalObjectCount()
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        } while (o != head);

size_t Heap::protectedGlobalObjectCount()
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
            if (m_protectedValues.contains(o))
                ++count;
        } while (o != head);

size_t Heap::protectedObjectCount()
    return m_protectedValues.size();

static const char* typeName(JSCell* cell)
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return m_heap.operationInProgress != NoOperation;

    JAVASCRIPTCORE_GC_BEGIN();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)

    JAVASCRIPTCORE_GC_END();

void Heap::collectAllGarbage()
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;

    JAVASCRIPTCORE_GC_END();

LiveObjectIterator Heap::primaryHeapBegin()
    return LiveObjectIterator(m_heap, 0);

LiveObjectIterator Heap::primaryHeapEnd()
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);