A Brief Analysis of the .NET Core CLR GC

The .NET Core CLR GC has two parts: memory allocation and garbage collection. This post looks at garbage collection first.

I. Conditions that trigger garbage collection
1. GC.Collect() is called explicitly from C# code.
2. Physical memory is running low.
3. The amount allocated since the last GC exceeds the allocation budget (threshold).
4. No suitable free space can be found for an allocation.
When any one of these four conditions is met, the CLR triggers a garbage collection to clean up the managed heap and free memory for later use. A conceptual sketch of condition 3 follows.
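
As a rough illustration of condition 3, the sketch below models the budget check. Every name in it (generation_state, allocated_since_last_gc, allocation_budget, should_trigger_gc) is a hypothetical placeholder for illustration only, not an actual CoreCLR identifier.

#include <cstddef>

// Hypothetical model of trigger condition 3: the allocation budget is
// recomputed at the end of each GC; when the bytes allocated since then,
// plus the current request, would exceed it, a collection is considered.
struct generation_state
{
    size_t allocated_since_last_gc; // bytes handed out since the previous GC
    size_t allocation_budget;       // the threshold recomputed after each GC
};

bool should_trigger_gc (const generation_state& gen, size_t request_size)
{
    return gen.allocated_since_last_gc + request_size > gen.allocation_budget;
}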


II. The garbage collection flow
1. Suspend the other managed threads and have them switch to preemptive mode (a thread in preemptive mode must not touch the managed heap; it may only run unmanaged code).
2. Decide the target (condemned) generation to collect, and decide whether to perform a background GC.
3-1. If a background GC is performed, it has two steps: background mark (mark the objects on the managed heap that are still in use) and background sweep (sweep away the space that is no longer in use).
3-2. If no background GC is performed, a normal blocking GC runs, which has five phases: 1. mark, 2. plan, 3. relocate, 4. compact, 5. sweep.
4. Resume the other threads back to cooperative mode.

That is the entire garbage collection flow; a rough sketch of the sequence is given below.
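
The sketch below only simulates the ordering of the steps above by printing them; it is in no way the real CoreCLR implementation. In the real collector the plan phase decides whether the condemned generation will be compacted (relocate + compact) or simply swept; the sketch models that decision with a boolean parameter.

#include <cstdio>

// Hypothetical simulation of the GC flow; it only prints the phase order.
static void do_garbage_collection (bool background_gc, bool plan_decides_to_compact)
{
    std::puts ("1. suspend managed threads (switch to preemptive mode)");
    std::puts ("2. decide the condemned generation / whether to run a background GC");

    if (background_gc)
    {
        std::puts ("3-1. background mark");
        std::puts ("3-1. background sweep");
    }
    else
    {
        std::puts ("3-2. mark phase");
        std::puts ("3-2. plan phase");
        if (plan_decides_to_compact)
        {
            std::puts ("3-2. relocate phase");
            std::puts ("3-2. compact phase");
        }
        else
        {
            std::puts ("3-2. sweep phase");
        }
    }

    std::puts ("4. resume managed threads (back to cooperative mode)");
}

int main ()
{
    do_garbage_collection (false, true); // a blocking, compacting GC
    return 0;
}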

At the end of every GC, the allocation budget (threshold) is recomputed so it can be used for the next round of object allocation. The function that does this, gc_heap::desired_new_allocation, is shown below with annotations:

size_t gc_heap::desired_new_allocation (dynamic_data* dd, // per-generation dynamic data
                                        size_t out,       // size of this generation's surviving objects after the GC
                                        int gen_number,   // the generation being collected
                                        int pass)
{
    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();

    if (dd_begin_data_size (dd) == 0) // if the generation's size before the GC was zero
    {
        size_t new_allocation = dd_min_size (dd); // use the budget's lower bound as the new allocation budget
        current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation; // record it in the per-heap GC history
        return new_allocation;
    }
    }
    else
    {
        float     cst; // survival rate
        size_t    previous_desired_allocation = dd_desired_allocation (dd); // the previous allocation budget
        size_t    current_size = dd_current_size (dd); // size of the surviving objects after the GC
        float     max_limit = dd_max_limit (dd); // upper bound of the growth factor
        float     limit = dd_limit (dd); // lower bound of the growth factor
        size_t    min_gc_size = dd_min_size (dd); // lower bound of the budget
        float     f = 0; // growth factor for the budget
        size_t    max_size = dd_max_size (dd); // upper bound of the budget
        size_t    new_allocation = 0; // the new allocation budget, initialized to zero
        float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd)); // allocation fraction: the amount actually allocated before this GC divided by the previous budget
        if (gen_number >= max_generation) // generation 2 or above (gen 2 or the large object heap)
        {
            size_t    new_size = 0; // the new desired size of the generation

            cst = min (1.0f, float (out) / float (dd_begin_data_size (dd))); // survival rate: surviving size after the GC / generation size before the GC, capped at 1

            f = surv_to_growth (cst, limit, max_limit); // derive the budget's growth factor from the survival rate, bounded below by limit and above by max_limit
            size_t max_growth_size = (size_t)(max_size / f); // the largest current size that can still grow by f without exceeding max_size
            if (current_size >= max_growth_size) // growing by f would exceed the upper bound
            {
                new_size = max_size; // so clamp the new size to the upper bound
            }
            else
            {
                new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size); // new size = f * current_size, raised to at least min_gc_size and capped at max_size
            }

            assert ((new_size >= current_size) || (new_size == max_size)); // the new size must not be smaller than the surviving data unless it was clamped to the upper bound

            if (gen_number == max_generation) // generation 2
            {
                new_allocation  =  max((new_size - current_size), min_gc_size); // the budget is the desired growth, but at least min_gc_size

                new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
                                                          dd_desired_allocation (dd), dd_collection_count (dd)); // further adjust the budget with the linear allocation model

                if ((dd_fragmentation (dd) > ((size_t)((f-1)*current_size))))
                {
                    //reducing allocation in case of fragmentation
                    size_t new_allocation1 = max (min_gc_size,
                                                  // CAN OVERFLOW
                                                  (size_t)((float)new_allocation * current_size /
                                                           ((float)current_size + 2*dd_fragmentation (dd))));
                    dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id",
                                 new_allocation, new_allocation1));
                    new_allocation = new_allocation1;
                }
            }
            else //large object heap
            {
                uint32_t memory_load = 0;
                uint64_t available_physical = 0;
                get_memory_info (&memory_load, &available_physical); // query the memory load and the available physical memory
                if (heap_number == 0)
                    settings.exit_memory_load = memory_load;
                if (available_physical > 1024*1024)
                    available_physical -= 1024*1024; // leave roughly 1 MB of headroom

                uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number)); // available physical memory plus this generation's free-list space
                if (available_free > (uint64_t)MAX_PTR)
                {
                    available_free = (uint64_t)MAX_PTR;
                }

                //try to avoid OOM during large object allocation
                new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))), 
                                          (size_t)available_free), 
                                      max ((current_size/4), min_gc_size));

                new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
                                                          dd_desired_allocation (dd), dd_collection_count (dd));

            }
        }
        else // ephemeral generations (gen 0 and gen 1)
        {
            size_t survivors = out; // surviving size after the GC
            cst = float (survivors) / float (dd_begin_data_size (dd)); // survival rate
            f = surv_to_growth (cst, limit, max_limit); // growth factor from the survival rate
            new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size); // same clamping as above: f * survivors, bounded by min_gc_size and max_size

            new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
                                                      dd_desired_allocation (dd), dd_collection_count (dd));

            if (gen_number == 0)
            {
                if (pass == 0)
                {

                    //printf ("%f, %Id\n", cst, new_allocation);
                    size_t free_space = generation_free_list_space (generation_of (gen_number));
                    // DTREVIEW - is min_gc_size really a good choice? 
                    // on 64-bit this will almost always be true.
                    dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
                    if (free_space > min_gc_size)
                    {
                        settings.gen0_reduction_count = 2;
                    }
                    else
                    {
                        if (settings.gen0_reduction_count > 0)
                            settings.gen0_reduction_count--;
                    }
                }
                if (settings.gen0_reduction_count > 0)
                {
                    dprintf (2, ("Reducing new allocation based on fragmentation"));
                    new_allocation = min (new_allocation,
                                          max (min_gc_size, (max_size/3)));
                }
            }
        }

        size_t new_allocation_ret = 
            Align (new_allocation, get_alignment_constant (!(gen_number == (max_generation+1)))); // align the budget (the large object heap uses a different alignment constant)
        int gen_data_index = gen_number;
        gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]);
        gen_data->new_allocation = new_allocation_ret; // record the new budget in the per-heap GC history

        dd_surv (dd) = cst; // record the survival rate in the dynamic data

#ifdef SIMPLE_DPRINTF
        dprintf (1, ("h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id",
                     heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)),
                     (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
#else
        dprintf (1,("gen: %d in: %Id out: %Id ", gen_number, generation_allocation_size (generation_of (gen_number)), out));
        dprintf (1,("current: %Id alloc: %Id ", current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd))));
        dprintf (1,(" surv: %d%% f: %d%% new-size: %Id new-alloc: %Id",
                    (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
#endif //SIMPLE_DPRINTF

        return new_allocation_ret;
    }
}
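
To make the arithmetic in the gen 2 branch concrete, here is a small self-contained sketch with made-up numbers. surv_to_growth_sketch is a simplified, hypothetical stand-in for the real surv_to_growth (whose exact formula is not shown in this post), so the resulting values are only illustrative.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Simplified, hypothetical stand-in for surv_to_growth: a low survival rate
// gives a growth factor near max_limit, a high survival rate a factor near
// limit. The real CoreCLR formula is not reproduced here.
static float surv_to_growth_sketch (float cst, float limit, float max_limit)
{
    return std::max (limit, max_limit - (max_limit - limit) * cst);
}

int main ()
{
    // Made-up inputs, named after the variables in desired_new_allocation.
    float  cst          = 0.6f;                // 60% of the generation survived
    float  limit        = 1.2f;                // lower bound of the growth factor
    float  max_limit    = 2.0f;                // upper bound of the growth factor
    size_t current_size = 50 * 1024 * 1024;    // 50 MB of live data after the GC
    size_t min_gc_size  = 4 * 1024 * 1024;     // lower bound of the budget
    size_t max_size     = 256 * 1024 * 1024;   // upper bound of the budget

    float  f = surv_to_growth_sketch (cst, limit, max_limit);            // here: 1.52
    size_t new_size = (size_t) std::min ((float) max_size,
                                         std::max (f * current_size, (float) min_gc_size)); // ~76 MB
    size_t new_allocation = std::max (new_size - current_size, min_gc_size);                // ~26 MB

    std::printf ("f = %.2f, new_size = %zu, new_allocation = %zu\n",
                 f, new_size, new_allocation);
    return 0;
}

In the real function the gen 2 budget is then further adjusted by linear_allocation_model and may be reduced when fragmentation is high, as seen in the listing above.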

 
