1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
|
// Get the m bound to the current goroutine; acquirem also prevents preemption.
mp := acquirem()
// A non-zero mallocing flag means this m re-entered the allocator while
// already allocating — that can never make progress, so crash immediately.
if mp.mallocing != 0 {
throw("malloc deadlock")
}
// Allocating while running on the signal-handling goroutine is forbidden.
if mp.gsignal == getg() {
throw("malloc during signal")
}
// Mark this m as inside the allocator (cleared again before returning).
mp.mallocing = 1
shouldhelpgc := false
dataSize := size
// Fetch the per-P mcache that serves small allocations without locking.
c := getMCache()
if c == nil {
throw("mallocgc called without a P or outside bootstrapping")
}
var span *mspan
var x unsafe.Pointer
// noscan: the type has no pointers, so the GC never needs to scan the object.
noscan := typ == nil || typ.ptrdata == 0
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; isZeroed tracks that state.
isZeroed := true
// Small-object path: size <= maxSmallSize (32KB per the original comment).
if size <= maxSmallSize {
// Tiny allocator: pointer-free objects under maxTinySize (16 bytes)
// are packed together into a shared 16-byte block in the mcache.
if noscan && size < maxTinySize {
off := c.tinyoffset
// Align tiny pointer for required (conservative) alignment.
// Multiple-of-8 sizes get 8-byte alignment, and so on down to 2.
if size&7 == 0 {
off = alignUp(off, 8)
} else if sys.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
// value is aligned to 8 bytes and does not cause a fault on
// atomic access. See issue 37262.
// TODO(mknyszek): Remove this workaround if/when issue 36606
// is resolved.
off = alignUp(off, 8)
} else if size&3 == 0 {
off = alignUp(off, 4)
} else if size&1 == 0 {
off = alignUp(off, 2)
}
if off+size <= maxTinySize && c.tiny != 0 {
// The object fits into existing tiny block.
x = unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + size
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
return x
}
// Allocate a new maxTinySize block.
span = c.alloc[tinySpanClass]
v := nextFreeFast(span)
if v == 0 {
v, span, shouldhelpgc = c.nextFree(tinySpanClass)
}
x = unsafe.Pointer(v)
// Zero the fresh 16-byte block with two 8-byte stores.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// See if we need to replace the existing tiny block with the new one
// based on amount of remaining free space.
if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
// Note: disabled when race detector is on, see comment near end of this function.
c.tiny = uintptr(x)
c.tinyoffset = size
}
size = maxTinySize
} else {
// Small object (16B..32KB): map the requested size to a size class.
var sizeclass uint8
if size <= smallSizeMax-8 {
// Small sizes use the 8-byte-granularity table (classes 0-32
// per the original comment).
sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
} else {
// Larger sizes use the 128-byte-granularity table (classes
// 32-67 per the original comment).
sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
}
// Round the allocation up to the class's fixed object size; the
// difference is internal fragmentation the caller never sees.
size = uintptr(class_to_size[sizeclass])
// A span class encodes sizeclass*2 + (noscan ? 1 : 0).
spc := makeSpanClass(sizeclass, noscan)
span = c.alloc[spc]
// Fast path: grab the next free slot from the cached span.
v := nextFreeFast(span)
if v == 0 {
// Fast path failed; nextFree may refill the mcache from
// mcentral or mheap. When it acquires a new span it reports
// shouldhelpgc=true, which is checked later (outside this
// view) to decide whether to start a GC cycle.
v, span, shouldhelpgc = c.nextFree(spc)
}
x = unsafe.Pointer(v)
// Zero the slot only when the caller needs it and the span still
// contains stale data.
if needzero && span.needzero != 0 {
memclrNoHeapPointers(unsafe.Pointer(v), size)
}
}
} else {
// Large object (> maxSmallSize): allocate directly from the heap.
// The resulting span is a special one-object span with size class 0.
shouldhelpgc = true
// For large allocations, keep track of zeroed state so that
// bulk zeroing can be happen later in a preemptible context.
span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
span.freeindex = 1
span.allocCount = 1
x = unsafe.Pointer(span.base())
size = span.elemsize
}
|