type slice struct {
    array unsafe.Pointer
    len   int
    cap   int
}
array points to the underlying array.
len is the current length of the slice.
cap is the capacity of the underlying array.
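To make the three fields concrete, here is a small, self-contained sketch (standard Go only, no runtime internals): two slices sharing one underlying array have the same array pointer but their own len and cap.

    package main

    import "fmt"

    func main() {
        a := make([]int, 3, 8) // len=3, cap=8; array allocated once
        b := a[1:2:4]          // shares a's underlying array; len=1, cap=3

        b = append(b, 42)           // within cap, so it writes into the shared array
        fmt.Println(len(a), cap(a)) // 3 8
        fmt.Println(len(b), cap(b)) // 2 3
        fmt.Println(a[2])           // 42, visible through a: same array pointer
    }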
The three ways to initialize a slice
Initialization with make
func makeslice(et *_type, len, cap int) slice {
    // NOTE: The len > maxElements check here is not strictly necessary,
    // but it produces a 'len out of range' error instead of a 'cap out of range' error
    // when someone does make([]T, bignumber). 'cap out of range' is true too,
    // but since the cap is only being supplied implicitly, saying len is clearer.
    // See issue 4085.
    maxElements := maxSliceCap(et.size)
    if len < 0 || uintptr(len) > maxElements {
        panic(errorString("makeslice: len out of range"))
    }

    if cap < len || uintptr(cap) > maxElements {
        panic(errorString("makeslice: cap out of range"))
    }

    p := mallocgc(et.size*uintptr(cap), et, true)
    return slice{p, len, cap}
}
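At the language level, the two range checks surface as run-time panics when make receives invalid arguments that the compiler cannot reject statically. A minimal demonstration:

    package main

    func main() {
        n := -1
        _ = make([]int, n) // panics: runtime error: makeslice: len out of range
    }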
func slicecopy(to, fm slice, width uintptr) int {
    if fm.len == 0 || to.len == 0 {
        return 0
    }

    n := fm.len
    if to.len < n {
        n = to.len
    }

    if width == 0 {
        return n
    }

    if raceenabled {
        callerpc := getcallerpc()
        pc := funcPC(slicecopy)
        racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
        racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
    }
    if msanenabled {
        msanwrite(to.array, uintptr(n*int(width)))
        msanread(fm.array, uintptr(n*int(width)))
    }

    size := uintptr(n) * width
    if size == 1 { // common case worth about 2x to do here
        // TODO: is this still worth it with new memmove impl?
        *(*byte)(to.array) = *(*byte)(fm.array) // known to be a byte pointer
    } else {
        memmove(to.array, fm.array, size)
    }
    return n
}
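slicecopy backs the built-in copy, which copies min(len(dst), len(src)) elements and returns that count. For example:

    package main

    import "fmt"

    func main() {
        src := []int{1, 2, 3, 4}
        dst := make([]int, 2)
        n := copy(dst, src) // n = min(2, 4) = 2
        fmt.Println(n, dst) // 2 [1 2]
    }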
type hchan struct {
    qcount   uint           // total data in the queue
    dataqsiz uint           // size of the circular queue
    buf      unsafe.Pointer // points to an array of dataqsiz elements
    elemsize uint16
    closed   uint32
    elemtype *_type // element type
    sendx    uint   // send index
    recvx    uint   // receive index
    recvq    waitq  // list of recv waiters
    sendq    waitq  // list of send waiters
    lock     mutex
}
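qcount and dataqsiz are exactly what len and cap report for a channel, so the ring buffer can be observed from plain Go:

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 3) // dataqsiz = 3

        ch <- 1                       // sendx advances, qcount = 1
        ch <- 2                       // qcount = 2
        fmt.Println(len(ch), cap(ch)) // 2 3 (qcount, dataqsiz)

        <-ch                          // recvx advances, qcount = 1
        fmt.Println(len(ch), cap(ch)) // 1 3
    }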
type sudog struct {
    // The following fields are protected by the hchan.lock of the
    // channel this sudog is blocking on. shrinkstack depends on
    // this for sudogs involved in channel ops.

    g *g

    // isSelect indicates g is participating in a select, so
    // g.selectDone must be CAS'd to win the wake-up race.
    isSelect bool
    next     *sudog
    prev     *sudog
    elem     unsafe.Pointer // data element (may point to stack)

    // The following fields are never accessed concurrently.
    // For channels, waitlink is only accessed by g.
    // For semaphores, all fields (including the ones above)
    // are only accessed when holding a semaRoot lock.

    acquiretime int64
    releasetime int64
    ticket      uint32
    parent      *sudog // semaRoot binary tree
    waitlink    *sudog // g.waiting list or semaRoot
    waittail    *sudog // semaRoot
    c           *hchan // channel
}
// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
    chansend(c, elem, true, getcallerpc())
}
Inside chansend (signature: func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool), a block-profiling timestamp is recorded first:

    var t0 int64
    if blockprofilerate > 0 {
        t0 = cputicks()
    }
lock(&c.lock)
    if c.closed != 0 {
        unlock(&c.lock)
        panic(plainError("send on closed channel"))
    }

    // 1
    if sg := c.recvq.dequeue(); sg != nil {
        // Found a waiting receiver. We pass the value we want to send
        // directly to the receiver, bypassing the channel buffer (if any).
        send(c, sg, ep, func() { unlock(&c.lock) }, 3)
        return true
    }
    // 2
    if c.qcount < c.dataqsiz {
        // Space is available in the channel buffer. Enqueue the element to send.
        qp := chanbuf(c, c.sendx)
        if raceenabled {
            raceacquire(qp)
            racerelease(qp)
        }
        typedmemmove(c.elemtype, qp, ep)
        c.sendx++
        if c.sendx == c.dataqsiz {
            c.sendx = 0
        }
        c.qcount++
        unlock(&c.lock)
        return true
    }
    if !block {
        unlock(&c.lock)
        return false
    }
    // Block on the channel. Some receiver will complete our operation for us.
    gp := getg()
    mysg := acquireSudog()
    mysg.releasetime = 0
    if t0 != 0 {
        mysg.releasetime = -1
    }
    // No stack splits between assigning elem and enqueuing mysg
    // on gp.waiting where copystack can find it.
    mysg.elem = ep
    mysg.waitlink = nil
    mysg.g = gp
    mysg.isSelect = false
    mysg.c = c
    gp.waiting = mysg
    gp.param = nil
    c.sendq.enqueue(mysg)
    goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3)
    // someone woke us up.
    if mysg != gp.waiting {
        throw("G waiting list is corrupted")
    }
    gp.waiting = nil
    if gp.param == nil {
        if c.closed == 0 {
            throw("chansend: spurious wakeup")
        }
        panic(plainError("send on closed channel"))
    }
    gp.param = nil
    if mysg.releasetime > 0 {
        blockevent(mysg.releasetime-t0, 2)
    }
    mysg.c = nil
    releaseSudog(mysg)
    return true
}
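Both panic sites above are reachable from ordinary code; the simplest trigger is sending after close:

    package main

    func main() {
        ch := make(chan int, 1)
        close(ch)
        ch <- 1 // chansend sees c.closed != 0 under the lock and
                // panics: "send on closed channel"
    }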
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
    // raceenabled: don't need to check ep, as it is always on the stack
    // or is new memory allocated by reflect.
    if debugChan {
        print("chanrecv: chan=", c, "\n")
    }
    if c == nil {
        if !block {
            return
        }
        gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
        throw("unreachable")
    }
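A receive from a nil channel therefore parks the goroutine forever; if no other goroutine can run, the runtime reports a deadlock:

    package main

    func main() {
        var ch chan int // nil channel
        <-ch            // gopark never returns; the runtime aborts with
                        // "fatal error: all goroutines are asleep - deadlock!"
    }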
    // Fast path: check for failed non-blocking operation without acquiring the lock.
    //
    // After observing that the channel is not ready for receiving, we observe that the
    // channel is not closed. Each of these observations is a single word-sized read
    // (first c.sendq.first or c.qcount, and second c.closed).
    // Because a channel cannot be reopened, the later observation of the channel
    // being not closed implies that it was also not closed at the moment of the
    // first observation. We behave as if we observed the channel at that moment
    // and report that the receive cannot proceed.
    //
    // The order of operations is important here: reversing the operations can lead to
    // incorrect behavior when racing with a close.
    if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
        c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
        atomic.Load(&c.closed) == 0 {
        return
    }
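This block=false fast path is what a select with a default clause exercises: the failed receive returns without ever taking the channel lock.

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 1) // empty and open

        select {
        case v := <-ch:
            fmt.Println("received", v)
        default:
            // chanrecv ran with block=false and returned on the fast path
            fmt.Println("no value ready")
        }
    }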
    var t0 int64
    if blockprofilerate > 0 {
        t0 = cputicks()
    }
lock(&c.lock)
    if c.closed != 0 && c.qcount == 0 {
        if raceenabled {
            raceacquire(c.raceaddr())
        }
        unlock(&c.lock)
        if ep != nil {
            typedmemclr(c.elemtype, ep)
        }
        return true, false
    }
    if sg := c.sendq.dequeue(); sg != nil {
        // Found a waiting sender. If buffer is size 0, receive value
        // directly from sender. Otherwise, receive from head of queue
        // and add sender's value to the tail of the queue (both map to
        // the same buffer slot because the queue is full).
        recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
        return true, true
    }
    if c.qcount > 0 {
        // Receive directly from queue
        qp := chanbuf(c, c.recvx)
        if raceenabled {
            raceacquire(qp)
            racerelease(qp)
        }
        if ep != nil {
            typedmemmove(c.elemtype, ep, qp)
        }
        typedmemclr(c.elemtype, qp)
        c.recvx++
        if c.recvx == c.dataqsiz {
            c.recvx = 0
        }
        c.qcount--
        unlock(&c.lock)
        return true, true
    }
    if !block {
        unlock(&c.lock)
        return false, false
    }
    gp := getg()
    mysg := acquireSudog()
    mysg.releasetime = 0
    if t0 != 0 {
        mysg.releasetime = -1
    }
    // No stack splits between assigning elem and enqueuing mysg
    // on gp.waiting where copystack can find it.
    mysg.elem = ep
    mysg.waitlink = nil
    gp.waiting = mysg
    mysg.g = gp
    mysg.isSelect = false
    mysg.c = c
    gp.param = nil
    c.recvq.enqueue(mysg)
    goparkunlock(&c.lock, waitReasonChanReceive, traceEvGoBlockRecv, 3)
    // someone woke us up
    if mysg != gp.waiting {
        throw("G waiting list is corrupted")
    }
    gp.waiting = nil
    if mysg.releasetime > 0 {
        blockevent(mysg.releasetime-t0, 2)
    }
    closed := gp.param == nil
    gp.param = nil
    mysg.c = nil
    releaseSudog(mysg)
    return true, !closed
}
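The (selected, received) results are what the comma-ok receive form exposes: once a closed channel is drained, the receive yields the zero value (via typedmemclr) and ok == false.

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 1)
        ch <- 7
        close(ch)

        v, ok := <-ch      // buffered value still present
        fmt.Println(v, ok) // 7 true

        v, ok = <-ch       // closed and drained
        fmt.Println(v, ok) // 0 false
    }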
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
    if c.dataqsiz == 0 {
        // Unbuffered channel: copy the value directly from the sender.
        if raceenabled {
            racesync(c, sg)
        }
        if ep != nil {
            // copy data from sender
            recvDirect(c.elemtype, sg, ep)
        }
    } else {
        // Queue is full. Take the item at the
        // head of the queue. Make the sender enqueue
        // its item at the tail of the queue. Since the
        // queue is full, those are both the same slot.
        // A sender is parked here, so read from buf.
        qp := chanbuf(c, c.recvx)
        if raceenabled {
            raceacquire(qp)
            racerelease(qp)
            raceacquireg(sg.g, qp)
            racereleaseg(sg.g, qp)
        }
        // copy data from queue to receiver
        if ep != nil {
            // Copy the unread value at the current position to the consumer.
            typedmemmove(c.elemtype, ep, qp)
        }
        // Copy the blocked producer's value into the freed slot.
        typedmemmove(c.elemtype, qp, sg.elem)
        // Advance the receive index.
        c.recvx++
        // Wrap the ring-buffer index back to 0 at the end.
        if c.recvx == c.dataqsiz {
            c.recvx = 0
        }
        // The slot just read is now the tail of the ring, so the send
        // index catches up with the receive index.
        c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
    }
    // Clear the element reference.
    sg.elem = nil
    // Hand the sudog's goroutine back through the param field.
    gp := sg.g
    unlockf()
    gp.param = unsafe.Pointer(sg)
    if sg.releasetime != 0 {
        sg.releasetime = cputicks()
    }
    // Wake the g that was parked in sendq via this sudog.
    goready(gp, skip+1)
}
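The head/tail shuffle keeps FIFO order even when a sender is parked on a full buffer, which can be checked from plain Go (the Sleep is only a scheduling hint, not a correctness requirement; the output is 1 2 3 either way):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ch := make(chan int, 2)
        ch <- 1 // buffer: [1]
        ch <- 2 // buffer: [1 2], now full

        go func() {
            ch <- 3 // parks in sendq, value 3 held in its sudog
        }()
        time.Sleep(10 * time.Millisecond) // give the sender time to park

        // recv hands the receiver the head (1) and copies the parked
        // sender's 3 into the freed tail slot.
        fmt.Println(<-ch, <-ch, <-ch) // 1 2 3
    }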
// A header for a Go map.
type hmap struct {
    // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
    // Make sure this stays in sync with the compiler's definition.
    count     int    // # live cells == size of map.  Must be first (used by len() builtin)
    flags     uint8
    B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
    noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
    hash0     uint32 // hash seed

    buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
    oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
    nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

    extra *mapextra // optional fields
}
// mapextra holds fields that are not present on all maps.
type mapextra struct {
    // If both key and value do not contain pointers and are inline, then we mark bucket
    // type as containing no pointers. This avoids scanning such maps.
    // However, bmap.overflow is a pointer. In order to keep overflow buckets
    // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
    // overflow and oldoverflow are only used if key and value do not contain pointers.
    // overflow contains overflow buckets for hmap.buckets.
    // oldoverflow contains overflow buckets for hmap.oldbuckets.
    // The indirection allows to store a pointer to the slice in hiter.
    overflow    *[]*bmap
    oldoverflow *[]*bmap

    // nextOverflow holds a pointer to a free overflow bucket.
    nextOverflow *bmap
}
// A bucket for a Go map.
type bmap struct {
    // tophash generally contains the top byte of the hash value
    // for each key in this bucket. If tophash[0] < minTopHash,
    // tophash[0] is a bucket evacuation state instead.
    tophash [bucketCnt]uint8
    // Followed by bucketCnt keys and then bucketCnt values.
    // NOTE: packing all the keys together and then all the values together makes the
    // code a bit more complicated than alternating key/value/key/value/... but it allows
    // us to eliminate padding which would be needed for, e.g., map[int64]int8.
    // Followed by an overflow pointer.
}
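The NOTE about packing can be quantified with unsafe.Sizeof: for map[int64]int8, an alternating key/value layout would pad every pair to 16 bytes, while 8 keys followed by 8 values need no padding at all. The pair struct below is hypothetical, purely to show the padding:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Hypothetical alternating layout: each pair is padded to 16 bytes.
    type pair struct {
        k int64
        v int8 // 7 padding bytes follow to realign the next k
    }

    func main() {
        fmt.Println(unsafe.Sizeof(pair{})) // 16
        // Grouped layout per bucket: 8*8 (keys) + 8*1 (values) = 72 bytes,
        // versus 8*16 = 128 bytes if keys and values alternated.
    }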
The runtime tunes the map's load factor against these metrics:

%overflow   = percentage of buckets which have an overflow bucket
bytes/entry = overhead bytes used per key/value pair
hitprobe    = # of entries to check when looking up a present key
missprobe   = # of entries to check when looking up an absent key
func makemap(t *maptype, hint int, h *hmap) *hmap {
    mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
    if overflow || mem > maxAlloc {
        hint = 0
    }

    if h == nil {
        h = new(hmap) // allocate a new hmap
    }
    h.hash0 = fastrand() // generate a hash seed

    B := uint8(0)
    for overLoadFactor(hint, B) { // pick the smallest B that fits hint
        B++
    }
    h.B = B

    if h.B != 0 { // when B != 0, allocate the bucket array up front
        var nextOverflow *bmap
        h.buckets, nextOverflow = makeBucketArray(t, h.B, nil) // allocate 2^B buckets
        if nextOverflow != nil {
            h.extra = new(mapextra)
            h.extra.nextOverflow = nextOverflow // remember the preallocated overflow bucket
        }
    }
    return h
}
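From user code the hint is the optional size argument of make; passing it lets makemap pick B once, so the insert loop below never triggers incremental growth:

    package main

    import "fmt"

    func main() {
        // makemap increments B until overLoadFactor(1000, B) is false,
        // so the bucket array is sized up front.
        m := make(map[string]int, 1000)
        for i := 0; i < 1000; i++ {
            m[fmt.Sprintf("k%d", i)] = i // no rehashing during the loop
        }
        fmt.Println(len(m)) // 1000
    }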
In mapassign, after the key's hash has been computed, the target bucket is selected (growing maps are evacuated incrementally first):

again:
    bucket := hash & bucketMask(h.B) // 2. select the bucket for this hash
    if h.growing() {
        growWork(t, h, bucket)
    }
    b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
    top := tophash(hash) // top byte of the hash, used as the slot tag
    var inserti *uint8
    var insertk unsafe.Pointer
    var val unsafe.Pointer
bucketloop:
    for {
        for i := uintptr(0); i < bucketCnt; i++ { // scan the 8 slots in the bucket
            if b.tophash[i] != top {
                if isEmpty(b.tophash[i]) && inserti == nil { // 4. remember the first free slot
                    inserti = &b.tophash[i]
                    insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
                    val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
                }
                if b.tophash[i] == emptyRest {
                    break bucketloop
                }
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            if t.indirectkey() {
                k = *((*unsafe.Pointer)(k))
            }
            if !alg.equal(key, k) {
                continue
            }
            // 3. the key already exists: update it
            if t.needkeyupdate() {
                typedmemmove(t.key, k, key)
            }
            val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
            goto done
        }
        ovf := b.overflow(t)
        if ovf == nil {
            break
        }
        b = ovf
    }
    if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
        hashGrow(t, h)
        goto again // Growing the table invalidates everything, so try again
    }
    if inserti == nil {
        // 5. the current bucket and its overflow chain are full: add a new overflow bucket
        newb := h.newoverflow(t, b)
        inserti = &newb.tophash[0]
        insertk = add(unsafe.Pointer(newb), dataOffset)
        val = add(insertk, bucketCnt*uintptr(t.keysize))
    }
    // store the new key/value at the insert position
    if t.indirectkey() {
        kmem := newobject(t.key)
        *(*unsafe.Pointer)(insertk) = kmem
        insertk = kmem
    }
    if t.indirectvalue() {
        vmem := newobject(t.elem)
        *(*unsafe.Pointer)(val) = vmem
    }
    typedmemmove(t.key, insertk, key)
    *inserti = top
    h.count++
done:
    if h.flags&hashWriting == 0 {
        throw("concurrent map writes")
    }
    h.flags &^= hashWriting
    if t.indirectvalue() {
        val = *((*unsafe.Pointer)(val))
    }
    return val
}
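The hashWriting flag is why unsynchronized writers fail loudly instead of corrupting the table silently: two goroutines writing at once will, with high probability, hit the throw above.

    package main

    func main() {
        m := make(map[int]int)
        done := make(chan struct{})

        go func() {
            for i := 0; i < 1000000; i++ {
                m[i] = i
            }
            close(done)
        }()
        for i := 0; i < 1000000; i++ {
            m[i] = i // typically: fatal error: concurrent map writes
        }
        <-done
    }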
The delete path (mapdelete) sets the write flag the same way:

    // Set hashWriting after calling alg.hash, since alg.hash may panic,
    // in which case we have not actually done a write (delete).
    h.flags ^= hashWriting
    bucket := hash & bucketMask(h.B)
    if h.growing() {
        growWork(t, h, bucket)
    }
    b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
    bOrig := b
    top := tophash(hash)
search:
    for ; b != nil; b = b.overflow(t) {
        for i := uintptr(0); i < bucketCnt; i++ {
            if b.tophash[i] != top {
                if b.tophash[i] == emptyRest {
                    break search
                }
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            k2 := k
            if t.indirectkey() {
                k2 = *((*unsafe.Pointer)(k2))
            }
            if !alg.equal(key, k2) {
                continue
            }
            // Only clear key if there are pointers in it.
            if t.indirectkey() {
                *(*unsafe.Pointer)(k) = nil
            } else if t.key.kind&kindNoPointers == 0 {
                memclrHasPointers(k, t.key.size)
            }
            v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
            if t.indirectvalue() {
                *(*unsafe.Pointer)(v) = nil
            } else if t.elem.kind&kindNoPointers == 0 {
                memclrHasPointers(v, t.elem.size)
            } else {
                memclrNoHeapPointers(v, t.elem.size)
            }
            b.tophash[i] = emptyOne
            // If the bucket now ends in a bunch of emptyOne states,
            // change those to emptyRest states.
            // It would be nice to make this a separate function, but
            // for loops are not currently inlineable.
            if i == bucketCnt-1 {
                if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
                    goto notLast
                }
            } else {
                if b.tophash[i+1] != emptyRest {
                    goto notLast
                }
            }
            for {
                b.tophash[i] = emptyRest
                if i == 0 {
                    if b == bOrig {
                        break // beginning of initial bucket, we're done.
                    }
                    // Find previous bucket, continue at its last entry.
                    c := b
                    for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
                    }
                    i = bucketCnt - 1
                } else {
                    i--
                }
                if b.tophash[i] != emptyOne {
                    break
                }
            }
        notLast:
            h.count--
            break search
        }
    }
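At the language level this is the delete built-in. Note that mapdelete only marks slots emptyOne/emptyRest and decrements count; the bucket array itself is never shrunk.

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        delete(m, "a")       // slot becomes emptyOne, h.count--
        fmt.Println(len(m))  // 1
        fmt.Println(m["a"])  // 0 (zero value: the key is gone)
        delete(m, "missing") // deleting an absent key is a no-op
    }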
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
    return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
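With the runtime constants loadFactorNum = 13, loadFactorDen = 2 and bucketCnt = 8, the threshold is a load factor of 6.5 entries per bucket. A quick check of the arithmetic, with the constants copied locally so the sketch is self-contained:

    package main

    import "fmt"

    const (
        bucketCnt     = 8
        loadFactorNum = 13
        loadFactorDen = 2
    )

    func bucketShift(B uint8) uintptr { return uintptr(1) << B }

    func overLoadFactor(count int, B uint8) bool {
        return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
    }

    func main() {
        // B = 5 means 32 buckets; 13*(32/2) = 208, so the 209th item is over.
        fmt.Println(overLoadFactor(208, 5)) // false
        fmt.Println(overLoadFactor(209, 5)) // true
    }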
Same-size growth: triggered when there are too many overflow buckets, i.e. noverflow reaches 2^B, with the threshold capped at 2^15 = 32768.
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
    // If the threshold is too low, we do extraneous work.
    // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
    // "too many" means (approximately) as many overflow buckets as regular buckets.
    // See incrnoverflow for more details.
    if B > 15 {
        B = 15
    }
    // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
    return noverflow >= uint16(1)<<(B&15)
}
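The same function can be exercised stand-alone to see the capped threshold (the function body is copied locally, as in the previous sketch):

    package main

    import "fmt"

    func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
        if B > 15 {
            B = 15
        }
        return noverflow >= uint16(1)<<(B&15)
    }

    func main() {
        fmt.Println(tooManyOverflowBuckets(15, 4)) // false: 15 < 2^4
        fmt.Println(tooManyOverflowBuckets(16, 4)) // true:  16 >= 2^4
        // For B >= 15 the threshold is capped at 2^15 = 32768.
        fmt.Println(tooManyOverflowBuckets(32768, 20)) // true
    }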