Skip to content

Instantly share code, notes, and snippets.

@warpfork
Last active August 29, 2015 14:13
Show Gist options
  • Save warpfork/bbd641202e8d5c5c5778 to your computer and use it in GitHub Desktop.
Save warpfork/bbd641202e8d5c5c5778 to your computer and use it in GitHub Desktop.
"generic" channels & benchmark
package what
import (
"encoding/json"
"reflect"
"testing"
)
// testStruct is the message payload shuttled through every benchmark
// below: three exported string fields so encoding/json will
// marshal/unmarshal them.
type testStruct struct {
	A string
	B string
	C string
}
// newTestMessage builds the canonical three-field message used as the
// payload throughout these benchmarks.
func newTestMessage() *testStruct {
	msg := new(testStruct)
	msg.A = "one"
	msg.B = "two"
	msg.C = "three"
	return msg
}
// preencodedMessage holds the JSON encoding of the canonical test
// message, computed once up front so the write-side benchmarks measure
// only Unmarshal plus channel traffic.
var preencodedMessage []byte

func init() {
	var err error
	preencodedMessage, err = json.Marshal(newTestMessage())
	if err != nil {
		// The original discarded this error; a failed Marshal would leave
		// the fixture nil and silently skew every benchmark. Fail loudly.
		panic(err)
	}
}
func Benchmark_Writes_Baseline(b *testing.B) {
for i := 0; i < b.N; i++ {
json.Unmarshal(preencodedMessage, &testStruct{})
}
}
// regularChanWrite pushes msg onto out through a plain typed send.
// Deliberately trivial: it exists for symmetry with reflectChanWrite,
// so both sides of the comparison pay one function-call overhead.
func regularChanWrite(out chan *testStruct, msg *testStruct) {
	out <- msg
}
// reflectChanWrite sends x on ch via the reflect API, forcing the send
// through the runtime's dynamic type machinery rather than a static
// typed channel operation.
func reflectChanWrite(ch interface{}, x interface{}) {
	chVal := reflect.ValueOf(ch)
	msgVal := reflect.ValueOf(x)
	chVal.Send(msgVal)
}
// Benchmark_RegularChan_Writes_Buffered times decode-then-send into a
// channel buffered to b.N — large enough that no send ever blocks —
// using plain typed channel writes.
func Benchmark_RegularChan_Writes_Buffered(b *testing.B) {
	sink := make(chan *testStruct, b.N)
	for n := 0; n < b.N; n++ {
		msg := new(testStruct)
		json.Unmarshal(preencodedMessage, msg)
		regularChanWrite(sink, msg)
	}
	close(sink)
}
// Benchmark_ReflectChan_Writes_Buffered is the reflect-based counterpart
// of Benchmark_RegularChan_Writes_Buffered: same decode-then-send loop,
// but each send goes through reflect.Value.Send.
func Benchmark_ReflectChan_Writes_Buffered(b *testing.B) {
	sink := make(chan *testStruct, b.N)
	for n := 0; n < b.N; n++ {
		msg := new(testStruct)
		json.Unmarshal(preencodedMessage, msg)
		reflectChanWrite(sink, msg)
	}
	close(sink)
}
// Benchmark_RegularChan_Writes_Unbuffered times decode-then-send over an
// unbuffered channel, so every send pays a goroutine rendezvous. A
// background drainer keeps the sender from blocking forever.
func Benchmark_RegularChan_Writes_Unbuffered(b *testing.B) {
	sink := make(chan *testStruct)
	go func() {
		for {
			if _, ok := <-sink; !ok {
				return
			}
		}
	}()
	for n := 0; n < b.N; n++ {
		msg := new(testStruct)
		json.Unmarshal(preencodedMessage, msg)
		regularChanWrite(sink, msg)
	}
	close(sink)
}
// Benchmark_ReflectChan_Writes_Unbuffered mirrors the unbuffered regular
// write benchmark, with the send routed through reflect.Value.Send.
func Benchmark_ReflectChan_Writes_Unbuffered(b *testing.B) {
	sink := make(chan *testStruct)
	go func() {
		for {
			if _, ok := <-sink; !ok {
				return
			}
		}
	}()
	for n := 0; n < b.N; n++ {
		msg := new(testStruct)
		json.Unmarshal(preencodedMessage, msg)
		reflectChanWrite(sink, msg)
	}
	close(sink)
}
// Benchmark_Reads_Baseline times a bare JSON encode. The read-side
// scenario of interest is a worker goroutine *reading* structures off a
// channel and serializing them for the wire, so the "reads" baseline is
// actually a Marshal call.
func Benchmark_Reads_Baseline(b *testing.B) {
	for n := 0; n < b.N; n++ {
		json.Marshal(newTestMessage())
	}
}
// regularChanRead pulls the next message off ch with a plain typed
// receive; once ch is closed it yields nil.
func regularChanRead(ch chan *testStruct) *testStruct {
	msg := <-ch
	return msg
}
// reflectChanRead pulls the next message off ch through the reflect API
// and returns it as an untyped interface value. The closed-channel
// indicator from Recv is deliberately discarded, matching
// regularChanRead's behavior of yielding nil once ch is closed.
func reflectChanRead(ch chan *testStruct) interface{} {
	received, _ := reflect.ValueOf(ch).Recv()
	return received.Interface()
}
// Benchmark_RegularChan_Reads_Buffered pre-fills a buffered channel with
// b.N messages, then times receive-plus-encode using plain typed reads.
func Benchmark_RegularChan_Reads_Buffered(b *testing.B) {
	src := make(chan *testStruct, b.N)
	for n := 0; n < b.N; n++ {
		src <- newTestMessage()
	}
	close(src)
	b.ResetTimer() // exclude the fill cost; time only receive+Marshal
	for n := 0; n < b.N; n++ {
		msg := regularChanRead(src)
		json.Marshal(msg)
	}
}
// Benchmark_ReflectChan_Reads_Buffered is the reflect-based counterpart
// of Benchmark_RegularChan_Reads_Buffered: receives go through
// reflect.Value.Recv.
func Benchmark_ReflectChan_Reads_Buffered(b *testing.B) {
	src := make(chan *testStruct, b.N)
	for n := 0; n < b.N; n++ {
		src <- newTestMessage()
	}
	close(src)
	b.ResetTimer() // exclude the fill cost; time only receive+Marshal
	for n := 0; n < b.N; n++ {
		msg := reflectChanRead(src)
		json.Marshal(msg)
	}
}
// Benchmark_ReflectChanCasted_Reads_Buffered repeats the reflect read
// benchmark but additionally type-asserts the received interface value
// back to *testStruct before encoding, to price the assertion itself.
func Benchmark_ReflectChanCasted_Reads_Buffered(b *testing.B) {
	src := make(chan *testStruct, b.N)
	for n := 0; n < b.N; n++ {
		src <- newTestMessage()
	}
	close(src)
	b.ResetTimer() // exclude the fill cost; time receive+assert+Marshal
	for n := 0; n < b.N; n++ {
		msg := reflectChanRead(src).(*testStruct)
		json.Marshal(msg)
	}
}
// Benchmark_RegularChan_Reads_Unbuffered times receive-plus-encode over
// an unbuffered channel fed by a producer goroutine, so every read pays
// a goroutine rendezvous.
func Benchmark_RegularChan_Reads_Unbuffered(b *testing.B) {
	src := make(chan *testStruct)
	go func() {
		defer close(src)
		for n := 0; n < b.N; n++ {
			src <- newTestMessage()
		}
	}()
	for n := 0; n < b.N; n++ {
		msg := regularChanRead(src)
		json.Marshal(msg)
	}
}
// Benchmark_ReflectChan_Reads_Unbuffered mirrors the unbuffered regular
// read benchmark, with receives routed through reflect.Value.Recv.
func Benchmark_ReflectChan_Reads_Unbuffered(b *testing.B) {
	src := make(chan *testStruct)
	go func() {
		defer close(src)
		for n := 0; n < b.N; n++ {
			src <- newTestMessage()
		}
	}()
	for n := 0; n < b.N; n++ {
		msg := reflectChanRead(src)
		json.Marshal(msg)
	}
}
// msgsBeforeStop is how many messages the consumer goroutine accepts in
// the select benchmarks before it closes the stop channel; the producer
// always attempts one send more than this.
const msgsBeforeStop = 3
// Benchmark_RegularChan_Select_Unbuffered plays out a full
// producer/consumer shutdown handshake per iteration, using a native
// two-case select (send vs. stop signal) on typed channels.
func Benchmark_RegularChan_Select_Unbuffered(b *testing.B) {
	// note that this test sequence on select is doing More Work than the priors;
	// this one is effectively only a baseline for Benchmark_ReflectChan_Select_Unbuffered, and is not comparable to anything else.
	for i := 0; i < b.N; i++ {
		// Fresh channels each iteration: every round is an independent
		// handshake between one producer and one consumer.
		ch := make(chan *testStruct)
		stopCh := make(chan struct{})
		// Consumer: accept exactly msgsBeforeStop messages, then signal
		// the producer to stop by closing stopCh.
		go func() {
			for n := 0; n < msgsBeforeStop; n++ {
				<-ch
			}
			close(stopCh)
		}()
		// Producer: attempts msgsBeforeStop+1 sends, so at most
		// msgsBeforeStop can succeed and the final select must exit via
		// the stop case.
		func() {
			defer close(ch)
			for i := 0; i < msgsBeforeStop+1; i++ {
				x := &testStruct{}
				json.Unmarshal(preencodedMessage, x)
				select {
				case ch <- x:
				case <-stopCh:
					return
				}
			}
		}()
	}
}
// Benchmark_ReflectChan_Select_Unbuffered mirrors
// Benchmark_RegularChan_Select_Unbuffered but drives the two-case select
// through reflect.Select, constructing the []reflect.SelectCase slice
// afresh for every message sent.
func Benchmark_ReflectChan_Select_Unbuffered(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ch := make(chan *testStruct)
		chValue := reflect.ValueOf(ch)
		stopCh := make(chan struct{})
		stopChValue := reflect.ValueOf(stopCh)
		// Consumer: accept exactly msgsBeforeStop messages, then signal
		// the producer to stop by closing stopCh.
		go func() {
			for n := 0; n < msgsBeforeStop; n++ {
				<-ch
			}
			close(stopCh)
		}()
		// Producer: attempts one send more than the consumer accepts, so
		// the last select must resolve via the stop case (index 0).
		func() {
			defer close(ch)
			for i := 0; i < msgsBeforeStop+1; i++ {
				x := &testStruct{}
				json.Unmarshal(preencodedMessage, x)
				// Case 0: stop-channel receive; case 1: message send.
				chosen, _, _ := reflect.Select([]reflect.SelectCase{
					{
						Dir:  reflect.SelectRecv,
						Chan: stopChValue,
					},
					{
						Dir:  reflect.SelectSend,
						Chan: chValue,
						Send: reflect.ValueOf(x),
					},
				})
				switch chosen {
				case 0:
					return
				default:
				}
			}
		}()
	}
}
// Well, not really, apparently.
// I tried lifting the allocs on the `[]reflect.SelectCase` out to reduce the cost per message, but it didn't matter --
// evidently, the increased costs on the selection are pretty much all integral.
// Benchmark_ReflectChan_Select_Unbuffered_Optimized repeats the reflect
// select benchmark but hoists the []reflect.SelectCase allocation out of
// the per-message loop, mutating only the Send field each iteration.
// (Per the note above the function: this made no measurable difference.)
func Benchmark_ReflectChan_Select_Unbuffered_Optimized(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ch := make(chan *testStruct)
		chValue := reflect.ValueOf(ch)
		stopCh := make(chan struct{})
		stopChValue := reflect.ValueOf(stopCh)
		// Consumer: accept exactly msgsBeforeStop messages, then signal
		// the producer to stop by closing stopCh.
		go func() {
			for n := 0; n < msgsBeforeStop; n++ {
				<-ch
			}
			close(stopCh)
		}()
		// Built once per outer iteration; only cases[1].Send changes in
		// the inner loop. Case 0: stop receive; case 1: message send.
		cases := []reflect.SelectCase{
			{
				Dir:  reflect.SelectRecv,
				Chan: stopChValue,
			},
			{
				Dir:  reflect.SelectSend,
				Chan: chValue,
			},
		}
		func() {
			defer close(ch)
			for i := 0; i < msgsBeforeStop+1; i++ {
				x := &testStruct{}
				json.Unmarshal(preencodedMessage, x)
				cases[1].Send = reflect.ValueOf(x)
				chosen, _, _ := reflect.Select(cases)
				switch chosen {
				case 0:
					return
				default:
				}
			}
		}()
	}
}
Benchmark_Writes_Baseline 500000 2502 ns/op
Benchmark_RegularChan_Writes_Buffered 1000000 2300 ns/op
Benchmark_ReflectChan_Writes_Buffered 500000 2374 ns/op
Benchmark_RegularChan_Writes_Unbuffered 300000 4677 ns/op
Benchmark_ReflectChan_Writes_Unbuffered 300000 4722 ns/op
Benchmark_Reads_Baseline 500000 2441 ns/op
Benchmark_RegularChan_Reads_Buffered 2000000 927 ns/op
Benchmark_ReflectChan_Reads_Buffered 1000000 1060 ns/op
Benchmark_ReflectChanCasted_Reads_Buffered 1000000 1071 ns/op
Benchmark_RegularChan_Reads_Unbuffered 500000 3556 ns/op
Benchmark_ReflectChan_Reads_Unbuffered 500000 3789 ns/op
Benchmark_RegularChan_Select_Unbuffered 50000 25559 ns/op
Benchmark_ReflectChan_Select_Unbuffered 50000 35314 ns/op
Benchmark_ReflectChan_Select_Unbuffered_Optimized 50000 35195 ns/op
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment