正如 Rob Napier 所建议的那样,您几乎可以肯定是达到了文件描述符的限制。
编辑: 改进的并发版本:
该程序创建了一个包含 max 个 goroutine 的 worker 池:worker 从请求通道中取出请求,处理后把结果发送到响应通道。请求由 dispatcher 排入请求通道,goroutine 由 workerPool 启动;每个 worker 一次处理一个任务,直到请求通道被取空,而 consumer 则持续处理响应通道,直到成功响应的数量等于请求总数。
package mainimport ( "flag" "fmt" "log" "net/http" "runtime" "time")var ( reqs int max int)func init() { flag.IntVar(&reqs, "reqs", 1000000, "Total requests") flag.IntVar(&max, "concurrent", 200, "Maximum concurrent requests")}type Response struct { *http.Response err error}// Dispatcherfunc dispatcher(reqChan chan *http.Request) { defer close(reqChan) for i := 0; i < reqs; i++ { req, err := http.NewRequest("GET", "http://localhost/", nil) if err != nil { log.Println(err) } reqChan <- req }}// Worker Poolfunc workerPool(reqChan chan *http.Request, respChan chan Response) { t := &http.Transport{} for i := 0; i < max; i++ { go worker(t, reqChan, respChan) }}// Workerfunc worker(t *http.Transport, reqChan chan *http.Request, respChan chan Response) { for req := range reqChan { resp, err := t.RoundTrip(req) r := Response{resp, err} respChan <- r }}// Consumerfunc consumer(respChan chan Response) (int64, int64) { var ( conns int64 size int64 ) for conns < int64(reqs) { select { case r, ok := <-respChan: if ok { if r.err != nil { log.Println(r.err) } else { size += r.ContentLength if err := r.Body.Close(); err != nil { log.Println(r.err) } } conns++ } } } return conns, size}func main() { flag.Parse() runtime.GOMAXPROCS(runtime.NumCPU()) reqChan := make(chan *http.Request) respChan := make(chan Response) start := time.Now() go dispatcher(reqChan) go workerPool(reqChan, respChan) conns, size := consumer(respChan) took := time.Since(start) ns := took.Nanoseconds() av := ns / conns average, err := time.ParseDuration(fmt.Sprintf("%d", av) + "ns") if err != nil { log.Println(err) } fmt.Printf("Connections:t%dnConcurrent:t%dnTotal size:t%d bytesnTotal time:t%snAverage time:t%sn", conns, max, size, took, average)}产生:
连接:1000000
并发:200
总大小:15000000字节
总时间:36m39.6778103s
平均时间:2.199677ms
警告:这会 非常 迅速地触及系统资源限制。在我的笔记本电脑上,并发 worker 超过 206 个就导致我的本地测试 Web 服务器崩溃!
操场
原始答案: 下面的程序使用带缓冲的 chan bool 作为信号量通道,用它来限制并发请求的数量。您可以调整这个数字以及请求总数,以便对系统进行压力测试并找出极限值。
package mainimport ( "fmt" "net/http" "runtime" "time")type Resp struct { *http.Response err error}func makeResponses(reqs int, rc chan Resp, sem chan bool) { defer close(rc) defer close(sem) for reqs > 0 { select { case sem <- true: req, _ := http.NewRequest("GET", "http://localhost/", nil) transport := &http.Transport{} resp, err := transport.RoundTrip(req) r := Resp{resp, err} rc <- r reqs-- default: <-sem } }}func getResponses(rc chan Resp) int { conns := 0 for { select { case r, ok := <-rc: if ok { conns++ if r.err != nil { fmt.Println(r.err) } else { // Do something with response if err := r.Body.Close(); err != nil { fmt.Println(r.err) } } } else { return conns } } }}func main() { reqs := 100000 maxConcurrent := 1000 runtime.GOMAXPROCS(runtime.NumCPU()) rc := make(chan Resp) sem := make(chan bool, maxConcurrent) start := time.Now() go makeResponses(reqs, rc, sem) conns := getResponses(rc) end := time.Since(start) fmt.Printf("Connections: %dnTotal time: %sn", conns, end)}这将打印如下内容:
连接数:100000
总时间:6m8.2554629s
该测试是在本地Web服务器上完成的,每个请求返回的总响应大小为85B,因此这不是实际的结果。另外,除了关闭它的主体之外,我不对响应进行任何处理。
在最多 1000 个并发请求的情况下,我的笔记本电脑花了 6 分钟多一点完成 10 万个请求,因此我估计 100 万个请求大约需要一个小时。调整 maxConcurrent 变量将帮助您找到系统的最佳性能。



