overhaul everything

This commit is contained in:
Jimmy Zelinskie
2014-06-23 22:47:43 -04:00
parent 18f6c32d97
commit 3bfb3074b4
27 changed files with 1193 additions and 1143 deletions

View File

@@ -1,116 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package server
import (
"errors"
"net/http"
"path"
"strconv"
"github.com/chihaya/chihaya/config"
)
// announce represents all of the data from an announce request.
type announce struct {
	Compact    bool   // client asked for the compact (binary) peer list format ("compact=1")
	Downloaded uint64 // total bytes downloaded, as reported by the client
	Event      string // optional event: e.g. "started", "stopped", "completed", or ""
	IP         string // IP address the peer should be reached at
	Infohash   string // info_hash of the torrent being announced
	Left       uint64 // bytes left to download; 0 means the peer is seeding
	NumWant    int    // number of peers the client wants in the response
	Passkey    string // directory portion of the request path (per-user passkey)
	PeerID     string // unique peer identifier chosen by the client
	Port       uint64 // port the peer listens on
	Uploaded   uint64 // total bytes uploaded, as reported by the client
}
// newAnnounce parses an HTTP request and generates an Announce.
// It returns an error when any required announce field is absent
// or cannot be parsed.
func newAnnounce(r *http.Request, conf *config.Config) (*announce, error) {
	query, err := parseQuery(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	// Numeric fields; each parse error is checked collectively below.
	downloaded, errDown := query.getUint64("downloaded")
	left, errLeft := query.getUint64("left")
	port, errPort := query.getUint64("port")
	uploaded, errUp := query.getUint64("uploaded")

	infohash := query.Params["info_hash"]
	peerID := query.Params["peer_id"]
	event := query.Params["event"]
	ip, _ := requestedIP(r, query)

	// The passkey is the directory portion of the request path.
	dir, _ := path.Split(r.URL.Path)

	// Reject the request when any required field is absent or malformed.
	valid := errDown == nil &&
		errLeft == nil &&
		errPort == nil &&
		errUp == nil &&
		infohash != "" &&
		peerID != "" &&
		ip != ""
	if !valid {
		return nil, errors.New("malformed request")
	}

	return &announce{
		Compact:    query.Params["compact"] == "1",
		Downloaded: downloaded,
		Event:      event,
		IP:         ip,
		Infohash:   infohash,
		Left:       left,
		NumWant:    requestedPeerCount(conf.DefaultNumWant, query),
		Passkey:    dir,
		PeerID:     peerID,
		Port:       port,
		Uploaded:   uploaded,
	}, nil
}
// requestedPeerCount returns the number of peers the client asked for
// via the "numwant" query parameter, or fallback when the parameter is
// absent or not a valid integer.
//
// BEP 3 spells the parameter "numwant" (all lowercase); the previous
// lookup of "numWant" never matched spec-compliant clients (this file's
// own test fixtures use "numwant"). The old spelling is still honored
// for backward compatibility.
func requestedPeerCount(fallback int, pq *parsedQuery) int {
	for _, key := range []string{"numwant", "numWant"} {
		if str, ok := pq.Params[key]; ok {
			numWant, err := strconv.Atoi(str)
			if err != nil {
				return fallback
			}
			return numWant
		}
	}
	return fallback
}
// requestedIP discovers the IP address a peer should be reached at,
// preferring explicit query parameters ("ip", then "ipv4") before
// falling back to the connection's remote address.
func requestedIP(r *http.Request, pq *parsedQuery) (string, error) {
	if ip, ok := pq.Params["ip"]; ok {
		return ip, nil
	}

	if ip, ok := pq.Params["ipv4"]; ok {
		return ip, nil
	}

	// NOTE(review): X-Real-Ip is normally an HTTP header set by a reverse
	// proxy, not a query parameter — confirm whether r.Header should be
	// consulted here instead of pq.Params.
	if xRealIP, ok := pq.Params["X-Real-Ip"]; ok {
		// Previously this returned string(xRealIP[0]), which converted only
		// the first byte of the value into a one-character string.
		return xRealIP, nil
	}

	if r.RemoteAddr == "" {
		return "127.0.0.1", nil
	}

	// Strip the ":port" suffix by locating the LAST colon, so that a
	// bracketed IPv6 remote address keeps its interior colons.
	portIndex := len(r.RemoteAddr) - 1
	for ; portIndex >= 0; portIndex-- {
		if r.RemoteAddr[portIndex] == ':' {
			break
		}
	}
	if portIndex != -1 {
		return r.RemoteAddr[0:portIndex], nil
	}
	return "", errors.New("failed to parse IP address")
}

View File

@@ -1,57 +0,0 @@
package server
import (
	"fmt"
	"io"
	"sort"
	"strconv"
	"time"
)
func writeBencoded(w io.Writer, data interface{}) {
switch v := data.(type) {
case string:
str := fmt.Sprintf("%s:%s", strconv.Itoa(len(v)), v)
io.WriteString(w, str)
case int:
str := fmt.Sprintf("i%se", strconv.Itoa(v))
io.WriteString(w, str)
case uint:
str := fmt.Sprintf("i%se", strconv.FormatUint(uint64(v), 10))
io.WriteString(w, str)
case int64:
str := fmt.Sprintf("i%se", strconv.FormatInt(v, 10))
io.WriteString(w, str)
case uint64:
str := fmt.Sprintf("i%se", strconv.FormatUint(v, 10))
io.WriteString(w, str)
case time.Duration: // Assume seconds
str := fmt.Sprintf("i%se", strconv.FormatInt(int64(v/time.Second), 10))
io.WriteString(w, str)
case map[string]interface{}:
io.WriteString(w, "d")
for key, val := range v {
str := fmt.Sprintf("%s:%s", strconv.Itoa(len(key)), key)
io.WriteString(w, str)
writeBencoded(w, val)
}
io.WriteString(w, "e")
case []string:
io.WriteString(w, "l")
for _, val := range v {
writeBencoded(w, val)
}
io.WriteString(w, "e")
default:
// Although not currently necessary,
// should handle []interface{} manually; Go can't do it implicitly
panic("tried to bencode an unsupported type!")
}
}

View File

@@ -1,18 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package server
import (
"github.com/chihaya/chihaya/storage/backend"
"github.com/chihaya/chihaya/storage/tracker"
)
// Primer represents a function that can prime storage with data.
type Primer func(tracker.Pool, backend.Conn) error

// Prime executes a priming function on the server, handing it the
// server's tracker pool and backend connection.
func (s *Server) Prime(p Primer) error {
	return p(s.trackerPool, s.backendConn)
}

View File

@@ -1,104 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package server
import (
"errors"
"net/url"
"strconv"
)
// parsedQuery represents a parsed URL.Query.
type parsedQuery struct {
	Infohashes []string          // all info_hash values when more than one was supplied
	Params     map[string]string // last value seen for each query key
}

// getUint64 looks up key in the query parameters and parses its value
// as an unsigned 64-bit decimal integer, returning an error when the
// key is missing or the value does not parse.
func (pq *parsedQuery) getUint64(key string) (uint64, error) {
	raw, ok := pq.Params[key]
	if !ok {
		return 0, errors.New("value does not exist for key: " + key)
	}

	parsed, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, err
	}
	return parsed, nil
}
// parseQuery parses a raw url query.
//
// The string is scanned byte-by-byte; '&', ';', and '?' all act as
// parameter separators. Every key/value pair is percent-decoded and
// recorded in Params (later duplicates overwrite earlier ones, except
// that repeated "info_hash" keys are additionally collected in
// Infohashes, which stays nil unless at least two were present).
func parseQuery(query string) (*parsedQuery, error) {
	var (
		keyStart, keyEnd int
		valStart, valEnd int
		firstInfohash    string

		onKey       = true
		hasInfohash = false

		pq = &parsedQuery{
			Infohashes: nil,
			Params:     make(map[string]string),
		}
	)

	for i, length := 0, len(query); i < length; i++ {
		separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
		if separator || i == length-1 {
			// A pair just ended (or the input ran out).
			if onKey {
				// Separator hit while still reading a key: empty pair, skip.
				keyStart = i + 1
				continue
			}
			if i == length-1 && !separator {
				// The final byte belongs to the value, unless it is a
				// trailing '=' ending an empty value.
				if query[i] == '=' {
					continue
				}
				valEnd = i
			}
			keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
			if err != nil {
				return nil, err
			}
			valStr, err := url.QueryUnescape(query[valStart : valEnd+1])
			if err != nil {
				return nil, err
			}
			pq.Params[keyStr] = valStr
			if keyStr == "info_hash" {
				if hasInfohash {
					// Multiple infohashes
					if pq.Infohashes == nil {
						pq.Infohashes = []string{firstInfohash}
					}
					pq.Infohashes = append(pq.Infohashes, valStr)
				} else {
					firstInfohash = valStr
					hasInfohash = true
				}
			}
			onKey = true
			keyStart = i + 1
		} else if query[i] == '=' {
			// Switch from accumulating the key to accumulating the value.
			onKey = false
			valStart = i + 1
		} else if onKey {
			keyEnd = i
		} else {
			valEnd = i
		}
	}
	return pq, nil
}

View File

@@ -1,95 +0,0 @@
package server
import (
"net/url"
"testing"
)
var (
	baseAddr     = "https://www.subdomain.tracker.com:80/"
	testInfoHash = "01234567890123456789"
	testPeerID   = "-TEST01-6wfG2wk6wWLc"

	// ValidAnnounceArguments holds query-parameter sets a client might
	// legitimately send; every entry must parse successfully.
	ValidAnnounceArguments = []url.Values{
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
	}

	// InvalidQueries holds raw URLs whose query strings must fail to
	// parse (here: a malformed percent-escape in info_hash).
	InvalidQueries = []string{
		baseAddr + "announce/?" + "info_hash=%0%a",
	}
)
// mapArrayEqual reports whether every key in boxed maps to a
// single-element slice whose sole entry equals the corresponding value
// in unboxed, and both maps have the same number of keys.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for key, values := range boxed {
		// Each boxed entry is expected to hold exactly one element.
		if len(values) != 1 {
			return false
		}
		if values[0] != unboxed[key] {
			return false
		}
	}

	return true
}
// TestValidQueries asserts that every well-formed announce query parses
// without error and that the parsed parameters match the input exactly.
func TestValidQueries(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := parseQuery(baseAddr + "announce/?" + parseVal.Encode())
		if err != nil {
			t.Error(err)
		}
		if !mapArrayEqual(parseVal, parsedQueryObj.Params) {
			// "Recieved" typo fixed in the failure message.
			t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.Params)
		}
	}
}
// TestInvalidQueries asserts that malformed query strings yield an
// error and a nil parse result.
func TestInvalidQueries(t *testing.T) {
	for i, query := range InvalidQueries {
		result, err := parseQuery(query)
		if err == nil {
			t.Error("Should have produced error", i)
		}
		if result != nil {
			t.Error("Should be nil after error", result, i)
		}
	}
}
// BenchmarkParseQuery measures the custom announce-query parser across
// the full set of valid announce argument lists.
func BenchmarkParseQuery(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for idx, args := range ValidAnnounceArguments {
			result, err := parseQuery(baseAddr + "announce/?" + args.Encode())
			if err != nil {
				b.Error(err, idx)
				b.Log(result)
			}
		}
	}
}
// BenchmarkURLParseQuery measures net/url's query parser on the same
// inputs, as a baseline for BenchmarkParseQuery.
//
// NOTE(review): the full URL (scheme, host, and path included) is passed
// to url.ParseQuery, mirroring what parseQuery receives above — confirm
// this is the intended comparison, since url.ParseQuery normally takes
// only the query portion of a URL.
func BenchmarkURLParseQuery(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for idx, args := range ValidAnnounceArguments {
			result, err := url.ParseQuery(baseAddr + "announce/?" + args.Encode())
			if err != nil {
				b.Error(err, idx)
				b.Log(result)
			}
		}
	}
}

View File

@@ -5,242 +5,236 @@
package server
import (
"errors"
"log"
"io"
"net"
"net/http"
"strconv"
"time"
"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/backend"
log "github.com/golang/glog"
"github.com/chihaya/chihaya/bencode"
"github.com/chihaya/chihaya/drivers/tracker"
"github.com/chihaya/chihaya/models"
)
func (s Server) serveAnnounce(w http.ResponseWriter, r *http.Request) {
// Parse the required data from a request
announce, err := newAnnounce(r, s.conf)
announce, err := models.NewAnnounce(r, s.conf)
if err != nil {
fail(err, w, r)
return
}
// Get a connection to the tracker db
conn, err := s.trackerPool.Get()
if err != nil {
log.Panicf("server: %s", err)
fail(err, w, r)
return
}
// Validate the user's passkey
user, err := validateUser(conn, announce.Passkey)
err = conn.ClientWhitelisted(announce.ClientID())
if err != nil {
fail(err, w, r)
return
}
// Check if the user's client is whitelisted
whitelisted, err := conn.ClientWhitelisted(parsePeerID(announce.PeerID))
if err != nil {
log.Panicf("server: %s", err)
}
if !whitelisted {
fail(errors.New("client is not approved"), w, r)
return
}
// Find the specified torrent
torrent, exists, err := conn.FindTorrent(announce.Infohash)
if err != nil {
log.Panicf("server: %s", err)
}
if !exists {
fail(errors.New("torrent does not exist"), w, r)
return
}
// If the torrent was pruned and the user is seeding, unprune it
if !torrent.Active && announce.Left == 0 {
err := conn.MarkActive(torrent)
var user *models.User
if s.conf.Private {
user, err = conn.FindUser(announce.Passkey)
if err != nil {
log.Panicf("server: %s", err)
fail(err, w, r)
return
}
}
now := time.Now().Unix()
// Create a new peer object from the request
peer := &storage.Peer{
ID: announce.PeerID,
UserID: user.ID,
TorrentID: torrent.ID,
IP: announce.IP,
Port: announce.Port,
Uploaded: announce.Uploaded,
Downloaded: announce.Downloaded,
Left: announce.Left,
LastAnnounce: now,
}
delta := &backend.AnnounceDelta{
Peer: peer,
Torrent: torrent,
User: user,
Timestamp: now,
torrent, err := conn.FindTorrent(announce.Infohash)
if err != nil {
fail(err, w, r)
return
}
// Look for the user in in the pool of seeders and leechers
_, seeder := torrent.Seeders[storage.PeerMapKey(peer)]
_, leecher := torrent.Leechers[storage.PeerMapKey(peer)]
peer := models.NewPeer(torrent, user, announce)
created, err := updateTorrent(peer, torrent, conn, announce)
if err != nil {
fail(err, w, r)
return
}
snatched, err := handleEvent(announce, user, torrent, peer, conn)
if err != nil {
fail(err, w, r)
return
}
writeAnnounceResponse(w, announce, user, torrent)
delta := models.NewAnnounceDelta(peer, user, announce, torrent, created, snatched)
s.backendConn.RecordAnnounce(delta)
log.V(3).Infof("chihaya: handled announce from %s", announce.IP)
}
func updateTorrent(p *models.Peer, t *models.Torrent, conn tracker.Conn, a *models.Announce) (created bool, err error) {
if !t.Active && a.Left == 0 {
err = conn.MarkActive(t)
if err != nil {
return
}
}
switch {
// Guarantee that no user is in both pools
case seeder && leecher:
if announce.Left == 0 {
err := conn.RemoveLeecher(torrent, peer)
if err != nil {
log.Panicf("server: %s", err)
}
leecher = false
} else {
err := conn.RemoveSeeder(torrent, peer)
if err != nil {
log.Panicf("server: %s", err)
}
seeder = false
case t.InSeederPool(p):
err = conn.SetSeeder(t, p)
if err != nil {
return
}
case seeder:
// Update the peer with the stats from the request
err := conn.SetSeeder(torrent, peer)
case t.InLeecherPool(p):
err = conn.SetLeecher(t, p)
if err != nil {
log.Panicf("server: %s", err)
}
case leecher:
// Update the peer with the stats from the request
err := conn.SetLeecher(torrent, peer)
if err != nil {
log.Panicf("server: %s", err)
return
}
default:
if announce.Left == 0 {
// Save the peer as a new seeder
err := conn.AddSeeder(torrent, peer)
if a.Left == 0 {
err = conn.AddSeeder(t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
} else {
err = conn.AddLeecher(torrent, peer)
err = conn.AddLeecher(t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
}
delta.Created = true
created = true
}
// Handle any events in the request
return
}
func handleEvent(a *models.Announce, u *models.User, t *models.Torrent, p *models.Peer, conn tracker.Conn) (snatched bool, err error) {
switch {
case announce.Event == "stopped" || announce.Event == "paused":
if seeder {
err := conn.RemoveSeeder(torrent, peer)
case a.Event == "stopped" || a.Event == "paused":
if t.InSeederPool(p) {
err = conn.RemoveSeeder(t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
}
if leecher {
err := conn.RemoveLeecher(torrent, peer)
if t.InLeecherPool(p) {
err = conn.RemoveLeecher(t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
}
case announce.Event == "completed":
err := conn.RecordSnatch(user, torrent)
case a.Event == "completed":
err = conn.IncrementSnatches(t)
if err != nil {
log.Panicf("server: %s", err)
return
}
delta.Snatched = true
if leecher {
err := conn.LeecherFinished(torrent, peer)
snatched = true
if t.InLeecherPool(p) {
err = tracker.LeecherFinished(conn, t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
}
case leecher && announce.Left == 0:
case t.InLeecherPool(p) && a.Left == 0:
// A leecher completed but the event was never received
err := conn.LeecherFinished(torrent, peer)
err = tracker.LeecherFinished(conn, t, p)
if err != nil {
log.Panicf("server: %s", err)
return
}
}
if announce.IP != peer.IP || announce.Port != peer.Port {
peer.Port = announce.Port
peer.IP = announce.IP
}
return
}
// Generate the response
seedCount := len(torrent.Seeders)
leechCount := len(torrent.Leechers)
func writeAnnounceResponse(w io.Writer, a *models.Announce, u *models.User, t *models.Torrent) {
bencoder := bencode.NewEncoder(w)
seedCount := len(t.Seeders)
leechCount := len(t.Leechers)
writeBencoded(w, "d")
writeBencoded(w, "complete")
writeBencoded(w, seedCount)
writeBencoded(w, "incomplete")
writeBencoded(w, leechCount)
writeBencoded(w, "interval")
writeBencoded(w, s.conf.Announce.Duration)
writeBencoded(w, "min interval")
writeBencoded(w, s.conf.MinAnnounce.Duration)
bencoder.Encode("d")
bencoder.Encode("complete")
bencoder.Encode(seedCount)
bencoder.Encode("incomplete")
bencoder.Encode(leechCount)
bencoder.Encode("interval")
bencoder.Encode(a.Config.Announce.Duration)
bencoder.Encode("min interval")
bencoder.Encode(a.Config.MinAnnounce.Duration)
if announce.NumWant > 0 && announce.Event != "stopped" && announce.Event != "paused" {
writeBencoded(w, "peers")
var peerCount, count int
if a.NumWant > 0 && a.Event != "stopped" && a.Event != "paused" {
bencoder.Encode("peers")
if announce.Compact {
if announce.Left > 0 {
peerCount = minInt(announce.NumWant, leechCount)
var peerCount int
if a.Compact {
if a.Left == 0 {
peerCount = minInt(a.NumWant, leechCount)
} else {
peerCount = minInt(announce.NumWant, leechCount+seedCount-1)
peerCount = minInt(a.NumWant, leechCount+seedCount-1)
}
writeBencoded(w, strconv.Itoa(peerCount*6))
writeBencoded(w, ":")
// 6 is the number of bytes 1 compact peer takes up.
bencoder.Encode(strconv.Itoa(peerCount * 6))
bencoder.Encode(":")
} else {
writeBencoded(w, "l")
bencoder.Encode("l")
}
if announce.Left > 0 {
var count int
if a.Left == 0 {
// If they're seeding, give them only leechers
count += writeLeechers(w, user, torrent, announce.NumWant, announce.Compact)
count = writePeers(w, u, t.Leechers, a.NumWant, a.Compact)
} else {
// If they're leeching, prioritize giving them seeders
count += writeSeeders(w, user, torrent, announce.NumWant, announce.Compact)
count += writeLeechers(w, user, torrent, announce.NumWant-count, announce.Compact)
count += writePeers(w, u, t.Seeders, a.NumWant, a.Compact)
count += writePeers(w, u, t.Leechers, a.NumWant-count, a.Compact)
}
if a.Compact && peerCount != count {
log.Errorf("calculated peer count (%d) != real count (%d)", peerCount, count)
}
if announce.Compact && peerCount != count {
log.Panicf("calculated peer count (%d) != real count (%d)", peerCount, count)
}
if !announce.Compact {
writeBencoded(w, "e")
if !a.Compact {
bencoder.Encode("e")
}
}
writeBencoded(w, "e")
bencoder.Encode("e")
}
rawDeltaUp := peer.Uploaded - announce.Uploaded
rawDeltaDown := peer.Downloaded - announce.Downloaded
func writePeers(w io.Writer, user *models.User, peers map[string]models.Peer, numWant int, compact bool) (count int) {
bencoder := bencode.NewEncoder(w)
for _, peer := range peers {
if count >= numWant {
break
}
// Restarting a torrent may cause a delta to be negative.
if rawDeltaUp < 0 {
rawDeltaUp = 0
}
if rawDeltaDown < 0 {
rawDeltaDown = 0
if peer.UserID == user.ID {
continue
}
if compact {
if ip := net.ParseIP(peer.IP); ip != nil {
w.Write(ip)
w.Write([]byte{byte(peer.Port >> 8), byte(peer.Port & 0xff)})
}
} else {
bencoder.Encode("d")
bencoder.Encode("ip")
bencoder.Encode(peer.IP)
bencoder.Encode("peer id")
bencoder.Encode(peer.ID)
bencoder.Encode("port")
bencoder.Encode(peer.Port)
bencoder.Encode("e")
}
count++
}
delta.Uploaded = uint64(float64(rawDeltaUp) * user.UpMultiplier * torrent.UpMultiplier)
delta.Downloaded = uint64(float64(rawDeltaDown) * user.DownMultiplier * torrent.DownMultiplier)
s.backendConn.RecordAnnounce(delta)
return
}
func minInt(a, b int) int {
@@ -250,61 +244,3 @@ func minInt(a, b int) int {
return b
}
// writeSeeders writes up to numWant of torrent t's seeders to w as
// bencoded peer dictionaries, skipping the requesting user's own peers,
// and returns how many peers were counted.
func writeSeeders(w http.ResponseWriter, user *storage.User, t *storage.Torrent, numWant int, compact bool) int {
	count := 0
	for _, peer := range t.Seeders {
		if count >= numWant {
			break
		}
		// Never hand a peer back to the user that owns it.
		if peer.UserID == user.ID {
			continue
		}
		if compact {
			// TODO writeBencoded(w, compactAddr)
			// NOTE(review): compact mode currently writes nothing for this
			// peer yet still increments count below — confirm intended.
		} else {
			writeBencoded(w, "d")
			writeBencoded(w, "ip")
			writeBencoded(w, peer.IP)
			writeBencoded(w, "peer id")
			writeBencoded(w, peer.ID)
			writeBencoded(w, "port")
			writeBencoded(w, peer.Port)
			writeBencoded(w, "e")
		}
		count++
	}
	return count
}
// writeLeechers writes up to numWant of torrent t's leechers to w as
// bencoded peer dictionaries, skipping the requesting user's own peers,
// and returns how many peers were counted.
func writeLeechers(w http.ResponseWriter, user *storage.User, t *storage.Torrent, numWant int, compact bool) int {
	count := 0
	for _, peer := range t.Leechers {
		if count >= numWant {
			break
		}
		// Never hand a peer back to the user that owns it.
		if peer.UserID == user.ID {
			continue
		}
		if compact {
			// TODO writeBencoded(w, compactAddr)
			// NOTE(review): compact mode currently writes nothing for this
			// peer yet still increments count below — confirm intended.
		} else {
			writeBencoded(w, "d")
			writeBencoded(w, "ip")
			writeBencoded(w, peer.IP)
			writeBencoded(w, "peer id")
			writeBencoded(w, peer.ID)
			writeBencoded(w, "port")
			writeBencoded(w, peer.Port)
			writeBencoded(w, "e")
		}
		count++
	}
	return count
}

View File

@@ -9,12 +9,12 @@ import (
"net/http/httptest"
"testing"
"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/backend"
"github.com/chihaya/chihaya/storage/tracker"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
"github.com/chihaya/chihaya/models"
_ "github.com/chihaya/chihaya/storage/backend/mock"
_ "github.com/chihaya/chihaya/storage/tracker/mock"
_ "github.com/chihaya/chihaya/drivers/backend/mock"
_ "github.com/chihaya/chihaya/drivers/tracker/mock"
)
func TestAnnounce(t *testing.T) {
@@ -29,7 +29,7 @@ func TestAnnounce(t *testing.T) {
return
}
err = conn.AddUser(&storage.User{
err = conn.AddUser(&models.User{
ID: 1,
Passkey: "yby47f04riwpndba456rqxtmifenq5h6",
})
@@ -42,11 +42,11 @@ func TestAnnounce(t *testing.T) {
return
}
err = conn.AddTorrent(&storage.Torrent{
err = conn.AddTorrent(&models.Torrent{
ID: 1,
Infohash: string([]byte{0x89, 0xd4, 0xbc, 0x52, 0x11, 0x16, 0xca, 0x1d, 0x42, 0xa2, 0xf3, 0x0d, 0x1f, 0x27, 0x4d, 0x94, 0xe4, 0x68, 0x1d, 0xaf}),
Seeders: make(map[string]storage.Peer),
Leechers: make(map[string]storage.Peer),
Seeders: make(map[string]models.Peer),
Leechers: make(map[string]models.Peer),
})
return
@@ -64,8 +64,8 @@ func TestAnnounce(t *testing.T) {
w := httptest.NewRecorder()
s.serveAnnounce(w, r)
if w.Body.String() != "1:d8:completei0e10:incompletei1e8:intervali1800e12:min intervali900e1:e" {
t.Errorf("improper response from server")
if w.Body.String() != "1:d8:completei0e10:incompletei0e8:intervali1800e12:min intervali900e1:e" {
t.Errorf("improper response from server:\n%s", w.Body.String())
}
}

View File

@@ -5,72 +5,67 @@
package server
import (
"errors"
"io"
"log"
"net/http"
"path"
"github.com/chihaya/chihaya/storage"
log "github.com/golang/glog"
"github.com/chihaya/chihaya/bencode"
"github.com/chihaya/chihaya/models"
)
func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request) {
// Parse the query
pq, err := parseQuery(r.URL.RawQuery)
if err != nil {
fail(errors.New("error parsing query"), w, r)
return
}
// Get a connection to the tracker db
conn, err := s.trackerPool.Get()
if err != nil {
log.Fatal(err)
}
// Find and validate the user
passkey, _ := path.Split(r.URL.Path)
_, err = validateUser(conn, passkey)
scrape, err := models.NewScrape(r, s.conf)
if err != nil {
fail(err, w, r)
return
}
io.WriteString(w, "d")
writeBencoded(w, "files")
if pq.Infohashes != nil {
for _, infohash := range pq.Infohashes {
torrent, exists, err := conn.FindTorrent(infohash)
if err != nil {
log.Panicf("server: %s", err)
}
if exists {
writeBencoded(w, infohash)
writeScrapeInfo(w, torrent)
}
}
} else if infohash, exists := pq.Params["info_hash"]; exists {
torrent, exists, err := conn.FindTorrent(infohash)
conn, err := s.trackerPool.Get()
if err != nil {
fail(err, w, r)
}
if s.conf.Private {
_, err = conn.FindUser(scrape.Passkey)
if err != nil {
log.Panicf("server: %s", err)
}
if exists {
writeBencoded(w, infohash)
writeScrapeInfo(w, torrent)
fail(err, w, r)
return
}
}
io.WriteString(w, "e")
var torrents []*models.Torrent
for _, infohash := range scrape.Infohashes {
torrent, err := conn.FindTorrent(infohash)
if err != nil {
fail(err, w, r)
return
}
torrents = append(torrents, torrent)
}
bencoder := bencode.NewEncoder(w)
bencoder.Encode("d")
bencoder.Encode("files")
for _, torrent := range torrents {
writeTorrentStatus(w, torrent)
}
bencoder.Encode("e")
log.V(3).Infof("chihaya: handled scrape from %s", r.RemoteAddr)
w.(http.Flusher).Flush()
}
func writeScrapeInfo(w io.Writer, torrent *storage.Torrent) {
io.WriteString(w, "d")
writeBencoded(w, "complete")
writeBencoded(w, len(torrent.Seeders))
writeBencoded(w, "downloaded")
writeBencoded(w, torrent.Snatches)
writeBencoded(w, "incomplete")
writeBencoded(w, len(torrent.Leechers))
io.WriteString(w, "e")
// writeTorrentStatus bencodes the scrape statistics for a single
// torrent — seeder count ("complete"), snatch count ("downloaded"),
// and leecher count ("incomplete") — keyed by the torrent's infohash.
func writeTorrentStatus(w io.Writer, t *models.Torrent) {
	bencoder := bencode.NewEncoder(w)
	// The dictionary entry must be keyed by the torrent's actual infohash;
	// the previous code encoded the literal string "t.Infohash".
	bencoder.Encode(t.Infohash)
	bencoder.Encode("d")
	bencoder.Encode("complete")
	bencoder.Encode(len(t.Seeders))
	bencoder.Encode("downloaded")
	bencoder.Encode(t.Snatches)
	bencoder.Encode("incomplete")
	bencoder.Encode(len(t.Leechers))
	bencoder.Encode("e")
}

View File

@@ -10,8 +10,8 @@ import (
"testing"
"github.com/chihaya/chihaya/config"
_ "github.com/chihaya/chihaya/storage/backend/mock"
_ "github.com/chihaya/chihaya/storage/tracker/mock"
_ "github.com/chihaya/chihaya/drivers/backend/mock"
_ "github.com/chihaya/chihaya/drivers/tracker/mock"
)
func newTestServer() (*Server, error) {

View File

@@ -8,7 +8,6 @@ package server
import (
"errors"
"io"
"log"
"net"
"net/http"
"path"
@@ -17,11 +16,11 @@ import (
"time"
"github.com/etix/stoppableListener"
log "github.com/golang/glog"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/backend"
"github.com/chihaya/chihaya/storage/tracker"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
)
// Server represents BitTorrent tracker server.
@@ -53,11 +52,6 @@ func New(conf *config.Config) (*Server, error) {
return nil, err
}
err = backendConn.Start()
if err != nil {
return nil, err
}
s := &Server{
conf: conf,
trackerPool: trackerPool,
@@ -130,42 +124,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func fail(err error, w http.ResponseWriter, r *http.Request) {
errmsg := err.Error()
log.Println("handled failure: " + errmsg)
msg := "d14:failure reason" + strconv.Itoa(len(errmsg)) + ":" + errmsg + "e"
length, _ := io.WriteString(w, msg)
w.Header().Add("Content-Length", string(length))
log.V(2).Infof("chihaya: handled failure: %s from %s ", errmsg, r.RemoteAddr)
w.(http.Flusher).Flush()
}
func validateUser(conn tracker.Conn, dir string) (*storage.User, error) {
if len(dir) != 34 {
return nil, errors.New("passkey is invalid")
}
passkey := dir[1:33]
user, exists, err := conn.FindUser(passkey)
if err != nil {
log.Panicf("server: %s", err)
}
if !exists {
return nil, errors.New("user not found")
}
return user, nil
}
// parsePeerID returns the clientID for a given peerID.
func parsePeerID(peerID string) (clientID string) {
length := len(peerID)
if length >= 6 {
if peerID[0] == '-' {
if length >= 7 {
clientID = peerID[1:7]
}
} else {
clientID = peerID[0:6]
}
}
return
}

View File

@@ -5,62 +5,14 @@
package server
import (
"testing"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
)
// PeerClientPair pairs a raw peer ID with the client ID expected to be
// parsed out of it.
type PeerClientPair struct {
	peerID   string
	clientID string
}

// TestClients maps real-world peer IDs (Azureus-style, Shad0w-style,
// and assorted nonstandard clients) to their expected client IDs,
// including edge cases shorter than a full client ID.
var TestClients = []PeerClientPair{
	{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
	{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
	{"-BS5820-oy4La2MWGEFj", "BS5820"},
	{"-AR6360-6oZyyMWoOOBe", "AR6360"},
	{"-AG2083-s1hiF8vGAAg0", "AG2083"},
	{"-AG3003-lEl2Mm4NEO4n", "AG3003"},
	{"-MR1100-00HS~T7*65rm", "MR1100"},
	{"-LK0140-ATIV~nbEQAMr", "LK0140"},
	{"-KT2210-347143496631", "KT2210"},
	{"-TR0960-6ep6svaa61r4", "TR0960"},
	{"-XX1150-dv220cotgj4d", "XX1150"},
	{"-AZ2504-192gwethivju", "AZ2504"},
	{"-KT4310-3L4UvarKuqIu", "KT4310"},
	{"-AZ2060-0xJQ02d4309O", "AZ2060"},
	{"-BD0300-2nkdf08Jd890", "BD0300"},
	{"-A~0010-a9mn9DFkj39J", "A~0010"},
	{"-UT2300-MNu93JKnm930", "UT2300"},
	{"-UT2300-KT4310KT4301", "UT2300"},
	{"T03A0----f089kjsdf6e", "T03A0-"},
	{"S58B-----nKl34GoNb75", "S58B--"},
	{"M4-4-0--9aa757Efd5Bl", "M4-4-0"},
	{"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant
	{"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet
	{"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet
	{"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT
	{"OP1011affbecbfabeefb", "OP1011"}, // Opera
	{"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey
	{"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels
	{"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee
	{"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt
	{"346------SDFknl33408", "346---"}, // TorreTopia
	{"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod
	{"", ""},
	{"-", ""},
	{"12345", ""},
	{"-12345", ""},
	{"123456", "123456"},
	{"-123456", "123456"},
}
func TestParseClientID(t *testing.T) {
for _, pair := range TestClients {
if parsedID := parsePeerID(pair.peerID); parsedID != pair.clientID {
t.Error("Incorrectly parsed peer ID", pair.peerID, "as", parsedID)
}
}
// Primer represents a function that can prime drivers with data.
type Primer func(tracker.Pool, backend.Conn) error

// Prime executes a priming function on the server, handing it the
// server's tracker pool and backend connection.
func (s *Server) Prime(p Primer) error {
	return p(s.trackerPool, s.backendConn)
}