bittorrent-tracker/lib/client/http-tracker.js

const arrayRemove = require('unordered-array-remove')
const bencode = require('bencode')
const compact2string = require('compact2string')
const debug = require('debug')('bittorrent-tracker:http-tracker')
const get = require('simple-get')
const common = require('../common')
const Tracker = require('./tracker')
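// If the last path component of the announce url starts with 'announce', the
// tracker is assumed to follow the standard scrape convention: the same url
// with 'announce' swapped for 'scrape' serves scrape requests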
const HTTP_SCRAPE_SUPPORT = /\/(announce)[^/]*$/
/**
 * HTTP torrent tracker client (for an individual tracker)
 *
 * @param {Client} client parent bittorrent tracker client
 * @param {string} announceUrl announce url of tracker
 * @param {Object} opts options object
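 *
 * @example
 * // Hypothetical usage sketch: `client` is the parent Client, which must
 * // provide _infoHashBinary, _peerIdBinary and _port; 'update', 'peer',
 * // 'scrape' and 'warning' events are emitted on `client`, not on the
 * // tracker instance. The tracker URL below is made up.
 * const tracker = new HTTPTracker(client, 'http://tracker.example.com/announce')
 * tracker.announce({ numwant: 50 })
 * tracker.scrape({})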
 */
class HTTPTracker extends Tracker {
  constructor (client, announceUrl, opts) {
    super(client, announceUrl)

    const self = this
    debug('new http tracker %s', announceUrl)

    // Determine scrape url (if http tracker supports it)
    self.scrapeUrl = null

    const match = self.announceUrl.match(HTTP_SCRAPE_SUPPORT)
    if (match) {
      const pre = self.announceUrl.slice(0, match.index)
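      // 9 === '/announce'.length; keep whatever follows it (e.g. '.php' or a
      // '?passkey=...' suffix) so it carries over to the scrape url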
      const post = self.announceUrl.slice(match.index + 9)
      self.scrapeUrl = `${pre}/scrape${post}`
    }

    self.cleanupFns = []
    self.maybeDestroyCleanup = null
  }

  announce (opts) {
    const self = this
    if (self.destroyed) return

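    // Ask for a compact (BEP 23) peer list by default, and echo back the
    // tracker id from a previous response if we have one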
    const params = Object.assign({}, opts, {
      compact: (opts.compact == null) ? 1 : opts.compact,
      info_hash: self.client._infoHashBinary,
      peer_id: self.client._peerIdBinary,
      port: self.client._port
    })
    if (self._trackerId) params.trackerid = self._trackerId

    self._request(self.announceUrl, params, (err, data) => {
      if (err) return self.client.emit('warning', err)
      self._onAnnounceResponse(data)
    })
  }

  scrape (opts) {
    const self = this
    if (self.destroyed) return

    if (!self.scrapeUrl) {
      self.client.emit('error', new Error(`scrape not supported ${self.announceUrl}`))
      return
    }

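    // A scrape request may carry multiple info_hash params at once, so accept
    // either a single infoHash or an array of them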
    const infoHashes = (Array.isArray(opts.infoHash) && opts.infoHash.length > 0)
      ? opts.infoHash.map(infoHash => infoHash.toString('binary'))
      : (opts.infoHash && opts.infoHash.toString('binary')) || self.client._infoHashBinary
    const params = {
      info_hash: infoHashes
    }

    self._request(self.scrapeUrl, params, (err, data) => {
      if (err) return self.client.emit('warning', err)
      self._onScrapeResponse(data)
    })
  }

  destroy (cb) {
    const self = this
    if (self.destroyed) return cb(null)
    self.destroyed = true
    clearInterval(self.interval)

    // If there are no pending requests, destroy immediately.
    if (self.cleanupFns.length === 0) return destroyCleanup()

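    // Note: `var` (not `let`) matters for `timeout` below. The early return
    // above can invoke destroyCleanup() before the assignment runs; with
    // `var` hoisting, `timeout` is simply undefined there rather than a
    // ReferenceError.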
    // Otherwise, wait a short time for pending requests to complete, then
    // force destroy them.
    var timeout = setTimeout(destroyCleanup, common.DESTROY_TIMEOUT)

    // But, if all pending requests complete before the timeout fires, do
    // cleanup right away.
    self.maybeDestroyCleanup = () => {
      if (self.cleanupFns.length === 0) destroyCleanup()
    }

    function destroyCleanup () {
      if (timeout) {
        clearTimeout(timeout)
        timeout = null
      }
      self.maybeDestroyCleanup = null
      self.cleanupFns.slice(0).forEach(cleanup => {
        cleanup()
      })
      self.cleanupFns = []
      cb(null)
    }
  }

  _request (requestUrl, params, cb) {
    const self = this
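    // Build the query string with common.querystringStringify (rather than
    // Node's built-in querystring) so the raw binary info_hash / peer_id
    // values survive percent-encoding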
    const u = requestUrl + (!requestUrl.includes('?') ? '?' : '&') +
      common.querystringStringify(params)
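    // Register an abort handler so destroy() can cancel this request while it
    // is still in flight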
    self.cleanupFns.push(cleanup)
    let request = get.concat({
      url: u,
      timeout: common.REQUEST_TIMEOUT,
      headers: {
        'user-agent': self.client._userAgent || ''
      }
    }, onResponse)

    function cleanup () {
      if (request) {
        arrayRemove(self.cleanupFns, self.cleanupFns.indexOf(cleanup))
        request.abort()
        request = null
      }
      if (self.maybeDestroyCleanup) self.maybeDestroyCleanup()
    }

    function onResponse (err, res, data) {
      cleanup()
      if (self.destroyed) return
      if (err) return cb(err)
      if (res.statusCode !== 200) {
        return cb(new Error(`Non-200 response code ${res.statusCode} from ${self.announceUrl}`))
      }
      if (!data || data.length === 0) {
        return cb(new Error(`Invalid tracker response from ${self.announceUrl}`))
      }
      try {
        data = bencode.decode(data)
      } catch (err) {
        return cb(new Error(`Error decoding tracker response: ${err.message}`))
      }
      const failure = data['failure reason']
      if (failure) {
        debug(`failure from ${requestUrl} (${failure})`)
        return cb(new Error(failure))
      }
      const warning = data['warning message']
      if (warning) {
        debug(`warning from ${requestUrl} (${warning})`)
        self.client.emit('warning', new Error(warning))
      }
      debug(`response from ${requestUrl}`)
      cb(null, data)
    }
  }

  _onAnnounceResponse (data) {
    const self = this

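    // Trackers report the re-announce interval in seconds; setInterval()
    // expects milliseconds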
    const interval = data.interval || data['min interval']
    if (interval) self.setInterval(interval * 1000)

    const trackerId = data['tracker id']
    if (trackerId) {
      // If absent, do not discard previous trackerId value
      self._trackerId = trackerId
    }

    const response = Object.assign({}, data, {
      announce: self.announceUrl,
      infoHash: common.binaryToHex(data.info_hash)
    })

    self.client.emit('update', response)

    let addrs
    if (Buffer.isBuffer(data.peers)) {
      // tracker returned compact response
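      // (BEP 23: 6 bytes per peer, 4-byte IPv4 address + 2-byte port)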
      try {
        addrs = compact2string.multi(data.peers)
      } catch (err) {
        return self.client.emit('warning', err)
      }
      addrs.forEach(addr => {
        self.client.emit('peer', addr)
      })
    } else if (Array.isArray(data.peers)) {
      // tracker returned normal response
      data.peers.forEach(peer => {
        self.client.emit('peer', `${peer.ip}:${peer.port}`)
      })
    }

    if (Buffer.isBuffer(data.peers6)) {
      // tracker returned compact response
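      // (BEP 7: 18 bytes per peer, 16-byte IPv6 address + 2-byte port)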
      try {
        addrs = compact2string.multi6(data.peers6)
      } catch (err) {
        return self.client.emit('warning', err)
      }
      addrs.forEach(addr => {
        self.client.emit('peer', addr)
      })
    } else if (Array.isArray(data.peers6)) {
      // tracker returned normal response
      data.peers6.forEach(peer => {
        const ip = /^\[/.test(peer.ip) || !/:/.test(peer.ip)
          ? peer.ip /* ipv6 w/ brackets or domain name */
          : `[${peer.ip}]` /* ipv6 without brackets */
        self.client.emit('peer', `${ip}:${peer.port}`)
      })
    }
  }

  _onScrapeResponse (data) {
    const self = this
    // NOTE: the unofficial spec says to use the 'files' key; 'host' has been
    // seen in practice
    data = data.files || data.host || {}

    const keys = Object.keys(data)
    if (keys.length === 0) {
      self.client.emit('warning', new Error('invalid scrape response'))
      return
    }

    keys.forEach(infoHash => {
      // TODO: optionally handle data.flags.min_request_interval
      // (separate from announce interval)
      const response = Object.assign(data[infoHash], {
        announce: self.announceUrl,
        infoHash: common.binaryToHex(infoHash)
      })
      self.client.emit('scrape', response)
    })
  }
}
HTTPTracker.prototype.DEFAULT_ANNOUNCE_INTERVAL = 30 * 60 * 1000 // 30 minutes
module.exports = HTTPTracker