// unpkg/server/IngestLogsWorker.js
//
// Worker that continuously ingests Cloudflare Enterprise Log Share data
// for the unpkg domains and aggregates request statistics into Redis.
require('isomorphic-fetch')
const parseURL = require('url').parse
const invariant = require('invariant')
const gunzip = require('gunzip-maybe')
const ndjson = require('ndjson')
const redis = require('redis')

// Required configuration, supplied via the environment. The invariant
// calls below make the worker fail fast at boot if any is missing.
const CloudflareEmail = process.env.CLOUDFLARE_EMAIL
const CloudflareKey = process.env.CLOUDFLARE_KEY
const RedisURL = process.env.REDIS_URL

invariant(
  CloudflareEmail,
  'Missing the $CLOUDFLARE_EMAIL environment variable'
)

invariant(
  CloudflareKey,
  'Missing the $CLOUDFLARE_KEY environment variable'
)

invariant(
  RedisURL,
  'Missing the $REDIS_URL environment variable'
)
/**
 * Domains we want to analyze.
 */
const DomainNames = [
  'unpkg.com'
  //'npmcdn.com' // We don't have log data on npmcdn.com yet :/
]

/**
 * The window of time to download in a single fetch, in seconds.
 */
const LogWindowSeconds = 30

// Shared Redis client used by the whole worker.
const db = redis.createClient(RedisURL)
/**
 * Looks up the Cloudflare zone records for the given domain name.
 * Resolves with the `result` array from the Cloudflare API response.
 */
const getZones = (domain) => {
  const url = `https://api.cloudflare.com/client/v4/zones?name=${domain}`

  return fetch(url, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey
    }
  })
    .then(res => res.json())
    .then(data => data.result)
}
/**
 * Downloads the raw request logs for a zone between the given start and
 * end times. Resolves with a readable stream of uncompressed ndjson.
 */
const getLogs = (zoneId, startTime, endTime) => {
  const url = `https://api.cloudflare.com/client/v4/zones/${zoneId}/logs/requests?start=${startTime}&end=${endTime}`

  return fetch(url, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey,
      'Accept-Encoding': 'gzip'
    }
  }).then(res => res.body.pipe(gunzip()))
}
/**
 * Converts a millisecond timestamp to whole seconds, rounding down.
 */
function toSeconds(millis) {
  return Math.floor(millis / 1000)
}
/**
 * Formats a seconds-based UNIX timestamp as an ISO 8601 string.
 */
function stringifySeconds(seconds) {
  const millis = seconds * 1000
  return new Date(millis).toISOString()
}
// TODO: Copied from express-unpkg, use the same function
const URLFormat = /^\/((?:@[^\/@]+\/)?[^\/@]+)(?:@([^\/]+))?(\/.*)?$/

/**
 * Extracts the (possibly scoped) package name from a URL pathname,
 * e.g. "/react@16.0.0/index.js" => "react". Returns null when the
 * pathname does not look like a package URL.
 */
function getPackageName(pathname) {
  const match = URLFormat.exec(pathname)
  return match ? match[1] : null
}
// Durations in milliseconds, used when computing log-window boundaries.
const oneSecond = 1000
const oneMinute = 60 * oneSecond
const oneHour = 60 * oneMinute
/**
 * Consumes a stream of ndjson Cloudflare log entries and aggregates them
 * into an object of counters. Simple counters map name => number; member
 * counters map name => { member => count } and are persisted later as
 * Redis sorted sets. Resolves with the counters object when the stream
 * ends; rejects on a parse error.
 */
const computeCounters = (stream) =>
  new Promise((resolve, reject) => {
    const counters = {}

    const incrCounter = (counterName, by = 1) =>
      counters[counterName] = (counters[counterName] || 0) + by

    const incrCounterMember = (counterName, member, by = 1) => {
      counters[counterName] = counters[counterName] || {}
      counters[counterName][member] = (counters[counterName][member] || 0) + by
    }

    stream
      .pipe(ndjson.parse())
      .on('error', reject)
      .on('data', entry => {
        // Cloudflare timestamps are in nanoseconds; convert to millis.
        const date = new Date(Math.round(entry.timestamp / 1000000))
        // NOTE(review): getUTCMonth() is 0-based, so "2017-4-24" means
        // May 24. Kept as-is because existing Redis keys use this format.
        const dayKey = `${date.getUTCFullYear()}-${date.getUTCMonth()}-${date.getUTCDate()}`
        const hourKey = `${dayKey}-${date.getUTCHours()}`
        // const minuteKey = `${hourKey}-${date.getUTCMinutes()}`

        // Q: How many requests do we receive per day/hour/minute?
        // incrCounter(`stats-requests-${dayKey}`) // Done by ingest_stats worker
        // incrCounter(`stats-requests-${hourKey}`) // Done by ingest_stats worker
        // incrCounter(`stats-requests-${minuteKey}`) // Done by ingest_stats worker

        // Q: How many requests are served by origin/cache/edge per day/hour?
        if (entry.origin) {
          incrCounter(`stats-originRequests-${dayKey}`)
          incrCounter(`stats-originRequests-${hourKey}`)
        } else if (entry.cache) {
          incrCounter(`stats-cacheRequests-${dayKey}`)
          incrCounter(`stats-cacheRequests-${hourKey}`)
        } else {
          incrCounter(`stats-edgeRequests-${dayKey}`)
          incrCounter(`stats-edgeRequests-${hourKey}`)
        }

        const clientRequest = entry.clientRequest
        const edgeResponse = entry.edgeResponse

        // Q: How many requests do we receive for a package per day?
        // Q: How many bytes do we serve for a package per day?
        const uri = clientRequest.uri
        // Renamed from `package`: that identifier is a future reserved
        // word and breaks in strict mode / ES modules.
        const packageName = getPackageName(parseURL(uri).pathname)

        if (packageName) {
          incrCounterMember(`stats-packageRequests-${dayKey}`, packageName)
          incrCounterMember(`stats-packageBytes-${dayKey}`, packageName, edgeResponse.bytes)
        }

        // Q: How many requests per day do we receive via each protocol?
        const protocol = clientRequest.httpProtocol

        if (protocol)
          incrCounterMember(`stats-protocolRequests-${dayKey}`, protocol)

        // Q: How many requests do we receive from a hostname per day?
        // Q: How many bytes do we serve to a hostname per day?
        const referer = clientRequest.referer
        const hostname = referer && parseURL(referer).hostname

        if (hostname) {
          incrCounterMember(`stats-hostnameRequests-${dayKey}`, hostname)
          incrCounterMember(`stats-hostnameBytes-${dayKey}`, hostname, edgeResponse.bytes)
        }
      })
      .on('end', () => {
        resolve(counters)
      })
  })
/**
 * Aggregates the counters for a log stream and flushes them to Redis:
 * numeric counters via INCRBY, member counters via ZINCRBY (sorted sets).
 */
const processLogs = (stream) =>
  computeCounters(stream).then(counters => {
    for (const key of Object.keys(counters)) {
      const value = counters[key]

      if (typeof value === 'number') {
        // Simple counter.
        db.incrby(key, value)
      } else {
        // Sorted set.
        for (const member of Object.keys(value)) {
          db.zincrby(key, value[member], member)
        }
      }
    }
  })
/**
 * Downloads and processes the logs for the given zone covering the time
 * window [startSeconds, endSeconds). Resolves when processing finishes,
 * rejects if fetching or processing fails. Rewritten without the
 * `new Promise(resolve => resolve(...))` wrapper around an existing
 * promise chain (explicit-construction anti-pattern).
 */
const ingestLogs = (zone, startSeconds, endSeconds) => {
  console.log(
    'LOG: start ingesting logs for %s from %s to %s',
    zone.name,
    stringifySeconds(startSeconds),
    stringifySeconds(endSeconds)
  )

  const startFetchTime = Date.now()

  return getLogs(zone.id, startSeconds, endSeconds).then(stream => {
    const endFetchTime = Date.now()

    console.log(
      'LOG: fetched %ds worth of logs for %s in %dms',
      endSeconds - startSeconds,
      zone.name,
      endFetchTime - startFetchTime
    )

    const startProcessTime = Date.now()

    return processLogs(stream).then(() => {
      const endProcessTime = Date.now()

      console.log(
        'LOG: processed %ds worth of logs for %s in %dms',
        endSeconds - startSeconds,
        zone.name,
        endProcessTime - startProcessTime
      )
    })
  })
}
/**
 * Starts an endless ingest loop for one zone. Each turn reads the next
 * window start time from Redis, ingests one LogWindowSeconds-sized window
 * of logs, persists the new start time, and schedules the next turn.
 * Waits when the window would be too recent; exits the process if an
 * ingest fails.
 */
const startZone = (zone) => {
  // Per-zone Redis key that tracks where the next window begins.
  const startSecondsKey = `ingestLogsWorker-nextStartSeconds-${zone.name.replace('.', '-')}`

  const takeATurn = () => {
    // NOTE(review): the db.get error argument is ignored; a Redis read
    // failure falls through as if no value were stored — confirm intended.
    db.get(startSecondsKey, (error, value) => {
      let startSeconds = value && parseInt(value, 10)

      const now = Date.now()

      // Cloudflare keeps logs around for 72 hours.
      // https://support.cloudflare.com/hc/en-us/articles/216672448-Enterprise-Log-Share-REST-API
      const minSeconds = toSeconds(now - oneHour * 72)

      if (startSeconds == null) {
        startSeconds = minSeconds
      } else if (startSeconds < minSeconds) {
        console.warn(
          'WARNING: dropped logs for %s from %s to %s!',
          zone.name,
          stringifySeconds(startSeconds),
          stringifySeconds(minSeconds)
        )

        startSeconds = minSeconds
      }

      // The log for a request is typically available within thirty (30) minutes
      // of the request taking place under normal conditions. We deliver logs
      // ordered by the time that the logs were created, i.e. the timestamp of
      // the request when it was received by the edge. Given the order of
      // delivery, we recommend waiting a full thirty minutes to ingest a full
      // set of logs. This will help ensure that any congestion in the log
      // pipeline has passed and a full set of logs can be ingested.
      // https://support.cloudflare.com/hc/en-us/articles/216672448-Enterprise-Log-Share-REST-API
      const maxSeconds = toSeconds(now - (oneMinute * 30))

      if (startSeconds < maxSeconds) {
        const endSeconds = startSeconds + LogWindowSeconds

        ingestLogs(zone, startSeconds, endSeconds).then(() => {
          // Persist progress, then immediately take another turn.
          db.set(startSecondsKey, endSeconds)
          setTimeout(takeATurn)
        }, error => {
          console.error(error.stack)
          process.exit(1)
        })
      } else {
        // Too close to "now"; wait until the window falls behind maxSeconds.
        setTimeout(takeATurn, (startSeconds - maxSeconds) * 1000)
      }
    })
  }

  takeATurn()
}
// Look up the zones for every domain and start an ingest loop for each.
Promise.all(DomainNames.map(getZones)).then(results => {
  // Flatten the per-domain zone arrays. The initial value guards against
  // a TypeError should DomainNames ever be empty (reduce with no seed
  // throws on an empty array).
  const zones = results.reduce((memo, zones) => memo.concat(zones), [])
  zones.forEach(startZone)
}).catch(error => {
  // Fail loudly at boot rather than leaving a floating rejected promise.
  console.error(error.stack)
  process.exit(1)
})