Move workers into server dir
server/IngestLogsWorker.js (new file, 265 lines)
@@ -0,0 +1,265 @@
require('isomorphic-fetch')
const parseURL = require('url').parse
const invariant = require('invariant')
const gunzip = require('gunzip-maybe')
const ndjson = require('ndjson')
const redis = require('redis')

const CloudflareEmail = process.env.CLOUDFLARE_EMAIL
const CloudflareKey = process.env.CLOUDFLARE_KEY
const RedisURL = process.env.REDIS_URL

invariant(
  CloudflareEmail,
  'Missing the $CLOUDFLARE_EMAIL environment variable'
)

invariant(
  CloudflareKey,
  'Missing the $CLOUDFLARE_KEY environment variable'
)

invariant(
  RedisURL,
  'Missing the $REDIS_URL environment variable'
)

/**
 * Domains we want to analyze.
 */
const DomainNames = [
  'unpkg.com'
  //'npmcdn.com' // We don't have log data on npmcdn.com yet :/
]

/**
 * The window of time to download in a single fetch.
 */
const LogWindowSeconds = 30

const db = redis.createClient(RedisURL)

const getZones = (domain) =>
  fetch(`https://api.cloudflare.com/client/v4/zones?name=${domain}`, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey
    }
  }).then(res => res.json())
    .then(data => data.result)

const getLogs = (zoneId, startTime, endTime) =>
  fetch(`https://api.cloudflare.com/client/v4/zones/${zoneId}/logs/requests?start=${startTime}&end=${endTime}`, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey,
      'Accept-Encoding': 'gzip'
    }
  }).then(res => res.body.pipe(gunzip()))
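
// The logs endpoint streams gzipped NDJSON, one JSON object per request log
// line; gunzip-maybe inflates the body here and ndjson.parse (in
// computeCounters below) splits it back into entries.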

const toSeconds = (millis) =>
  Math.floor(millis / 1000)

const stringifySeconds = (seconds) =>
  new Date(seconds * 1000).toISOString()

// TODO: Copied from express-unpkg, use the same function
const URLFormat = /^\/((?:@[^\/@]+\/)?[^\/@]+)(?:@([^\/]+))?(\/.*)?$/

const getPackageName = (pathname) => {
  const match = URLFormat.exec(pathname)
  return match && match[1]
}
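
// For reference, URLFormat accepts bare and scoped package URLs, e.g.
//   getPackageName('/react@16.0.0/umd/react.js')      === 'react'
//   getPackageName('/@babel/core@7.0.0/lib/index.js') === '@babel/core'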

const oneSecond = 1000
const oneMinute = oneSecond * 60
const oneHour = oneMinute * 60

const computeCounters = (stream) =>
  new Promise((resolve, reject) => {
    const counters = {}

    const incrCounter = (counterName, by = 1) =>
      counters[counterName] = (counters[counterName] || 0) + by

    const incrCounterMember = (counterName, member, by = 1) => {
      counters[counterName] = counters[counterName] || {}
      counters[counterName][member] = (counters[counterName][member] || 0) + by
    }
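
    // counters ends up mixing both shapes, e.g. (illustrative values):
    //   {
    //     'stats-edgeRequests-<dayKey>': 120,
    //     'stats-packageRequests-<dayKey>': { react: 80, lodash: 40 }
    //   }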

    stream
      .pipe(ndjson.parse())
      .on('error', reject)
      .on('data', entry => {
        const date = new Date(Math.round(entry.timestamp / 1000000))
        const dayKey = `${date.getUTCFullYear()}-${date.getUTCMonth()}-${date.getUTCDate()}`
        const hourKey = `${dayKey}-${date.getUTCHours()}`
        // const minuteKey = `${hourKey}-${date.getUTCMinutes()}`
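
        // Note: entry.timestamp is in nanoseconds (hence / 1000000 to get
        // milliseconds), and getUTCMonth() is zero-based, so these keys use
        // month values 0-11.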

        // Q: How many requests do we receive per day/hour/minute?
        // incrCounter(`stats-requests-${dayKey}`) // Done by ingest_stats worker
        // incrCounter(`stats-requests-${hourKey}`) // Done by ingest_stats worker
        // incrCounter(`stats-requests-${minuteKey}`) // Done by ingest_stats worker

        // Q: How many requests are served by origin/cache/edge per day/hour?
        if (entry.origin) {
          incrCounter(`stats-originRequests-${dayKey}`)
          incrCounter(`stats-originRequests-${hourKey}`)
        } else if (entry.cache) {
          incrCounter(`stats-cacheRequests-${dayKey}`)
          incrCounter(`stats-cacheRequests-${hourKey}`)
        } else {
          incrCounter(`stats-edgeRequests-${dayKey}`)
          incrCounter(`stats-edgeRequests-${hourKey}`)
        }

        const clientRequest = entry.clientRequest
        const edgeResponse = entry.edgeResponse

        // Q: How many requests do we receive for a package per day?
        // Q: How many bytes do we serve for a package per day?
        const uri = clientRequest.uri
        const packageName = getPackageName(parseURL(uri).pathname)

        if (packageName) {
          incrCounterMember(`stats-packageRequests-${dayKey}`, packageName)
          incrCounterMember(`stats-packageBytes-${dayKey}`, packageName, edgeResponse.bytes)
        }

        // Q: How many requests per day do we receive via each protocol?
        const protocol = clientRequest.httpProtocol

        if (protocol)
          incrCounterMember(`stats-protocolRequests-${dayKey}`, protocol)

        // Q: How many requests do we receive from a hostname per day?
        // Q: How many bytes do we serve to a hostname per day?
        const referer = clientRequest.referer
        const hostname = referer && parseURL(referer).hostname

        if (hostname) {
          incrCounterMember(`stats-hostnameRequests-${dayKey}`, hostname)
          incrCounterMember(`stats-hostnameBytes-${dayKey}`, hostname, edgeResponse.bytes)
        }
      })
      .on('end', () => {
        resolve(counters)
      })
  })

const processLogs = (stream) =>
  computeCounters(stream).then(counters => {
    Object.keys(counters).forEach(key => {
      const value = counters[key]

      if (typeof value === 'number') {
        // Simple counter.
        db.incrby(key, value)
      } else {
        // Sorted set.
        Object.keys(value).forEach(member => {
          db.zincrby(key, value[member], member)
        })
      }
    })
  })
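
// A processed window turns into Redis writes of the form (illustrative):
//   INCRBY stats-edgeRequests-<dayKey> 120
//   ZINCRBY stats-packageRequests-<dayKey> 80 react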

const ingestLogs = (zone, startSeconds, endSeconds) =>
  new Promise(resolve => {
    console.log(
      'LOG: start ingesting logs for %s from %s to %s',
      zone.name,
      stringifySeconds(startSeconds),
      stringifySeconds(endSeconds)
    )

    const startFetchTime = Date.now()

    resolve(
      getLogs(zone.id, startSeconds, endSeconds).then(stream => {
        const endFetchTime = Date.now()

        console.log(
          'LOG: fetched %ds worth of logs for %s in %dms',
          endSeconds - startSeconds,
          zone.name,
          endFetchTime - startFetchTime
        )

        const startProcessTime = Date.now()

        return processLogs(stream).then(() => {
          const endProcessTime = Date.now()

          console.log(
            'LOG: processed %ds worth of logs for %s in %dms',
            endSeconds - startSeconds,
            zone.name,
            endProcessTime - startProcessTime
          )
        })
      })
    )
  })

const startZone = (zone) => {
  const startSecondsKey = `ingestLogsWorker-nextStartSeconds-${zone.name.replace('.', '-')}`
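  // e.g. "ingestLogsWorker-nextStartSeconds-unpkg-com"; note that
  // String#replace with a string pattern only replaces the first ".",
  // which suffices for these domains.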

  const takeATurn = () => {
    db.get(startSecondsKey, (error, value) => {
      let startSeconds = value && parseInt(value, 10)

      const now = Date.now()

      // Cloudflare keeps logs around for 72 hours.
      // https://support.cloudflare.com/hc/en-us/articles/216672448-Enterprise-Log-Share-REST-API
      const minSeconds = toSeconds(now - oneHour * 72)

      if (startSeconds == null) {
        startSeconds = minSeconds
      } else if (startSeconds < minSeconds) {
        console.warn(
          'WARNING: dropped logs for %s from %s to %s!',
          zone.name,
          stringifySeconds(startSeconds),
          stringifySeconds(minSeconds)
        )

        startSeconds = minSeconds
      }

      // The log for a request is typically available within thirty (30) minutes
      // of the request taking place under normal conditions. We deliver logs
      // ordered by the time that the logs were created, i.e. the timestamp of
      // the request when it was received by the edge. Given the order of
      // delivery, we recommend waiting a full thirty minutes to ingest a full
      // set of logs. This will help ensure that any congestion in the log
      // pipeline has passed and a full set of logs can be ingested.
      // https://support.cloudflare.com/hc/en-us/articles/216672448-Enterprise-Log-Share-REST-API
      const maxSeconds = toSeconds(now - (oneMinute * 30))

      if (startSeconds < maxSeconds) {
        const endSeconds = startSeconds + LogWindowSeconds

        ingestLogs(zone, startSeconds, endSeconds).then(() => {
          db.set(startSecondsKey, endSeconds)
          setTimeout(takeATurn)
        }, error => {
          console.error(error.stack)
          process.exit(1)
        })
      } else {
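        // Caught up: sleep until the next window is at least 30 minutes old.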
        setTimeout(takeATurn, (startSeconds - maxSeconds) * 1000)
      }
    })
  }

  takeATurn()
}

Promise.all(DomainNames.map(getZones)).then(results => {
  const zones = results.reduce((memo, zones) => memo.concat(zones))
  zones.forEach(startZone)
})

server/IngestStatsWorker.js (new file, 304 lines)
@@ -0,0 +1,304 @@
require('isomorphic-fetch')
const redis = require('redis')
const invariant = require('invariant')
const {
  createDayKey,
  createHourKey,
  createMinuteKey
} = require('./StatsServer')

const CloudflareEmail = process.env.CLOUDFLARE_EMAIL
const CloudflareKey = process.env.CLOUDFLARE_KEY
const RedisURL = process.env.REDIS_URL

invariant(
  CloudflareEmail,
  'Missing the $CLOUDFLARE_EMAIL environment variable'
)

invariant(
  CloudflareKey,
  'Missing the $CLOUDFLARE_KEY environment variable'
)

invariant(
  RedisURL,
  'Missing the $REDIS_URL environment variable'
)

/**
 * Domains we want to analyze.
 */
const DomainNames = [
  'unpkg.com',
  'npmcdn.com'
]

const db = redis.createClient(RedisURL)

const getZones = (domain) =>
  fetch(`https://api.cloudflare.com/client/v4/zones?name=${domain}`, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey
    }
  }).then(res => res.json())
    .then(data => data.result)

const getZoneAnalyticsDashboard = (zoneId, since) =>
  fetch(`https://api.cloudflare.com/client/v4/zones/${zoneId}/analytics/dashboard?since=${since}&continuous=true`, {
    method: 'GET',
    headers: {
      'X-Auth-Email': CloudflareEmail,
      'X-Auth-Key': CloudflareKey
    }
  }).then(res => res.json())
    .then(data => data.result)

const oneSecond = 1000
const oneMinute = oneSecond * 60
const oneHour = oneMinute * 60
const oneDay = oneHour * 24

const oneMinuteSeconds = 60
const oneHourSeconds = oneMinuteSeconds * 60
const oneDaySeconds = oneHourSeconds * 24

const reduceResults = (memo, results) => {
  Object.keys(results).forEach(key => {
    const value = results[key]

    if (typeof value === 'object' && value) {
      memo[key] = reduceResults(memo[key] || {}, value)
    } else if (typeof value === 'number') {
      memo[key] = (memo[key] || 0) + results[key]
    }
  })

  return memo
}
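
// reduceResults deep-merges numeric leaves, e.g.
//   reduceResults({ requests: { all: 1 } }, { requests: { all: 2, cached: 1 } })
//   => { requests: { all: 3, cached: 1 } }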

const ingestStatsForZones = (zones, since, processDashboard) =>
  new Promise(resolve => {
    const zoneNames = zones.map(zone => zone.name).join(', ')

    console.log(
      'LOG: start ingesting stats for zones %s since %d',
      zoneNames,
      since
    )

    const startFetchTime = Date.now()

    resolve(
      Promise.all(
        zones.map(zone => getZoneAnalyticsDashboard(zone.id, since))
      ).then(
        results => {
          const endFetchTime = Date.now()

          console.log(
            'LOG: fetched zone analytics dashboards for %s since %d in %dms',
            zoneNames,
            since,
            endFetchTime - startFetchTime
          )

          // We don't have per-minute dashboards available for npmcdn.com yet,
          // so the dashboard for that domain will be null when querying for
          // per-minute data. Just filter it out here for now.
          results = results.filter(Boolean)

          return results.length ? results.reduce(reduceResults) : null
        }
      ).then(
        dashboard => {
          if (dashboard == null) {
            console.warn(
              'WARNING: missing dashboards for %s since %d',
              zoneNames,
              since
            )

            return
          }

          const startProcessTime = Date.now()

          return processDashboard(dashboard).then(() => {
            const endProcessTime = Date.now()

            console.log(
              'LOG: processed zone analytics dashboards for %s since %d in %dms',
              zoneNames,
              since,
              endProcessTime - startProcessTime
            )
          })
        }
      )
    )
  })
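
// Cloudflare's analytics API interprets a negative `since` as minutes back
// from now: -10080 covers the past 7 days, -1440 the past day, and -30 the
// past 30 minutes.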

const ingestPerDayStats = (zones) =>
  ingestStatsForZones(zones, -10080, processPerDayDashboard)

const processPerDayDashboard = (dashboard) =>
  Promise.all(dashboard.timeseries.map(processPerDayTimeseries))

const processPerDayTimeseries = (ts) =>
  new Promise(resolve => {
    const since = new Date(ts.since)
    const until = new Date(ts.until)

    invariant(
      since.getUTCHours() === 0 && since.getUTCMinutes() === 0 && since.getUTCSeconds() === 0,
      'ERROR: per-day timeseries.since must begin exactly on the day'
    )

    invariant(
      (until - since) === oneDay,
      'ERROR: per-day timeseries must span exactly one day'
    )

    const dayKey = createDayKey(since)

    // Q: How many requests do we serve per day?
    db.set(`stats-requests-${dayKey}`, ts.requests.all)
    // Q: How many requests do we serve per day from the cache?
    db.set(`stats-requestsFromCache-${dayKey}`, ts.requests.cached)

    // Q: How much bandwidth do we serve per day?
    db.set(`stats-bandwidth-${dayKey}`, ts.bandwidth.all)
    // Q: How much bandwidth do we serve per day from the cache?
    db.set(`stats-bandwidthFromCache-${dayKey}`, ts.bandwidth.cached)

    // Q: How many errors do we serve per day?
    const httpStatus = ts.requests.http_status
    const errors = Object.keys(httpStatus).reduce((memo, status) => {
      return parseInt(status, 10) >= 500 ? memo + httpStatus[status] : memo
    }, 0)
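    // e.g. httpStatus { '200': 9000, '404': 50, '502': 3, '503': 1 } => errors === 4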

    db.set(`stats-errors-${dayKey}`, errors)

    // Q: How many requests do we serve to a country per day?
    // Q: How much bandwidth do we serve to a country per day?
    const requestsByCountry = []
    const bandwidthByCountry = []
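    // Both arrays collect interleaved score/member pairs, matching ZADD's
    // argument order: key score member [score member ...]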

    Object.keys(ts.requests.country).forEach(country => {
      const requests = ts.requests.country[country]
      const bandwidth = ts.bandwidth.country[country]

      // Include only countries that made more than 1M requests.
      if (requests > 1000000) {
        requestsByCountry.push(requests, country)
        bandwidthByCountry.push(bandwidth, country)
      }
    })

    if (requestsByCountry.length)
      db.zadd([ `stats-requestsByCountry-${dayKey}`, ...requestsByCountry ])

    if (bandwidthByCountry.length)
      db.zadd([ `stats-bandwidthByCountry-${dayKey}`, ...bandwidthByCountry ])

    resolve()
  })

const ingestPerHourStats = (zones) =>
  ingestStatsForZones(zones, -1440, processPerHourDashboard)

const processPerHourDashboard = (dashboard) =>
  Promise.all(dashboard.timeseries.map(processPerHourTimeseries))

const processPerHourTimeseries = (ts) =>
  new Promise(resolve => {
    const since = new Date(ts.since)
    const until = new Date(ts.until)

    invariant(
      since.getUTCMinutes() === 0 && since.getUTCSeconds() === 0,
      'ERROR: per-hour timeseries.since must begin exactly on the hour'
    )

    invariant(
      (until - since) === oneHour,
      'ERROR: per-hour timeseries must span exactly one hour'
    )

    const hourKey = createHourKey(since)

    // Q: How many requests do we serve per hour? (expire after 7 days)
    db.setex(`stats-requests-${hourKey}`, (oneDaySeconds * 7), ts.requests.all)
    // Q: How many requests do we serve per hour from the cache? (expire after 7 days)
    db.setex(`stats-requestsFromCache-${hourKey}`, (oneDaySeconds * 7), ts.requests.cached)

    // Q: How much bandwidth do we serve per hour? (expire after 7 days)
    db.setex(`stats-bandwidth-${hourKey}`, (oneDaySeconds * 7), ts.bandwidth.all)
    // Q: How much bandwidth do we serve per hour from the cache? (expire after 7 days)
    db.setex(`stats-bandwidthFromCache-${hourKey}`, (oneDaySeconds * 7), ts.bandwidth.cached)

    resolve()
  })

const ingestPerMinuteStats = (zones) =>
  ingestStatsForZones(zones, -30, processPerMinuteDashboard)

const processPerMinuteDashboard = (dashboard) =>
  Promise.all(dashboard.timeseries.map(processPerMinuteTimeseries))

const processPerMinuteTimeseries = (ts) =>
  new Promise(resolve => {
    const since = new Date(ts.since)
    const until = new Date(ts.until)

    invariant(
      since.getUTCSeconds() === 0,
      'ERROR: per-minute timeseries.since must begin exactly on the minute'
    )

    invariant(
      (until - since) === oneMinute,
      'ERROR: per-minute timeseries must span exactly one minute'
    )

    const minuteKey = createMinuteKey(since)

    // Q: How many requests do we serve per minute? (expire after 1 day)
    db.setex(`stats-requests-${minuteKey}`, oneDaySeconds, ts.requests.all)
    // Q: How many requests do we serve per minute from the cache? (expire after 1 day)
    db.setex(`stats-requestsFromCache-${minuteKey}`, oneDaySeconds, ts.requests.cached)

    // Q: How much bandwidth do we serve per minute? (expire after 1 day)
    db.setex(`stats-bandwidth-${minuteKey}`, oneDaySeconds, ts.bandwidth.all)
    // Q: How much bandwidth do we serve per minute from the cache? (expire after 1 day)
    db.setex(`stats-bandwidthFromCache-${minuteKey}`, oneDaySeconds, ts.bandwidth.cached)

    resolve()
  })

const startZones = (zones) => {
  const takePerMinuteTurn = () =>
    ingestPerMinuteStats(zones)

  const takePerHourTurn = () =>
    ingestPerHourStats(zones)

  const takePerDayTurn = () =>
    ingestPerDayStats(zones)

  takePerMinuteTurn()
  takePerHourTurn()
  takePerDayTurn()

  setInterval(takePerMinuteTurn, oneMinute)
  setInterval(takePerHourTurn, oneHour / 2)
  setInterval(takePerDayTurn, oneHour / 2)
}

Promise.all(DomainNames.map(getZones)).then(results => {
  const zones = results.reduce((memo, zones) => memo.concat(zones))
  startZones(zones)
})