47 Commits

Author SHA1 Message Date
0da1fe064b Add some indices and hacks to the ddl 2026-01-27 11:41:56 +01:00
ad0c84d640 Handle unknown items 2026-01-27 09:25:52 +01:00
ec8a61713f Fix the 100% entirely hallucinated topic depth calls 2026-01-26 15:57:23 +01:00
8ae287f09e Make DB transient 2026-01-26 15:45:42 +01:00
e51efb4289 Run local database 2026-01-26 15:44:17 +01:00
3c6f296927 Add user to ddl 2026-01-26 15:33:56 +01:00
b5aa63928a Increase the backoff
Usually it'll be resource contention or rate limits
So we want to wait a while
2026-01-26 14:53:20 +01:00
c9abd3936c Refactor NConsumers to nsq.go 2026-01-26 14:39:19 +01:00
1fbe478378 Handle 404 and 400 with names 2026-01-26 14:36:36 +01:00
adde43563e Add db polling to readers
So we may let it run over night
2026-01-26 14:31:49 +01:00
61d2d14fb8 Implement name and solar system resolvers as well 2026-01-26 14:23:49 +01:00
02baec430c Start touching messages 2026-01-26 14:15:57 +01:00
e2ff432d57 Update NSQ configuration and improve error handling in resolvers 2026-01-26 13:52:12 +01:00
d1c0a6ace0 Run multiple consumers for resolvers 2026-01-26 13:44:54 +01:00
d9cfceaceb Resolve all the other type information 2026-01-26 13:44:05 +01:00
fb57616d0d Refactor all the constants to lnsq 2026-01-26 13:37:32 +01:00
0f926a139c Fix reading int32s for some readers 2026-01-26 13:29:15 +01:00
dc0796a547 Rename all the readers to MSB 2026-01-26 13:28:34 +01:00
d765c0335a Refactor reader and resolver to a generic interface 2026-01-26 13:24:00 +01:00
f42fb81594 Remove polling, again 2026-01-26 12:59:19 +01:00
5968f0a8bf Great use of json you clanker... 2026-01-26 12:58:04 +01:00
3cdf4036b2 lol... "completed"... 2026-01-26 12:55:51 +01:00
7b2a63b55c Don't poll clickhouse
Duplicates :(
2026-01-26 12:53:25 +01:00
f8ea0232b5 Remove the fucking signals that, again, nobody ever asked for 2026-01-26 12:51:22 +01:00
6a87a8bcda Ditch the memo for now 2026-01-26 12:47:39 +01:00
8ab2a69654 FUCK OFF with the json and remove batching from resolver 2026-01-26 12:45:07 +01:00
3db461a80a Update 2026-01-26 12:32:21 +01:00
201f24a429 Print stats on nsq consumer 2026-01-26 12:30:45 +01:00
43a49c4dbd Add character name readers 2026-01-26 12:22:28 +01:00
61a0c835b0 Fix the db pathing 2026-01-26 12:20:05 +01:00
5b015620c1 Implement item type resolver step 2026-01-26 12:11:43 +01:00
004c273958 Rip clickhouse out of db.go 2026-01-26 12:11:32 +01:00
424d2b9213 cLiEnT WAHBG JUSAGHEJWBFDRIOUASnaefhsbgktw jvskfgaehji bsšućaf 2026-01-26 12:05:03 +01:00
794f0b0d04 Let go of fucking nsqlookupd 2026-01-26 11:57:39 +01:00
b6b7b9ec56 Fix the god damn fucking nsq containers 2026-01-26 11:53:31 +01:00
ecdb41217e Add item type reader 2026-01-26 11:53:26 +01:00
de5bffe9b1 Refactor nsq and clickhouse clients to their individual packages 2026-01-26 11:51:25 +01:00
2448f76c1e Refactor connecting to nsq to separate package 2026-01-26 11:41:15 +01:00
caa1d97617 Refactor connecting to nsq 2026-01-26 11:40:08 +01:00
233c67ff00 Implement stage2 - write into clickhouse 2026-01-26 11:05:15 +01:00
0dfae49f94 Add a simple clickhouse client 2026-01-26 10:27:00 +01:00
2b79c51837 Have stage1 flatten after reading 2026-01-26 09:42:57 +01:00
1bc05dc81d Make pipelines programs on their own 2026-01-26 09:41:30 +01:00
e2e1ebe48f Hallucinate some wires 2026-01-26 09:35:12 +01:00
dfdd74f52e Hallucinate stage 1 disk reader 2026-01-26 09:32:13 +01:00
725d6b8ee3 Move shit around to separate packages 2026-01-26 09:26:53 +01:00
600fc0d7ff Rework the types to separate package 2026-01-26 09:24:39 +01:00
47 changed files with 2509 additions and 2014 deletions

View File

@@ -1,42 +1,15 @@
package main
package analytics
import (
"fmt"
"strings"
"zkillsusser/types"
)
// AnalyticsFilters represents the filter state for analytics queries
type AnalyticsFilters struct {
KillHour []uint8
KillDayOfWeek []uint8
KillDate []string
Month []string
SolarSystemID []int32
RegionName []string
ConstellationName []string
SecurityStatus []string
VictimShipTypeID []int32
VictimShipGroupName []string
VictimShipCategory []string
VictimCharacterName []string
VictimCorporation []string
VictimAlliance []string
AttackerShipType []string
AttackerShipGroup []string
AttackerCharacter []string
AttackerCorporation []string
AttackerAlliance []string
SlotType []string
HasModule *ModuleFilter
}
type ModuleFilter struct {
ModuleID int32
}
type FlatKillmailComplete struct {
FlatKillmail
Items []FlatKillmailItem `json:"items"`
types.FlatKillmail
Items []types.FlatKillmailItem `json:"items"`
}
// Time aggregation results
@@ -188,7 +161,7 @@ type ModuleCoOccurrence struct {
}
// buildWhereClause builds a WHERE clause from filters
func buildWhereClause(filters AnalyticsFilters) (string, []interface{}) {
func buildWhereClause(filters types.AnalyticsFilters) (string, []interface{}) {
var conditions []string
var args []interface{}
@@ -367,7 +340,7 @@ func buildWhereClause(filters AnalyticsFilters) (string, []interface{}) {
}
// buildAttackerWhereClause builds WHERE clause for attacker queries
func buildAttackerWhereClause(filters AnalyticsFilters) (string, []interface{}) {
func buildAttackerWhereClause(filters types.AnalyticsFilters) (string, []interface{}) {
var conditions []string
var args []interface{}
@@ -435,7 +408,7 @@ func buildAttackerWhereClause(filters AnalyticsFilters) (string, []interface{})
}
// getKillmailIDSubquery returns a subquery to filter by killmail_id from killmails table
func getKillmailIDSubquery(filters AnalyticsFilters) (string, []interface{}) {
func getKillmailIDSubquery(filters types.AnalyticsFilters) (string, []interface{}) {
whereClause, args := buildWhereClause(filters)
if whereClause == "" {
return "(SELECT killmail_id FROM zkill.killmails)", args

View File

@@ -1,15 +1,22 @@
package main
package analytics
import (
"context"
"fmt"
"strings"
"zkillsusser/types"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)
type DBWrapper struct {
ch driver.Conn
}
// Time aggregation queries
func (db *DBWrapper) QueryTimeByHour(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByHour, error) {
func (db *DBWrapper) QueryTimeByHour(ctx context.Context, filters types.AnalyticsFilters) ([]TimeAggregationByHour, error) {
flog := logger.Default.WithPrefix("QueryTimeByHour")
whereClause, args := buildWhereClause(filters)
@@ -43,7 +50,7 @@ func (db *DBWrapper) QueryTimeByHour(ctx context.Context, filters AnalyticsFilte
return results, nil
}
func (db *DBWrapper) QueryTimeByDay(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDay, error) {
func (db *DBWrapper) QueryTimeByDay(ctx context.Context, filters types.AnalyticsFilters) ([]TimeAggregationByDay, error) {
flog := logger.Default.WithPrefix("QueryTimeByDay")
whereClause, args := buildWhereClause(filters)
@@ -85,7 +92,7 @@ func (db *DBWrapper) QueryTimeByDay(ctx context.Context, filters AnalyticsFilter
return results, nil
}
func (db *DBWrapper) QueryTimeByDate(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDate, error) {
func (db *DBWrapper) QueryTimeByDate(ctx context.Context, filters types.AnalyticsFilters) ([]TimeAggregationByDate, error) {
flog := logger.Default.WithPrefix("QueryTimeByDate")
whereClause, args := buildWhereClause(filters)
@@ -119,7 +126,7 @@ func (db *DBWrapper) QueryTimeByDate(ctx context.Context, filters AnalyticsFilte
return results, nil
}
func (db *DBWrapper) QueryTimeByMonth(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByMonth, error) {
func (db *DBWrapper) QueryTimeByMonth(ctx context.Context, filters types.AnalyticsFilters) ([]TimeAggregationByMonth, error) {
flog := logger.Default.WithPrefix("QueryTimeByMonth")
whereClause, args := buildWhereClause(filters)
@@ -153,7 +160,7 @@ func (db *DBWrapper) QueryTimeByMonth(ctx context.Context, filters AnalyticsFilt
}
// Location aggregation queries
func (db *DBWrapper) QueryLocationBySystem(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySystem, error) {
func (db *DBWrapper) QueryLocationBySystem(ctx context.Context, filters types.AnalyticsFilters) ([]LocationAggregationBySystem, error) {
flog := logger.Default.WithPrefix("QueryLocationBySystem")
whereClause, args := buildWhereClause(filters)
@@ -190,7 +197,7 @@ func (db *DBWrapper) QueryLocationBySystem(ctx context.Context, filters Analytic
return results, nil
}
func (db *DBWrapper) QueryLocationByRegion(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByRegion, error) {
func (db *DBWrapper) QueryLocationByRegion(ctx context.Context, filters types.AnalyticsFilters) ([]LocationAggregationByRegion, error) {
flog := logger.Default.WithPrefix("QueryLocationByRegion")
whereClause, args := buildWhereClause(filters)
@@ -224,7 +231,7 @@ func (db *DBWrapper) QueryLocationByRegion(ctx context.Context, filters Analytic
return results, nil
}
func (db *DBWrapper) QueryLocationByConstellation(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByConstellation, error) {
func (db *DBWrapper) QueryLocationByConstellation(ctx context.Context, filters types.AnalyticsFilters) ([]LocationAggregationByConstellation, error) {
flog := logger.Default.WithPrefix("QueryLocationByConstellation")
whereClause, args := buildWhereClause(filters)
@@ -258,7 +265,7 @@ func (db *DBWrapper) QueryLocationByConstellation(ctx context.Context, filters A
return results, nil
}
func (db *DBWrapper) QueryLocationBySecurity(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySecurity, error) {
func (db *DBWrapper) QueryLocationBySecurity(ctx context.Context, filters types.AnalyticsFilters) ([]LocationAggregationBySecurity, error) {
flog := logger.Default.WithPrefix("QueryLocationBySecurity")
whereClause, args := buildWhereClause(filters)
@@ -296,21 +303,21 @@ func (db *DBWrapper) QueryLocationBySecurity(ctx context.Context, filters Analyt
}
// Ship aggregation queries
func (db *DBWrapper) QueryShipByVictim(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByVictimShip, error) {
func (db *DBWrapper) QueryShipByVictim(ctx context.Context, filters types.AnalyticsFilters) ([]ShipAggregationByVictimShip, error) {
flog := logger.Default.WithPrefix("QueryShipByVictim")
whereClause, args := buildWhereClause(filters)
query := fmt.Sprintf(`
SELECT
victim_ship_type_id,
victim_ship_type_name,
victim_ship_group_name,
victim_ship_category_name,
any(victim_ship_type_name) as victim_ship_type_name,
any(victim_ship_group_name) as victim_ship_group_name,
any(victim_ship_category_name) as victim_ship_category_name,
count() as kill_count,
count(DISTINCT victim_character_id) as unique_pilots_killed
FROM zkill.killmails
%s
GROUP BY victim_ship_type_id, victim_ship_type_name, victim_ship_group_name, victim_ship_category_name
GROUP BY victim_ship_type_id
ORDER BY kill_count DESC
`, whereClause)
@@ -332,7 +339,7 @@ func (db *DBWrapper) QueryShipByVictim(ctx context.Context, filters AnalyticsFil
return results, nil
}
func (db *DBWrapper) QueryShipByAttacker(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByAttackerShip, error) {
func (db *DBWrapper) QueryShipByAttacker(ctx context.Context, filters types.AnalyticsFilters) ([]ShipAggregationByAttackerShip, error) {
flog := logger.Default.WithPrefix("QueryShipByAttacker")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
whereClause, whereArgs := buildAttackerWhereClause(filters)
@@ -372,7 +379,7 @@ func (db *DBWrapper) QueryShipByAttacker(ctx context.Context, filters AnalyticsF
}
// Player aggregation queries
func (db *DBWrapper) QueryPlayerByVictimCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCharacter, error) {
func (db *DBWrapper) QueryPlayerByVictimCharacter(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByVictimCharacter, error) {
flog := logger.Default.WithPrefix("QueryPlayerByVictimCharacter")
whereClause, args := buildWhereClause(filters)
@@ -407,7 +414,7 @@ func (db *DBWrapper) QueryPlayerByVictimCharacter(ctx context.Context, filters A
return results, nil
}
func (db *DBWrapper) QueryPlayerByVictimCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCorporation, error) {
func (db *DBWrapper) QueryPlayerByVictimCorporation(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByVictimCorporation, error) {
flog := logger.Default.WithPrefix("QueryPlayerByVictimCorporation")
whereClause, args := buildWhereClause(filters)
@@ -441,7 +448,7 @@ func (db *DBWrapper) QueryPlayerByVictimCorporation(ctx context.Context, filters
return results, nil
}
func (db *DBWrapper) QueryPlayerByVictimAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimAlliance, error) {
func (db *DBWrapper) QueryPlayerByVictimAlliance(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByVictimAlliance, error) {
flog := logger.Default.WithPrefix("QueryPlayerByVictimAlliance")
whereClause, args := buildWhereClause(filters)
@@ -482,7 +489,7 @@ func (db *DBWrapper) QueryPlayerByVictimAlliance(ctx context.Context, filters An
return results, nil
}
func (db *DBWrapper) QueryPlayerByAttackerCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCharacter, error) {
func (db *DBWrapper) QueryPlayerByAttackerCharacter(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByAttackerCharacter, error) {
flog := logger.Default.WithPrefix("QueryPlayerByAttackerCharacter")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
whereClause, whereArgs := buildAttackerWhereClause(filters)
@@ -521,7 +528,7 @@ func (db *DBWrapper) QueryPlayerByAttackerCharacter(ctx context.Context, filters
return results, nil
}
func (db *DBWrapper) QueryPlayerByAttackerCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCorporation, error) {
func (db *DBWrapper) QueryPlayerByAttackerCorporation(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByAttackerCorporation, error) {
flog := logger.Default.WithPrefix("QueryPlayerByAttackerCorporation")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
whereClause, whereArgs := buildAttackerWhereClause(filters)
@@ -558,7 +565,7 @@ func (db *DBWrapper) QueryPlayerByAttackerCorporation(ctx context.Context, filte
return results, nil
}
func (db *DBWrapper) QueryPlayerByAttackerAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerAlliance, error) {
func (db *DBWrapper) QueryPlayerByAttackerAlliance(ctx context.Context, filters types.AnalyticsFilters) ([]PlayerAggregationByAttackerAlliance, error) {
flog := logger.Default.WithPrefix("QueryPlayerByAttackerAlliance")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
whereClause, whereArgs := buildAttackerWhereClause(filters)
@@ -596,7 +603,7 @@ func (db *DBWrapper) QueryPlayerByAttackerAlliance(ctx context.Context, filters
}
// Module aggregation queries
func (db *DBWrapper) QueryModuleBySlotType(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationBySlotType, error) {
func (db *DBWrapper) QueryModuleBySlotType(ctx context.Context, filters types.AnalyticsFilters) ([]ModuleAggregationBySlotType, error) {
flog := logger.Default.WithPrefix("QueryModuleBySlotType")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
@@ -645,7 +652,7 @@ func (db *DBWrapper) QueryModuleBySlotType(ctx context.Context, filters Analytic
return results, nil
}
func (db *DBWrapper) QueryModuleByModule(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationByModule, error) {
func (db *DBWrapper) QueryModuleByModule(ctx context.Context, filters types.AnalyticsFilters) ([]ModuleAggregationByModule, error) {
flog := logger.Default.WithPrefix("QueryModuleByModule")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
@@ -696,7 +703,7 @@ func (db *DBWrapper) QueryModuleByModule(ctx context.Context, filters AnalyticsF
return results, nil
}
func (db *DBWrapper) QueryModuleCoOccurrence(ctx context.Context, filters AnalyticsFilters, selectedModuleID int32, selectedSlot string) ([]ModuleCoOccurrence, error) {
func (db *DBWrapper) QueryModuleCoOccurrence(ctx context.Context, filters types.AnalyticsFilters, selectedModuleID int32, selectedSlot string) ([]ModuleCoOccurrence, error) {
flog := logger.Default.WithPrefix("QueryModuleCoOccurrence")
killmailSubquery, killmailArgs := getKillmailIDSubquery(filters)
@@ -740,7 +747,7 @@ func (db *DBWrapper) QueryModuleCoOccurrence(ctx context.Context, filters Analyt
return results, nil
}
func (db *DBWrapper) QueryKillmailIDs(ctx context.Context, filters AnalyticsFilters, limit, offset int) ([]int64, error) {
func (db *DBWrapper) QueryKillmailIDs(ctx context.Context, filters types.AnalyticsFilters, limit, offset int) ([]int64, error) {
flog := logger.Default.WithPrefix("QueryKillmailIDs")
whereClause, args := buildWhereClause(filters)
@@ -821,11 +828,11 @@ func (db *DBWrapper) QueryKillmailWithItems(ctx context.Context, killmailID int6
defer rows.Close()
var result *FlatKillmailComplete
var items []FlatKillmailItem
var items []types.FlatKillmailItem
for rows.Next() {
var km FlatKillmail
var item FlatKillmailItem
var km types.FlatKillmail
var item types.FlatKillmailItem
var itemTypeID int32
var itemTypeName string
var itemGroupName string
@@ -876,7 +883,7 @@ func (db *DBWrapper) QueryKillmailWithItems(ctx context.Context, killmailID int6
if result == nil {
result = &FlatKillmailComplete{
FlatKillmail: km,
Items: []FlatKillmailItem{},
Items: []types.FlatKillmailItem{},
}
}
@@ -903,4 +910,4 @@ func (db *DBWrapper) QueryKillmailWithItems(ctx context.Context, killmailID int6
result.Items = items
flog.Info("Query returned killmail %d with %d items", killmailID, len(items))
return result, nil
}
}

View File

@@ -1,4 +1,4 @@
package main
package analytics
import (
"context"
@@ -12,7 +12,7 @@ func TestQueryTimeByHour(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryTimeByHour(ctx, filters)
if err != nil {
@@ -40,7 +40,7 @@ func TestQueryTimeByDay(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryTimeByDay(ctx, filters)
if err != nil {
@@ -68,7 +68,7 @@ func TestQueryTimeByDate(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryTimeByDate(ctx, filters)
if err != nil {
@@ -96,7 +96,7 @@ func TestQueryTimeByMonth(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryTimeByMonth(ctx, filters)
if err != nil {
@@ -124,7 +124,7 @@ func TestQueryLocationBySystem(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryLocationBySystem(ctx, filters)
if err != nil {
@@ -152,7 +152,7 @@ func TestQueryLocationByRegion(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryLocationByRegion(ctx, filters)
if err != nil {
@@ -180,7 +180,7 @@ func TestQueryLocationByConstellation(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryLocationByConstellation(ctx, filters)
if err != nil {
@@ -208,7 +208,7 @@ func TestQueryLocationBySecurity(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryLocationBySecurity(ctx, filters)
if err != nil {
@@ -233,7 +233,7 @@ func TestQueryShipByVictim(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryShipByVictim(ctx, filters)
if err != nil {
@@ -261,7 +261,7 @@ func TestQueryShipByAttacker(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryShipByAttacker(ctx, filters)
if err != nil {
@@ -289,7 +289,7 @@ func TestQueryPlayerByVictimCharacter(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByVictimCharacter(ctx, filters)
if err != nil {
@@ -317,7 +317,7 @@ func TestQueryPlayerByVictimCorporation(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByVictimCorporation(ctx, filters)
if err != nil {
@@ -345,7 +345,7 @@ func TestQueryPlayerByVictimAlliance(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByVictimAlliance(ctx, filters)
if err != nil {
@@ -373,7 +373,7 @@ func TestQueryPlayerByAttackerCharacter(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByAttackerCharacter(ctx, filters)
if err != nil {
@@ -401,7 +401,7 @@ func TestQueryPlayerByAttackerCorporation(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByAttackerCorporation(ctx, filters)
if err != nil {
@@ -429,7 +429,7 @@ func TestQueryPlayerByAttackerAlliance(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryPlayerByAttackerAlliance(ctx, filters)
if err != nil {
@@ -457,7 +457,7 @@ func TestQueryModuleBySlotType(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryModuleBySlotType(ctx, filters)
if err != nil {
@@ -482,7 +482,7 @@ func TestQueryModuleByModule(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryModuleByModule(ctx, filters)
if err != nil {
@@ -510,7 +510,7 @@ func TestQueryModuleCoOccurrence(t *testing.T) {
}
ctx := context.Background()
filters := AnalyticsFilters{}
filters := types.AnalyticsFilters{}
results, err := db.QueryModuleCoOccurrence(ctx, filters, 26914, "mid")
if err != nil {

View File

@@ -1,4 +1,4 @@
package main
package api
import (
"encoding/base64"
@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
"time"
"zkillsusser/types"
logger "git.site.quack-lab.dev/dave/cylogger"
)
@@ -22,13 +23,13 @@ type APIStatisticsRequest struct {
}
type APIAnalyticsRequest struct {
Filters AnalyticsFilters `json:"filters"`
Filters types.AnalyticsFilters `json:"filters"`
}
type APIModuleCoOccurrenceRequest struct {
Filters AnalyticsFilters `json:"filters"`
SelectedModuleID int32 `json:"selectedModuleID"`
SelectedSlot string `json:"selectedSlot"`
Filters AnalyticsFilters `json:"filters"`
SelectedModuleID int32 `json:"selectedModuleID"`
SelectedSlot string `json:"selectedSlot"`
}
type APISearchResult struct {
@@ -64,109 +65,109 @@ type APIImageData struct {
// Analytics handlers
func handleAnalyticsTimeByHour(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryTimeByHour(r.Context(), req.Filters)
})
}
func handleAnalyticsTimeByDay(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryTimeByDay(r.Context(), req.Filters)
})
}
func handleAnalyticsTimeByDate(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryTimeByDate(r.Context(), req.Filters)
})
}
func handleAnalyticsTimeByMonth(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryTimeByMonth(r.Context(), req.Filters)
})
}
func handleAnalyticsLocationBySystem(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryLocationBySystem(r.Context(), req.Filters)
})
}
func handleAnalyticsLocationByRegion(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryLocationByRegion(r.Context(), req.Filters)
})
}
func handleAnalyticsLocationByConstellation(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryLocationByConstellation(r.Context(), req.Filters)
})
}
func handleAnalyticsLocationBySecurity(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryLocationBySecurity(r.Context(), req.Filters)
})
}
func handleAnalyticsShipByVictim(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryShipByVictim(r.Context(), req.Filters)
})
}
func handleAnalyticsShipByAttacker(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryShipByAttacker(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByVictimCharacter(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByVictimCharacter(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByVictimCorporation(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByVictimCorporation(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByVictimAlliance(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByVictimAlliance(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByAttackerCharacter(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByAttackerCharacter(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByAttackerCorporation(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByAttackerCorporation(r.Context(), req.Filters)
})
}
func handleAnalyticsPlayerByAttackerAlliance(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryPlayerByAttackerAlliance(r.Context(), req.Filters)
})
}
func handleAnalyticsModuleBySlotType(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryModuleBySlotType(r.Context(), req.Filters)
})
}
func handleAnalyticsModuleByModule(w http.ResponseWriter, r *http.Request) {
handleAnalyticsQuery(w, r, func(db DB, req APIAnalyticsRequest) (interface{}, error) {
handleAnalyticsQuery(w, r, func(database db.DB, req APIAnalyticsRequest) (interface{}, error) {
return db.QueryModuleByModule(r.Context(), req.Filters)
})
}
@@ -209,9 +210,9 @@ func handleAnalyticsModuleCoOccurrence(w http.ResponseWriter, r *http.Request) {
}
type APIKillmailIDsRequest struct {
Filters AnalyticsFilters `json:"filters"`
Limit int `json:"limit"`
Offset int `json:"offset"`
Filters types.AnalyticsFilters `json:"filters"`
Limit int `json:"limit"`
Offset int `json:"offset"`
}
func handleAnalyticsKillmails(w http.ResponseWriter, r *http.Request) {
@@ -296,7 +297,7 @@ func handleKillmail(w http.ResponseWriter, r *http.Request) {
}
}
type analyticsQueryFunc func(db DB, req APIAnalyticsRequest) (interface{}, error)
type analyticsQueryFunc func(database db.DB, req APIAnalyticsRequest) (interface{}, error)
func handleAnalyticsQuery(w http.ResponseWriter, r *http.Request, queryFn analyticsQueryFunc) {
flog := logger.Default.WithPrefix("handleAnalyticsQuery")

View File

@@ -1,4 +1,4 @@
package main
package api
import (
"bytes"

View File

@@ -1,4 +1,4 @@
package main
package bz2reader
import (
"archive/tar"
@@ -7,10 +7,12 @@ import (
"io"
"os"
"zkillsusser/types"
logger "git.site.quack-lab.dev/dave/cylogger"
)
func IterBz2Killmails(path string, out chan<- Killmail) error {
func IterBz2Killmails(path string, out chan<- types.Killmail) error {
log := logger.Default.WithPrefix(path)
log.Debug("Iterating killmails")
f, err := os.Open(path)
@@ -46,7 +48,7 @@ func IterBz2Killmails(path string, out chan<- Killmail) error {
return err
}
var killmail Killmail
var killmail types.Killmail
if err := json.Unmarshal(data, &killmail); err != nil {
flog.Error("Failed to unmarshal JSON: %v", err)
return err
@@ -60,10 +62,10 @@ func IterBz2Killmails(path string, out chan<- Killmail) error {
return nil
}
func LoadBz2Killmails(path string) ([]Killmail, error) {
func LoadBz2Killmails(path string) ([]types.Killmail, error) {
log := logger.Default.WithPrefix(path)
log.Debug("Loading killmails")
out := make(chan Killmail)
out := make(chan types.Killmail)
go func() {
err := IterBz2Killmails(path, out)
if err != nil {
@@ -72,11 +74,11 @@ func LoadBz2Killmails(path string) ([]Killmail, error) {
}
}()
log.Debug("Receiving killmails")
var killmails []Killmail
var killmails []types.Killmail
for killmail := range out {
killmails = append(killmails, killmail)
log.Debug("Received killmail: %+v", killmail.KillmailID)
}
log.Debug("Closing channel")
return killmails, nil
}
}

View File

@@ -1,4 +1,4 @@
package main
package clickhouse
import (
"context"
@@ -6,25 +6,75 @@ import (
"fmt"
"strings"
"zkillsusser/config"
"zkillsusser/types"
"git.site.quack-lab.dev/dave/cyutils"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)
// ClickhouseClient wraps ClickHouse connection and operations
type ClickhouseClient struct {
Conn driver.Conn
}
// NewClient opens a ClickHouse connection over the HTTP protocol using the
// host and credentials from the config package and wraps it in a
// ClickhouseClient. It returns an error if the connection cannot be opened.
func NewClient() (*ClickhouseClient, error) {
	// max_query_size is raised because killmail batches are sent as large
	// inline JSONEachRow INSERT statements.
	conn, err := clickhouse.Open(&clickhouse.Options{
		Addr: []string{config.ClickhouseHost},
		Auth: clickhouse.Auth{
			Database: config.ClickhouseDatabase,
			Username: config.ClickhouseUsername,
			Password: config.ClickhousePassword,
		},
		Protocol: clickhouse.HTTP,
		Settings: clickhouse.Settings{
			"max_query_size": 100000000,
		},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to connect to ClickHouse: %w", err)
	}
	return &ClickhouseClient{Conn: conn}, nil
}
// Close releases the underlying ClickHouse connection.
// It is a no-op on a client whose connection was never opened.
func (c *ClickhouseClient) Close() error {
	if c.Conn == nil {
		return nil
	}
	return c.Conn.Close()
}
// Query executes a SELECT query with the given args and returns the row
// iterator; callers should close it when finished (see driver.Rows).
func (c *ClickhouseClient) Query(ctx context.Context, query string, args ...interface{}) (driver.Rows, error) {
return c.Conn.Query(ctx, query, args...)
}
// Exec executes a statement that produces no result rows (DDL, INSERT, ...).
func (c *ClickhouseClient) Exec(ctx context.Context, query string, args ...interface{}) error {
return c.Conn.Exec(ctx, query, args...)
}
// SaveFlatKillmails saves flattened killmails, attackers, and items to ClickHouse using JSON format
func (db *DBWrapper) SaveFlatKillmails(
killmails []*FlatKillmail,
attackers []FlatKillmailAttacker,
items []FlatKillmailItem,
func (c *ClickhouseClient) SaveFlatKillmails(
killmails []*types.FlatKillmail,
attackers []types.FlatKillmailAttacker,
items []types.FlatKillmailItem,
) error {
ctx := context.Background()
// Insert in batches
if err := db.insertKillmailsJSON(ctx, killmails); err != nil {
if err := c.insertKillmailsJSON(ctx, killmails); err != nil {
return fmt.Errorf("failed to insert killmails: %w", err)
}
if err := db.insertAttackersJSON(ctx, attackers); err != nil {
if err := c.insertAttackersJSON(ctx, attackers); err != nil {
return fmt.Errorf("failed to insert attackers: %w", err)
}
if err := db.insertItemsJSON(ctx, items); err != nil {
if err := c.insertItemsJSON(ctx, items); err != nil {
return fmt.Errorf("failed to insert items: %w", err)
}
@@ -32,13 +82,13 @@ func (db *DBWrapper) SaveFlatKillmails(
}
// insertKillmailsJSON inserts killmails using JSON format
func (db *DBWrapper) insertKillmailsJSON(ctx context.Context, killmails []*FlatKillmail) error {
func (c *ClickhouseClient) insertKillmailsJSON(ctx context.Context, killmails []*types.FlatKillmail) error {
if len(killmails) == 0 {
return nil
}
var batchErrors []error
cyutils.Batched(killmails, 1000, func(batch []*FlatKillmail) {
cyutils.Batched(killmails, 1000, func(batch []*types.FlatKillmail) {
var jsonRows []string
for _, km := range batch {
jsonBytes, err := json.Marshal(km)
@@ -52,7 +102,7 @@ func (db *DBWrapper) insertKillmailsJSON(ctx context.Context, killmails []*FlatK
if len(jsonRows) > 0 {
jsonData := strings.Join(jsonRows, "\n")
query := fmt.Sprintf("INSERT INTO zkill.killmails FORMAT JSONEachRow\n%s", jsonData)
if err := db.ch.Exec(ctx, query); err != nil {
if err := c.Conn.Exec(ctx, query); err != nil {
batchErrors = append(batchErrors, fmt.Errorf("failed to insert killmails batch: %w", err))
}
}
@@ -65,13 +115,13 @@ func (db *DBWrapper) insertKillmailsJSON(ctx context.Context, killmails []*FlatK
}
// insertAttackersJSON inserts attackers using JSON format
func (db *DBWrapper) insertAttackersJSON(ctx context.Context, attackers []FlatKillmailAttacker) error {
func (c *ClickhouseClient) insertAttackersJSON(ctx context.Context, attackers []types.FlatKillmailAttacker) error {
if len(attackers) == 0 {
return nil
}
var batchErrors []error
cyutils.Batched(attackers, 1000, func(batch []FlatKillmailAttacker) {
cyutils.Batched(attackers, 1000, func(batch []types.FlatKillmailAttacker) {
var jsonRows []string
for _, att := range batch {
jsonBytes, err := json.Marshal(att)
@@ -85,7 +135,7 @@ func (db *DBWrapper) insertAttackersJSON(ctx context.Context, attackers []FlatKi
if len(jsonRows) > 0 {
jsonData := strings.Join(jsonRows, "\n")
query := fmt.Sprintf("INSERT INTO zkill.killmail_attackers FORMAT JSONEachRow\n%s", jsonData)
if err := db.ch.Exec(ctx, query); err != nil {
if err := c.Conn.Exec(ctx, query); err != nil {
batchErrors = append(batchErrors, fmt.Errorf("failed to insert attackers batch: %w", err))
}
}
@@ -98,13 +148,13 @@ func (db *DBWrapper) insertAttackersJSON(ctx context.Context, attackers []FlatKi
}
// insertItemsJSON inserts items using JSON format
func (db *DBWrapper) insertItemsJSON(ctx context.Context, items []FlatKillmailItem) error {
func (c *ClickhouseClient) insertItemsJSON(ctx context.Context, items []types.FlatKillmailItem) error {
if len(items) == 0 {
return nil
}
var batchErrors []error
cyutils.Batched(items, 1000, func(batch []FlatKillmailItem) {
cyutils.Batched(items, 1000, func(batch []types.FlatKillmailItem) {
var jsonRows []string
for _, item := range batch {
jsonBytes, err := json.Marshal(item)
@@ -118,7 +168,7 @@ func (db *DBWrapper) insertItemsJSON(ctx context.Context, items []FlatKillmailIt
if len(jsonRows) > 0 {
jsonData := strings.Join(jsonRows, "\n")
query := fmt.Sprintf("INSERT INTO zkill.killmail_items FORMAT JSONEachRow\n%s", jsonData)
if err := db.ch.Exec(ctx, query); err != nil {
if err := c.Conn.Exec(ctx, query); err != nil {
batchErrors = append(batchErrors, fmt.Errorf("failed to insert items batch: %w", err))
}
}

View File

@@ -1,72 +0,0 @@
package main
import (
"os"
"strconv"
"github.com/joho/godotenv"
)
func getEnv(key, defaultValue string) string {
value := os.Getenv(key)
if value == "" {
return defaultValue
}
return value
}
func getEnvInt(key string, defaultValue int) int {
value := os.Getenv(key)
if value == "" {
return defaultValue
}
parsed, err := strconv.Atoi(value)
if err != nil {
return defaultValue
}
return parsed
}
var (
nsqHost string
nsqPort int
nsqLookupHost string
nsqLookupPort int
stage1Workers int
stage2Workers int
stage3Workers int
serverPort string
serverMode bool
stage string
clickhouseHost string
clickhouseDatabase string
clickhouseUsername string
clickhousePassword string
)
func initConfig() error {
godotenv.Load()
nsqHost = getEnv("NSQ_HOST", "127.0.0.1")
nsqPort = getEnvInt("NSQ_PORT", 4150)
nsqLookupHost = getEnv("NSQ_LOOKUP_HOST", "127.0.0.1")
nsqLookupPort = getEnvInt("NSQ_LOOKUP_PORT", 4160)
stage1Workers = getEnvInt("STAGE1_WORKERS", 24)
stage2Workers = getEnvInt("STAGE2_WORKERS", 48)
stage3Workers = getEnvInt("STAGE3_WORKERS", 48)
serverPort = getEnv("SERVER_PORT", "3000")
serverMode = getEnv("SERVER", "false") == "true"
stage = getEnv("STAGE", "")
clickhouseHost = getEnv("CLICKHOUSE_HOST", "127.0.0.1")
clickhouseDatabase = getEnv("CLICKHOUSE_DATABASE", "zkill")
clickhouseUsername = getEnv("CLICKHOUSE_USERNAME", "default")
clickhousePassword = getEnv("CLICKHOUSE_PASSWORD", "")
return nil
}

62
config/config.go Normal file
View File

@@ -0,0 +1,62 @@
package config
import (
"os"
"strconv"
"github.com/joho/godotenv"
)
func getEnv(key, defaultValue string) string {
value := os.Getenv(key)
if value == "" {
return defaultValue
}
return value
}
func getEnvInt(key string, defaultValue int) int {
value := os.Getenv(key)
if value == "" {
return defaultValue
}
parsed, err := strconv.Atoi(value)
if err != nil {
return defaultValue
}
return parsed
}
// Package-level configuration values, populated once by InitConfig from the
// process environment (with defaults for anything unset).
var (
	NSQHost            string // NSQ daemon host (NSQ_HOST)
	NSQPort            int    // NSQ daemon port (NSQ_PORT)
	NSQLookupHost      string // nsqlookupd host (NSQ_LOOKUP_HOST)
	NSQLookupPort      int    // nsqlookupd port (NSQ_LOOKUP_PORT)
	ServerPort         string // HTTP server listen port (SERVER_PORT)
	ServerMode         bool   // true when SERVER=true
	ClickhouseHost     string // ClickHouse host (CLICKHOUSE_HOST)
	ClickhouseDatabase string // ClickHouse database name (CLICKHOUSE_DATABASE)
	ClickhouseUsername string // ClickHouse user (CLICKHOUSE_USERNAME)
	ClickhousePassword string // ClickHouse password (CLICKHOUSE_PASSWORD)
)
// InitConfig populates the package-level configuration variables from the
// environment, applying defaults for anything unset or empty. A .env file,
// if present, is loaded into the environment first. It currently always
// returns nil; the error return is kept for forward compatibility.
func InitConfig() error {
	// Best-effort: the .env file is optional, so the load error is
	// deliberately ignored.
	godotenv.Load()
	NSQHost = getEnv("NSQ_HOST", "127.0.0.1")
	NSQPort = getEnvInt("NSQ_PORT", 4150)
	NSQLookupHost = getEnv("NSQ_LOOKUP_HOST", "127.0.0.1")
	NSQLookupPort = getEnvInt("NSQ_LOOKUP_PORT", 4161)
	ServerPort = getEnv("SERVER_PORT", "3000")
	ServerMode = getEnv("SERVER", "false") == "true"
	ClickhouseHost = getEnv("CLICKHOUSE_HOST", "127.0.0.1")
	ClickhouseDatabase = getEnv("CLICKHOUSE_DATABASE", "zkill")
	ClickhouseUsername = getEnv("CLICKHOUSE_USERNAME", "default")
	ClickhousePassword = getEnv("CLICKHOUSE_PASSWORD", "")
	return nil
}

339
db.go
View File

@@ -1,339 +0,0 @@
package main
import (
"context"
"fmt"
"strings"
"zkillsusser/models"
utils "git.site.quack-lab.dev/dave/cyutils"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/schema"
)
type DB interface {
Init() error
Get() *gorm.DB
SaveFlatKillmails(killmails []*FlatKillmail, attackers []FlatKillmailAttacker, items []FlatKillmailItem) error
SearchShips(query string, limit int) ([]models.InvType, error)
SearchSystems(query string, limit int) ([]models.MapSolarSystem, error)
SearchModules(query string, limit int) ([]models.InvType, error)
SearchGroups(query string, limit int) ([]models.InvGroup, error)
GetItemTypes(itemIDs []int64) ([]models.InvType, error)
GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error)
ExpandGroupsIntoItemTypeIds(groups []int64) ([]int64, error)
GetModuleSlots(moduleIDs []int64) (map[int64]ModuleSlot, error)
GetType(ctx context.Context, typeID int32) (*models.InvType, error)
GetGroup(ctx context.Context, groupID int32) (*models.InvGroup, error)
GetCategory(ctx context.Context, categoryID int32) (*models.InvCategory, error)
GetMarketGroup(ctx context.Context, marketGroupID int32) (*models.InvMarketGroup, error)
GetSolarSystem(ctx context.Context, systemID int32) (*models.MapSolarSystem, error)
GetConstellation(ctx context.Context, constellationID int32) (*models.MapConstellation, error)
GetRegion(ctx context.Context, regionID int32) (*models.MapRegion, error)
// Analytics queries
QueryTimeByHour(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByHour, error)
QueryTimeByDay(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDay, error)
QueryTimeByDate(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDate, error)
QueryTimeByMonth(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByMonth, error)
QueryLocationBySystem(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySystem, error)
QueryLocationByRegion(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByRegion, error)
QueryLocationByConstellation(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByConstellation, error)
QueryLocationBySecurity(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySecurity, error)
QueryShipByVictim(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByVictimShip, error)
QueryShipByAttacker(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByAttackerShip, error)
QueryPlayerByVictimCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCharacter, error)
QueryPlayerByVictimCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCorporation, error)
QueryPlayerByVictimAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimAlliance, error)
QueryPlayerByAttackerCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCharacter, error)
QueryPlayerByAttackerCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCorporation, error)
QueryPlayerByAttackerAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerAlliance, error)
QueryModuleBySlotType(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationBySlotType, error)
QueryModuleByModule(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationByModule, error)
QueryModuleCoOccurrence(ctx context.Context, filters AnalyticsFilters, selectedModuleID int32, selectedSlot string) ([]ModuleCoOccurrence, error)
QueryKillmailIDs(ctx context.Context, filters AnalyticsFilters, limit, offset int) ([]int64, error)
QueryKillmailWithItems(ctx context.Context, killmailID int64) (*FlatKillmailComplete, error)
}
type DBWrapper struct {
ch driver.Conn
db *gorm.DB // For SQLite (EVE static data)
getTypeMemo func(context.Context, int32) (*models.InvType, error)
getGroupMemo func(context.Context, int32) (*models.InvGroup, error)
getCategoryMemo func(context.Context, int32) (*models.InvCategory, error)
getMarketGroupMemo func(context.Context, int32) (*models.InvMarketGroup, error)
getSystemMemo func(context.Context, int32) (*models.MapSolarSystem, error)
getConstellationMemo func(context.Context, int32) (*models.MapConstellation, error)
getRegionMemo func(context.Context, int32) (*models.MapRegion, error)
}
var db *DBWrapper
func GetDB() (DB, error) {
if db != nil {
return db, nil
}
sdb, err := GetDBSqlite()
if err != nil {
return nil, fmt.Errorf("failed to connect to SQLite: %w", err)
}
conn, err := GetDBClickhouse()
if err != nil {
return nil, fmt.Errorf("failed to connect to ClickHouse: %w", err)
}
db = &DBWrapper{
ch: conn,
db: sdb,
}
getTypeFn := func(ctx context.Context, typeID int32) (*models.InvType, error) {
var t models.InvType
if err := db.db.Where("typeID = ?", typeID).First(&t).Error; err != nil {
return nil, fmt.Errorf("failed to get type %d: %w", typeID, err)
}
return &t, nil
}
getGroupFn := func(ctx context.Context, groupID int32) (*models.InvGroup, error) {
var g models.InvGroup
if err := db.db.Where("groupID = ?", groupID).First(&g).Error; err != nil {
return nil, fmt.Errorf("failed to get group %d: %w", groupID, err)
}
return &g, nil
}
getCategoryFn := func(ctx context.Context, categoryID int32) (*models.InvCategory, error) {
var c models.InvCategory
if err := db.db.Where("categoryID = ?", categoryID).First(&c).Error; err != nil {
return nil, fmt.Errorf("failed to get category %d: %w", categoryID, err)
}
return &c, nil
}
getMarketGroupFn := func(ctx context.Context, marketGroupID int32) (*models.InvMarketGroup, error) {
var mg models.InvMarketGroup
if err := db.db.Where("marketGroupID = ?", marketGroupID).First(&mg).Error; err != nil {
return nil, fmt.Errorf("failed to get market group %d: %w", marketGroupID, err)
}
return &mg, nil
}
getSystemFn := func(ctx context.Context, systemID int32) (*models.MapSolarSystem, error) {
var s models.MapSolarSystem
if err := db.db.Where("solarSystemID = ?", systemID).First(&s).Error; err != nil {
return nil, fmt.Errorf("failed to get solar system %d: %w", systemID, err)
}
return &s, nil
}
getConstellationFn := func(ctx context.Context, constellationID int32) (*models.MapConstellation, error) {
var c models.MapConstellation
if err := db.db.Where("constellationID = ?", constellationID).First(&c).Error; err != nil {
return nil, fmt.Errorf("failed to get constellation %d: %w", constellationID, err)
}
return &c, nil
}
getRegionFn := func(ctx context.Context, regionID int32) (*models.MapRegion, error) {
var r models.MapRegion
if err := db.db.Where("regionID = ?", regionID).First(&r).Error; err != nil {
return nil, fmt.Errorf("failed to get region %d: %w", regionID, err)
}
return &r, nil
}
db.getTypeMemo = utils.Memoized(getTypeFn).(func(context.Context, int32) (*models.InvType, error))
db.getGroupMemo = utils.Memoized(getGroupFn).(func(context.Context, int32) (*models.InvGroup, error))
db.getCategoryMemo = utils.Memoized(getCategoryFn).(func(context.Context, int32) (*models.InvCategory, error))
db.getMarketGroupMemo = utils.Memoized(getMarketGroupFn).(func(context.Context, int32) (*models.InvMarketGroup, error))
db.getSystemMemo = utils.Memoized(getSystemFn).(func(context.Context, int32) (*models.MapSolarSystem, error))
db.getConstellationMemo = utils.Memoized(getConstellationFn).(func(context.Context, int32) (*models.MapConstellation, error))
db.getRegionMemo = utils.Memoized(getRegionFn).(func(context.Context, int32) (*models.MapRegion, error))
err = db.Init()
return db, err
}
func GetDBSqlite() (*gorm.DB, error) {
return gorm.Open(sqlite.Open("sqlite-latest.sqlite"), &gorm.Config{
NamingStrategy: schema.NamingStrategy{
NoLowerCase: true,
},
})
}
func GetDBClickhouse() (driver.Conn, error) {
options := &clickhouse.Options{
Addr: []string{clickhouseHost},
Auth: clickhouse.Auth{
Database: clickhouseDatabase,
Username: clickhouseUsername,
Password: clickhousePassword,
},
Protocol: clickhouse.HTTP,
Settings: clickhouse.Settings{
"max_query_size": 100000000,
},
}
return clickhouse.Open(options)
}
func (db *DBWrapper) Get() *gorm.DB {
return db.db
}
func (db *DBWrapper) Init() error {
return nil
}
func (db *DBWrapper) ExpandGroupsIntoItemTypeIds(groups []int64) ([]int64, error) {
var groupTypeIDs []int64
result := db.db.Model(&models.InvType{}).
Select("typeID").
Where("groupID IN ?", groups).
Pluck("typeID", &groupTypeIDs)
return groupTypeIDs, result.Error
}
func (db *DBWrapper) SearchShips(query string, limit int) ([]models.InvType, error) {
var ships []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (6)", searchPattern).
Limit(limit).
Find(&ships).Error
return ships, err
}
func (db *DBWrapper) SearchSystems(query string, limit int) ([]models.MapSolarSystem, error) {
var systems []models.MapSolarSystem
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("mapSolarSystems").
Where("LOWER(\"solarSystemName\") LIKE ?", searchPattern).
Limit(limit).
Find(&systems).Error
return systems, err
}
func (db *DBWrapper) SearchModules(query string, limit int) ([]models.InvType, error) {
var modules []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (7, 66)", searchPattern).
Limit(limit).
Find(&modules).Error
return modules, err
}
func (db *DBWrapper) SearchGroups(query string, limit int) ([]models.InvGroup, error) {
var groups []models.InvGroup
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invGroups").
Where("LOWER(\"groupName\") LIKE ?", searchPattern).
Limit(limit).
Find(&groups).Error
return groups, err
}
func (db *DBWrapper) GetItemTypes(itemIDs []int64) ([]models.InvType, error) {
var itemTypes []models.InvType
res := db.db.Model(&models.InvType{}).
Where("typeID IN ?", itemIDs).
Find(&itemTypes)
return itemTypes, res.Error
}
func (db *DBWrapper) GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error) {
var systems []models.MapSolarSystem
res := db.db.Model(&models.MapSolarSystem{}).
Where("solarSystemID IN ?", systemIDs).
Find(&systems)
return systems, res.Error
}
func deduplicateInt64(slice []int64) []int64 {
seen := make(map[int64]bool)
result := make([]int64, 0, len(slice))
for _, v := range slice {
if !seen[v] {
seen[v] = true
result = append(result, v)
}
}
return result
}
func (db *DBWrapper) GetModuleSlots(moduleIDs []int64) (map[int64]ModuleSlot, error) {
if len(moduleIDs) == 0 {
return make(map[int64]ModuleSlot), nil
}
var effects []models.DgmTypeEffect
qres := db.db.Model(&models.DgmTypeEffect{}).
Select("typeID, effectID").
Where("typeID IN ? AND effectID IN (11, 12, 13, 2663)", moduleIDs).
Find(&effects)
if qres.Error != nil {
return nil, qres.Error
}
result := make(map[int64]ModuleSlot)
for _, e := range effects {
var slot ModuleSlot
switch e.EffectID {
case 11:
slot = ModuleSlotLow
case 12:
slot = ModuleSlotHigh
case 13:
slot = ModuleSlotMid
case 2663:
slot = ModuleSlotRig
}
result[int64(e.TypeID)] = slot
}
return result, nil
}
func (db *DBWrapper) GetType(ctx context.Context, typeID int32) (*models.InvType, error) {
return db.getTypeMemo(ctx, typeID)
}
func (db *DBWrapper) GetGroup(ctx context.Context, groupID int32) (*models.InvGroup, error) {
return db.getGroupMemo(ctx, groupID)
}
func (db *DBWrapper) GetCategory(ctx context.Context, categoryID int32) (*models.InvCategory, error) {
return db.getCategoryMemo(ctx, categoryID)
}
func (db *DBWrapper) GetMarketGroup(ctx context.Context, marketGroupID int32) (*models.InvMarketGroup, error) {
return db.getMarketGroupMemo(ctx, marketGroupID)
}
func (db *DBWrapper) GetSolarSystem(ctx context.Context, systemID int32) (*models.MapSolarSystem, error) {
return db.getSystemMemo(ctx, systemID)
}
func (db *DBWrapper) GetConstellation(ctx context.Context, constellationID int32) (*models.MapConstellation, error) {
return db.getConstellationMemo(ctx, constellationID)
}
func (db *DBWrapper) GetRegion(ctx context.Context, regionID int32) (*models.MapRegion, error) {
return db.getRegionMemo(ctx, regionID)
}

225
db/db.go Normal file
View File

@@ -0,0 +1,225 @@
package db
import (
	"context"
	"fmt"
	"strings"

	"zkillsusser/models"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/schema"
)
// DB is the read-side interface over the EVE static data SQLite database.
// The commented-out analytics methods below are ClickHouse-backed queries
// that have not been ported to this package yet.
type DB interface {
	Init() error
	Get() *gorm.DB
	// Case-insensitive name searches, capped at limit rows.
	SearchShips(query string, limit int) ([]models.InvType, error)
	SearchSystems(query string, limit int) ([]models.MapSolarSystem, error)
	SearchModules(query string, limit int) ([]models.InvType, error)
	SearchGroups(query string, limit int) ([]models.InvGroup, error)
	// Bulk lookups by ID.
	GetItemTypes(itemIDs []int64) ([]models.InvType, error)
	GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error)
	ExpandGroupsIntoItemTypeIds(groups []int64) ([]int64, error)
	// Single-row lookups by ID.
	GetType(ctx context.Context, typeID int32) (*models.InvType, error)
	GetGroup(ctx context.Context, groupID int32) (*models.InvGroup, error)
	GetCategory(ctx context.Context, categoryID int32) (*models.InvCategory, error)
	GetMarketGroup(ctx context.Context, marketGroupID int32) (*models.InvMarketGroup, error)
	GetSolarSystem(ctx context.Context, systemID int32) (*models.MapSolarSystem, error)
	GetConstellation(ctx context.Context, constellationID int32) (*models.MapConstellation, error)
	GetRegion(ctx context.Context, regionID int32) (*models.MapRegion, error)
	// Analytics queries
	// QueryTimeByHour(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByHour, error)
	// QueryTimeByDay(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDay, error)
	// QueryTimeByDate(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByDate, error)
	// QueryTimeByMonth(ctx context.Context, filters AnalyticsFilters) ([]TimeAggregationByMonth, error)
	// QueryLocationBySystem(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySystem, error)
	// QueryLocationByRegion(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByRegion, error)
	// QueryLocationByConstellation(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationByConstellation, error)
	// QueryLocationBySecurity(ctx context.Context, filters AnalyticsFilters) ([]LocationAggregationBySecurity, error)
	// QueryShipByVictim(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByVictimShip, error)
	// QueryShipByAttacker(ctx context.Context, filters AnalyticsFilters) ([]ShipAggregationByAttackerShip, error)
	// QueryPlayerByVictimCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCharacter, error)
	// QueryPlayerByVictimCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimCorporation, error)
	// QueryPlayerByVictimAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByVictimAlliance, error)
	// QueryPlayerByAttackerCharacter(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCharacter, error)
	// QueryPlayerByAttackerCorporation(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerCorporation, error)
	// QueryPlayerByAttackerAlliance(ctx context.Context, filters AnalyticsFilters) ([]PlayerAggregationByAttackerAlliance, error)
	// QueryModuleBySlotType(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationBySlotType, error)
	// QueryModuleByModule(ctx context.Context, filters AnalyticsFilters) ([]ModuleAggregationByModule, error)
	// QueryModuleCoOccurrence(ctx context.Context, filters AnalyticsFilters, selectedModuleID int32, selectedSlot string) ([]ModuleCoOccurrence, error)
	// QueryKillmailIDs(ctx context.Context, filters AnalyticsFilters, limit, offset int) ([]int64, error)
	// QueryKillmailWithItems(ctx context.Context, killmailID int64) (*FlatKillmailComplete, error)
}
// DBWrapper implements DB on top of a GORM handle.
type DBWrapper struct {
	db *gorm.DB // For SQLite (EVE static data)
}
// db caches the singleton returned by GetDB.
var db *DBWrapper

// GetDB opens (once) the EVE static data SQLite database at path and returns
// a DB backed by it. The handle is cached on success; subsequent calls
// ignore path and return the same instance.
func GetDB(path string) (DB, error) {
	if db != nil {
		return db, nil
	}
	sdb, err := gorm.Open(sqlite.Open(path), &gorm.Config{
		NamingStrategy: schema.NamingStrategy{
			// The static-data dump uses camelCase column names
			// (e.g. "typeID"); don't snake_case them.
			NoLowerCase: true,
		},
	})
	if err != nil {
		// Bug fix: this error was previously unchecked and then
		// overwritten by db.Init(), caching a wrapper around a nil
		// *gorm.DB and reporting success.
		return nil, fmt.Errorf("opening sqlite database %q: %w", path, err)
	}
	wrapper := &DBWrapper{db: sdb}
	if err := wrapper.Init(); err != nil {
		return nil, err
	}
	db = wrapper
	return db, nil
}
// Get exposes the underlying *gorm.DB for ad-hoc queries.
func (db *DBWrapper) Get() *gorm.DB {
	return db.db
}
// Init is currently a no-op; it exists to satisfy the DB interface and as a
// hook for future setup work.
func (db *DBWrapper) Init() error {
	return nil
}
// ExpandGroupsIntoItemTypeIds resolves a set of group IDs into the type IDs
// of every invTypes row belonging to those groups.
func (db *DBWrapper) ExpandGroupsIntoItemTypeIds(groups []int64) ([]int64, error) {
	var ids []int64
	err := db.db.Model(&models.InvType{}).
		Select("typeID").
		Where("groupID IN ?", groups).
		Pluck("typeID", &ids).Error
	return ids, err
}
// SearchShips returns up to limit invTypes whose name contains query
// (case-insensitive), restricted to groups in category 6.
func (db *DBWrapper) SearchShips(query string, limit int) ([]models.InvType, error) {
	pattern := "%" + strings.ToLower(query) + "%"
	var ships []models.InvType
	err := db.db.Table("invTypes").
		Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
		Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (6)", pattern).
		Limit(limit).
		Find(&ships).Error
	return ships, err
}
// SearchSystems returns up to limit solar systems whose name contains query
// (case-insensitive).
func (db *DBWrapper) SearchSystems(query string, limit int) ([]models.MapSolarSystem, error) {
	pattern := "%" + strings.ToLower(query) + "%"
	var systems []models.MapSolarSystem
	err := db.db.Table("mapSolarSystems").
		Where("LOWER(\"solarSystemName\") LIKE ?", pattern).
		Limit(limit).
		Find(&systems).Error
	return systems, err
}
// SearchModules returns up to limit invTypes whose name contains query
// (case-insensitive), restricted to groups in categories 7 and 66.
func (db *DBWrapper) SearchModules(query string, limit int) ([]models.InvType, error) {
	pattern := "%" + strings.ToLower(query) + "%"
	var modules []models.InvType
	err := db.db.Table("invTypes").
		Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
		Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (7, 66)", pattern).
		Limit(limit).
		Find(&modules).Error
	return modules, err
}
// SearchGroups returns up to limit invGroups whose name contains query
// (case-insensitive).
func (db *DBWrapper) SearchGroups(query string, limit int) ([]models.InvGroup, error) {
	pattern := "%" + strings.ToLower(query) + "%"
	var groups []models.InvGroup
	err := db.db.Table("invGroups").
		Where("LOWER(\"groupName\") LIKE ?", pattern).
		Limit(limit).
		Find(&groups).Error
	return groups, err
}
// GetItemTypes fetches the invTypes rows matching the given type IDs.
// IDs with no matching row are simply absent from the result.
func (db *DBWrapper) GetItemTypes(itemIDs []int64) ([]models.InvType, error) {
	var itemTypes []models.InvType
	err := db.db.Model(&models.InvType{}).
		Where("typeID IN ?", itemIDs).
		Find(&itemTypes).Error
	return itemTypes, err
}
// GetSolarSystems fetches the mapSolarSystems rows matching the given IDs.
// IDs with no matching row are simply absent from the result.
func (db *DBWrapper) GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error) {
	var systems []models.MapSolarSystem
	err := db.db.Model(&models.MapSolarSystem{}).
		Where("solarSystemID IN ?", systemIDs).
		Find(&systems).Error
	return systems, err
}
// deduplicateInt64 returns a new slice with duplicate values removed,
// preserving the order of each value's first occurrence.
func deduplicateInt64(slice []int64) []int64 {
	out := make([]int64, 0, len(slice))
	seen := make(map[int64]struct{}, len(slice))
	for _, v := range slice {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}
// GetType looks up a single invTypes row by typeID. The error is non-nil
// when no row matches (gorm's record-not-found) or on query failure.
func (db *DBWrapper) GetType(ctx context.Context, typeID int32) (*models.InvType, error) {
	var t models.InvType
	err := db.db.Model(&models.InvType{}).
		Where("typeID = ?", typeID).
		First(&t).Error
	return &t, err
}
// GetGroup looks up a single invGroups row by groupID.
func (db *DBWrapper) GetGroup(ctx context.Context, groupID int32) (*models.InvGroup, error) {
	var g models.InvGroup
	err := db.db.Model(&models.InvGroup{}).
		Where("groupID = ?", groupID).
		First(&g).Error
	return &g, err
}
// GetCategory looks up a single invCategories row by categoryID.
func (db *DBWrapper) GetCategory(ctx context.Context, categoryID int32) (*models.InvCategory, error) {
	var c models.InvCategory
	err := db.db.Model(&models.InvCategory{}).
		Where("categoryID = ?", categoryID).
		First(&c).Error
	return &c, err
}
// GetMarketGroup looks up a single invMarketGroups row by marketGroupID.
func (db *DBWrapper) GetMarketGroup(ctx context.Context, marketGroupID int32) (*models.InvMarketGroup, error) {
	var mg models.InvMarketGroup
	err := db.db.Model(&models.InvMarketGroup{}).
		Where("marketGroupID = ?", marketGroupID).
		First(&mg).Error
	return &mg, err
}
// GetSolarSystem looks up a single mapSolarSystems row by solarSystemID.
func (db *DBWrapper) GetSolarSystem(ctx context.Context, systemID int32) (*models.MapSolarSystem, error) {
	var s models.MapSolarSystem
	err := db.db.Model(&models.MapSolarSystem{}).
		Where("solarSystemID = ?", systemID).
		First(&s).Error
	return &s, err
}
// GetConstellation looks up a single mapConstellations row by constellationID.
func (db *DBWrapper) GetConstellation(ctx context.Context, constellationID int32) (*models.MapConstellation, error) {
	var c models.MapConstellation
	err := db.db.Model(&models.MapConstellation{}).
		Where("constellationID = ?", constellationID).
		First(&c).Error
	return &c, err
}
// GetRegion looks up a single mapRegions row by regionID.
func (db *DBWrapper) GetRegion(ctx context.Context, regionID int32) (*models.MapRegion, error) {
	var r models.MapRegion
	err := db.db.Model(&models.MapRegion{}).
		Where("regionID = ?", regionID).
		First(&r).Error
	return &r, err
}

View File

@@ -1,8 +1,10 @@
package main
package db
import (
"testing"
"zkillsusser/types"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -41,7 +43,7 @@ func TestGetModuleSlots(t *testing.T) {
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{11357})
require.NoError(t, err)
assert.Equal(t, ModuleSlotHigh, slots[11357])
assert.Equal(t, types.ModuleSlotHigh, slots[11357])
})
t.Run("Get slots for 4391 -> Med", func(t *testing.T) {
// Large Ancillary Shield Booster
@@ -49,7 +51,7 @@ func TestGetModuleSlots(t *testing.T) {
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{4391})
require.NoError(t, err)
assert.Equal(t, ModuleSlotMid, slots[4391])
assert.Equal(t, types.ModuleSlotMid, slots[4391])
})
t.Run("Get slots for 4393-> Low", func(t *testing.T) {
// Drone Damage Amplifier I
@@ -57,7 +59,7 @@ func TestGetModuleSlots(t *testing.T) {
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{4393})
require.NoError(t, err)
assert.Equal(t, ModuleSlotLow, slots[4393])
assert.Equal(t, types.ModuleSlotLow, slots[4393])
})
t.Run("Get slots for 25908 -> Rig", func(t *testing.T) {
// Large Drone Control Range Augmentor I
@@ -65,17 +67,17 @@ func TestGetModuleSlots(t *testing.T) {
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{25908})
require.NoError(t, err)
assert.Equal(t, ModuleSlotRig, slots[25908])
assert.Equal(t, types.ModuleSlotRig, slots[25908])
})
t.Run("Get slots for all 3 combined -> High, Mid, Low, Rig", func(t *testing.T) {
db, err := GetDB()
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{11357, 4391, 4393, 25908})
require.NoError(t, err)
assert.Equal(t, ModuleSlotHigh, slots[11357])
assert.Equal(t, ModuleSlotMid, slots[4391])
assert.Equal(t, ModuleSlotLow, slots[4393])
assert.Equal(t, ModuleSlotRig, slots[25908])
assert.Equal(t, types.ModuleSlotHigh, slots[11357])
assert.Equal(t, types.ModuleSlotMid, slots[4391])
assert.Equal(t, types.ModuleSlotLow, slots[4393])
assert.Equal(t, types.ModuleSlotRig, slots[25908])
assert.Equal(t, 4, len(slots))
})
t.Run("Get slots for 2454 -> Drone", func(t *testing.T) {
@@ -84,7 +86,7 @@ func TestGetModuleSlots(t *testing.T) {
require.NoError(t, err)
slots, err := db.GetModuleSlots([]int64{2454})
require.NoError(t, err)
assert.Equal(t, ModuleSlotDrone, slots[2454])
assert.Equal(t, types.ModuleSlotDrone, slots[2454])
})
}

91
db/ddl-projections.sql Normal file
View File

@@ -0,0 +1,91 @@
-- Aggregating projections so full-table GROUP BY over items/attackers uses
-- pre-aggregated data instead of scanning all rows. Apply to existing DBs;
-- then run MATERIALIZE PROJECTION so existing data is projected (see end).
-- Queries must use uniq() (not count(DISTINCT)) so the optimizer picks these.
--
-- Refs: https://clickhouse.com/docs/data-modeling/projections
-- ReplacingMergeTree in 25.x requires this table setting to allow ADD PROJECTION.
-- 'rebuild' recomputes projection parts whenever a merge deduplicates rows,
-- keeping the pre-aggregates consistent with the surviving rows.
ALTER TABLE zkill.killmail_items MODIFY SETTING deduplicate_merge_projection_mode = 'rebuild';
ALTER TABLE zkill.killmail_attackers MODIFY SETTING deduplicate_merge_projection_mode = 'rebuild';
-- Matches: queryModuleBySlotType (slot_type, items_fitted, ships_with_slot, module_variety)
ALTER TABLE zkill.killmail_items
ADD PROJECTION IF NOT EXISTS proj_by_slot_type (
    SELECT
        slot_type,
        count() AS items_fitted,
        uniq(killmail_id) AS ships_with_slot,
        uniq(item_type_name) AS module_variety
    GROUP BY slot_type
);
-- Matches: queryModuleByModuleAllSlots / queryModuleByModule (per-slot module counts)
ALTER TABLE zkill.killmail_items
ADD PROJECTION IF NOT EXISTS proj_by_slot_module (
    SELECT
        slot_type,
        item_type_id,
        item_type_name,
        item_group_name,
        item_category_name,
        count() AS times_fitted,
        uniq(killmail_id) AS ships_with_module
    GROUP BY slot_type, item_type_id, item_type_name, item_group_name, item_category_name
);
-- Matches: queryPlayerByAttackerCharacter. Use uniq() so projection matches; query can use uniq for speed.
ALTER TABLE zkill.killmail_attackers
ADD PROJECTION IF NOT EXISTS proj_by_character (
    SELECT
        character_name,
        corporation_name,
        alliance_name,
        uniq(killmail_id) AS kills_participated,
        countIf(final_blow) AS final_blows,
        sum(damage_done) AS total_damage
    GROUP BY character_name, corporation_name, alliance_name
);
-- Matches: queryShipByAttacker
ALTER TABLE zkill.killmail_attackers
ADD PROJECTION IF NOT EXISTS proj_by_ship (
    SELECT
        ship_type_name,
        ship_group_name,
        count() AS times_used,
        uniq(killmail_id) AS kills_participated,
        sum(damage_done) AS total_damage,
        countIf(final_blow) AS final_blows
    GROUP BY ship_type_name, ship_group_name
);
-- Matches: queryPlayerByAttackerCorporation
ALTER TABLE zkill.killmail_attackers
ADD PROJECTION IF NOT EXISTS proj_by_corporation (
    SELECT
        corporation_name,
        alliance_name,
        uniq(killmail_id) AS kills_participated,
        uniq(character_name) AS members_involved
    GROUP BY corporation_name, alliance_name
);
-- Matches: queryPlayerByAttackerAlliance
ALTER TABLE zkill.killmail_attackers
ADD PROJECTION IF NOT EXISTS proj_by_alliance (
    SELECT
        alliance_name,
        uniq(killmail_id) AS kills_participated,
        uniq(corporation_name) AS corps_involved,
        uniq(character_name) AS members_involved
    GROUP BY alliance_name
);
-- Build projections for existing data (can be slow; run during low traffic).
-- NOTE(review): ADD PROJECTION only applies to newly inserted parts; until
-- these MATERIALIZE mutations complete, queries fall back to the base table.
ALTER TABLE zkill.killmail_items MATERIALIZE PROJECTION proj_by_slot_type;
ALTER TABLE zkill.killmail_items MATERIALIZE PROJECTION proj_by_slot_module;
ALTER TABLE zkill.killmail_attackers MATERIALIZE PROJECTION proj_by_character;
ALTER TABLE zkill.killmail_attackers MATERIALIZE PROJECTION proj_by_ship;
ALTER TABLE zkill.killmail_attackers MATERIALIZE PROJECTION proj_by_corporation;
ALTER TABLE zkill.killmail_attackers MATERIALIZE PROJECTION proj_by_alliance;

View File

@@ -0,0 +1,40 @@
-- Data skipping indices so ClickHouse can skip granules when your queries filter
-- on time, system, region, victim ship, etc. Apply to existing DBs; then run
-- MATERIALIZE INDEX so existing data gets indexed (see end of file).
--
-- Refs: https://clickhouse.com/docs/optimize/skipping-indexes
-- zkill.killmails: filters use kill_date, solar_system_id, region_name,
-- constellation_name, victim_ship_type_id, victim_character_name, etc.
-- NOTE(review): minmax indices only skip granules when the column's values
-- correlate with insert/sort order (true for kill_date if inserts are roughly
-- chronological; verify for solar_system_id / victim_ship_type_id).
ALTER TABLE zkill.killmails
ADD INDEX IF NOT EXISTS idx_kill_date kill_date TYPE minmax GRANULARITY 4;
ALTER TABLE zkill.killmails
ADD INDEX IF NOT EXISTS idx_solar_system_id solar_system_id TYPE minmax GRANULARITY 4;
ALTER TABLE zkill.killmails
ADD INDEX IF NOT EXISTS idx_victim_ship_type_id victim_ship_type_id TYPE minmax GRANULARITY 4;
ALTER TABLE zkill.killmails
ADD INDEX IF NOT EXISTS idx_region_name region_name TYPE bloom_filter(0.01) GRANULARITY 4;
ALTER TABLE zkill.killmails
ADD INDEX IF NOT EXISTS idx_constellation_name constellation_name TYPE bloom_filter(0.01) GRANULARITY 4;
-- hasModule subquery: SELECT ... FROM zkill.killmail_items WHERE item_type_id = ? AND slot_type != 'cargo'
ALTER TABLE zkill.killmail_items
ADD INDEX IF NOT EXISTS idx_item_type_id item_type_id TYPE minmax GRANULARITY 4;
-- Attacker queries filter by ship_type_name, character_name, etc. when filters are set.
ALTER TABLE zkill.killmail_attackers
ADD INDEX IF NOT EXISTS idx_ship_type_name ship_type_name TYPE bloom_filter(0.01) GRANULARITY 4;
-- Build indices for existing data (can be slow on large tables; run during low traffic).
ALTER TABLE zkill.killmails MATERIALIZE INDEX idx_kill_date;
ALTER TABLE zkill.killmails MATERIALIZE INDEX idx_solar_system_id;
ALTER TABLE zkill.killmails MATERIALIZE INDEX idx_victim_ship_type_id;
ALTER TABLE zkill.killmails MATERIALIZE INDEX idx_region_name;
ALTER TABLE zkill.killmails MATERIALIZE INDEX idx_constellation_name;
ALTER TABLE zkill.killmail_items MATERIALIZE INDEX idx_item_type_id;
ALTER TABLE zkill.killmail_attackers MATERIALIZE INDEX idx_ship_type_name;

View File

@@ -94,4 +94,24 @@ CREATE TABLE zkill.killmail_items
singleton Int32
)
ENGINE = ReplacingMergeTree()
ORDER BY (killmail_id, flag, item_type_id);
ORDER BY (killmail_id, flag, item_type_id);
-- 1) Read-only settings profile capping resource usage for external clients.
CREATE SETTINGS PROFILE IF NOT EXISTS readonly_prod_profile
SETTINGS
    readonly = 1,
    allow_ddl = 0,
    max_threads = 4,
    max_memory_usage = 8_000_000_000,
    max_execution_time = 60,
    max_rows_to_read = 1_000_000_000,
    max_result_rows = 10_000_000;
-- 2) User
-- SECURITY NOTE(review): a plaintext password is committed to source control
-- here; rotate it and inject it from a secret at deploy time.
CREATE USER IF NOT EXISTS zkill_ro_user
IDENTIFIED WITH sha256_password BY 'v2M9ZgjYkqypyS4ne9JtK7BYp6942uju'
SETTINGS PROFILE readonly_prod_profile;
-- 3) Grant: SELECT only, on exactly the three zkill fact tables.
GRANT SELECT ON zkill.killmails TO zkill_ro_user;
GRANT SELECT ON zkill.killmail_attackers TO zkill_ro_user;
GRANT SELECT ON zkill.killmail_items TO zkill_ro_user;

View File

@@ -34,8 +34,6 @@ services:
ports:
- "8123:8123"
- "9000:9000"
volumes:
- ./clickhouse_data:/var/lib/clickhouse
environment:
- CLICKHOUSE_DB=zkill
- CLICKHOUSE_USER=default

View File

@@ -1,5 +1,8 @@
services:
backend:
image: docker.site.quack-lab.dev/zkill-susser-backend:latest
volumes:
- sqlite-latest.sqlite:/sqlite-latest.sqlite
image: docker.site.quack-lab.dev/zkill-susser-backend:v1.1.0
environment:
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""

788
enrich.go
View File

@@ -1,788 +0,0 @@
package main
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sync"
	"time"

	"zkillsusser/models"

	logger "git.site.quack-lab.dev/dave/cylogger"
	utils "git.site.quack-lab.dev/dave/cyutils"
	"github.com/nsqio/go-nsq"
	"golang.org/x/sync/errgroup"
)
// FlatKillmail is the fully denormalized killmail row destined for
// zkill.killmails. Every ID is resolved to a display name up front so
// ClickHouse queries never need joins against the SDE.
// Timestamps are pre-formatted as "2006-01-02 15:04:05" strings by
// FlattenKillmail.
type FlatKillmail struct {
	KillmailID        int64   `json:"killmail_id"`
	KillmailHash      string  `json:"killmail_hash"`
	KillmailTime      string  `json:"killmail_time"`
	SolarSystemID     int32   `json:"solar_system_id"`
	SolarSystemName   string  `json:"solar_system_name"`
	ConstellationName string  `json:"constellation_name"`
	RegionName        string  `json:"region_name"`
	Security          float32 `json:"security"`
	VictimCharacterID int64   `json:"victim_character_id"`
	VictimCharacterName string `json:"victim_character_name"`
	VictimCorporationID int64 `json:"victim_corporation_id"`
	VictimCorporationName string `json:"victim_corporation_name"`
	// Pointer so a victim with no alliance serializes as null rather than 0.
	VictimAllianceID       *int64 `json:"victim_alliance_id"`
	VictimAllianceName     string `json:"victim_alliance_name"`
	VictimShipTypeID       int32  `json:"victim_ship_type_id"`
	VictimShipTypeName     string `json:"victim_ship_type_name"`
	VictimShipGroupName    string `json:"victim_ship_group_name"`
	VictimShipCategoryName string `json:"victim_ship_category_name"`
	VictimDamageTaken      int64  `json:"victim_damage_taken"`
	AttackerCount          uint16 `json:"attacker_count"`
	HTTPLastModified       string `json:"http_last_modified"`
}
// FlatKillmailAttacker is one denormalized attacker row for
// zkill.killmail_attackers, keyed back to its killmail by KillmailID.
type FlatKillmailAttacker struct {
	KillmailID      int64  `json:"killmail_id"`
	CharacterID     int64  `json:"character_id"`
	CharacterName   string `json:"character_name"`
	CorporationID   int64  `json:"corporation_id"`
	CorporationName string `json:"corporation_name"`
	// Pointer so an attacker with no alliance serializes as null rather than 0.
	AllianceID     *int64  `json:"alliance_id"`
	AllianceName   string  `json:"alliance_name"`
	ShipTypeID     int32   `json:"ship_type_id"`
	ShipTypeName   string  `json:"ship_type_name"`
	ShipGroupName  string  `json:"ship_group_name"`
	WeaponTypeID   int32   `json:"weapon_type_id"`
	WeaponTypeName string  `json:"weapon_type_name"`
	DamageDone     int64   `json:"damage_done"`
	FinalBlow      bool    `json:"final_blow"`
	SecurityStatus float32 `json:"security_status"`
}
// FlatKillmailItem is one denormalized dropped/destroyed item row for
// zkill.killmail_items.
type FlatKillmailItem struct {
	KillmailID          int64  `json:"killmail_id"`
	ItemTypeID          int32  `json:"item_type_id"`
	ItemTypeName        string `json:"item_type_name"`
	ItemGroupName       string `json:"item_group_name"`
	ItemCategoryName    string `json:"item_category_name"`
	ItemMarketGroupName string `json:"item_market_group_name"`
	Flag                int32  `json:"flag"`
	// NOTE(review): SlotType is never assigned by flattenItemType in this
	// file — presumably it is derived from Flag downstream; verify.
	SlotType          string `json:"slot_type"`
	QuantityDestroyed int64  `json:"quantity_destroyed"`
	QuantityDropped   int64  `json:"quantity_dropped"`
	Singleton         int32  `json:"singleton"`
}
// Cache is a concurrency-safe, grow-only memoization layer over a database
// lookup. Errors are never cached, so a failed lookup is retried on the
// next call for the same key.
type Cache[T any, K comparable] struct {
	m      sync.Map
	getter func(ctx context.Context, db DB, key K) (T, error)
	logger func(key K) *logger.Logger
}

// NewCache builds a Cache around getter; logger produces a per-key logger
// used only when a lookup actually misses and hits the database.
func NewCache[T any, K comparable](getter func(ctx context.Context, db DB, key K) (T, error), logger func(key K) *logger.Logger) *Cache[T, K] {
	return &Cache[T, K]{
		getter: getter,
		logger: logger,
	}
}

// Get returns the cached value for key, querying the database on a miss.
// Concurrent first requests for the same key may each reach the database,
// but every caller observes the same canonical value.
func (c *Cache[T, K]) Get(ctx context.Context, db DB, key K) (T, error) {
	var zero T
	if val, found := c.m.Load(key); found {
		return val.(T), nil
	}
	flog := c.logger(key)
	flog.Debug("Querying database")
	result, err := c.getter(ctx, db, key)
	if err != nil {
		flog.Error("Failed to get: %v", err)
		return zero, err
	}
	// LoadOrStore (rather than Store) keeps the first stored value, so racing
	// misses all hand out one canonical instance instead of the last writer's.
	actual, _ := c.m.LoadOrStore(key, result)
	flog.Debug("Cached")
	return actual.(T), nil
}
// FlatCache bundles one Cache per SDE/map entity kind used while flattening
// killmails (see globalFlatCache for the process-wide instance).
type FlatCache struct {
	types          *Cache[*models.InvType, int32]
	groups         *Cache[*models.InvGroup, int32]
	categories     *Cache[*models.InvCategory, int32]
	marketGroups   *Cache[*models.InvMarketGroup, int32]
	systems        *Cache[*models.MapSolarSystem, int32]
	constellations *Cache[*models.MapConstellation, int32]
	regions        *Cache[*models.MapRegion, int32]
}
// Thin adapters giving the DB accessors the getter signature Cache expects.
func getTypeFromDB(ctx context.Context, db DB, typeID int32) (*models.InvType, error) {
	return db.GetType(ctx, typeID)
}
func getGroupFromDB(ctx context.Context, db DB, groupID int32) (*models.InvGroup, error) {
	return db.GetGroup(ctx, groupID)
}
func getCategoryFromDB(ctx context.Context, db DB, categoryID int32) (*models.InvCategory, error) {
	return db.GetCategory(ctx, categoryID)
}
func getMarketGroupFromDB(ctx context.Context, db DB, marketGroupID int32) (*models.InvMarketGroup, error) {
	return db.GetMarketGroup(ctx, marketGroupID)
}
func getSolarSystemFromDB(ctx context.Context, db DB, systemID int32) (*models.MapSolarSystem, error) {
	return db.GetSolarSystem(ctx, systemID)
}
func getConstellationFromDB(ctx context.Context, db DB, constellationID int32) (*models.MapConstellation, error) {
	return db.GetConstellation(ctx, constellationID)
}
func getRegionFromDB(ctx context.Context, db DB, regionID int32) (*models.MapRegion, error) {
	return db.GetRegion(ctx, regionID)
}
// globalFlatCache is the process-wide lookup cache shared by all flatten
// goroutines; each sub-cache gets a per-key logger prefix for tracing misses.
var globalFlatCache = &FlatCache{
	types: NewCache(getTypeFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getType").WithPrefix(fmt.Sprintf("type_%d", key))
	}),
	groups: NewCache(getGroupFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getGroup").WithPrefix(fmt.Sprintf("group_%d", key))
	}),
	categories: NewCache(getCategoryFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getCategory").WithPrefix(fmt.Sprintf("category_%d", key))
	}),
	marketGroups: NewCache(getMarketGroupFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getMarketGroup").WithPrefix(fmt.Sprintf("marketgroup_%d", key))
	}),
	systems: NewCache(getSolarSystemFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getSolarSystem").WithPrefix(fmt.Sprintf("system_%d", key))
	}),
	constellations: NewCache(getConstellationFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getConstellation").WithPrefix(fmt.Sprintf("constellation_%d", key))
	}),
	regions: NewCache(getRegionFromDB, func(key int32) *logger.Logger {
		return logger.Default.WithPrefix("getRegion").WithPrefix(fmt.Sprintf("region_%d", key))
	}),
}
// FlattenKillmail denormalizes a killmail into one killmail row, one row per
// attacker, and one row per item, resolving every ID to its display name via
// the global caches and ESI. System/victim resolution runs concurrently, then
// attackers are flattened in parallel and items sequentially. Any resolver
// error aborts the whole flatten.
func FlattenKillmail(db DB, killmail Killmail) (*FlatKillmail, []FlatKillmailAttacker, []FlatKillmailItem, error) {
	flog := logger.Default.WithPrefix("FlattenKillmail").WithPrefix(fmt.Sprintf("killmail_%d", killmail.KillmailID))
	flat := &FlatKillmail{
		KillmailID:       killmail.KillmailID,
		KillmailHash:     killmail.KillmailHash,
		KillmailTime:     killmail.KillmailTime.Format("2006-01-02 15:04:05"),
		HTTPLastModified: killmail.HTTPLastModified.Format("2006-01-02 15:04:05"),
		AttackerCount:    uint16(len(killmail.Attackers)),
	}
	// BUG FIX: the context returned by errgroup.WithContext is canceled as
	// soon as Wait returns — even on success. The original reused that
	// context for the attacker group and the item lookups, so everything
	// after the first Wait ran under a canceled context. Derive each phase
	// from a long-lived parent instead.
	parent := context.Background()
	g, gctx := errgroup.WithContext(parent)
	g.Go(func() error {
		start := time.Now()
		err := flattenSolarSystem(gctx, db, int32(killmail.SolarSystemID), flat)
		flog.Debug("flattenSolarSystem took %v", time.Since(start))
		return err
	})
	g.Go(func() error {
		start := time.Now()
		err := flattenVictim(gctx, db, killmail.Victim, flat)
		flog.Debug("flattenVictim took %v", time.Since(start))
		return err
	})
	if err := g.Wait(); err != nil {
		flog.Error("Failed to flatten killmail: %v", err)
		return nil, nil, nil, err
	}
	flog.Debug("Flattening %d attackers", len(killmail.Attackers))
	// Each goroutine writes only its own index, so no extra locking is needed.
	attackers := make([]FlatKillmailAttacker, len(killmail.Attackers))
	g2, ctx2 := errgroup.WithContext(parent)
	for i, attacker := range killmail.Attackers {
		i, attacker := i, attacker // capture loop variables
		g2.Go(func() error {
			attackerLog := flog.WithPrefix(fmt.Sprintf("attacker_%d", i))
			flatAttacker, err := flattenAttacker(ctx2, db, killmail.KillmailID, attacker)
			if err != nil {
				attackerLog.Error("Failed to flatten attacker: %v", err)
				return err
			}
			attackers[i] = *flatAttacker
			return nil
		})
	}
	if err := g2.Wait(); err != nil {
		return nil, nil, nil, err
	}
	flog.Debug("Flattening %d items", len(killmail.Victim.Items))
	items := make([]FlatKillmailItem, 0, len(killmail.Victim.Items))
	for i, item := range killmail.Victim.Items {
		itemLog := flog.WithPrefix(fmt.Sprintf("item_%d", i))
		flatItem, err := flattenItemType(parent, db, killmail.KillmailID, item)
		if err != nil {
			itemLog.Error("Failed to flatten item: %v", err)
			return nil, nil, nil, err
		}
		items = append(items, *flatItem)
	}
	return flat, attackers, items, nil
}
// flattenSolarSystem fills in the solar system, constellation and region
// columns of flat by walking system -> constellation -> region through the
// global caches.
func flattenSolarSystem(ctx context.Context, db DB, systemID int32, flat *FlatKillmail) error {
	log := logger.Default.WithPrefix("flattenSolarSystem").WithPrefix(fmt.Sprintf("system_%d", systemID))
	log.Debug("Fetching solar system")
	sys, err := globalFlatCache.systems.Get(ctx, db, systemID)
	if err != nil {
		return err
	}
	flat.SolarSystemID = sys.SolarSystemID
	flat.SolarSystemName = sys.SolarSystemName
	flat.Security = sys.Security
	log.Debug("Fetching constellation %d", sys.ConstellationID)
	cons, err := globalFlatCache.constellations.Get(ctx, db, sys.ConstellationID)
	if err != nil {
		return err
	}
	flat.ConstellationName = cons.ConstellationName
	log.Debug("Fetching region %d", cons.RegionID)
	reg, err := globalFlatCache.regions.Get(ctx, db, cons.RegionID)
	if err != nil {
		return err
	}
	flat.RegionName = reg.RegionName
	return nil
}
// flattenVictim copies the victim's IDs onto flat and resolves its display
// names concurrently: character/corporation/alliance names via the memoized
// ESI lookups, ship type/group/category names via the SDE caches.
//
// ESI name failures are deliberately non-fatal — the error is logged at debug
// level and the name left empty — while SDE lookup failures abort the whole
// flatten.
func flattenVictim(ctx context.Context, db DB, victim Victim, flat *FlatKillmail) error {
	flog := logger.Default.WithPrefix("flattenVictim")
	flog.Debug("Starting victim flattening")
	flat.VictimCharacterID = victim.CharacterID
	flat.VictimCorporationID = victim.CorporationID
	// An AllianceID of 0 means "no alliance"; leave the pointer nil then.
	if victim.AllianceID != 0 {
		flat.VictimAllianceID = &victim.AllianceID
	}
	flat.VictimShipTypeID = int32(victim.ShipTypeID)
	flat.VictimDamageTaken = victim.DamageTaken
	g, ctx := errgroup.WithContext(ctx)
	if victim.CharacterID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching character name for ID %d", victim.CharacterID)
			name, err := getCharacterName(victim.CharacterID)
			if err != nil {
				// Best effort: log and fall through with an empty name.
				flog.Debug("Character name fetch failed: %v", err)
			}
			flat.VictimCharacterName = name
			if name != "" {
				flog.Debug("Got character name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Character name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	if victim.CorporationID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching corporation name for ID %d", victim.CorporationID)
			name, err := getCorporationName(victim.CorporationID)
			if err != nil {
				flog.Debug("Corporation name fetch failed: %v", err)
			}
			flat.VictimCorporationName = name
			if name != "" {
				flog.Debug("Got corporation name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Corporation name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	if victim.AllianceID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching alliance name for ID %d", victim.AllianceID)
			name, err := getAllianceName(victim.AllianceID)
			if err != nil {
				flog.Debug("Alliance name fetch failed: %v", err)
			}
			flat.VictimAllianceName = name
			if name != "" {
				flog.Debug("Got alliance name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Alliance name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	// The SDE lookups below return their error, so any failure cancels the group.
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching ship type name for ID %d", victim.ShipTypeID)
		typeName, err := flattenTypeName(ctx, db, int32(victim.ShipTypeID))
		if err != nil {
			return err
		}
		flat.VictimShipTypeName = typeName
		flog.Debug("Got ship type name: %s (took %v)", typeName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching ship group name for type ID %d", victim.ShipTypeID)
		groupName, err := flattenGroupName(ctx, db, int32(victim.ShipTypeID))
		if err != nil {
			return err
		}
		flat.VictimShipGroupName = groupName
		flog.Debug("Got ship group name: %s (took %v)", groupName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching ship category name for type ID %d", victim.ShipTypeID)
		categoryName, err := flattenCategoryName(ctx, db, int32(victim.ShipTypeID))
		if err != nil {
			return err
		}
		flat.VictimShipCategoryName = categoryName
		flog.Debug("Got ship category name: %s (took %v)", categoryName, time.Since(start))
		return nil
	})
	if err := g.Wait(); err != nil {
		flog.Error("Failed to flatten victim: %v", err)
		return err
	}
	return nil
}
// flattenAttacker builds one FlatKillmailAttacker row, resolving names
// concurrently: character/corporation/alliance via the memoized ESI lookups
// (best effort — failures only leave the name empty) and ship/weapon type
// names via the SDE caches (failures abort the attacker).
func flattenAttacker(ctx context.Context, db DB, killmailID int64, attacker Attacker) (*FlatKillmailAttacker, error) {
	flog := logger.Default.WithPrefix("flattenAttacker").WithPrefix(fmt.Sprintf("character_%d", attacker.CharacterID))
	flog.Debug("Starting attacker flattening")
	flat := &FlatKillmailAttacker{
		KillmailID:     killmailID,
		CharacterID:    attacker.CharacterID,
		CorporationID:  attacker.CorporationID,
		ShipTypeID:     int32(attacker.ShipTypeID),
		WeaponTypeID:   int32(attacker.WeaponTypeID),
		DamageDone:     attacker.DamageDone,
		FinalBlow:      attacker.FinalBlow,
		SecurityStatus: float32(attacker.SecurityStatus),
	}
	// An AllianceID of 0 means "no alliance"; leave the pointer nil then.
	if attacker.AllianceID != 0 {
		flat.AllianceID = &attacker.AllianceID
	}
	g, ctx := errgroup.WithContext(ctx)
	if attacker.CharacterID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching character name")
			name, err := getCharacterName(attacker.CharacterID)
			if err != nil {
				// Best effort: log and fall through with an empty name.
				flog.Debug("Character name fetch failed: %v", err)
			}
			flat.CharacterName = name
			if name != "" {
				flog.Debug("Got character name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Character name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	if attacker.CorporationID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching corporation name")
			name, err := getCorporationName(attacker.CorporationID)
			if err != nil {
				flog.Debug("Corporation name fetch failed: %v", err)
			}
			flat.CorporationName = name
			if name != "" {
				flog.Debug("Got corporation name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Corporation name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	if attacker.AllianceID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching alliance name")
			name, err := getAllianceName(attacker.AllianceID)
			if err != nil {
				flog.Debug("Alliance name fetch failed: %v", err)
			}
			flat.AllianceName = name
			if name != "" {
				flog.Debug("Got alliance name: %s (took %v)", name, time.Since(start))
			} else {
				flog.Debug("Alliance name empty (took %v)", time.Since(start))
			}
			return nil
		})
	}
	// SDE lookups return their error, so any failure cancels the group.
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching ship type name for ID %d", attacker.ShipTypeID)
		typeName, err := flattenTypeName(ctx, db, int32(attacker.ShipTypeID))
		if err != nil {
			return err
		}
		flat.ShipTypeName = typeName
		flog.Debug("Got ship type name: %s (took %v)", typeName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching ship group name for type ID %d", attacker.ShipTypeID)
		groupName, err := flattenGroupName(ctx, db, int32(attacker.ShipTypeID))
		if err != nil {
			return err
		}
		flat.ShipGroupName = groupName
		flog.Debug("Got ship group name: %s (took %v)", groupName, time.Since(start))
		return nil
	})
	// Structures/NPCs may have no weapon type; skip the lookup entirely then.
	if attacker.WeaponTypeID != 0 {
		g.Go(func() error {
			start := time.Now()
			flog.Debug("Fetching weapon type name for ID %d", attacker.WeaponTypeID)
			typeName, err := flattenTypeName(ctx, db, int32(attacker.WeaponTypeID))
			if err != nil {
				return err
			}
			flat.WeaponTypeName = typeName
			flog.Debug("Got weapon type name: %s (took %v)", typeName, time.Since(start))
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		flog.Error("Failed to flatten attacker: %v", err)
		return nil, err
	}
	return flat, nil
}
// flattenItemType builds one FlatKillmailItem row, resolving the item's
// type/group/category/market-group names concurrently from the SDE caches.
// Any lookup failure aborts the item. Note SlotType is not populated here.
func flattenItemType(ctx context.Context, db DB, killmailID int64, item Item) (*FlatKillmailItem, error) {
	flog := logger.Default.WithPrefix("flattenItemType").WithPrefix(fmt.Sprintf("item_%d", item.ItemTypeID))
	flog.Debug("Starting item flattening")
	flat := &FlatKillmailItem{
		KillmailID: killmailID,
		ItemTypeID: int32(item.ItemTypeID),
		Flag:       int32(item.Flag),
		// Quantities arrive as nullable pointers; derefInt64 maps nil to 0.
		QuantityDestroyed: derefInt64(item.QuantityDestroyed),
		QuantityDropped:   derefInt64(item.QuantityDropped),
		Singleton:         int32(item.Singleton),
	}
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching item type name")
		typeName, err := flattenTypeName(ctx, db, int32(item.ItemTypeID))
		if err != nil {
			return err
		}
		flat.ItemTypeName = typeName
		flog.Debug("Got item type name: %s (took %v)", typeName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching item group name")
		groupName, err := flattenGroupName(ctx, db, int32(item.ItemTypeID))
		if err != nil {
			return err
		}
		flat.ItemGroupName = groupName
		flog.Debug("Got item group name: %s (took %v)", groupName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching item category name")
		categoryName, err := flattenCategoryName(ctx, db, int32(item.ItemTypeID))
		if err != nil {
			return err
		}
		flat.ItemCategoryName = categoryName
		flog.Debug("Got item category name: %s (took %v)", categoryName, time.Since(start))
		return nil
	})
	g.Go(func() error {
		start := time.Now()
		flog.Debug("Fetching item market group name")
		marketGroupName, err := flattenMarketGroupName(ctx, db, int32(item.ItemTypeID))
		if err != nil {
			return err
		}
		flat.ItemMarketGroupName = marketGroupName
		flog.Debug("Got item market group name: %s (took %v)", marketGroupName, time.Since(start))
		return nil
	})
	if err := g.Wait(); err != nil {
		flog.Error("Failed to flatten item: %v", err)
		return nil, err
	}
	return flat, nil
}
// flattenTypeName resolves an SDE type ID to its display name via the cache.
func flattenTypeName(ctx context.Context, db DB, typeID int32) (string, error) {
	log := logger.Default.WithPrefix("flattenTypeName").WithPrefix(fmt.Sprintf("type_%d", typeID))
	log.Debug("Fetching type name")
	t, err := globalFlatCache.types.Get(ctx, db, typeID)
	if err != nil {
		return "", err
	}
	log.Debug("Got type name: %s", t.TypeName)
	return t.TypeName, nil
}
// flattenGroupName resolves a type ID to the name of its inventory group
// (type -> group, both via the global caches).
func flattenGroupName(ctx context.Context, db DB, typeID int32) (string, error) {
	log := logger.Default.WithPrefix("flattenGroupName").WithPrefix(fmt.Sprintf("type_%d", typeID))
	log.Debug("Fetching group name")
	t, err := globalFlatCache.types.Get(ctx, db, typeID)
	if err != nil {
		return "", err
	}
	grp, err := globalFlatCache.groups.Get(ctx, db, t.GroupID)
	if err != nil {
		return "", err
	}
	log.Debug("Got group name: %s", grp.GroupName)
	return grp.GroupName, nil
}
// flattenCategoryName resolves a type ID to the name of its inventory
// category (type -> group -> category, all via the global caches).
func flattenCategoryName(ctx context.Context, db DB, typeID int32) (string, error) {
	log := logger.Default.WithPrefix("flattenCategoryName").WithPrefix(fmt.Sprintf("type_%d", typeID))
	log.Debug("Fetching category name")
	t, err := globalFlatCache.types.Get(ctx, db, typeID)
	if err != nil {
		return "", err
	}
	grp, err := globalFlatCache.groups.Get(ctx, db, t.GroupID)
	if err != nil {
		return "", err
	}
	cat, err := globalFlatCache.categories.Get(ctx, db, grp.CategoryID)
	if err != nil {
		return "", err
	}
	log.Debug("Got category name: %s", cat.CategoryName)
	return cat.CategoryName, nil
}
// flattenMarketGroupName resolves a type ID to its market group name.
// Types without a market group (MarketGroupID == 0) yield "" with no error.
func flattenMarketGroupName(ctx context.Context, db DB, typeID int32) (string, error) {
	log := logger.Default.WithPrefix("flattenMarketGroupName").WithPrefix(fmt.Sprintf("type_%d", typeID))
	log.Debug("Fetching market group name")
	t, err := globalFlatCache.types.Get(ctx, db, typeID)
	if err != nil {
		return "", err
	}
	if t.MarketGroupID == 0 {
		log.Debug("Type has no market group")
		return "", nil
	}
	mg, err := globalFlatCache.marketGroups.Get(ctx, db, t.MarketGroupID)
	if err != nil {
		return "", err
	}
	log.Debug("Got market group name: %s", mg.MarketGroupName)
	return mg.MarketGroupName, nil
}
// namedEntity abstracts the ESI payloads that carry a display name, letting
// getName handle characters, corporations and alliances generically.
type namedEntity interface {
	GetName() string
}

// Adapters exposing each ESI response type's Name field through namedEntity.
func (c Character) GetName() string   { return c.Name }
func (c Corporation) GetName() string { return c.Name }
func (a Alliance) GetName() string    { return a.Name }
var (
	// flattenerConsumer and flattenerMaxInFlight are set by the flattener
	// stage so pauseConsumer can throttle the consumer and later restore it.
	flattenerConsumer    *nsq.Consumer
	flattenerMaxInFlight int
	// flattenerPaused prevents stacking multiple pause windows; it and the
	// consumer fields are guarded by flattenerPauseMu.
	flattenerPaused  bool
	flattenerPauseMu sync.Mutex
	// Memoized ESI name lookup functions
	getCharacterNameMemo   func(int64) (string, error)
	getCorporationNameMemo func(int64) (string, error)
	getAllianceNameMemo    func(int64) (string, error)
)
// init wires up the memoized ESI name resolvers around the raw *Impl lookups.
func init() {
	// Use MemoizedBloom to only cache names that appear multiple times
	// Capacity: 100k unique IDs, false positive rate: 1%
	getCharacterNameMemo = utils.MemoizedBloom(getCharacterNameImpl, 100000, 0.01).(func(int64) (string, error))
	getCorporationNameMemo = utils.MemoizedBloom(getCorporationNameImpl, 100000, 0.01).(func(int64) (string, error))
	// Alliances are far fewer than characters/corps, so a smaller filter suffices.
	getAllianceNameMemo = utils.MemoizedBloom(getAllianceNameImpl, 50000, 0.01).(func(int64) (string, error))
}
// pauseConsumer throttles the flattener's NSQ consumer to zero in-flight
// messages for 15 minutes, then restores the configured MaxInFlight.
// Repeated calls while already paused are no-ops, so overlapping rate-limit
// hits do not stack pause windows.
func pauseConsumer() {
	flattenerPauseMu.Lock()
	defer flattenerPauseMu.Unlock()
	if flattenerPaused || flattenerConsumer == nil {
		return
	}
	flattenerConsumer.ChangeMaxInFlight(0)
	flattenerPaused = true
	logger.Default.Info("Paused NSQ consumer due to rate limit for 15 minutes")
	// Schedule the resume; AfterFunc runs the callback on its own goroutine.
	time.AfterFunc(15*time.Minute, func() {
		flattenerPauseMu.Lock()
		defer flattenerPauseMu.Unlock()
		if flattenerConsumer != nil {
			flattenerConsumer.ChangeMaxInFlight(flattenerMaxInFlight)
			flattenerPaused = false
			logger.Default.Info("Resumed NSQ consumer after rate limit pause")
		}
	})
}
// getName fetches the display name of an ESI entity (character, corporation
// or alliance) through the caching proxy.
//
// Returns ("", nil) when the entity does not exist (404) or on rate limit /
// server errors — in the latter case the NSQ consumer is paused so the
// message is retried later; the name is simply left missing for now.
func getName[T namedEntity](entityType, cachePrefix string, entityID int64) (string, error) {
	flog := logger.Default.WithPrefix(fmt.Sprintf("get%sName", entityType)).WithPrefix(fmt.Sprintf("%s_%d", cachePrefix, entityID))
	esiURL := fmt.Sprintf("https://esi.evetech.net/%s/%d", cachePrefix, entityID)
	// BUG FIX: escape the target so it survives as a single `url` query
	// parameter; the raw URL contains ':' and '/' and was passed unescaped.
	proxyURL := fmt.Sprintf("https://proxy.site.quack-lab.dev?url=%s", url.QueryEscape(esiURL))
	flog.Debug("Fetching %s name from ESI", entityType)
	flog.Debug("ESI URL: %s", esiURL)
	flog.Debug("Proxy URL: %s", proxyURL)
	// http.Get uses the zero-timeout default client; a stalled proxy would
	// block a flattener goroutine forever. A Client with only Timeout set
	// still shares http.DefaultTransport's connection pool.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(proxyURL)
	if err != nil {
		flog.Debug("%s request failed: %v", entityType, err)
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			flog.Debug("Failed to read response body: %v", err)
			return "", err
		}
		var entity T
		if err := json.Unmarshal(body, &entity); err != nil {
			flog.Debug("Failed to unmarshal response: %v", err)
			return "", err
		}
		flog.Debug("Successfully got %s name: %s", entityType, entity.GetName())
		return entity.GetName(), nil
	}
	// Drain the body on every non-200 path so the connection can be reused.
	io.Copy(io.Discard, resp.Body)
	if resp.StatusCode == http.StatusNotFound {
		flog.Debug("%s not found (404)", entityType)
		return "", nil
	}
	// BUG FIX: the original checked StatusTooManyRequests and 429 separately —
	// they are the same constant.
	if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode >= 500 {
		flog.Debug("Rate limited or server error (status %d), pausing consumer", resp.StatusCode)
		pauseConsumer()
		return "", nil
	}
	flog.Debug("%s request failed with status %d", entityType, resp.StatusCode)
	return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
// Public entry points route through the memoized lookups built in init.
func getCharacterName(characterID int64) (string, error) {
	return getCharacterNameMemo(characterID)
}
func getCorporationName(corporationID int64) (string, error) {
	return getCorporationNameMemo(corporationID)
}
func getAllianceName(allianceID int64) (string, error) {
	return getAllianceNameMemo(allianceID)
}

// The *Impl functions are the uncached ESI fetches wrapped by the memoizers.
func getCharacterNameImpl(characterID int64) (string, error) {
	return getName[Character]("Character", "character", characterID)
}
func getCorporationNameImpl(corporationID int64) (string, error) {
	return getName[Corporation]("Corporation", "corporation", corporationID)
}
func getAllianceNameImpl(allianceID int64) (string, error) {
	return getName[Alliance]("Alliance", "alliance", allianceID)
}

View File

@@ -1,121 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
logger "git.site.quack-lab.dev/dave/cylogger"
utils "git.site.quack-lab.dev/dave/cyutils"
"github.com/nsqio/go-nsq"
)
// runFileReaderStage scans ./data for .bz2 killmail dumps and publishes every
// killmail to the "killmail-queue" NSQ topic, processing up to stage1Workers
// files in parallel.
func runFileReaderStage() {
	logger.Info("Starting file reader stage")
	killmailFiles, err := os.ReadDir("data")
	if err != nil {
		logger.Error("Failed to read data directory: %v", err)
		return
	}
	var filesToProcess []string
	for _, file := range killmailFiles {
		if strings.HasSuffix(file.Name(), ".bz2") {
			filesToProcess = append(filesToProcess, filepath.Join("data", file.Name()))
		}
	}
	logger.Info("Found %d files to process", len(filesToProcess))
	// NOTE(review): this context is never canceled, so the ctx.Err() guards
	// below are currently inert — they only matter once a cancelable context
	// is wired in.
	ctx := context.Background()
	utils.WithWorkers(stage1Workers, filesToProcess, func(worker int, index int, filePath string) {
		if ctx.Err() != nil {
			return
		}
		fileLog := logger.Default.
			WithPrefix(fmt.Sprintf("worker %d", worker)).
			WithPrefix(fmt.Sprintf("file %d of %d", index+1, len(filesToProcess))).
			WithPrefix(filepath.Base(filePath))
		fileLog.Info("Processing file")
		// One producer per file keeps publish failures isolated per worker.
		config := nsq.NewConfig()
		config.WriteTimeout = 10 * time.Second
		config.DialTimeout = 5 * time.Second
		producer, err := nsq.NewProducer(fmt.Sprintf("%s:%d", nsqHost, nsqPort), config)
		if err != nil {
			fileLog.Error("Error creating producer: %v", err)
			return
		}
		defer producer.Stop()
		handler := &FileReaderHandler{
			producer: producer,
			workerID: worker,
		}
		err = handler.processFile(ctx, filePath)
		if err != nil {
			fileLog.Error("Failed to process file: %v", err)
			return
		}
		fileLog.Info("Finished processing file")
	})
	logger.Info("File reader stage completed")
}
// FileReaderHandler publishes the killmails of one dump file to NSQ;
// workerID is only used to prefix log output.
type FileReaderHandler struct {
	producer *nsq.Producer
	workerID int
}
// processFile loads every killmail from the bz2 dump at filePath and
// publishes each one to the "killmail-queue" NSQ topic, retrying a failed
// publish once a second until it succeeds. Killmails that fail to marshal
// are logged and skipped.
func (h *FileReaderHandler) processFile(ctx context.Context, filePath string) error {
	messagelog := logger.Default.WithPrefix(fmt.Sprintf("worker_%d", h.workerID)).WithPrefix(filepath.Base(filePath))
	messagelog.Info("Loading killmails from %s", filePath)
	killmails, err := LoadBz2Killmails(filePath)
	if err != nil {
		messagelog.Error("Failed to load killmails: %v", err)
		return err
	}
	messagelog.Info("Loaded %d killmails, publishing to NSQ", len(killmails))
	var published int
	for i, km := range killmails {
		if err := ctx.Err(); err != nil {
			return err
		}
		payload, marshalErr := json.Marshal(km)
		if marshalErr != nil {
			messagelog.Error("Failed to marshal killmail: %v", marshalErr)
			continue
		}
		// Block until this killmail is actually accepted by NSQ.
		for {
			pubErr := h.producer.Publish("killmail-queue", payload)
			if pubErr == nil {
				break
			}
			messagelog.Error("Failed to publish killmail, retrying: %v", pubErr)
			time.Sleep(1 * time.Second)
		}
		published++
		if published%1000 == 0 {
			messagelog.Info("Published %d killmails (%d/%d)", published, i+1, len(killmails))
		}
	}
	messagelog.Info("Published %d killmails", published)
	return nil
}

View File

@@ -1,142 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"sync/atomic"
"time"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/nsqio/go-nsq"
)
// flattenerProcessedCount counts killmails flattened across all handler
// goroutines; updated atomically.
var flattenerProcessedCount int64

// runFlattenerStage consumes raw killmails from the "killmail-queue" NSQ
// topic, flattens them, and republishes the flat form to
// "flat-killmail-queue", then blocks forever. stage2Workers, nsqHost,
// nsqPort and the flattenerConsumer/flattenerMaxInFlight globals are
// defined elsewhere in this package.
func runFlattenerStage() {
	logger.Info("Starting flattener stage")
	db, err := GetDB()
	if err != nil {
		logger.Error("Failed to get database: %v", err)
		return
	}
	config := nsq.NewConfig()
	// MaxAttempts = 0 disables the attempt limit: messages are requeued
	// until they succeed.
	config.MaxAttempts = 0
	config.MaxInFlight = stage2Workers
	config.MsgTimeout = 300 * time.Second
	consumer, err := nsq.NewConsumer("killmail-queue", "flattener", config)
	if err != nil {
		logger.Error("Error creating consumer: %v", err)
		return
	}
	// Store consumer reference and MaxInFlight for rate limit pausing
	flattenerConsumer = consumer
	flattenerMaxInFlight = stage2Workers
	producer, err := nsq.NewProducer(fmt.Sprintf("%s:%d", nsqHost, nsqPort), nsq.NewConfig())
	if err != nil {
		logger.Error("Error creating producer: %v", err)
		return
	}
	defer producer.Stop()
	// Ping to establish connection
	if err := producer.Ping(); err != nil {
		logger.Error("Failed to ping NSQ producer: %v", err)
		return
	}
	// Register one handler per worker before connecting; go-nsq runs each
	// registered handler concurrently.
	for i := 0; i < stage2Workers; i++ {
		handler := &FlattenerHandler{
			db:       db,
			producer: producer,
			workerID: i,
		}
		consumer.AddHandler(handler)
	}
	err = consumer.ConnectToNSQD(fmt.Sprintf("%s:%d", nsqHost, nsqPort))
	if err != nil {
		logger.Error("Error connecting to NSQ: %v", err)
		return
	}
	logger.Info("Connected to NSQ at %s:%d", nsqHost, nsqPort)
	select {} // Block forever, terminate immediately on Ctrl-C
}
// FlattenerHandler flattens one raw killmail per NSQ message and
// republishes the result downstream.
type FlattenerHandler struct {
	db       DB            // used by FlattenKillmail for lookups
	producer *nsq.Producer // shared across handlers; owned by runFlattenerStage
	workerID int           // used only for log prefixes
}
// HandleMessage flattens one raw killmail and republishes it to
// "flat-killmail-queue". Returning a non-nil error makes NSQ requeue the
// message (MaxAttempts is 0 in this stage, so retries are unbounded).
func (h *FlattenerHandler) HandleMessage(message *nsq.Message) error {
	messagelog := logger.Default.WithPrefix(fmt.Sprintf("worker_%d", h.workerID)).WithPrefix(fmt.Sprintf("attempts=%d", message.Attempts))
	var killmail Killmail
	err := json.Unmarshal(message.Body, &killmail)
	if err != nil {
		messagelog.Error("Error unmarshalling killmail: %v", err)
		return err
	}
	// Touch the message once a second while this handler runs so slow
	// flatten/publish work does not exceed the 300s MsgTimeout; the
	// deferred cancel stops the goroutine on every return path.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				message.Touch()
			case <-ctx.Done():
				return
			}
		}
	}()
	messagelog = messagelog.WithPrefix(fmt.Sprintf("killmail_%d", killmail.KillmailID))
	flatKillmail, flatAttackers, flatItems, err := FlattenKillmail(h.db, killmail)
	if err != nil {
		messagelog.Error("Failed to flatten killmail: %v", err)
		return err
	}
	flatMsg := FlatKillmailMessage{
		Killmail:  flatKillmail,
		Attackers: flatAttackers,
		Items:     flatItems,
	}
	flatMsgBytes, err := json.Marshal(flatMsg)
	if err != nil {
		messagelog.Error("Failed to marshal flattened killmail: %v", err)
		return err
	}
	// Publish is retried forever; only a successful publish lets the
	// message finish.
	for {
		err = h.producer.Publish("flat-killmail-queue", flatMsgBytes)
		if err == nil {
			break
		}
		messagelog.Error("Failed to publish flattened killmail, retrying: %v", err)
		time.Sleep(1 * time.Second)
	}
	count := atomic.AddInt64(&flattenerProcessedCount, 1)
	if count%1000 == 0 {
		logger.Info("Processed %d killmails", count)
	}
	// NOTE(review): explicit Finish() followed by returning nil relies on
	// go-nsq ignoring a second response for the same message — confirm.
	message.Finish()
	return nil
}

View File

@@ -1,106 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"sync/atomic"
"time"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/nsqio/go-nsq"
)
// inserterProcessedCount counts killmails written by all inserter
// handlers; updated atomically.
var inserterProcessedCount int64

// runInserterStage consumes flattened killmails from "flat-killmail-queue"
// and writes them to the database with stage3Workers concurrent handlers,
// then blocks forever. stage3Workers, nsqHost and nsqPort are package-level
// configuration defined elsewhere in this package.
func runInserterStage() {
	logger.Info("Starting inserter stage")
	db, err := GetDB()
	if err != nil {
		logger.Error("Failed to get database: %v", err)
		return
	}
	config := nsq.NewConfig()
	// MaxAttempts = 0 disables the attempt limit: failed messages are
	// requeued until they succeed.
	config.MaxAttempts = 0
	config.MaxInFlight = stage3Workers
	config.MsgTimeout = 300 * time.Second
	consumer, err := nsq.NewConsumer("flat-killmail-queue", "inserter", config)
	if err != nil {
		logger.Error("Error creating consumer: %v", err)
		return
	}
	// Register one handler per worker before connecting.
	for i := 0; i < stage3Workers; i++ {
		handler := &InserterHandler{
			db:       db,
			workerID: i,
		}
		consumer.AddHandler(handler)
	}
	err = consumer.ConnectToNSQD(fmt.Sprintf("%s:%d", nsqHost, nsqPort))
	if err != nil {
		logger.Error("Error connecting to NSQ: %v", err)
		return
	}
	logger.Info("Connected to NSQ at %s:%d", nsqHost, nsqPort)
	select {} // Block forever, terminate immediately on Ctrl-C
}
// InserterHandler persists one flattened killmail per NSQ message.
type InserterHandler struct {
	db       DB  // destination for SaveFlatKillmails
	workerID int // used only for log prefixes
}
// HandleMessage persists a single flattened killmail. A non-nil return
// requeues the message; the message is touched once a second while the
// save runs so it cannot time out mid-insert.
func (h *InserterHandler) HandleMessage(message *nsq.Message) error {
	messagelog := logger.Default.
		WithPrefix(fmt.Sprintf("worker_%d", h.workerID)).
		WithPrefix(fmt.Sprintf("attempts=%d", message.Attempts))
	var flatMsg FlatKillmailMessage
	if err := json.Unmarshal(message.Body, &flatMsg); err != nil {
		messagelog.Error("Error unmarshalling flattened killmail: %v", err)
		return err
	}
	// Keep-alive loop; cancelled on every return path via the deferred cancel.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				message.Touch()
			}
		}
	}()
	messagelog = messagelog.WithPrefix(fmt.Sprintf("killmail_%d", flatMsg.Killmail.KillmailID))
	if err := h.db.SaveFlatKillmails([]*FlatKillmail{flatMsg.Killmail}, flatMsg.Attackers, flatMsg.Items); err != nil {
		messagelog.Error("Failed to save killmail: %v", err)
		return err
	}
	if n := atomic.AddInt64(&inserterProcessedCount, 1); n%1000 == 0 {
		logger.Info("Inserted %d killmails", n)
	}
	message.Finish()
	return nil
}

View File

@@ -1,61 +0,0 @@
package main
import (
"context"
"testing"
)
// TestKillmailIDsEndpoint checks that QueryKillmailIDs returns at least
// one killmail when filtering on a victim ship type combined with a
// fitted-module filter. Requires a reachable database.
func TestKillmailIDsEndpoint(t *testing.T) {
	db, err := GetDB()
	if err != nil {
		t.Fatalf("Failed to get DB: %v", err)
	}
	filters := AnalyticsFilters{
		VictimShipTypeID: []int32{24692},
		HasModule:        &ModuleFilter{ModuleID: 26914},
	}
	results, err := db.QueryKillmailIDs(context.Background(), filters, 100, 0)
	if err != nil {
		t.Fatalf("QueryKillmailIDs failed: %v", err)
	}
	if len(results) == 0 {
		t.Fatalf("Expected at least one killmail ID, got 0")
	}
	t.Logf("Successfully retrieved %d killmail IDs", len(results))
	t.Logf("First killmail ID: %d", results[0])
}
// TestModuleFilterExcludesCargo asserts that the module filter matches only
// fitted modules: killmail 126799160 carries module 26914 in cargo, so it
// must be absent from the results. Requires a reachable database.
func TestModuleFilterExcludesCargo(t *testing.T) {
	db, err := GetDB()
	if err != nil {
		t.Fatalf("Failed to get DB: %v", err)
	}
	filters := AnalyticsFilters{
		HasModule: &ModuleFilter{ModuleID: 26914},
	}
	results, err := db.QueryKillmailIDs(context.Background(), filters, 10000, 0)
	if err != nil {
		t.Fatalf("QueryKillmailIDs failed: %v", err)
	}
	for _, id := range results {
		if id != 126799160 {
			continue
		}
		t.Fatalf("Killmail 126799160 should NOT be returned because module 26914 is in cargo, not fitted. Got %d killmails total.", len(results))
	}
	t.Logf("Successfully verified that killmail 126799160 is NOT in results (module in cargo). Returned %d killmails.", len(results))
}

139
lnsq/nsq.go Normal file
View File

@@ -0,0 +1,139 @@
package lnsq
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"zkillsusser/config"
"github.com/nsqio/go-nsq"
)
// Topics shared by every producer/consumer binary in the pipeline, so the
// two ends can never drift apart on a topic name.
const (
	TopicFlatKillmailQueue           = "flat-killmail-queue"
	TopicMissingCharacterNameQueue   = "missing-character-name-queue"
	TopicMissingTypeNameQueue        = "missing-type-name-queue"
	TopicMissingSolarSystemNameQueue = "missing-solar-system-name-queue"
	TopicMissingCorporationNameQueue = "missing-corporation-name-queue"
	TopicMissingAllianceNameQueue    = "missing-alliance-name-queue"
)

// Channels used by the consumer binaries; each consumer type gets its own
// named channel (and therefore its own copy of the topic stream).
const (
	ChannelClickhouseWriter    = "clickhouse-writer"
	ChannelItemTypeResolver    = "item-type-resolver"
	ChannelSolarSystemResolver = "solar-system-resolver"
	ChannelCharacterResolver   = "character-resolver"
	ChannelCorporationResolver = "corporation-resolver"
	ChannelAllianceResolver    = "alliance-resolver"
)

// BaseConsumers is the baseline number of concurrent consumers a stage
// starts. NOTE(review): its usage lives in the consumer binaries, not in
// this file — confirm there.
const (
	BaseConsumers = 2
)
// ConnectToNSQ creates a consumer for topic/channel with the pipeline's
// shared settings, registers handler, connects to the nsqd at
// config.NSQHost:config.NSQPort, and waits up to 10s for the connection to
// appear in the consumer's stats. On any failure the consumer is stopped
// before the error is returned.
func ConnectToNSQ(topic, channel string, handler nsq.Handler) (*nsq.Consumer, error) {
	nsqConfig := nsq.NewConfig()
	// NOTE(review): MaxInFlight is pinned to 1 and MsgTimeout to 2s here —
	// presumably concurrency comes from running multiple consumers (see
	// BaseConsumers) and handlers Touch long-running messages; confirm.
	nsqConfig.MaxInFlight = 1
	nsqConfig.MsgTimeout = 2 * time.Second
	nsqConfig.MaxAttempts = 5
	// Long backoff: failures here are usually resource contention or rate
	// limits, so retries should wait a while.
	nsqConfig.BackoffMultiplier = 60 * time.Second // We want to wait for a little while
	consumer, err := nsq.NewConsumer(topic, channel, nsqConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create NSQ consumer: %w", err)
	}
	consumer.AddHandler(handler)
	nsqdAddr := fmt.Sprintf("%s:%d", config.NSQHost, config.NSQPort)
	err = consumer.ConnectToNSQD(nsqdAddr)
	if err != nil {
		consumer.Stop()
		return nil, fmt.Errorf("failed to connect to NSQD: %w", err)
	}
	// Poll the consumer stats until at least one live connection shows up,
	// presumably because ConnectToNSQD can return before the connection is
	// actually established.
	timeout := 10 * time.Second
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		stats := consumer.Stats()
		if stats.Connections > 0 {
			return consumer, nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	consumer.Stop()
	return nil, fmt.Errorf("timeout waiting for NSQ connection after %v", timeout)
}
// NewProducer builds an NSQ producer pointed at the nsqd configured via
// config.NSQHost/config.NSQPort, using a 5s dial timeout and a 10s write
// timeout.
func NewProducer() (*nsq.Producer, error) {
	cfg := nsq.NewConfig()
	cfg.DialTimeout = 5 * time.Second
	cfg.WriteTimeout = 10 * time.Second
	addr := fmt.Sprintf("%s:%d", config.NSQHost, config.NSQPort)
	p, err := nsq.NewProducer(addr, cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create NSQ producer: %w", err)
	}
	return p, nil
}
// ChannelStats mirrors the per-channel fields of nsqd's /stats JSON output
// that GetTopicDepth needs.
type ChannelStats struct {
	ChannelName string `json:"channel_name"`
	Depth       int64  `json:"depth"`           // messages queued in the channel
	InFlight    int64  `json:"in_flight_count"` // delivered but not yet finished
}

// TopicStats mirrors the per-topic fields of nsqd's /stats JSON output.
type TopicStats struct {
	TopicName    string         `json:"topic_name"`
	MessageCount int64          `json:"message_count"` // lifetime total, not current depth
	Depth        int64          `json:"depth"`         // messages not yet handed to channels
	Channels     []ChannelStats `json:"channels"`
}

// StatsResponse is the top-level shape of nsqd's /stats?format=json reply.
type StatsResponse struct {
	Topics []TopicStats `json:"topics"`
}
// GetTopicDepth reports the total outstanding messages for an NSQ topic:
// the topic's own depth plus every channel's depth and in-flight count. It
// queries nsqd's HTTP stats endpoint, assumed to listen on NSQPort+1. An
// unknown topic yields 0 with no error.
func GetTopicDepth(topic string) (int64, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	statsURL := fmt.Sprintf("http://%s:%d/stats?format=json&topic=%s", config.NSQHost, config.NSQPort+1, topic)
	resp, err := client.Get(statsURL)
	if err != nil {
		return 0, fmt.Errorf("failed to get topic stats: %w", err)
	}
	defer resp.Body.Close()
	body, readErr := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("failed to get topic stats: status %d, body: %s", resp.StatusCode, string(body))
	}
	if readErr != nil {
		return 0, fmt.Errorf("failed to read stats response: %w", readErr)
	}
	var statsResp StatsResponse
	if err := json.Unmarshal(body, &statsResp); err != nil {
		return 0, fmt.Errorf("failed to unmarshal stats: %w, body: %s", err, string(body))
	}
	for _, t := range statsResp.Topics {
		if t.TopicName != topic {
			continue
		}
		depth := t.Depth
		for _, ch := range t.Channels {
			depth += ch.Depth + ch.InFlight
		}
		return depth, nil
	}
	return 0, nil
}

35
main.go
View File

@@ -1,47 +1,24 @@
package main
import (
"zkillsusser/api"
"zkillsusser/config"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// FlatKillmailMessage is the JSON envelope passed between pipeline stages:
// one flattened killmail plus its attacker and item rows.
type FlatKillmailMessage struct {
	Killmail  *FlatKillmail          `json:"killmail"`
	Attackers []FlatKillmailAttacker `json:"attackers"`
	Items     []FlatKillmailItem     `json:"items"`
}
func main() {
if err := initConfig(); err != nil {
if err := config.InitConfig(); err != nil {
logger.Error("Failed to initialize config: %v", err)
return
}
logger.InitFlag()
// logger.Default = logger.Default.ToFile("zkill.log")
logger.Info("Starting")
if serverMode {
StartAPIServer(serverPort)
if config.ServerMode {
api.StartAPIServer(config.ServerPort)
return
}
if stage != "" {
runStage(stage)
return
}
logger.Error("No action specified. Set SERVER=true or STAGE=file-reader|flattener|inserter")
}
// runStage dispatches to the pipeline stage selected by the STAGE
// environment variable; unknown names are logged and ignored.
func runStage(stage string) {
	switch stage {
	case "file-reader":
		runFileReaderStage()
	case "flattener":
		runFlattenerStage()
	case "inserter":
		runInserterStage()
	default:
		logger.Error("Unknown stage: %s. Use: file-reader, flattener, inserter", stage)
	}
}

View File

@@ -20,42 +20,44 @@ procs:
CLICKHOUSE_PASSWORD: ""
stop:
send-keys: ["<C-c>"]
frontend:
shell: "bun dev"
cwd: "frontend"
autostart: false
stop:
send-keys: ["<C-c>"]
dev-compose:
shell: "docker compose -f docker-compose-dev.yml up"
autostart: true
stop:
send-keys: ["<C-c>"]
ingest-stage-file-reader:
esi-killmail-disk-reader:
shell: "go run ."
env:
STAGE: "file-reader"
cwd: "pipeline/esi-killmail-disk-reader"
autostart: false
stop:
send-keys: ["<C-c>"]
ingest-stage-flattener:
nsq-to-clickhouse-reader:
shell: "go run ."
cwd: "pipeline/nsq-to-clickhouse-reader"
env:
STAGE: "flattener"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-1:
reader-item-type:
shell: "go run ."
cwd: "pipeline/reader-item-type"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -63,12 +65,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-2:
resolver-item-type:
shell: "go run ."
cwd: "pipeline/resolver-item-type"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -76,12 +77,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-3:
resolver-solar-system-name:
shell: "go run ."
cwd: "pipeline/resolver-solar-system-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -89,12 +89,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-4:
resolver-character-name:
shell: "go run ."
cwd: "pipeline/resolver-character-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -102,12 +101,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-5:
resolver-corporation-name:
shell: "go run ."
cwd: "pipeline/resolver-corporation-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -115,12 +113,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-6:
resolver-alliance-name:
shell: "go run ."
cwd: "pipeline/resolver-alliance-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -128,12 +125,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-7:
reader-attacker-character-name:
shell: "go run ."
cwd: "pipeline/reader-attacker-character-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -141,12 +137,11 @@ procs:
stop:
send-keys: ["<C-c>"]
ingest-stage-inserter-8:
reader-victim-character-name:
shell: "go run ."
cwd: "pipeline/reader-victim-character-name"
env:
STAGE: "inserter"
CLICKHOUSE_HOST: "clickhouse-zkill.site.quack-lab.dev"
CLICKHOUSE_PORT: "80"
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
@@ -154,20 +149,62 @@ procs:
stop:
send-keys: ["<C-c>"]
go-tidy:
shell: "go mod tidy"
reader-solar-system-id:
shell: "go run ."
cwd: "pipeline/reader-solar-system-id"
env:
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]
gorm-gentool-install:
shell: "go install gorm.io/gen/tools/gentool@latest"
reader-ship-type-id:
shell: "go run ."
cwd: "pipeline/reader-ship-type-id"
env:
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]
gorm-gentool-generate:
shell: 'gentool -db sqlite -dsn "sqlite-latest.sqlite" -outPath types -modelPkgName models -onlyModel'
reader-alliance-id:
shell: "go run ."
cwd: "pipeline/reader-alliance-id"
env:
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]
reader-corporation-id:
shell: "go run ."
cwd: "pipeline/reader-corporation-id"
env:
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]
reader-weapon-type-id:
shell: "go run ."
cwd: "pipeline/reader-weapon-type-id"
env:
CLICKHOUSE_HOST: "localhost:8123"
CLICKHOUSE_DATABASE: "zkill"
CLICKHOUSE_USERNAME: "default"
CLICKHOUSE_PASSWORD: ""
autostart: false
stop:
send-keys: ["<C-c>"]

View File

@@ -0,0 +1,116 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
bz2reader "zkillsusser/bz2-reader"
"zkillsusser/config"
"zkillsusser/lnsq"
"zkillsusser/types"
logger "git.site.quack-lab.dev/dave/cylogger"
utils "git.site.quack-lab.dev/dave/cyutils"
)
// Stage-1 tuning: Workers concurrent file readers consume .bz2 dumps found
// in DataDir.
const (
	Workers = 4
	DataDir = "data"
)
// main implements stage 1 of the pipeline: read .bz2 killmail dumps from
// DataDir, flatten each killmail, and publish the flat form to the shared
// lnsq.TopicFlatKillmailQueue NSQ topic. Up to Workers files are processed
// concurrently; each worker owns its own producer.
func main() {
	if err := config.InitConfig(); err != nil {
		logger.Error("Failed to initialize config: %v", err)
		return
	}
	logger.InitFlag()
	logger.Info("Starting stage 1: reading from disk, flattening, and writing to NSQ")
	killmailFiles, err := os.ReadDir(DataDir)
	if err != nil {
		logger.Error("Failed to read data directory: %v", err)
		return
	}
	// Only .bz2 archives are killmail dumps; skip everything else.
	var filesToProcess []string
	for _, file := range killmailFiles {
		if strings.HasSuffix(file.Name(), ".bz2") {
			filesToProcess = append(filesToProcess, filepath.Join(DataDir, file.Name()))
		}
	}
	logger.Info("Found %d files to process", len(filesToProcess))
	ctx := context.Background()
	utils.WithWorkers(Workers, filesToProcess, func(worker int, index int, filePath string) {
		if ctx.Err() != nil {
			return
		}
		fileLog := logger.Default.
			WithPrefix(fmt.Sprintf("worker %d", worker)).
			WithPrefix(fmt.Sprintf("file %d of %d", index+1, len(filesToProcess))).
			WithPrefix(filepath.Base(filePath))
		fileLog.Info("Processing file")
		producer, err := lnsq.NewProducer()
		if err != nil {
			fileLog.Error("Error creating producer: %v", err)
			return
		}
		defer producer.Stop()
		killmailChan := make(chan types.Killmail)
		go func() {
			// NOTE(review): assumes IterBz2Killmails closes killmailChan on
			// all paths (including error); otherwise the range below would
			// block forever — confirm in the bz2reader package.
			if err := bz2reader.IterBz2Killmails(filePath, killmailChan); err != nil {
				fileLog.Error("Failed to iterate killmails: %v", err)
			}
		}()
		published := 0
		for killmail := range killmailChan {
			if ctx.Err() != nil {
				return
			}
			flatKillmail, flatAttackers, flatItems := killmail.Flatten()
			flatMessage := types.NSQKillmail{
				Killmail:  flatKillmail,
				Attackers: flatAttackers,
				Items:     flatItems,
			}
			flatBytes, err := json.Marshal(flatMessage)
			if err != nil {
				fileLog.Error("Failed to marshal flat killmail: %v", err)
				continue
			}
			// Use the shared topic constant (same value as the previous
			// hardcoded "flat-killmail-queue") so producer and consumers
			// cannot drift apart on the topic name.
			for {
				err = producer.Publish(lnsq.TopicFlatKillmailQueue, flatBytes)
				if err == nil {
					break
				}
				fileLog.Error("Failed to publish flat killmail, retrying: %v", err)
				time.Sleep(1 * time.Second)
			}
			published++
			if published%1000 == 0 {
				fileLog.Info("Published %d flat killmails", published)
			}
		}
		fileLog.Info("Finished processing file, published %d flat killmails", published)
	})
	logger.Info("Stage 1 completed")
}

View File

@@ -0,0 +1,196 @@
package main
import (
"context"
"encoding/json"
"sync"
"time"
"zkillsusser/clickhouse"
"zkillsusser/config"
"zkillsusser/types"
"zkillsusser/lnsq"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/nsqio/go-nsq"
)
// Batching knobs for the ClickHouse writer.
const (
	BatchSize     = 1000             // flush once this many killmails are buffered
	FlushInterval = 5 * time.Second  // cadence of the periodic flush check in Start
	MaxIdleTime   = 30 * time.Second // flush a partial batch after this much inactivity
	Topic         = lnsq.TopicFlatKillmailQueue
	Channel       = lnsq.ChannelClickhouseWriter
)
// BatchProcessor accumulates flattened killmails and writes them to
// ClickHouse in batches. Flushes are triggered by size (BatchSize), cadence
// (FlushInterval), inactivity (MaxIdleTime) and shutdown.
type BatchProcessor struct {
	client       *clickhouse.ClickhouseClient
	killmails    []*types.FlatKillmail
	attackers    []types.FlatKillmailAttacker
	items        []types.FlatKillmailItem
	mutex        sync.Mutex // guards the three buffers, lastFlush and idleTimer
	lastFlush    time.Time
	idleTimer    *time.Timer   // re-armed on every AddMessage
	shutdownChan chan struct{} // closed by Stop to drain and stop the flusher
	wg           sync.WaitGroup
}
// NewBatchProcessor returns a processor ready for Start. Buffers are
// pre-sized for one full batch, assuming roughly 10 attackers and 20 items
// per killmail.
func NewBatchProcessor(client *clickhouse.ClickhouseClient) *BatchProcessor {
	bp := &BatchProcessor{client: client, lastFlush: time.Now()}
	bp.killmails = make([]*types.FlatKillmail, 0, BatchSize)
	bp.attackers = make([]types.FlatKillmailAttacker, 0, BatchSize*10)
	bp.items = make([]types.FlatKillmailItem, 0, BatchSize*20)
	bp.shutdownChan = make(chan struct{})
	return bp
}
// AddMessage appends one killmail (plus its attackers and items) to the
// pending batch. When BatchSize killmails are buffered it flushes
// immediately; otherwise it re-arms an idle timer that flushes whatever is
// buffered after MaxIdleTime of inactivity.
func (bp *BatchProcessor) AddMessage(message *types.NSQKillmail) {
	bp.mutex.Lock()
	defer bp.mutex.Unlock()
	bp.killmails = append(bp.killmails, message.Killmail)
	bp.attackers = append(bp.attackers, message.Attackers...)
	bp.items = append(bp.items, message.Items...)
	// Cancel any previously armed idle timer: either we flush right now, or
	// we arm a fresh one below.
	if bp.idleTimer != nil {
		bp.idleTimer.Stop()
	}
	if len(bp.killmails) >= BatchSize {
		bp.flushUnsafe()
	} else {
		bp.idleTimer = time.AfterFunc(MaxIdleTime, func() {
			// Runs on the timer goroutine; take the lock before touching
			// the buffers.
			bp.mutex.Lock()
			if len(bp.killmails) > 0 {
				bp.flushUnsafe()
			}
			bp.mutex.Unlock()
		})
	}
}
// flushUnsafe writes the buffered batch to ClickHouse and resets the
// buffers (keeping their capacity). Callers must hold bp.mutex — "unsafe"
// means no locking happens here.
//
// NOTE(review): on a save error the batch is logged and then dropped — the
// data is lost. Confirm that is acceptable for this pipeline.
func (bp *BatchProcessor) flushUnsafe() {
	if len(bp.killmails) == 0 {
		return
	}
	start := time.Now()
	err := bp.client.SaveFlatKillmails(bp.killmails, bp.attackers, bp.items)
	duration := time.Since(start)
	if err != nil {
		logger.Error("Failed to save batch of %d killmails: %v", len(bp.killmails), err)
	} else {
		logger.Info("Successfully saved batch of %d killmails (%d attackers, %d items) in %v",
			len(bp.killmails), len(bp.attackers), len(bp.items), duration)
	}
	// Truncate in place so the backing arrays are reused by the next batch.
	bp.killmails = bp.killmails[:0]
	bp.attackers = bp.attackers[:0]
	bp.items = bp.items[:0]
	bp.lastFlush = time.Now()
}
// Start launches the background flusher goroutine: every FlushInterval it
// flushes the batch if data is pending and the last flush is old enough;
// when shutdownChan is closed it performs a final drain and exits. Stop
// waits for this goroutine via bp.wg.
func (bp *BatchProcessor) Start() {
	bp.wg.Add(1)
	go func() {
		defer bp.wg.Done()
		ticker := time.NewTicker(FlushInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				bp.mutex.Lock()
				if time.Since(bp.lastFlush) >= FlushInterval && len(bp.killmails) > 0 {
					bp.flushUnsafe()
				}
				bp.mutex.Unlock()
			case <-bp.shutdownChan:
				// Final drain before exit so buffered killmails are not lost.
				bp.mutex.Lock()
				bp.flushUnsafe()
				bp.mutex.Unlock()
				return
			}
		}
	}()
}
// Stop signals the flush goroutine to drain any pending batch, cancels the
// idle timer, and blocks until the flusher has exited.
//
// The idle timer must be read under bp.mutex: AddMessage replaces
// bp.idleTimer while holding the mutex, so the previous unlocked read here
// was a data race.
func (bp *BatchProcessor) Stop() {
	close(bp.shutdownChan)
	bp.mutex.Lock()
	if bp.idleTimer != nil {
		bp.idleTimer.Stop()
	}
	bp.mutex.Unlock()
	bp.wg.Wait()
}
// NSQHandler feeds consumed NSQ messages into the BatchProcessor.
type NSQHandler struct {
	processor *BatchProcessor
}

// HandleMessage unmarshals one flattened killmail and hands it to the
// batcher. The message is touched once a second while the handler runs
// (the shared consumer config uses a short MsgTimeout) and finished as
// soon as the killmail is buffered — i.e. before it is actually written to
// ClickHouse, so a crash between buffering and flush loses the message.
func (h *NSQHandler) HandleMessage(message *nsq.Message) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				message.Touch()
			case <-ctx.Done():
				return
			}
		}
	}()
	var nsqKillmail types.NSQKillmail
	if err := json.Unmarshal(message.Body, &nsqKillmail); err != nil {
		// Returning the error requeues the message per the consumer config.
		logger.Error("Failed to unmarshal NSQ message: %v", err)
		return err
	}
	h.processor.AddMessage(&nsqKillmail)
	message.Finish()
	return nil
}
// main wires stage 2: consume flattened killmails from NSQ and batch-write
// them to ClickHouse until the process is killed.
func main() {
	if err := config.InitConfig(); err != nil {
		logger.Error("Failed to initialize config: %v", err)
		return
	}
	logger.InitFlag()
	logger.Info("Starting stage 2: reading from NSQ and writing to ClickHouse")
	chClient, err := clickhouse.NewClient()
	if err != nil {
		logger.Error("Failed to create ClickHouse client: %v", err)
		return
	}
	defer chClient.Close()
	processor := NewBatchProcessor(chClient)
	processor.Start()
	defer processor.Stop()
	handler := &NSQHandler{processor: processor}
	consumer, err := lnsq.ConnectToNSQ(Topic, Channel, handler)
	if err != nil {
		logger.Error("Failed to connect to NSQ: %v", err)
		return
	}
	defer consumer.Stop()
	logger.Info("Connected to NSQ, consuming from topic: %s, channel: %s", Topic, Channel)
	// Block forever. NOTE(review): the deferred cleanups above never run on
	// Ctrl-C/SIGKILL since nothing unblocks this select.
	select {}
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the missing-alliance-name reader: it queries ClickHouse for
// alliance IDs with unresolved names and publishes them to the
// missing-alliance-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing alliance ID reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingAllianceNameQueue, queryMissingAllianceIDs); err != nil {
		logger.Error("Failed to process missing alliance IDs: %v", err)
	}
}
// queryMissingAllianceIDs lists the distinct victim alliance IDs in
// zkill.killmails whose alliance name is still empty or NULL. The
// IS NOT NULL guard skips killmails with no alliance at all.
func queryMissingAllianceIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT victim_alliance_id
FROM
zkill.killmails
WHERE
victim_alliance_id IS NOT NULL
AND (victim_alliance_name = '' OR victim_alliance_name IS NULL)
ORDER BY
victim_alliance_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var id int64
		if err := result.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, id)
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the attacker-side character reader: it queries ClickHouse for
// attacker character IDs with unresolved names and publishes them to the
// missing-character-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing attacker character name reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingCharacterNameQueue, queryMissingAttackerCharacterNames); err != nil {
		logger.Error("Failed to process missing attacker character names: %v", err)
	}
}
// queryMissingAttackerCharacterNames lists the distinct attacker character
// IDs in zkill.killmail_attackers whose character name is still empty or
// NULL.
func queryMissingAttackerCharacterNames(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT character_id
FROM
zkill.killmail_attackers
WHERE
character_name = ''
OR character_name IS NULL
ORDER BY
character_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var id int64
		if err := result.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, id)
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the missing-corporation-name reader: it queries ClickHouse for
// corporation IDs with unresolved names and publishes them to the
// missing-corporation-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing corporation ID reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingCorporationNameQueue, queryMissingCorporationIDs); err != nil {
		logger.Error("Failed to process missing corporation IDs: %v", err)
	}
}
// queryMissingCorporationIDs lists the distinct victim corporation IDs in
// zkill.killmails whose corporation name is still empty or NULL.
func queryMissingCorporationIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT victim_corporation_id
FROM
zkill.killmails
WHERE
victim_corporation_name = ''
OR victim_corporation_name IS NULL
ORDER BY
victim_corporation_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var id int64
		if err := result.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, id)
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,59 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the missing-item-type reader: it queries ClickHouse for item
// type IDs with unresolved type/group/category/market-group names and
// publishes them to the missing-type-name topic via the shared reader.Run
// poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing item type reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingTypeNameQueue, queryMissingItemTypes); err != nil {
		logger.Error("Failed to process missing item types: %v", err)
	}
}
// queryMissingItemTypes lists the distinct item type IDs in
// zkill.killmail_items where any of the resolved name columns (type,
// group, category, market group) is still empty or NULL. The column is
// Int32 in ClickHouse, so rows are scanned as int32 and widened.
func queryMissingItemTypes(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	logger.Info("Querying missing item types")
	const query = `
SELECT
DISTINCT item_type_id
FROM
zkill.killmail_items
WHERE
item_type_name = ''
OR item_type_name IS NULL
OR item_group_name = ''
OR item_group_name IS NULL
OR item_category_name = ''
OR item_category_name IS NULL
OR item_market_group_name = ''
OR item_market_group_name IS NULL
ORDER BY
item_type_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var raw int32
		if err := result.Scan(&raw); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, int64(raw))
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the missing-ship-type reader: it queries ClickHouse for victim
// ship type IDs with unresolved names and publishes them to the
// missing-type-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing ship type ID reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingTypeNameQueue, queryMissingShipTypeIDs); err != nil {
		logger.Error("Failed to process missing ship type IDs: %v", err)
	}
}
// queryMissingShipTypeIDs lists the distinct victim ship type IDs in
// zkill.killmails whose ship type name is still empty or NULL. The column
// is Int32 in ClickHouse, so rows are scanned as int32 and widened.
func queryMissingShipTypeIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT victim_ship_type_id
FROM
zkill.killmails
WHERE
victim_ship_type_name = ''
OR victim_ship_type_name IS NULL
ORDER BY
victim_ship_type_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var raw int32
		if err := result.Scan(&raw); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, int64(raw))
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the missing-solar-system-name reader: it queries ClickHouse
// for solar system IDs with unresolved names and publishes them to the
// missing-solar-system-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing solar system ID reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingSolarSystemNameQueue, queryMissingSolarSystemIDs); err != nil {
		logger.Error("Failed to process missing solar system IDs: %v", err)
	}
}
// queryMissingSolarSystemIDs lists the distinct solar system IDs in
// zkill.killmails whose system name is still empty or NULL. The column is
// Int32 in ClickHouse, so rows are scanned as int32 and widened.
func queryMissingSolarSystemIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT solar_system_id
FROM
zkill.killmails
WHERE
solar_system_name = ''
OR solar_system_name IS NULL
ORDER BY
solar_system_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var raw int32
		if err := result.Scan(&raw); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, int64(raw))
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main runs the victim-side character reader: it queries ClickHouse for
// victim character IDs with unresolved names and publishes them to the
// missing-character-name topic via the shared reader.Run poll loop.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing victim character name reader: reading from ClickHouse and writing to NSQ")
	if err := reader.Run(lnsq.TopicMissingCharacterNameQueue, queryMissingVictimCharacterNames); err != nil {
		logger.Error("Failed to process missing victim character names: %v", err)
	}
}
// queryMissingVictimCharacterNames lists the distinct victim character IDs
// in zkill.killmails whose character name is still empty or NULL.
func queryMissingVictimCharacterNames(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT victim_character_id
FROM
zkill.killmails
WHERE
victim_character_name = ''
OR victim_character_name IS NULL
ORDER BY
victim_character_id
`
	result, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer result.Close()
	var ids []int64
	for result.Next() {
		var id int64
		if err := result.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, id)
	}
	return ids, result.Err()
}

View File

@@ -0,0 +1,52 @@
package main
import (
"context"
"fmt"
"zkillsusser/clickhouse"
"zkillsusser/lnsq"
"zkillsusser/pipeline/reader"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main starts the missing-weapon-type reader: it polls ClickHouse for
// attacker rows lacking a weapon type name and queues their type IDs.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing weapon type ID reader: reading from ClickHouse and writing to NSQ")
	err := reader.Run(lnsq.TopicMissingTypeNameQueue, queryMissingWeaponTypeIDs)
	if err != nil {
		logger.Error("Failed to process missing weapon type IDs: %v", err)
	}
}
// queryMissingWeaponTypeIDs returns the distinct weapon type IDs of attacker
// rows whose weapon_type_name column is still empty or NULL.
func queryMissingWeaponTypeIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient) ([]int64, error) {
	const query = `
SELECT
DISTINCT weapon_type_id
FROM
zkill.killmail_attackers
WHERE
weapon_type_name = ''
OR weapon_type_name IS NULL
ORDER BY
weapon_type_id
`
	rows, err := chClient.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer rows.Close()
	var ids []int64
	for rows.Next() {
		// The column is a 32-bit integer in ClickHouse; widen after scanning.
		var id int32
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}
		ids = append(ids, int64(id))
	}
	return ids, rows.Err()
}

152
pipeline/reader/reader.go Normal file
View File

@@ -0,0 +1,152 @@
package reader
import (
"context"
"fmt"
"strconv"
"time"
"zkillsusser/clickhouse"
"zkillsusser/config"
"zkillsusser/lnsq"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/nsqio/go-nsq"
)
// Cadence of the reader main loop.
const (
	// PollInterval is how often ClickHouse is re-queried for missing IDs.
	PollInterval = 30 * time.Second
	// LogDepthInterval is how often the NSQ topic depth is logged.
	LogDepthInterval = 60 * time.Second
)
// Run is the shared main loop for the "missing ID" reader binaries. It
// connects to ClickHouse and NSQ, then polls forever: every PollInterval it
// calls pollAndProcess (which asks getMissing for unresolved IDs and
// publishes them to topic, skipping the query while the topic still has
// depth), and every LogDepthInterval it logs the topic depth. It returns an
// error only when setup fails; afterwards it runs until the process dies —
// the context created here is never cancelled externally.
func Run(topic string, getMissing func(context.Context, *clickhouse.ClickhouseClient) ([]int64, error)) error {
	if err := config.InitConfig(); err != nil {
		return fmt.Errorf("failed to initialize config: %w", err)
	}
	logger.InitFlag()
	chClient, err := clickhouse.NewClient()
	if err != nil {
		return fmt.Errorf("failed to create ClickHouse client: %w", err)
	}
	defer chClient.Close()
	producer, err := lnsq.NewProducer()
	if err != nil {
		return fmt.Errorf("failed to create NSQ producer: %w", err)
	}
	defer producer.Stop()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	pollTicker := time.NewTicker(PollInterval)
	defer pollTicker.Stop()
	depthTicker := time.NewTicker(LogDepthInterval)
	defer depthTicker.Stop()
	// Log and poll once up front so startup does not wait a full tick.
	logTopicDepth(topic)
	if err := pollAndProcess(ctx, chClient, producer, topic, getMissing); err != nil {
		// Cancellation is a clean shutdown, not an error.
		if ctx.Err() != nil {
			return nil
		}
		logger.Error("Failed to process missing IDs on startup: %v", err)
	}
	for {
		select {
		case <-ctx.Done():
			logger.Info("Reader shutting down")
			return nil
		case <-depthTicker.C:
			logTopicDepth(topic)
		case <-pollTicker.C:
			if err := pollAndProcess(ctx, chClient, producer, topic, getMissing); err != nil {
				if ctx.Err() != nil {
					return nil
				}
				logger.Error("Failed to process missing IDs: %v", err)
			}
		}
	}
}
// logTopicDepth reports how many messages remain on topic; fetch failures
// are logged and otherwise ignored.
func logTopicDepth(topic string) {
	if depth, err := lnsq.GetTopicDepth(topic); err != nil {
		logger.Error("Failed to get topic depth for %s: %v", topic, err)
	} else {
		logger.Info("Topic %s has %d remaining messages", topic, depth)
	}
}
// pollAndProcess publishes missing IDs only when topic is fully drained.
// Depth-check failures and a non-empty topic both result in a no-op nil
// return so the caller's ticker loop simply tries again later.
func pollAndProcess(ctx context.Context, chClient *clickhouse.ClickhouseClient, producer *nsq.Producer, topic string, getMissing func(context.Context, *clickhouse.ClickhouseClient) ([]int64, error)) error {
	depth, err := lnsq.GetTopicDepth(topic)
	if err != nil {
		logger.Error("Failed to get topic depth for %s: %v, skipping query", topic, err)
		return nil
	}
	logger.Info("Checking topic %s depth before poll: %d messages", topic, depth)
	switch {
	case depth > 0:
		logger.Info("Topic %s has %d messages, skipping query", topic, depth)
		return nil
	default:
		logger.Info("Topic %s is empty, querying for missing IDs", topic)
		return processMissingIDs(ctx, chClient, producer, topic, getMissing)
	}
}
// processMissingIDs queries getMissing for unresolved IDs and publishes each
// one as a plain decimal string to topic, retrying failed publishes (usually
// rate limits or resource contention) while honoring context cancellation.
// After publishing it waits briefly and logs the resulting topic depth as a
// sanity check.
func processMissingIDs(ctx context.Context, chClient *clickhouse.ClickhouseClient, producer *nsq.Producer, topic string, getMissing func(context.Context, *clickhouse.ClickhouseClient) ([]int64, error)) error {
	ids, err := getMissing(ctx, chClient)
	if err != nil {
		return fmt.Errorf("failed to get missing IDs: %w", err)
	}
	if len(ids) == 0 {
		logger.Info("No missing IDs found")
		return nil
	}
	logger.Info("Found %d distinct missing IDs", len(ids))
	published := 0
	for _, id := range ids {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		messageBytes := []byte(strconv.FormatInt(id, 10))
		for {
			err := producer.Publish(topic, messageBytes)
			if err == nil {
				break
			}
			logger.Error("Failed to publish ID, retrying: %v", err)
			// Wait before retrying, but abort promptly on cancellation so an
			// unreachable nsqd cannot wedge shutdown (the original slept
			// unconditionally here, ignoring ctx).
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(1 * time.Second):
			}
		}
		published++
		if published%1000 == 0 {
			logger.Info("Published %d/%d IDs", published, len(ids))
		}
	}
	logger.Info("Published all %d missing IDs", published)
	// Give nsqd a moment to account for the messages before sampling depth.
	time.Sleep(2 * time.Second)
	finalDepth, err := lnsq.GetTopicDepth(topic)
	if err != nil {
		logger.Error("Failed to check depth after publishing: %v", err)
	} else {
		logger.Info("Topic %s depth after publishing: %d messages", topic, finalDepth)
	}
	return nil
}

View File

@@ -0,0 +1,87 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"zkillsusser/clickhouse"
"zkillsusser/db"
"zkillsusser/lnsq"
"zkillsusser/pipeline/resolver"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main starts the alliance-name resolver: it consumes alliance IDs from NSQ
// and writes the resolved names back into ClickHouse.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing alliance name resolver: reading from NSQ and updating ClickHouse")
	err := resolver.Run(lnsq.TopicMissingAllianceNameQueue, "alliance-resolver", lnsq.BaseConsumers, resolveAlliance)
	if err != nil {
		logger.Error("Failed to start resolver: %v", err)
	}
}
// ESIAllianceResponse is the subset of the ESI /alliances/{id}/ payload this
// resolver needs — only the alliance's display name.
type ESIAllianceResponse struct {
	Name string `json:"name"`
}
// resolveAlliance looks up the name for alliance id via the ESI API (through
// the caching proxy) and writes it into both ClickHouse killmail tables via
// ALTER TABLE UPDATE mutations. A 404 is recorded as "Deleted" and a 400 as
// "Unknown" so such IDs are not retried forever.
func resolveAlliance(ctx context.Context, chClient *clickhouse.ClickhouseClient, dbClient db.DB, id int64) error {
	esiURL := fmt.Sprintf("https://esi.evetech.net/latest/alliances/%d/", id)
	// NOTE(review): esiURL is embedded unescaped in the ?url= query value;
	// presumably the proxy tolerates this — confirm, else QueryEscape it.
	proxyURL := fmt.Sprintf("https://proxy.site.quack-lab.dev?url=%s", esiURL)
	httpClient := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequestWithContext(ctx, "GET", proxyURL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to fetch alliance %d: %w", id, err)
	}
	defer resp.Body.Close()
	var allianceName string
	switch resp.StatusCode {
	case http.StatusNotFound:
		allianceName = "Deleted"
	case http.StatusBadRequest:
		allianceName = "Unknown"
	case http.StatusOK:
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read response for alliance %d: %w", id, err)
		}
		var allianceResp ESIAllianceResponse
		if err := json.Unmarshal(body, &allianceResp); err != nil {
			return fmt.Errorf("failed to unmarshal alliance %d: %w", id, err)
		}
		allianceName = escapeString(allianceResp.Name)
	default:
		return fmt.Errorf("failed to fetch alliance %d: status %d", id, resp.StatusCode)
	}
	logger.Info("Updating alliance name for alliance_id %d: %s", id, allianceName)
	for _, query := range []string{
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE victim_alliance_name = '%s' WHERE victim_alliance_id = %d", allianceName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_attackers UPDATE alliance_name = '%s' WHERE alliance_id = %d", allianceName, id),
	} {
		if err := chClient.Exec(ctx, query); err != nil {
			return fmt.Errorf("failed to update alliance name for alliance_id %d: %w", id, err)
		}
	}
	return nil
}
// escapeString doubles backslashes and single quotes so s can be embedded
// safely inside a single-quoted ClickHouse string literal.
func escapeString(s string) string {
	escaped := strings.ReplaceAll(s, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, "'", "''")
	return escaped
}

View File

@@ -0,0 +1,87 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"zkillsusser/clickhouse"
"zkillsusser/db"
"zkillsusser/lnsq"
"zkillsusser/pipeline/resolver"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main starts the character-name resolver: it consumes character IDs from
// NSQ and writes the resolved names back into ClickHouse.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing character name resolver: reading from NSQ and updating ClickHouse")
	err := resolver.Run(lnsq.TopicMissingCharacterNameQueue, "character-resolver", lnsq.BaseConsumers, resolveCharacter)
	if err != nil {
		logger.Error("Failed to start resolver: %v", err)
	}
}
// ESICharacterResponse is the subset of the ESI /characters/{id}/ payload
// this resolver needs — only the character's display name.
type ESICharacterResponse struct {
	Name string `json:"name"`
}
// resolveCharacter looks up the name for character id via the ESI API
// (through the caching proxy) and writes it into both ClickHouse killmail
// tables via ALTER TABLE UPDATE mutations. A 404 is recorded as "Deleted"
// and a 400 as "Unknown" so such IDs are not retried forever.
func resolveCharacter(ctx context.Context, chClient *clickhouse.ClickhouseClient, dbClient db.DB, id int64) error {
	esiURL := fmt.Sprintf("https://esi.evetech.net/latest/characters/%d/", id)
	// NOTE(review): esiURL is embedded unescaped in the ?url= query value;
	// presumably the proxy tolerates this — confirm, else QueryEscape it.
	proxyURL := fmt.Sprintf("https://proxy.site.quack-lab.dev?url=%s", esiURL)
	httpClient := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequestWithContext(ctx, "GET", proxyURL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to fetch character %d: %w", id, err)
	}
	defer resp.Body.Close()
	var characterName string
	switch resp.StatusCode {
	case http.StatusNotFound:
		characterName = "Deleted"
	case http.StatusBadRequest:
		characterName = "Unknown"
	case http.StatusOK:
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read response for character %d: %w", id, err)
		}
		var charResp ESICharacterResponse
		if err := json.Unmarshal(body, &charResp); err != nil {
			return fmt.Errorf("failed to unmarshal character %d: %w", id, err)
		}
		characterName = escapeString(charResp.Name)
	default:
		return fmt.Errorf("failed to fetch character %d: status %d", id, resp.StatusCode)
	}
	logger.Info("Updating character name for character_id %d: %s", id, characterName)
	for _, query := range []string{
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE victim_character_name = '%s' WHERE victim_character_id = %d", characterName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_attackers UPDATE character_name = '%s' WHERE character_id = %d", characterName, id),
	} {
		if err := chClient.Exec(ctx, query); err != nil {
			return fmt.Errorf("failed to update character name for character_id %d: %w", id, err)
		}
	}
	return nil
}
// escapeString doubles backslashes and single quotes so s can be embedded
// safely inside a single-quoted ClickHouse string literal.
func escapeString(s string) string {
	escaped := strings.ReplaceAll(s, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, "'", "''")
	return escaped
}

View File

@@ -0,0 +1,87 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"zkillsusser/clickhouse"
"zkillsusser/db"
"zkillsusser/lnsq"
"zkillsusser/pipeline/resolver"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main starts the corporation-name resolver: it consumes corporation IDs
// from NSQ and writes the resolved names back into ClickHouse.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing corporation name resolver: reading from NSQ and updating ClickHouse")
	err := resolver.Run(lnsq.TopicMissingCorporationNameQueue, "corporation-resolver", lnsq.BaseConsumers, resolveCorporation)
	if err != nil {
		logger.Error("Failed to start resolver: %v", err)
	}
}
// ESICorporationResponse is the subset of the ESI /corporations/{id}/
// payload this resolver needs — only the corporation's display name.
type ESICorporationResponse struct {
	Name string `json:"name"`
}
// resolveCorporation looks up the name for corporation id via the ESI API
// (through the caching proxy) and writes it into both ClickHouse killmail
// tables via ALTER TABLE UPDATE mutations. A 404 is recorded as "Deleted"
// and a 400 as "Unknown" so such IDs are not retried forever.
func resolveCorporation(ctx context.Context, chClient *clickhouse.ClickhouseClient, dbClient db.DB, id int64) error {
	esiURL := fmt.Sprintf("https://esi.evetech.net/latest/corporations/%d/", id)
	// NOTE(review): esiURL is embedded unescaped in the ?url= query value;
	// presumably the proxy tolerates this — confirm, else QueryEscape it.
	proxyURL := fmt.Sprintf("https://proxy.site.quack-lab.dev?url=%s", esiURL)
	httpClient := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequestWithContext(ctx, "GET", proxyURL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to fetch corporation %d: %w", id, err)
	}
	defer resp.Body.Close()
	var corporationName string
	switch resp.StatusCode {
	case http.StatusNotFound:
		corporationName = "Deleted"
	case http.StatusBadRequest:
		corporationName = "Unknown"
	case http.StatusOK:
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read response for corporation %d: %w", id, err)
		}
		var corpResp ESICorporationResponse
		if err := json.Unmarshal(body, &corpResp); err != nil {
			return fmt.Errorf("failed to unmarshal corporation %d: %w", id, err)
		}
		corporationName = escapeString(corpResp.Name)
	default:
		return fmt.Errorf("failed to fetch corporation %d: status %d", id, resp.StatusCode)
	}
	logger.Info("Updating corporation name for corporation_id %d: %s", id, corporationName)
	for _, query := range []string{
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE victim_corporation_name = '%s' WHERE victim_corporation_id = %d", corporationName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_attackers UPDATE corporation_name = '%s' WHERE corporation_id = %d", corporationName, id),
	} {
		if err := chClient.Exec(ctx, query); err != nil {
			return fmt.Errorf("failed to update corporation name for corporation_id %d: %w", id, err)
		}
	}
	return nil
}
// escapeString doubles backslashes and single quotes so s can be embedded
// safely inside a single-quoted ClickHouse string literal.
func escapeString(s string) string {
	escaped := strings.ReplaceAll(s, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, "'", "''")
	return escaped
}

View File

@@ -0,0 +1,88 @@
package main
import (
"context"
"errors"
"fmt"
"strings"
"zkillsusser/clickhouse"
"zkillsusser/db"
"zkillsusser/lnsq"
"zkillsusser/models"
"zkillsusser/pipeline/resolver"
logger "git.site.quack-lab.dev/dave/cylogger"
"gorm.io/gorm"
)
// main starts the item-type resolver: it consumes type IDs from NSQ and
// fills in the type/group/category/market-group names in ClickHouse.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing item type resolver: reading from NSQ and updating ClickHouse")
	err := resolver.Run(lnsq.TopicMissingTypeNameQueue, lnsq.ChannelItemTypeResolver, lnsq.BaseConsumers, resolveItemType)
	if err != nil {
		logger.Error("Failed to start resolver: %v", err)
	}
}
// resolveItemType fills in the name, group, category, and market-group
// columns for type id across the ClickHouse killmail tables, using the local
// SDE database. Types absent from the SDE are written with "unknown"
// placeholders so they stop showing up as missing and are not requeued.
func resolveItemType(ctx context.Context, chClient *clickhouse.ClickhouseClient, dbClient db.DB, id int64) error {
	typeName, groupName, categoryName, marketGroupName := "unknown", "unknown", "unknown", "unknown"
	invType, err := dbClient.GetType(ctx, int32(id))
	switch {
	case err == nil:
		typeName, groupName, categoryName, marketGroupName = namesFromType(ctx, dbClient, invType, id)
	case errors.Is(err, gorm.ErrRecordNotFound):
		// Keep the "unknown" placeholders for types the SDE does not know.
	default:
		return fmt.Errorf("failed to get type %d from database: %w", id, err)
	}
	logger.Info("Updating info for type %d: %s (%q, %q, %q)", id, typeName, groupName, categoryName, marketGroupName)
	for _, query := range []string{
		fmt.Sprintf("ALTER TABLE zkill.killmail_items UPDATE item_type_name = '%s' WHERE item_type_id = %d", typeName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE victim_ship_type_name = '%s' WHERE victim_ship_type_id = %d", typeName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_attackers UPDATE ship_type_name = '%s' WHERE ship_type_id = %d", typeName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_attackers UPDATE weapon_type_name = '%s' WHERE weapon_type_id = %d", typeName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_items UPDATE item_group_name = '%s' WHERE item_type_id = %d", groupName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_items UPDATE item_category_name = '%s' WHERE item_type_id = %d", categoryName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmail_items UPDATE item_market_group_name = '%s' WHERE item_type_id = %d", marketGroupName, id),
	} {
		if err := chClient.Exec(ctx, query); err != nil {
			return fmt.Errorf("failed to update type name for type_id %d: %w", id, err)
		}
	}
	return nil
}
// namesFromType resolves the display names associated with invType: its own
// name plus the names of its group, the group's category, and its market
// group. Failed lookups are logged and reported as "unknown" rather than
// aborting resolution.
func namesFromType(ctx context.Context, dbClient db.DB, invType *models.InvType, id int64) (typeName, groupName, categoryName, marketGroupName string) {
	typeName = escapeString(invType.TypeName)
	groupName, categoryName, marketGroupName = "unknown", "unknown", "unknown"
	invGroup, err := dbClient.GetGroup(ctx, invType.GroupID)
	if err != nil {
		logger.Warning("failed to get group %d for type %d: %v", invType.GroupID, id, err)
	} else {
		groupName = escapeString(invGroup.GroupName)
		// Only chase the category when the group lookup succeeded: the
		// original read invGroup.CategoryID even after a failed lookup,
		// which panics when invGroup is nil.
		invCategory, err := dbClient.GetCategory(ctx, invGroup.CategoryID)
		if err != nil {
			logger.Warning("failed to get category %d for group %d: %v", invGroup.CategoryID, invType.GroupID, err)
		} else {
			categoryName = escapeString(invCategory.CategoryName)
		}
	}
	invMarketGroup, err := dbClient.GetMarketGroup(ctx, invType.MarketGroupID)
	if err != nil {
		logger.Warning("failed to get market group %d for type %d: %v", invType.MarketGroupID, id, err)
	} else {
		marketGroupName = escapeString(invMarketGroup.MarketGroupName)
	}
	return typeName, groupName, categoryName, marketGroupName
}
// escapeString doubles backslashes and single quotes so s can be embedded
// safely inside a single-quoted ClickHouse string literal.
func escapeString(s string) string {
	escaped := strings.ReplaceAll(s, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, "'", "''")
	return escaped
}

View File

@@ -0,0 +1,63 @@
package main
import (
"context"
"fmt"
"strings"
"zkillsusser/clickhouse"
"zkillsusser/db"
"zkillsusser/lnsq"
"zkillsusser/pipeline/resolver"
logger "git.site.quack-lab.dev/dave/cylogger"
)
// main starts the solar-system-name resolver: it consumes system IDs from
// NSQ and fills in system, constellation, and region names in ClickHouse.
func main() {
	logger.InitFlag()
	logger.Info("Starting missing solar system name resolver: reading from NSQ and updating ClickHouse")
	err := resolver.Run(lnsq.TopicMissingSolarSystemNameQueue, "solar-system-resolver", lnsq.BaseConsumers, resolveSolarSystem)
	if err != nil {
		logger.Error("Failed to start resolver: %v", err)
	}
}
// resolveSolarSystem copies the system, constellation, and region names for
// solar system id from the local SDE database into zkill.killmails.
// Any failed SDE lookup aborts the resolution so the message is retried.
func resolveSolarSystem(ctx context.Context, chClient *clickhouse.ClickhouseClient, dbClient db.DB, id int64) error {
	system, err := dbClient.GetSolarSystem(ctx, int32(id))
	if err != nil {
		return fmt.Errorf("failed to get solar system %d from database: %w", id, err)
	}
	constellation, err := dbClient.GetConstellation(ctx, system.ConstellationID)
	if err != nil {
		return fmt.Errorf("failed to get constellation %d for system %d: %w", system.ConstellationID, id, err)
	}
	region, err := dbClient.GetRegion(ctx, system.RegionID)
	if err != nil {
		return fmt.Errorf("failed to get region %d for system %d: %w", system.RegionID, id, err)
	}
	systemName := escapeString(system.SolarSystemName)
	constellationName := escapeString(constellation.ConstellationName)
	regionName := escapeString(region.RegionName)
	logger.Info("Updating solar system info for system_id %d: %s (constellation: %s, region: %s)", id, systemName, constellationName, regionName)
	for _, query := range []string{
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE solar_system_name = '%s' WHERE solar_system_id = %d", systemName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE constellation_name = '%s' WHERE solar_system_id = %d", constellationName, id),
		fmt.Sprintf("ALTER TABLE zkill.killmails UPDATE region_name = '%s' WHERE solar_system_id = %d", regionName, id),
	} {
		if err := chClient.Exec(ctx, query); err != nil {
			return fmt.Errorf("failed to update solar system name for system_id %d: %w", id, err)
		}
	}
	return nil
}
// escapeString doubles backslashes and single quotes so s can be embedded
// safely inside a single-quoted ClickHouse string literal.
func escapeString(s string) string {
	escaped := strings.ReplaceAll(s, `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, "'", "''")
	return escaped
}

View File

@@ -0,0 +1,126 @@
package resolver
import (
"context"
"fmt"
"strconv"
"time"
"zkillsusser/clickhouse"
"zkillsusser/config"
"zkillsusser/db"
"zkillsusser/lnsq"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/nsqio/go-nsq"
)
// Run starts nConsumers NSQ consumers on topic/channel, each with its own
// ClickHouse connection, dispatching every message to resolveID. It blocks
// forever once the consumers are up, logging the topic depth once a minute,
// and returns an error only when setup fails.
func Run(topic, channel string, nConsumers int, resolveID func(context.Context, *clickhouse.ClickhouseClient, db.DB, int64) error) error {
	if err := config.InitConfig(); err != nil {
		return fmt.Errorf("failed to initialize config: %w", err)
	}
	logger.InitFlag()
	// One shared SDE database handle for all consumers.
	dbClient, err := db.GetDB("../../sqlite-latest.sqlite")
	if err != nil {
		return fmt.Errorf("failed to get database client: %w", err)
	}
	var consumers []*nsq.Consumer
	stopAll := func() {
		for _, c := range consumers {
			c.Stop()
		}
	}
	for i := 0; i < nConsumers; i++ {
		// Each consumer gets its own ClickHouse client so handlers never
		// share a connection.
		chClient, err := clickhouse.NewClient()
		if err != nil {
			stopAll()
			return fmt.Errorf("failed to create ClickHouse client for consumer %d: %w", i, err)
		}
		handler := &NSQHandler{
			chClient:  chClient,
			dbClient:  dbClient,
			resolveID: resolveID,
		}
		consumer, err := lnsq.ConnectToNSQ(topic, channel, handler)
		if err != nil {
			chClient.Close()
			stopAll()
			return fmt.Errorf("failed to connect consumer %d to NSQ: %w", i, err)
		}
		consumers = append(consumers, consumer)
		logger.Info("Connected consumer %d/%d to NSQ, topic: %s, channel: %s", i+1, nConsumers, topic, channel)
	}
	defer stopAll()
	logger.Info("All %d consumers connected, consuming from topic: %s, channel: %s", nConsumers, topic, channel)
	depthTicker := time.NewTicker(60 * time.Second)
	defer depthTicker.Stop()
	logTopicDepth(topic)
	// Idiomatic replacement for the original single-case for/select loop:
	// block on the ticker and log depth each minute. The ticker channel is
	// never closed, so this effectively runs until the process dies.
	for range depthTicker.C {
		logTopicDepth(topic)
	}
	return nil
}
// logTopicDepth reports how many messages remain on topic; fetch failures
// are logged and otherwise ignored.
func logTopicDepth(topic string) {
	if depth, err := lnsq.GetTopicDepth(topic); err != nil {
		logger.Error("Failed to get topic depth for %s: %v", topic, err)
	} else {
		logger.Info("Topic %s has %d remaining messages", topic, depth)
	}
}
// NSQHandler adapts a resolveID callback to the nsq.Handler interface.
// Each handler owns its own ClickHouse client; dbClient is shared.
type NSQHandler struct {
	chClient  *clickhouse.ClickhouseClient
	dbClient  db.DB
	resolveID func(context.Context, *clickhouse.ClickhouseClient, db.DB, int64) error
}
// HandleMessage parses a decimal ID from the message body and runs resolveID
// on it, touching the message once a second so long-running resolutions are
// not timed out and requeued by nsqd. Returning an error causes NSQ to
// requeue the message.
func (h *NSQHandler) HandleMessage(message *nsq.Message) error {
	id, err := strconv.ParseInt(string(message.Body), 10, 64)
	if err != nil {
		// A malformed body will never parse on retry; finish it instead of
		// returning the error, which would requeue it forever.
		logger.Error("Failed to parse ID from message: %v", err)
		message.Finish()
		return nil
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Keep the in-flight message alive while resolveID works; the goroutine
	// exits when cancel runs at function return.
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				message.Touch()
			case <-ctx.Done():
				return
			}
		}
	}()
	if err := h.resolveID(ctx, h.chClient, h.dbClient, id); err != nil {
		logger.Error("Failed to resolve ID %d: %v", id, err)
		return err
	}
	message.Finish()
	return nil
}

156
types.go
View File

@@ -1,156 +0,0 @@
package main
import "time"
type Killmail struct {
Attackers []Attacker `json:"attackers"`
KillmailID int64 `json:"killmail_id"`
KillmailTime time.Time `json:"killmail_time"`
SolarSystemID int64 `json:"solar_system_id"`
Victim Victim `json:"victim"`
KillmailHash string `json:"killmail_hash"`
HTTPLastModified time.Time `json:"http_last_modified"`
}
type Attacker struct {
AllianceID int64 `json:"alliance_id"`
CharacterID int64 `json:"character_id"`
CorporationID int64 `json:"corporation_id"`
DamageDone int64 `json:"damage_done"`
FinalBlow bool `json:"final_blow"`
SecurityStatus float64 `json:"security_status"`
ShipTypeID int64 `json:"ship_type_id"`
WeaponTypeID int64 `json:"weapon_type_id"`
}
type Victim struct {
AllianceID int64 `json:"alliance_id"`
CharacterID int64 `json:"character_id"`
CorporationID int64 `json:"corporation_id"`
DamageTaken int64 `json:"damage_taken"`
Items []Item `json:"items"`
Position Position `json:"position"`
ShipTypeID int64 `json:"ship_type_id"`
}
type Item struct {
Flag int64 `json:"flag"`
ItemTypeID int64 `json:"item_type_id"`
QuantityDestroyed *int64 `json:"quantity_destroyed,omitempty"`
Singleton int64 `json:"singleton"`
QuantityDropped *int64 `json:"quantity_dropped,omitempty"`
}
type Position struct {
X float64 `json:"x"`
Y float64 `json:"y"`
Z float64 `json:"z"`
}
// Helper functions
func boolToUint8(b bool) uint8 {
if b {
return 1
}
return 0
}
func derefInt64(ptr *int64) int64 {
if ptr == nil {
return 0
}
return *ptr
}
type ModuleSlot string
var (
ModuleSlotLow ModuleSlot = "Low"
ModuleSlotMid ModuleSlot = "Mid"
ModuleSlotHigh ModuleSlot = "High"
ModuleSlotRig ModuleSlot = "Rig"
ModuleSlotSubsystem ModuleSlot = "Subsystem"
ModuleSlotDrone ModuleSlot = "Drone"
ModuleSlotOther ModuleSlot = "Other"
)
// region Other various types
type QueryParams struct {
Ship int64
Systems []int64
Modules []int64
Groups []int64
KillmailLimit int
}
type ModuleStatsData struct {
KillmailIDs []int64
}
type ComprehensiveStatsData struct {
QueryParams
KillmailLimit int
}
// CacheEntry stores both statistics (JSON) and images (blobs) in unified cache
// For 404s, we store a special marker: []byte{0xFF, 0xFE, 0xFD} (NOT_FOUND_MARKER)
type CacheEntry struct {
Key string `gorm:"primaryKey"`
Data []byte `gorm:"type:BLOB;not null"`
CreatedAt time.Time `gorm:"not null;index"`
}
var notFoundMarker = []byte{0xFF, 0xFE, 0xFD} // Special marker for cached 404s
func (CacheEntry) TableName() string {
return "cache_entries"
}
type FitStatistics struct {
TotalKillmails int64
ShipBreakdown map[int64]int64
SystemBreakdown map[int64]int64
HighSlotModules map[int32]int64
MidSlotModules map[int32]int64
LowSlotModules map[int32]int64
Rigs map[int32]int64
Drones map[int32]int64
KillmailIDs []int64
}
type Character struct {
AllianceID int64 `json:"alliance_id"`
Birthday time.Time `json:"birthday"`
BloodlineID int64 `json:"bloodline_id"`
CorporationID int64 `json:"corporation_id"`
Description string `json:"description"`
Gender string `json:"gender"`
Name string `json:"name"`
RaceID int64 `json:"race_id"`
SecurityStatus float64 `json:"security_status"`
}
type Corporation struct {
AllianceID int64 `json:"alliance_id"`
CeoID int64 `json:"ceo_id"`
CreatorID int64 `json:"creator_id"`
DateFounded time.Time `json:"date_founded"`
Description string `json:"description"`
HomeStationID int64 `json:"home_station_id"`
MemberCount int64 `json:"member_count"`
Name string `json:"name"`
Shares int64 `json:"shares"`
TaxRate float64 `json:"tax_rate"`
Ticker string `json:"ticker"`
URL string `json:"url"`
WarEligible bool `json:"war_eligible"`
}
type Alliance struct {
CreatorCorporationID int64 `json:"creator_corporation_id"`
CreatorID int64 `json:"creator_id"`
DateFounded time.Time `json:"date_founded"`
ExecutorCorporationID int64 `json:"executor_corporation_id"`
Name string `json:"name"`
Ticker string `json:"ticker"`
}

60
types/esi_killmail.go Normal file
View File

@@ -0,0 +1,60 @@
package types
import "time"
// Killmail is the ESI JSON representation of a killmail as delivered by the
// zKillboard feed, including the zkill-added hash and last-modified fields.
type Killmail struct {
	Attackers        []Attacker `json:"attackers"`
	KillmailID       int64      `json:"killmail_id"`
	KillmailTime     time.Time  `json:"killmail_time"`
	SolarSystemID    int64      `json:"solar_system_id"`
	Victim           Victim     `json:"victim"`
	KillmailHash     string     `json:"killmail_hash"`
	HTTPLastModified time.Time  `json:"http_last_modified"`
}
// Attacker is one attacker entry on a killmail. IDs left at zero indicate
// the field was absent from the source JSON (Flatten treats a zero
// AllianceID as "no alliance").
type Attacker struct {
	AllianceID     int64   `json:"alliance_id"`
	CharacterID    int64   `json:"character_id"`
	CorporationID  int64   `json:"corporation_id"`
	DamageDone     int64   `json:"damage_done"`
	FinalBlow      bool    `json:"final_blow"`
	SecurityStatus float64 `json:"security_status"`
	ShipTypeID     int64   `json:"ship_type_id"`
	WeaponTypeID   int64   `json:"weapon_type_id"`
}
// Victim describes the killed party: identity, damage taken, dropped or
// destroyed items, and the position of the kill.
type Victim struct {
	AllianceID    int64    `json:"alliance_id"`
	CharacterID   int64    `json:"character_id"`
	CorporationID int64    `json:"corporation_id"`
	DamageTaken   int64    `json:"damage_taken"`
	Items         []Item   `json:"items"`
	Position      Position `json:"position"`
	ShipTypeID    int64    `json:"ship_type_id"`
}
// Item is a single fitted or carried item on the victim's ship. The two
// quantity fields are pointers because ESI omits whichever does not apply.
type Item struct {
	Flag              int64  `json:"flag"`
	ItemTypeID        int64  `json:"item_type_id"`
	QuantityDestroyed *int64 `json:"quantity_destroyed,omitempty"`
	Singleton         int64  `json:"singleton"`
	QuantityDropped   *int64 `json:"quantity_dropped,omitempty"`
}
// Position is the in-space coordinate of the kill.
type Position struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
	Z float64 `json:"z"`
}
// ModuleSlot classifies where a fitted item sits on a ship.
type ModuleSlot string

// Recognized module slot values.
// NOTE(review): these are effectively constants; consider declaring them
// with `const` — left as `var` here to avoid changing the package interface.
var (
	ModuleSlotLow       ModuleSlot = "Low"
	ModuleSlotMid       ModuleSlot = "Mid"
	ModuleSlotHigh      ModuleSlot = "High"
	ModuleSlotRig       ModuleSlot = "Rig"
	ModuleSlotSubsystem ModuleSlot = "Subsystem"
	ModuleSlotDrone     ModuleSlot = "Drone"
	ModuleSlotOther     ModuleSlot = "Other"
)

40
types/esi_types.go Normal file
View File

@@ -0,0 +1,40 @@
package types
import "time"
// Character mirrors the ESI /characters/{id}/ response payload.
type Character struct {
	AllianceID     int64     `json:"alliance_id"`
	Birthday       time.Time `json:"birthday"`
	BloodlineID    int64     `json:"bloodline_id"`
	CorporationID  int64     `json:"corporation_id"`
	Description    string    `json:"description"`
	Gender         string    `json:"gender"`
	Name           string    `json:"name"`
	RaceID         int64     `json:"race_id"`
	SecurityStatus float64   `json:"security_status"`
}
// Corporation mirrors the ESI /corporations/{id}/ response payload.
type Corporation struct {
	AllianceID    int64     `json:"alliance_id"`
	CeoID         int64     `json:"ceo_id"`
	CreatorID     int64     `json:"creator_id"`
	DateFounded   time.Time `json:"date_founded"`
	Description   string    `json:"description"`
	HomeStationID int64     `json:"home_station_id"`
	MemberCount   int64     `json:"member_count"`
	Name          string    `json:"name"`
	Shares        int64     `json:"shares"`
	TaxRate       float64   `json:"tax_rate"`
	Ticker        string    `json:"ticker"`
	URL           string    `json:"url"`
	WarEligible   bool      `json:"war_eligible"`
}
// Alliance mirrors the ESI /alliances/{id}/ response payload.
type Alliance struct {
	CreatorCorporationID  int64     `json:"creator_corporation_id"`
	CreatorID             int64     `json:"creator_id"`
	DateFounded           time.Time `json:"date_founded"`
	ExecutorCorporationID int64     `json:"executor_corporation_id"`
	Name                  string    `json:"name"`
	Ticker                string    `json:"ticker"`
}

128
types/flat_killmail.go Normal file
View File

@@ -0,0 +1,128 @@
package types
// FlatKillmail is the denormalized killmail row stored in zkill.killmails.
// Name columns (solar system, character, ship, ...) start empty and are
// filled in later by the resolver pipeline. Timestamps are pre-formatted
// strings ("2006-01-02 15:04:05" layout, see Flatten).
type FlatKillmail struct {
	KillmailID             int64   `json:"killmail_id"`
	KillmailHash           string  `json:"killmail_hash"`
	KillmailTime           string  `json:"killmail_time"`
	SolarSystemID          int32   `json:"solar_system_id"`
	SolarSystemName        string  `json:"solar_system_name"`
	ConstellationName      string  `json:"constellation_name"`
	RegionName             string  `json:"region_name"`
	Security               float32 `json:"security"`
	VictimCharacterID      int64   `json:"victim_character_id"`
	VictimCharacterName    string  `json:"victim_character_name"`
	VictimCorporationID    int64   `json:"victim_corporation_id"`
	VictimCorporationName  string  `json:"victim_corporation_name"`
	// Pointer so a missing alliance is stored as NULL rather than 0.
	VictimAllianceID       *int64  `json:"victim_alliance_id"`
	VictimAllianceName     string  `json:"victim_alliance_name"`
	VictimShipTypeID       int32   `json:"victim_ship_type_id"`
	VictimShipTypeName     string  `json:"victim_ship_type_name"`
	VictimShipGroupName    string  `json:"victim_ship_group_name"`
	VictimShipCategoryName string  `json:"victim_ship_category_name"`
	VictimDamageTaken      int64   `json:"victim_damage_taken"`
	AttackerCount          uint16  `json:"attacker_count"`
	HTTPLastModified       string  `json:"http_last_modified"`
}
// FlatKillmailAttacker is the denormalized attacker row stored in
// zkill.killmail_attackers, keyed back to its killmail by KillmailID.
// Name columns start empty and are filled in by the resolver pipeline.
type FlatKillmailAttacker struct {
	KillmailID      int64   `json:"killmail_id"`
	CharacterID     int64   `json:"character_id"`
	CharacterName   string  `json:"character_name"`
	CorporationID   int64   `json:"corporation_id"`
	CorporationName string  `json:"corporation_name"`
	// Pointer so a missing alliance is stored as NULL rather than 0.
	AllianceID      *int64  `json:"alliance_id"`
	AllianceName    string  `json:"alliance_name"`
	ShipTypeID      int32   `json:"ship_type_id"`
	ShipTypeName    string  `json:"ship_type_name"`
	ShipGroupName   string  `json:"ship_group_name"`
	WeaponTypeID    int32   `json:"weapon_type_id"`
	WeaponTypeName  string  `json:"weapon_type_name"`
	DamageDone      int64   `json:"damage_done"`
	FinalBlow       bool    `json:"final_blow"`
	SecurityStatus  float32 `json:"security_status"`
}
// FlatKillmailItem is the denormalized item row stored in
// zkill.killmail_items. Quantities are plain ints (0 when the ESI source
// omitted them); name columns are filled in by the resolver pipeline.
type FlatKillmailItem struct {
	KillmailID          int64  `json:"killmail_id"`
	ItemTypeID          int32  `json:"item_type_id"`
	ItemTypeName        string `json:"item_type_name"`
	ItemGroupName       string `json:"item_group_name"`
	ItemCategoryName    string `json:"item_category_name"`
	ItemMarketGroupName string `json:"item_market_group_name"`
	Flag                int32  `json:"flag"`
	SlotType            string `json:"slot_type"`
	QuantityDestroyed   int64  `json:"quantity_destroyed"`
	QuantityDropped     int64  `json:"quantity_dropped"`
	Singleton           int32  `json:"singleton"`
}
// Flatten converts an ESI killmail into the flat row types stored in
// ClickHouse: one killmail row, one row per attacker, and one row per item.
// Alliance IDs of zero are treated as "no alliance" and left nil (NULL).
func (k *Killmail) Flatten() (*FlatKillmail, []FlatKillmailAttacker, []FlatKillmailItem) {
	flat := &FlatKillmail{
		KillmailID:          k.KillmailID,
		KillmailHash:        k.KillmailHash,
		KillmailTime:        k.KillmailTime.Format("2006-01-02 15:04:05"),
		HTTPLastModified:    k.HTTPLastModified.Format("2006-01-02 15:04:05"),
		AttackerCount:       uint16(len(k.Attackers)),
		SolarSystemID:       int32(k.SolarSystemID),
		VictimCharacterID:   k.Victim.CharacterID,
		VictimCorporationID: k.Victim.CorporationID,
		VictimShipTypeID:    int32(k.Victim.ShipTypeID),
		VictimDamageTaken:   k.Victim.DamageTaken,
	}
	if k.Victim.AllianceID != 0 {
		flat.VictimAllianceID = &k.Victim.AllianceID
	}
	attackers := make([]FlatKillmailAttacker, len(k.Attackers))
	for i, attacker := range k.Attackers {
		attackers[i] = FlatKillmailAttacker{
			KillmailID:     k.KillmailID,
			CharacterID:    attacker.CharacterID,
			CorporationID:  attacker.CorporationID,
			ShipTypeID:     int32(attacker.ShipTypeID),
			WeaponTypeID:   int32(attacker.WeaponTypeID),
			DamageDone:     attacker.DamageDone,
			FinalBlow:      attacker.FinalBlow,
			SecurityStatus: float32(attacker.SecurityStatus),
		}
		if attacker.AllianceID != 0 {
			// Copy the value before taking its address: the original took
			// &attacker.AllianceID, the address of the range variable, so
			// before Go 1.22 every attacker row would alias one variable
			// and end up with the last attacker's alliance ID.
			aid := attacker.AllianceID
			attackers[i].AllianceID = &aid
		}
	}
	items := make([]FlatKillmailItem, 0, len(k.Victim.Items))
	for _, item := range k.Victim.Items {
		items = append(items, FlatKillmailItem{
			KillmailID:        k.KillmailID,
			ItemTypeID:        int32(item.ItemTypeID),
			Flag:              int32(item.Flag),
			QuantityDestroyed: derefInt64(item.QuantityDestroyed),
			QuantityDropped:   derefInt64(item.QuantityDropped),
			Singleton:         int32(item.Singleton),
		})
	}
	return flat, attackers, items
}
// derefInt64 returns the value pointed to by i, or 0 when i is nil.
func derefInt64(i *int64) int64 {
	v := int64(0)
	if i != nil {
		v = *i
	}
	return v
}
// NSQKillmail bundles a flattened killmail with its attacker and item rows
// into a single message payload for transport over NSQ.
type NSQKillmail struct {
	Killmail  *FlatKillmail          `json:"killmail"`
	Attackers []FlatKillmailAttacker `json:"attackers"`
	Items     []FlatKillmailItem     `json:"items"`
}