// zkill-susser/db.go
package main
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"zkillsusser/models"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/schema"
)
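// QueryParams describes the filters applied by QueryFits: an optional victim
// ship type, solar systems, explicit module type IDs, module groups (expanded
// to type IDs), and a cap on how many matching killmail IDs are returned.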
type QueryParams struct {
Ship int64
Systems []int64
Modules []int64
Groups []int64
KillmailLimit int
}
// CacheEntry stores both statistics (JSON) and images (blobs) in a unified cache.
// For 404s, a special marker is stored instead of the payload: []byte{0xFF, 0xFE, 0xFD} (notFoundMarker).
type CacheEntry struct {
ID int64 `gorm:"primaryKey;autoIncrement"`
CacheKey string `gorm:"column:cache_key;not null"`
Data []byte `gorm:"column:data;type:BLOB;not null"`
CreatedAt time.Time `gorm:"column:created_at;not null"`
}
var notFoundMarker = []byte{0xFF, 0xFE, 0xFD} // Special marker for cached 404s
func (CacheEntry) TableName() string {
return "cache_entries"
}
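// FitStatistics is the aggregated result of a fit query: the total number of
// matching killmails, per-ship and per-system breakdowns, per-slot module
// usage (keyed by type ID), and a limited sample of killmail IDs.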
type FitStatistics struct {
TotalKillmails int64
ShipBreakdown map[int64]SystemStats // shipTypeID -> {Count, Percentage}
SystemBreakdown map[int64]SystemStats // systemID -> {Count, Percentage}
HighSlotModules map[int32]ModuleStats // typeID -> {Count, Percentage}
MidSlotModules map[int32]ModuleStats // typeID -> {Count, Percentage}
LowSlotModules map[int32]ModuleStats // typeID -> {Count, Percentage}
Rigs map[int32]ModuleStats // typeID -> {Count, Percentage}
Drones map[int32]ModuleStats // typeID -> {Count, Percentage}
KillmailIDs []int64
}
type SystemStats struct {
Count int64
Percentage float64
}
type ModuleStats struct {
Count int64
Percentage float64
}
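// DB abstracts the two backing stores: ClickHouse for killmail data, and the
// SQLite EVE static-data export for name lookups, searches, and the local
// response cache.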
type DB interface {
SaveKillmails(killmails []Killmail) error
InitTables() error
QueryFits(params QueryParams) (*FitStatistics, error)
GetCacheEntry(cacheKey string, maxAge time.Duration) ([]byte, bool)
CacheEntry(cacheKey string, data []byte) error
SearchShips(query string, limit int) ([]models.InvType, error)
SearchSystems(query string, limit int) ([]models.MapSolarSystem, error)
SearchModules(query string, limit int) ([]models.InvType, error)
SearchGroups(query string, limit int) ([]models.InvGroup, error)
GetItemNames(ids []int32) (map[string]string, error)
GetItemGroup(itemID int64) (int64, error)
}
type DBWrapper struct {
ch driver.Conn
gormDB *gorm.DB // For SQLite (EVE static data)
}
var db *DBWrapper
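// GetDB lazily initializes the shared DBWrapper (ClickHouse over HTTP plus the
// sqlite-latest.sqlite static-data file) and returns it on subsequent calls.
// Initialization is not guarded by a mutex or sync.Once, so it is assumed to be
// called from a single goroutine during startup.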
func GetDB() (DB, error) {
if db != nil {
return db, nil
}
// ClickHouse connection over the HTTP interface (port 8123 on a default install)
// Point Addr below at your ClickHouse host IP or hostname
options := &clickhouse.Options{
Addr: []string{"clickhouse.site.quack-lab.dev"}, // TODO: Change to your Linux host, e.g., "192.168.1.100:8123" or "clickhouse.example.com:8123"
Auth: clickhouse.Auth{
Database: "zkill",
Username: "default",
Password: "", // Set if you configure a password on Linux host
},
Protocol: clickhouse.HTTP,
Settings: clickhouse.Settings{
"max_query_size": 100000000, // allow larger queries when filters generate big IN (...) lists
},
}
conn, err := clickhouse.Open(options)
if err != nil {
return nil, fmt.Errorf("failed to connect to ClickHouse: %w", err)
}
// SQLite connection for EVE static data
sqliteDB, err := gorm.Open(sqlite.Open("sqlite-latest.sqlite"), &gorm.Config{
NamingStrategy: schema.NamingStrategy{
NoLowerCase: true,
},
})
if err != nil {
return nil, fmt.Errorf("failed to connect to SQLite: %w", err)
}
db = &DBWrapper{
ch: conn,
gormDB: sqliteDB,
}
return db, nil
}
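// InitTables creates the SQLite cache table and index, the ClickHouse
// flat_killmails and fitted_modules MergeTree tables (all with IF NOT EXISTS),
// and purges cache entries older than three days.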
func (db *DBWrapper) InitTables() error {
ctx := context.Background()
// Create the unified cache table
// Raw SQL with IF NOT EXISTS keeps repeated startups from erroring out
// For 404s, a special marker byte sequence is stored instead of NULL
if err := db.gormDB.Exec(`
CREATE TABLE IF NOT EXISTS cache_entries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
cache_key TEXT NOT NULL,
data BLOB NOT NULL,
created_at DATETIME NOT NULL
)
`).Error; err != nil {
return fmt.Errorf("failed to create cache_entries table: %w", err)
}
// Create index if it doesn't exist
db.gormDB.Exec(`CREATE INDEX IF NOT EXISTS idx_cache_key_created ON cache_entries(cache_key, created_at)`)
// Create flat_killmails table
createFlatKillmails := `
CREATE TABLE IF NOT EXISTS flat_killmails (
killmail_id Int64,
killmail_time DateTime,
solar_system_id Int64,
killmail_hash String,
victim_ship_type_id Int64,
victim_character_id Int64,
victim_corporation_id Int64,
victim_alliance_id Int64,
victim_damage_taken Int64,
victim_pos_x Float64,
victim_pos_y Float64,
victim_pos_z Float64,
attacker_count UInt16,
total_damage_done Int64,
final_blow_ship_type Int64,
attackers Array(Tuple(
Int64, -- character_id
Int64, -- corporation_id
Int64, -- alliance_id
Int64, -- ship_type_id
Int64, -- weapon_type_id
Int64, -- damage_done
UInt8, -- final_blow
Float64 -- security_status
)),
items Array(Tuple(
Int64, -- flag
Int64, -- item_type_id
Int64, -- quantity_destroyed
Int64, -- quantity_dropped
Int64 -- singleton
))
) ENGINE = MergeTree()
ORDER BY (killmail_id)
PRIMARY KEY (killmail_id)`
if err := db.ch.Exec(ctx, createFlatKillmails); err != nil {
return fmt.Errorf("failed to create flat_killmails table: %w", err)
}
// Create fitted_modules table
createFittedModules := `
CREATE TABLE IF NOT EXISTS fitted_modules (
killmail_id Int64,
killmail_time DateTime,
solar_system_id Int64,
victim_ship_type_id Int64,
item_type_id Int64,
flag Int64,
quantity_destroyed Int64,
quantity_dropped Int64
) ENGINE = MergeTree()
ORDER BY (killmail_id, item_type_id, flag)
PRIMARY KEY (killmail_id, item_type_id, flag)`
if err := db.ch.Exec(ctx, createFittedModules); err != nil {
return fmt.Errorf("failed to create fitted_modules table: %w", err)
}
// Clean up old cache entries (older than 3 days)
err := db.gormDB.Exec(`DELETE FROM cache_entries WHERE created_at < datetime('now', '-3 days')`).Error
if err != nil {
return fmt.Errorf("failed to clean old cache: %w", err)
}
return nil
}
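// SaveKillmails inserts killmails into ClickHouse in chunks of 1000. For each
// chunk it skips IDs already seen in this call or already present in
// flat_killmails (the existence check is best effort: if it fails, the chunk
// is inserted anyway), then appends one row per killmail to flat_killmails and
// one row per fitted item to fitted_modules before sending both batches.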
func (db *DBWrapper) SaveKillmails(killmails []Killmail) error {
ctx := context.Background()
// Prepare batch for flat_killmails
flatBatch, err := db.ch.PrepareBatch(ctx, "INSERT INTO flat_killmails")
if err != nil {
return fmt.Errorf("failed to prepare flat_killmails batch: %w", err)
}
// Prepare batch for fitted_modules
moduleBatch, err := db.ch.PrepareBatch(ctx, "INSERT INTO fitted_modules")
if err != nil {
return fmt.Errorf("failed to prepare fitted_modules batch: %w", err)
}
// Process in batches with deduplication
batchSize := 1000
seenKillmails := make(map[int64]bool)
for i := 0; i < len(killmails); i += batchSize {
end := i + batchSize
if end > len(killmails) {
end = len(killmails)
}
// Batch check for existing killmails
batchIDs := make([]int64, 0, end-i)
for _, km := range killmails[i:end] {
if !seenKillmails[km.KillmailID] {
batchIDs = append(batchIDs, km.KillmailID)
seenKillmails[km.KillmailID] = true
}
}
if len(batchIDs) > 0 {
// Check which ones already exist in database
placeholders := make([]string, len(batchIDs))
args := make([]interface{}, len(batchIDs))
for j, id := range batchIDs {
placeholders[j] = "?"
args[j] = id
}
checkQuery := fmt.Sprintf("SELECT killmail_id FROM flat_killmails WHERE killmail_id IN (%s)", strings.Join(placeholders, ","))
rows, err := db.ch.Query(ctx, checkQuery, args...)
if err == nil {
existing := make(map[int64]bool)
for rows.Next() {
var id int64
if rows.Scan(&id) == nil {
existing[id] = true
}
}
rows.Close()
// Remove existing from batch
filtered := batchIDs[:0]
for _, id := range batchIDs {
if !existing[id] {
filtered = append(filtered, id)
}
}
batchIDs = filtered
}
}
// Create map for fast lookup
allowedIDs := make(map[int64]bool)
for _, id := range batchIDs {
allowedIDs[id] = true
}
for _, km := range killmails[i:end] {
if !allowedIDs[km.KillmailID] {
continue // Skip duplicate
}
flat := km.FlattenKillmail()
modules := km.ExtractFittedModules()
// Append to flat_killmails batch
if err := flatBatch.Append(
flat.KillmailID,
flat.KillmailTime,
flat.SolarSystemID,
flat.KillmailHash,
flat.VictimShipTypeID,
flat.VictimCharacterID,
flat.VictimCorporationID,
flat.VictimAllianceID,
flat.VictimDamageTaken,
flat.VictimPosX,
flat.VictimPosY,
flat.VictimPosZ,
flat.AttackerCount,
flat.TotalDamageDone,
flat.FinalBlowShipType,
flat.Attackers,
flat.Items,
); err != nil {
return fmt.Errorf("failed to append flat killmail: %w", err)
}
// Append modules to fitted_modules batch
for _, mod := range modules {
if err := moduleBatch.Append(
mod.KillmailID,
mod.KillmailTime,
mod.SolarSystemID,
mod.VictimShipTypeID,
mod.ItemTypeID,
mod.Flag,
mod.QuantityDestroyed,
mod.QuantityDropped,
); err != nil {
return fmt.Errorf("failed to append module: %w", err)
}
}
}
// Send both batches for this chunk (flushed once per 1000 killmails)
if err := flatBatch.Send(); err != nil {
return fmt.Errorf("failed to send flat_killmails batch: %w", err)
}
if err := moduleBatch.Send(); err != nil {
return fmt.Errorf("failed to send fitted_modules batch: %w", err)
}
// Prepare new batches for next iteration
if end < len(killmails) {
flatBatch, err = db.ch.PrepareBatch(ctx, "INSERT INTO flat_killmails")
if err != nil {
return fmt.Errorf("failed to prepare flat_killmails batch: %w", err)
}
moduleBatch, err = db.ch.PrepareBatch(ctx, "INSERT INTO fitted_modules")
if err != nil {
return fmt.Errorf("failed to prepare fitted_modules batch: %w", err)
}
}
}
return nil
}
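// QueryFits resolves the filters in params to a set of killmails and builds
// FitStatistics from them. With module or group filters it queries
// fitted_modules; otherwise it scans flat_killmails, optionally narrowed by
// ship and system. Ship and system breakdowns are computed in memory and
// module usage is delegated to calculateModuleStats.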
func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
flog := logger.Default.WithPrefix("QueryFits").WithPrefix(fmt.Sprintf("%+v", params))
flog.Info("Starting query")
// Check if params are empty (all fields zero/empty)
isEmpty := params.Ship == 0 && len(params.Systems) == 0 && len(params.Modules) == 0 && len(params.Groups) == 0
modules := deduplicateInt64(params.Modules)
flog.Debug("Deduplicated modules: %d -> %d", len(params.Modules), len(modules))
// Expand groups to typeIDs
var shipTypeIDs []int64
if params.Ship != 0 {
shipTypeIDs = []int64{params.Ship}
}
groupModuleTypeIDs, err := db.groupTypeIDs(params.Groups)
if err != nil {
flog.Error("Failed to expand groups to typeIDs: %v", err)
return nil, err
}
if len(groupModuleTypeIDs) > 0 {
flog.Debug("Expanded %d groups to %d module typeIDs", len(params.Groups), len(groupModuleTypeIDs))
}
ctx := context.Background()
var killmailIDs []int64
var systemIDs []int64
var shipTypeIDsFromResults []int64
moduleFilterIDs := deduplicateInt64(append(modules, groupModuleTypeIDs...))
if len(moduleFilterIDs) > 0 {
modules = moduleFilterIDs
placeholders := make([]string, len(moduleFilterIDs))
moduleArgs := make([]interface{}, len(moduleFilterIDs))
for i, moduleID := range moduleFilterIDs {
placeholders[i] = "?"
moduleArgs[i] = moduleID
}
var shipPlaceholders []string
var shipArgs []interface{}
if len(shipTypeIDs) > 0 {
shipPlaceholders = make([]string, len(shipTypeIDs))
for i, shipID := range shipTypeIDs {
shipPlaceholders[i] = "?"
shipArgs = append(shipArgs, shipID)
}
} else if params.Ship != 0 {
// Only filter by ship when one was actually requested; with Ship == 0 a
// victim_ship_type_id IN (0) clause would match nothing.
shipPlaceholders = []string{"?"}
shipArgs = []interface{}{params.Ship}
}
var moduleQuery string
var args []interface{}
if len(shipPlaceholders) > 0 {
moduleQuery = "SELECT DISTINCT killmail_id, solar_system_id, victim_ship_type_id FROM fitted_modules WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ") AND item_type_id IN (" + strings.Join(placeholders, ",") + ")"
args = shipArgs
args = append(args, moduleArgs...)
} else {
moduleQuery = "SELECT DISTINCT killmail_id, solar_system_id, victim_ship_type_id FROM fitted_modules WHERE item_type_id IN (" + strings.Join(placeholders, ",") + ")"
args = moduleArgs
}
if len(params.Systems) > 0 {
sysPlaceholders := make([]string, len(params.Systems))
for i := range params.Systems {
sysPlaceholders[i] = "?"
args = append(args, params.Systems[i])
}
moduleQuery += " AND solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
}
rows, err := db.ch.Query(ctx, moduleQuery, args...)
if err != nil {
flog.Error("Failed to query filtered killmails: %v", err)
return nil, err
}
for rows.Next() {
var id, systemID, shipTypeID int64
if err := rows.Scan(&id, &systemID, &shipTypeID); err != nil {
rows.Close()
return nil, err
}
killmailIDs = append(killmailIDs, id)
systemIDs = append(systemIDs, systemID)
shipTypeIDsFromResults = append(shipTypeIDsFromResults, shipTypeID)
}
rows.Close()
} else {
// No module filter - query flat_killmails directly
var query string
var args []interface{}
if len(shipTypeIDs) > 0 {
shipPlaceholders := make([]string, len(shipTypeIDs))
for i, shipID := range shipTypeIDs {
shipPlaceholders[i] = "?"
args = append(args, shipID)
}
query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ")"
} else if params.Ship != 0 {
// Only filter by ship when one was requested; with Ship == 0 the
// victim_ship_type_id = 0 predicate would match nothing.
query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails WHERE victim_ship_type_id = ?"
args = []interface{}{params.Ship}
} else {
query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails"
}
if len(params.Systems) > 0 {
placeholders := make([]string, len(params.Systems))
for i := range params.Systems {
placeholders[i] = "?"
args = append(args, params.Systems[i])
}
if strings.Contains(query, "WHERE") {
query += " AND solar_system_id IN (" + strings.Join(placeholders, ",") + ")"
} else {
query += " WHERE solar_system_id IN (" + strings.Join(placeholders, ",") + ")"
}
}
rows, err := db.ch.Query(ctx, query, args...)
if err != nil {
flog.Error("Failed to execute query: %v", err)
return nil, err
}
defer rows.Close()
for rows.Next() {
var killmailID, systemID, shipTypeID int64
if err := rows.Scan(&killmailID, &systemID, &shipTypeID); err != nil {
flog.Error("Failed to scan row: %v", err)
return nil, err
}
killmailIDs = append(killmailIDs, killmailID)
systemIDs = append(systemIDs, systemID)
shipTypeIDsFromResults = append(shipTypeIDsFromResults, shipTypeID)
}
}
totalKillmails := int64(len(killmailIDs))
flog.Info("Found %d killmails after filtering", totalKillmails)
if totalKillmails > 0 {
flog.Debug("Sample killmail IDs: %v", killmailIDs[:min(5, len(killmailIDs))])
}
stats := &FitStatistics{
TotalKillmails: totalKillmails,
ShipBreakdown: make(map[int64]SystemStats),
SystemBreakdown: make(map[int64]SystemStats),
HighSlotModules: make(map[int32]ModuleStats),
MidSlotModules: make(map[int32]ModuleStats),
LowSlotModules: make(map[int32]ModuleStats),
Rigs: make(map[int32]ModuleStats),
Drones: make(map[int32]ModuleStats),
KillmailIDs: limitKillmails(killmailIDs, params.KillmailLimit),
}
if totalKillmails == 0 {
flog.Info("No killmails found, returning empty statistics")
return stats, nil
}
// Calculate ship breakdown if params are empty or we have ship data
if isEmpty || len(shipTypeIDsFromResults) > 0 {
flog.Debug("Calculating ship breakdown")
shipCounts := make(map[int64]int64)
for _, shipTypeID := range shipTypeIDsFromResults {
shipCounts[shipTypeID]++
}
for shipTypeID, count := range shipCounts {
percentage := float64(count) / float64(totalKillmails) * 100.0
stats.ShipBreakdown[shipTypeID] = SystemStats{
Count: count,
Percentage: percentage,
}
}
flog.Debug("Ship breakdown: %d unique ships", len(stats.ShipBreakdown))
}
flog.Debug("Calculating system breakdown")
systemCounts := make(map[int64]int64)
for _, systemID := range systemIDs {
systemCounts[systemID]++
}
// Calculate system percentages
for systemID, count := range systemCounts {
percentage := float64(count) / float64(totalKillmails) * 100.0
stats.SystemBreakdown[systemID] = SystemStats{
Count: count,
Percentage: percentage,
}
}
flog.Debug("System breakdown: %d unique systems", len(stats.SystemBreakdown))
flog.Debug("Calculating module statistics for %d killmails", len(killmailIDs))
if err := db.calculateModuleStats(params, shipTypeIDs, killmailIDs, stats, totalKillmails, flog); err != nil {
flog.Error("Failed to calculate module stats: %v", err)
return nil, err
}
flog.Info("Statistics calculated: %d high, %d mid, %d low, %d rigs, %d drones",
len(stats.HighSlotModules), len(stats.MidSlotModules), len(stats.LowSlotModules),
len(stats.Rigs), len(stats.Drones))
return stats, nil
}
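// GetCacheEntry returns the newest cached payload for cacheKey that is younger
// than maxAge. A cached 404 (the notFoundMarker payload) is reported as
// (nil, true); a miss or an empty payload is reported as (nil, false).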
func (db *DBWrapper) GetCacheEntry(cacheKey string, maxAge time.Duration) ([]byte, bool) {
var cached CacheEntry
err := db.gormDB.
Where("cache_key = ? AND created_at > ?", cacheKey, time.Now().Add(-maxAge)).
Order("created_at DESC").
Limit(1).
First(&cached).Error
if err != nil {
return nil, false
}
// Check if this is a 404 marker
if len(cached.Data) == len(notFoundMarker) {
isNotFound := true
for i, b := range notFoundMarker {
if cached.Data[i] != b {
isNotFound = false
break
}
}
if isNotFound {
return nil, true // Cached 404
}
}
// If Data is empty, treat as not found
if len(cached.Data) == 0 {
return nil, false
}
return cached.Data, true
}
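// CacheEntry replaces any existing rows for cacheKey with a fresh entry. A nil
// payload records a 404 by storing notFoundMarker instead.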
func (db *DBWrapper) CacheEntry(cacheKey string, data []byte) error {
// Delete old entries with same key to avoid duplicates
db.gormDB.Where("cache_key = ?", cacheKey).Delete(&CacheEntry{})
// If data is nil (404), store the special marker
cacheData := data
if data == nil {
cacheData = notFoundMarker
}
return db.gormDB.Create(&CacheEntry{
CacheKey: cacheKey,
Data: cacheData,
CreatedAt: time.Now(),
}).Error
}
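// SearchShips does a case-insensitive substring search over invTypes limited
// to EVE category 6 (ships).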
func (db *DBWrapper) SearchShips(query string, limit int) ([]models.InvType, error) {
var ships []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.gormDB.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (6)", searchPattern).
Limit(limit).
Find(&ships).Error
return ships, err
}
func (db *DBWrapper) SearchSystems(query string, limit int) ([]models.MapSolarSystem, error) {
var systems []models.MapSolarSystem
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.gormDB.Table("mapSolarSystems").
Where("LOWER(\"solarSystemName\") LIKE ?", searchPattern).
Limit(limit).
Find(&systems).Error
return systems, err
}
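// SearchModules does a case-insensitive substring search over invTypes limited
// to categories 7 and 66 (ship modules and, presumably, structure modules).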
func (db *DBWrapper) SearchModules(query string, limit int) ([]models.InvType, error) {
var modules []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.gormDB.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (7, 66)", searchPattern).
Limit(limit).
Find(&modules).Error
return modules, err
}
func (db *DBWrapper) SearchGroups(query string, limit int) ([]models.InvGroup, error) {
var groups []models.InvGroup
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.gormDB.Table("invGroups").
Where("LOWER(\"groupName\") LIKE ?", searchPattern).
Limit(limit).
Find(&groups).Error
return groups, err
}
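// GetItemNames resolves the given IDs against both invTypes and
// mapSolarSystems, returning a map keyed by the decimal string form of the ID
// so type and system names can share one lookup table.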
func (db *DBWrapper) GetItemNames(ids []int32) (map[string]string, error) {
names := make(map[string]string)
var items []models.InvType
if err := db.gormDB.Table("invTypes").
Where("typeID IN ?", ids).
Find(&items).Error; err != nil {
return nil, err
}
for _, item := range items {
names[strconv.FormatInt(int64(item.TypeID), 10)] = item.TypeName
}
var systems []models.MapSolarSystem
if err := db.gormDB.Table("mapSolarSystems").
Where("\"solarSystemID\" IN ?", ids).
Find(&systems).Error; err != nil {
return nil, err
}
for _, system := range systems {
names[strconv.FormatInt(int64(system.SolarSystemID), 10)] = system.SolarSystemName
}
return names, nil
}
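// GetItemGroup returns the invTypes groupID for a single type ID.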
func (db *DBWrapper) GetItemGroup(itemID int64) (int64, error) {
var item models.InvType
err := db.gormDB.Table("invTypes").
Where("typeID = ?", int32(itemID)).
First(&item).Error
if err != nil {
return 0, err
}
return int64(item.GroupID), nil
}
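// deduplicateInt64 removes duplicate values while preserving order.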
func deduplicateInt64(slice []int64) []int64 {
seen := make(map[int64]bool)
result := make([]int64, 0, len(slice))
for _, v := range slice {
if !seen[v] {
seen[v] = true
result = append(result, v)
}
}
return result
}
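// groupTypeIDs expands EVE group IDs to the type IDs they contain, used to
// turn a group filter into a module filter.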
func (db *DBWrapper) groupTypeIDs(groupIDs []int64) ([]int64, error) {
if len(groupIDs) == 0 {
return nil, nil
}
var groupTypeIDs []int32
err := db.gormDB.Model(&models.InvType{}).
Select("typeID").
Where("groupID IN ?", groupIDs).
Pluck("typeID", &groupTypeIDs).Error
if err != nil {
return nil, err
}
result := make([]int64, 0, len(groupTypeIDs))
for _, typeID := range groupTypeIDs {
result = append(result, int64(typeID))
}
return result, nil
}
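// moduleSlotCache memoizes type ID -> slot lookups for the lifetime of the
// process. It is not guarded by a mutex, so access is assumed to happen from
// one goroutine at a time.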
var moduleSlotCache = make(map[int64]string)
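// getModuleSlots classifies module type IDs into low/mid/high/rig slots via
// dgmTypeEffects (effect IDs 11, 12, 13 and 2663 correspond to low, high, mid
// and rig power slots) and tags anything in category 18 as a drone.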
func (db *DBWrapper) getModuleSlots(moduleIDs []int64) (map[int64]string, error) {
result := make(map[int64]string)
uncached := make([]int64, 0)
for _, id := range moduleIDs {
if slot, cached := moduleSlotCache[id]; cached {
result[id] = slot
} else {
uncached = append(uncached, id)
}
}
if len(uncached) == 0 {
return result, nil
}
var effects []struct {
TypeID int32
EffectID int32
}
err := db.gormDB.Table("dgmTypeEffects").
Select("typeID, effectID").
Where("typeID IN ? AND effectID IN (11, 12, 13, 2663)", uncached).
Find(&effects).Error
if err != nil {
return nil, err
}
for _, e := range effects {
var slot string
switch e.EffectID {
case 11:
slot = "low"
case 12:
slot = "high"
case 13:
slot = "mid"
case 2663:
slot = "rig"
}
result[int64(e.TypeID)] = slot
moduleSlotCache[int64(e.TypeID)] = slot
}
droneCategoryID := int32(18)
var droneTypeIDs []int32
err = db.gormDB.Table("invTypes").
Select("invTypes.typeID").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("invTypes.typeID IN ? AND invGroups.categoryID = ?", uncached, droneCategoryID).
Pluck("invTypes.typeID", &droneTypeIDs).Error
if err != nil {
return nil, err
}
for _, id := range droneTypeIDs {
result[int64(id)] = "drone"
moduleSlotCache[int64(id)] = "drone"
}
return result, nil
}
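// calculateModuleStats fills the per-slot maps in stats. For result sets under
// 100000 killmails it counts fitted_modules rows for the exact killmail IDs;
// otherwise it falls back to ship/system filters. Rows are bucketed by
// inventory flag into low (11-18), mid (19-26), high (27-34), rig (92-99) and
// drone bay (87) slots, and any modules that were used as filters are reported
// at 100%.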
func (db *DBWrapper) calculateModuleStats(params QueryParams, shipTypeIDs []int64, killmailIDs []int64, stats *FitStatistics, total int64, flog *logger.Logger) error {
if total == 0 {
return nil
}
ctx := context.Background()
var query string
var args []interface{}
if len(killmailIDs) > 0 && len(killmailIDs) < 100000 {
placeholders := make([]string, len(killmailIDs))
for i := range killmailIDs {
placeholders[i] = "?"
args = append(args, killmailIDs[i])
}
query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules WHERE killmail_id IN (" + strings.Join(placeholders, ",") + ")"
} else {
var shipPlaceholders []string
if len(shipTypeIDs) > 0 {
shipPlaceholders = make([]string, len(shipTypeIDs))
for i, shipID := range shipTypeIDs {
shipPlaceholders[i] = "?"
args = append(args, shipID)
}
} else if params.Ship != 0 {
shipPlaceholders = []string{"?"}
args = []interface{}{params.Ship}
}
if len(shipPlaceholders) > 0 {
query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ")"
} else {
query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules"
}
if len(params.Systems) > 0 {
sysPlaceholders := make([]string, len(params.Systems))
for i := range params.Systems {
sysPlaceholders[i] = "?"
args = append(args, params.Systems[i])
}
if len(shipPlaceholders) > 0 {
query += " AND solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
} else {
query += " WHERE solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
}
}
}
query += " GROUP BY item_type_id, flag"
rows, err := db.ch.Query(ctx, query, args...)
if err != nil {
flog.Error("Failed to query module stats: %v", err)
return err
}
defer rows.Close()
// Map to aggregate counts per item_type_id (not per flag)
itemCounts := make(map[int64]uint64)
itemFlags := make(map[int64]int64)
for rows.Next() {
var itemTypeID, flag int64
var count uint64
if err := rows.Scan(&itemTypeID, &flag, &count); err != nil {
flog.Error("Failed to scan module stat: %v", err)
return err
}
// Only count fitted modules: keep low/mid/high slots (flags 11-34), the drone bay (87), and rigs (92-99); ignore cargo (flag 5) and all other flags
if flag < 11 || (flag > 34 && flag != 87 && (flag < 92 || flag > 99)) {
continue
}
// Aggregate per item_type_id: if the same type appears under several flags, keep the flag with the highest killmail count
if existing, exists := itemCounts[itemTypeID]; !exists || count > existing {
itemCounts[itemTypeID] = count
itemFlags[itemTypeID] = flag
}
}
// Modules used as filters appear in every matched killmail by construction, so add them first at 100%
filteredModules := make(map[int64]bool)
moduleSlots := make(map[int64]string)
if len(params.Modules) > 0 {
slots, err := db.getModuleSlots(params.Modules)
if err == nil {
moduleSlots = slots
}
for _, moduleID := range params.Modules {
filteredModules[moduleID] = true
// Add filtered modules immediately with 100%
moduleStats := ModuleStats{
Count: total,
Percentage: 100.0,
}
slot := moduleSlots[moduleID]
switch slot {
case "low":
stats.LowSlotModules[int32(moduleID)] = moduleStats
case "mid":
stats.MidSlotModules[int32(moduleID)] = moduleStats
case "high":
stats.HighSlotModules[int32(moduleID)] = moduleStats
case "rig":
stats.Rigs[int32(moduleID)] = moduleStats
case "drone":
stats.Drones[int32(moduleID)] = moduleStats
default:
stats.HighSlotModules[int32(moduleID)] = moduleStats
}
}
}
// Add all modules from query results (filtered ones already added with 100%)
for itemTypeID, count := range itemCounts {
if filteredModules[itemTypeID] {
continue
}
percentage := float64(count) / float64(total) * 100.0
moduleStats := ModuleStats{
Count: int64(count),
Percentage: percentage,
}
flag := itemFlags[itemTypeID]
switch {
case flag >= 11 && flag <= 18:
stats.LowSlotModules[int32(itemTypeID)] = moduleStats
case flag >= 19 && flag <= 26:
stats.MidSlotModules[int32(itemTypeID)] = moduleStats
case flag >= 27 && flag <= 34:
stats.HighSlotModules[int32(itemTypeID)] = moduleStats
case flag >= 92 && flag <= 99:
stats.Rigs[int32(itemTypeID)] = moduleStats
case flag == 87:
stats.Drones[int32(itemTypeID)] = moduleStats
}
}
return nil
}
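// min returns the smaller of two ints (Go 1.21+ also ships a built-in min).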
func min(a, b int) int {
if a < b {
return a
}
return b
}
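// limitKillmails returns at most limit IDs from the front of ids; a limit of
// zero or less, or an empty input, yields nil.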
func limitKillmails(ids []int64, limit int) []int64 {
if limit <= 0 || len(ids) == 0 {
return nil
}
if limit > len(ids) {
limit = len(ids)
}
return ids[:limit]
}