// zkill-susser/db.go
package main
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"zkillsusser/models"
logger "git.site.quack-lab.dev/dave/cylogger"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/schema"
)
type DB interface {
Init() error
Get() *gorm.DB
SaveKillmails(killmails []Killmail) error
QueryFits(params QueryParams) (*FitStatistics, error)
SearchShips(query string, limit int) ([]models.InvType, error)
SearchSystems(query string, limit int) ([]models.MapSolarSystem, error)
SearchModules(query string, limit int) ([]models.InvType, error)
SearchGroups(query string, limit int) ([]models.InvGroup, error)
	// Batch lookup APIs below
GetItemTypes(itemIDs []int64) ([]models.InvType, error)
GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error)
ExpandGroups(groups []int64) ([]int64, error)
GetModuleSlots(moduleIDs []int64) (map[int64]ModuleSlot, error)
CacheSet(key string, data []byte) error
CacheGet(key string) ([]byte, error)
CacheClean() error
}
type DBWrapper struct {
ch driver.Conn
db *gorm.DB // For SQLite (EVE static data)
}
var db *DBWrapper
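// GetDB returns the process-wide database handle, opening the SQLite static
// data file and the ClickHouse connection on first use. Example usage
// (illustrative sketch; the type ID is a placeholder):
//
//	d, err := GetDB()
//	if err != nil {
//		log.Fatalf("failed to open databases: %v", err)
//	}
//	types, err := d.GetItemTypes([]int64{587})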
func GetDB() (DB, error) {
if db != nil {
return db, nil
}
sdb, err := GetDBSqlite()
if err != nil {
return nil, fmt.Errorf("failed to connect to SQLite: %w", err)
}
conn, err := GetDBClickhouse()
if err != nil {
return nil, fmt.Errorf("failed to connect to ClickHouse: %w", err)
}
db = &DBWrapper{
ch: conn,
db: sdb,
}
err = db.Init()
return db, err
}
func GetDBSqlite() (*gorm.DB, error) {
return gorm.Open(sqlite.Open("sqlite-latest.sqlite"), &gorm.Config{
NamingStrategy: schema.NamingStrategy{
NoLowerCase: true,
},
})
}
func GetDBClickhouse() (driver.Conn, error) {
options := &clickhouse.Options{
Addr: []string{"clickhouse.site.quack-lab.dev"},
Auth: clickhouse.Auth{
Database: "zkill",
Username: "default",
Password: "",
},
Protocol: clickhouse.HTTP,
Settings: clickhouse.Settings{
"max_query_size": 100000000,
},
}
return clickhouse.Open(options)
}
func (db *DBWrapper) Get() *gorm.DB {
return db.db
}
func (db *DBWrapper) Init() error {
ctx := context.Background()
	// Migrate the unified cache table (SQLite). For 404s, a special marker
	// byte sequence is stored instead of NULL. The ClickHouse tables below are
	// created with raw IF NOT EXISTS DDL so repeated startups are safe.
err := db.db.AutoMigrate(&CacheEntry{})
if err != nil {
return fmt.Errorf("failed to migrate cache_entries table: %w", err)
}
// Create flat_killmails table
createFlatKillmails := `
CREATE TABLE IF NOT EXISTS flat_killmails (
killmail_id Int64,
killmail_time DateTime,
solar_system_id Int64,
killmail_hash String,
victim_ship_type_id Int64,
victim_character_id Int64,
victim_corporation_id Int64,
victim_alliance_id Int64,
victim_damage_taken Int64,
victim_pos_x Float64,
victim_pos_y Float64,
victim_pos_z Float64,
attacker_count UInt16,
total_damage_done Int64,
final_blow_ship_type Int64,
attackers Array(Tuple(
Int64, -- character_id
Int64, -- corporation_id
Int64, -- alliance_id
Int64, -- ship_type_id
Int64, -- weapon_type_id
Int64, -- damage_done
UInt8, -- final_blow
Float64 -- security_status
)),
items Array(Tuple(
Int64, -- flag
Int64, -- item_type_id
Int64, -- quantity_destroyed
Int64, -- quantity_dropped
Int64 -- singleton
))
) ENGINE = MergeTree()
ORDER BY (killmail_id)
PRIMARY KEY (killmail_id)`
err = db.ch.Exec(ctx, createFlatKillmails)
if err != nil {
return fmt.Errorf("failed to create flat_killmails table: %w", err)
}
// Create fitted_modules table
createFittedModules := `
CREATE TABLE IF NOT EXISTS fitted_modules (
killmail_id Int64,
killmail_time DateTime,
solar_system_id Int64,
victim_ship_type_id Int64,
item_type_id Int64,
flag Int64,
quantity_destroyed Int64,
quantity_dropped Int64
) ENGINE = MergeTree()
ORDER BY (killmail_id, item_type_id, flag)
PRIMARY KEY (killmail_id, item_type_id, flag)`
err = db.ch.Exec(ctx, createFittedModules)
if err != nil {
return fmt.Errorf("failed to create fitted_modules table: %w", err)
}
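	// flat_killmails stores one row per killmail with attackers and items as
	// nested tuple arrays; fitted_modules is a flattened one-row-per-fitted-item
	// projection used for module filtering. An illustrative sanity check from
	// clickhouse-client:
	//
	//	DESCRIBE TABLE flat_killmails;
	//	SELECT count() FROM fitted_modules;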
return nil
}
func (db *DBWrapper) SaveKillmails(killmails []Killmail) error {
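	// NOTE: persistence is currently disabled; the batched ClickHouse insert
	// below is kept commented out for reference, so this method is a no-op.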
// ctx := context.Background()
// // Prepare batch for flat_killmails
// flatBatch, err := db.ch.PrepareBatch(ctx, "INSERT INTO flat_killmails")
// if err != nil {
// return fmt.Errorf("failed to prepare flat_killmails batch: %w", err)
// }
// // Prepare batch for fitted_modules
// moduleBatch, err := db.ch.PrepareBatch(ctx, "INSERT INTO fitted_modules")
// if err != nil {
// return fmt.Errorf("failed to prepare fitted_modules batch: %w", err)
// }
// // Process in batches with deduplication
// batchSize := 1000
// seenKillmails := make(map[int64]bool)
// for i := 0; i < len(killmails); i += batchSize {
// end := i + batchSize
// if end > len(killmails) {
// end = len(killmails)
// }
// // Batch check for existing killmails
// batchIDs := make([]int64, 0, end-i)
// for _, km := range killmails[i:end] {
// if !seenKillmails[km.KillmailID] {
// batchIDs = append(batchIDs, km.KillmailID)
// seenKillmails[km.KillmailID] = true
// }
// }
// if len(batchIDs) > 0 {
// // Check which ones already exist in database
// placeholders := make([]string, len(batchIDs))
// args := make([]interface{}, len(batchIDs))
// for j, id := range batchIDs {
// placeholders[j] = "?"
// args[j] = id
// }
// checkQuery := fmt.Sprintf("SELECT killmail_id FROM flat_killmails WHERE killmail_id IN (%s)", strings.Join(placeholders, ","))
// rows, err := db.ch.Query(ctx, checkQuery, args...)
// if err == nil {
// existing := make(map[int64]bool)
// for rows.Next() {
// var id int64
// if rows.Scan(&id) == nil {
// existing[id] = true
// }
// }
// rows.Close()
// // Remove existing from batch
// filtered := batchIDs[:0]
// for _, id := range batchIDs {
// if !existing[id] {
// filtered = append(filtered, id)
// }
// }
// batchIDs = filtered
// }
// }
// // Create map for fast lookup
// allowedIDs := make(map[int64]bool)
// for _, id := range batchIDs {
// allowedIDs[id] = true
// }
// for _, km := range killmails[i:end] {
// if !allowedIDs[km.KillmailID] {
// continue // Skip duplicate
// }
// flat := km.FlattenKillmail()
// modules := km.ExtractFittedModules()
// // Append to flat_killmails batch
// if err := flatBatch.Append(
// flat.KillmailID,
// flat.KillmailTime,
// flat.SolarSystemID,
// flat.KillmailHash,
// flat.VictimShipTypeID,
// flat.VictimCharacterID,
// flat.VictimCorporationID,
// flat.VictimAllianceID,
// flat.VictimDamageTaken,
// flat.VictimPosX,
// flat.VictimPosY,
// flat.VictimPosZ,
// flat.AttackerCount,
// flat.TotalDamageDone,
// flat.FinalBlowShipType,
// flat.Attackers,
// flat.Items,
// ); err != nil {
// return fmt.Errorf("failed to append flat killmail: %w", err)
// }
// // Append modules to fitted_modules batch
// for _, mod := range modules {
// if err := moduleBatch.Append(
// mod.KillmailID,
// mod.KillmailTime,
// mod.SolarSystemID,
// mod.VictimShipTypeID,
// mod.ItemTypeID,
// mod.Flag,
// mod.QuantityDestroyed,
// mod.QuantityDropped,
// ); err != nil {
// return fmt.Errorf("failed to append module: %w", err)
// }
// }
// }
// // Send batches every 1000 records
// if err := flatBatch.Send(); err != nil {
// return fmt.Errorf("failed to send flat_killmails batch: %w", err)
// }
// if err := moduleBatch.Send(); err != nil {
// return fmt.Errorf("failed to send fitted_modules batch: %w", err)
// }
// // Prepare new batches for next iteration
// if end < len(killmails) {
// flatBatch, err = db.ch.PrepareBatch(ctx, "INSERT INTO flat_killmails")
// if err != nil {
// return fmt.Errorf("failed to prepare flat_killmails batch: %w", err)
// }
// moduleBatch, err = db.ch.PrepareBatch(ctx, "INSERT INTO fitted_modules")
// if err != nil {
// return fmt.Errorf("failed to prepare fitted_modules batch: %w", err)
// }
// }
// }
// return nil
return nil
}
func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
flog := logger.Default.WithPrefix("QueryFits").WithPrefix(fmt.Sprintf("%+v", params))
flog.Info("Starting query")
newItemTypes, err := db.ExpandGroups(params.Groups)
if err != nil {
flog.Error("Failed to expand groups: %v", err)
return nil, err
}
params.Modules = append(params.Modules, newItemTypes...)
modules := deduplicateInt64(params.Modules)
flog.Debug("Deduplicated modules: %d -> %d", len(params.Modules), len(modules))
// ctx := context.Background()
// var killmailIDs []int64
// var systemIDs []int64
// var shipTypeIDsFromResults []int64
// moduleFilterIDs := deduplicateInt64(append(modules, groupModuleTypeIDs...))
// if len(moduleFilterIDs) > 0 {
// modules = moduleFilterIDs
// placeholders := make([]string, len(moduleFilterIDs))
// moduleArgs := make([]interface{}, len(moduleFilterIDs))
// for i, moduleID := range moduleFilterIDs {
// placeholders[i] = "?"
// moduleArgs[i] = moduleID
// }
// var shipPlaceholders []string
// var shipArgs []interface{}
// if len(shipTypeIDs) > 0 {
// shipPlaceholders = make([]string, len(shipTypeIDs))
// for i, shipID := range shipTypeIDs {
// shipPlaceholders[i] = "?"
// shipArgs = append(shipArgs, shipID)
// }
// } else if !isEmpty {
// shipPlaceholders = []string{"?"}
// shipArgs = []interface{}{params.Ship}
// }
// var moduleQuery string
// var args []interface{}
// if len(shipPlaceholders) > 0 {
// moduleQuery = "SELECT DISTINCT killmail_id, solar_system_id, victim_ship_type_id FROM fitted_modules WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ") AND item_type_id IN (" + strings.Join(placeholders, ",") + ")"
// args = shipArgs
// args = append(args, moduleArgs...)
// } else {
// moduleQuery = "SELECT DISTINCT killmail_id, solar_system_id, victim_ship_type_id FROM fitted_modules WHERE item_type_id IN (" + strings.Join(placeholders, ",") + ")"
// args = moduleArgs
// }
// if len(params.Systems) > 0 {
// sysPlaceholders := make([]string, len(params.Systems))
// for i := range params.Systems {
// sysPlaceholders[i] = "?"
// args = append(args, params.Systems[i])
// }
// moduleQuery += " AND solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
// }
// rows, err := db.ch.Query(ctx, moduleQuery, args...)
// if err != nil {
// flog.Error("Failed to query filtered killmails: %v", err)
// return nil, err
// }
// for rows.Next() {
// var id, systemID, shipTypeID int64
// if err := rows.Scan(&id, &systemID, &shipTypeID); err != nil {
// rows.Close()
// return nil, err
// }
// killmailIDs = append(killmailIDs, id)
// systemIDs = append(systemIDs, systemID)
// shipTypeIDsFromResults = append(shipTypeIDsFromResults, shipTypeID)
// }
// rows.Close()
// } else {
// // No module filter - query flat_killmails directly
// var query string
// var args []interface{}
// if len(shipTypeIDs) > 0 {
// shipPlaceholders := make([]string, len(shipTypeIDs))
// for i, shipID := range shipTypeIDs {
// shipPlaceholders[i] = "?"
// args = append(args, shipID)
// }
// query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ")"
// } else if !isEmpty {
// query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails WHERE victim_ship_type_id = ?"
// args = []interface{}{params.Ship}
// } else {
// query = "SELECT killmail_id, solar_system_id, victim_ship_type_id FROM flat_killmails"
// }
// if len(params.Systems) > 0 {
// placeholders := make([]string, len(params.Systems))
// for i := range params.Systems {
// placeholders[i] = "?"
// args = append(args, params.Systems[i])
// }
// if strings.Contains(query, "WHERE") {
// query += " AND solar_system_id IN (" + strings.Join(placeholders, ",") + ")"
// } else {
// query += " WHERE solar_system_id IN (" + strings.Join(placeholders, ",") + ")"
// }
// }
// rows, err := db.ch.Query(ctx, query, args...)
// if err != nil {
// flog.Error("Failed to execute query: %v", err)
// return nil, err
// }
// defer rows.Close()
// for rows.Next() {
// var killmailID, systemID, shipTypeID int64
// if err := rows.Scan(&killmailID, &systemID, &shipTypeID); err != nil {
// flog.Error("Failed to scan row: %v", err)
// return nil, err
// }
// killmailIDs = append(killmailIDs, killmailID)
// systemIDs = append(systemIDs, systemID)
// shipTypeIDsFromResults = append(shipTypeIDsFromResults, shipTypeID)
// }
// }
// totalKillmails := int64(len(killmailIDs))
// flog.Info("Found %d killmails after filtering", totalKillmails)
// if totalKillmails > 0 {
// flog.Debug("Sample killmail IDs: %v", killmailIDs[:min(5, len(killmailIDs))])
// }
// stats := &FitStatistics{
// TotalKillmails: totalKillmails,
// ShipBreakdown: make(map[int64]Stats),
// SystemBreakdown: make(map[int64]Stats),
// HighSlotModules: make(map[int32]Stats),
// MidSlotModules: make(map[int32]Stats),
// LowSlotModules: make(map[int32]Stats),
// Rigs: make(map[int32]Stats),
// Drones: make(map[int32]Stats),
// KillmailIDs: limitKillmails(killmailIDs, params.KillmailLimit),
// }
// if totalKillmails == 0 {
// flog.Info("No killmails found, returning empty statistics")
// return stats, nil
// }
// // Calculate ship breakdown if params are empty or we have ship data
// if isEmpty || len(shipTypeIDsFromResults) > 0 {
// flog.Debug("Calculating ship breakdown")
// shipCounts := make(map[int64]int64)
// for _, shipTypeID := range shipTypeIDsFromResults {
// shipCounts[shipTypeID]++
// }
// for shipTypeID, count := range shipCounts {
// percentage := float64(count) / float64(totalKillmails) * 100.0
// stats.ShipBreakdown[shipTypeID] = Stats{
// Count: count,
// Percentage: percentage,
// }
// }
// flog.Debug("Ship breakdown: %d unique ships", len(stats.ShipBreakdown))
// }
// flog.Debug("Calculating system breakdown")
// systemCounts := make(map[int64]int64)
// for _, systemID := range systemIDs {
// systemCounts[systemID]++
// }
// // Calculate system percentages
// for systemID, count := range systemCounts {
// percentage := float64(count) / float64(totalKillmails) * 100.0
// stats.SystemBreakdown[systemID] = Stats{
// Count: count,
// Percentage: percentage,
// }
// }
// flog.Debug("System breakdown: %d unique systems", len(stats.SystemBreakdown))
// flog.Debug("Calculating module statistics for %d killmails", len(killmailIDs))
// if err := db.calculateStats(params, shipTypeIDs, killmailIDs, stats, totalKillmails, flog); err != nil {
// flog.Error("Failed to calculate module stats: %v", err)
// return nil, err
// }
// flog.Info("Statistics calculated: %d high, %d mid, %d low, %d rigs, %d drones",
// len(stats.HighSlotModules), len(stats.MidSlotModules), len(stats.LowSlotModules),
// len(stats.Rigs), len(stats.Drones))
// return stats, nil
return nil, nil
}
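// ExpandGroups resolves EVE SDE group IDs into the typeIDs of every type in
// those groups, so a group filter expands to the concrete modules it contains.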
func (db *DBWrapper) ExpandGroups(groups []int64) ([]int64, error) {
var groupTypeIDs []int64
result := db.db.Model(&models.InvType{}).
Select("typeID").
Where("groupID IN ?", groups).
Pluck("typeID", &groupTypeIDs)
return groupTypeIDs, result.Error
}
func (db *DBWrapper) GetCacheEntry(cacheKey string, maxAge time.Duration) ([]byte, bool) {
var cached CacheEntry
err := db.db.
Where("cache_key = ? AND created_at > ?", cacheKey, time.Now().Add(-maxAge)).
Order("created_at DESC").
Limit(1).
First(&cached).Error
if err != nil {
return nil, false
}
	// Check whether this is a cached 404 marker
	if bytes.Equal(cached.Data, notFoundMarker) {
		return nil, true // Cached 404
	}
// If Data is empty, treat as not found
if len(cached.Data) == 0 {
return nil, false
}
return cached.Data, true
}
func (db *DBWrapper) CacheEntry(cacheKey string, data []byte) error {
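	// NOTE: disabled no-op; the active cache path is CacheSet/CacheGet below.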
// // Delete old entries with same key to avoid duplicates
// db.gormDB.Where("cache_key = ?", cacheKey).Delete(&CacheEntry{})
// // If data is nil (404), store the special marker
// cacheData := data
// if data == nil {
// cacheData = notFoundMarker
// }
// return db.gormDB.Create(&CacheEntry{
// CacheKey: cacheKey,
// Data: cacheData,
// CreatedAt: time.Now(),
// }).Error
return nil
}
func (db *DBWrapper) SearchShips(query string, limit int) ([]models.InvType, error) {
var ships []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (6)", searchPattern).
Limit(limit).
Find(&ships).Error
return ships, err
}
func (db *DBWrapper) SearchSystems(query string, limit int) ([]models.MapSolarSystem, error) {
var systems []models.MapSolarSystem
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("mapSolarSystems").
Where("LOWER(\"solarSystemName\") LIKE ?", searchPattern).
Limit(limit).
Find(&systems).Error
return systems, err
}
func (db *DBWrapper) SearchModules(query string, limit int) ([]models.InvType, error) {
var modules []models.InvType
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invTypes").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("LOWER(invTypes.\"typeName\") LIKE ? AND invGroups.categoryID IN (7, 66)", searchPattern).
Limit(limit).
Find(&modules).Error
return modules, err
}
func (db *DBWrapper) SearchGroups(query string, limit int) ([]models.InvGroup, error) {
var groups []models.InvGroup
searchPattern := "%" + strings.ToLower(query) + "%"
err := db.db.Table("invGroups").
Where("LOWER(\"groupName\") LIKE ?", searchPattern).
Limit(limit).
Find(&groups).Error
return groups, err
}
func (db *DBWrapper) GetItemNames(ids []int32) (map[string]string, error) {
names := make(map[string]string)
var items []models.InvType
if err := db.db.Table("invTypes").
Where("typeID IN ?", ids).
Find(&items).Error; err != nil {
return nil, err
}
for _, item := range items {
names[strconv.FormatInt(int64(item.TypeID), 10)] = item.TypeName
}
var systems []models.MapSolarSystem
if err := db.db.Table("mapSolarSystems").
Where("\"solarSystemID\" IN ?", ids).
Find(&systems).Error; err != nil {
return nil, err
}
for _, system := range systems {
names[strconv.FormatInt(int64(system.SolarSystemID), 10)] = system.SolarSystemName
}
return names, nil
}
func (db *DBWrapper) GetItemTypes(itemIDs []int64) ([]models.InvType, error) {
var itemTypes []models.InvType
res := db.db.Model(&models.InvType{}).
Where("typeID IN ?", itemIDs).
Find(&itemTypes)
return itemTypes, res.Error
}
func (db *DBWrapper) GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error) {
var systems []models.MapSolarSystem
res := db.db.Model(&models.MapSolarSystem{}).
Where("solarSystemID IN ?", systemIDs).
Find(&systems)
return systems, res.Error
}
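// deduplicateInt64 removes duplicates while preserving first-seen order,
// e.g. []int64{3, 1, 3, 2} -> []int64{3, 1, 2}.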
func deduplicateInt64(slice []int64) []int64 {
seen := make(map[int64]bool)
result := make([]int64, 0, len(slice))
for _, v := range slice {
if !seen[v] {
seen[v] = true
result = append(result, v)
}
}
return result
}
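// GetModuleSlots classifies type IDs by fitting slot via the SDE dogma
// effects (11 = loPower, 12 = hiPower, 13 = medPower, 2663 = rigSlot); any
// remaining IDs in category 18 (Drone) are classified as drones.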
func (db *DBWrapper) GetModuleSlots(moduleIDs []int64) (map[int64]ModuleSlot, error) {
mlog := logger.Default.WithPrefix("GetModuleSlots").WithPrefix(fmt.Sprintf("%v", moduleIDs))
mlog.Debug("Starting")
mlog.Dump("moduleIDs", moduleIDs)
result := make(map[int64]ModuleSlot)
var effects []models.DgmTypeEffect
qres := db.db.Model(&models.DgmTypeEffect{}).
Select("typeID, effectID").
Where("typeID IN ? AND effectID IN (11, 12, 13, 2663)", moduleIDs).
Find(&effects)
if qres.Error != nil {
mlog.Debug("Failed to get effects: %v", qres.Error)
return nil, qres.Error
}
mlog.Debug("Found %d effects", qres.RowsAffected)
mlog.Dump("effects", effects)
for _, e := range effects {
var slot ModuleSlot
switch e.EffectID {
case 11:
slot = ModuleSlotLow
case 12:
slot = ModuleSlotHigh
case 13:
slot = ModuleSlotMid
case 2663:
slot = ModuleSlotRig
}
result[int64(e.TypeID)] = slot
}
mlog.Debug("Found %d slots", len(result))
mlog.Dump("result", result)
if len(result) == len(moduleIDs) {
// All done, no more to do
mlog.Debug("All done, no more to do")
return result, nil
}
// Still have to find drones...
mlog.Debug("Still have to find drones...")
var droneTypeIDs []int32
qres = db.db.Table("invTypes").
Select("invTypes.typeID").
Joins("INNER JOIN invGroups ON invTypes.groupID = invGroups.groupID").
Where("invGroups.categoryID = ?", 18).
Where("invTypes.typeID IN ?", moduleIDs).
Pluck("invTypes.typeID", &droneTypeIDs)
if qres.Error != nil {
mlog.Debug("Failed to get drone type IDs: %v", qres.Error)
return nil, qres.Error
}
mlog.Debug("Found %d drone type IDs", qres.RowsAffected)
mlog.Dump("droneTypeIDs", droneTypeIDs)
for _, id := range droneTypeIDs {
result[int64(id)] = ModuleSlotDrone
}
mlog.Debug("Found %d drone slots", len(result))
mlog.Dump("result", result)
return result, nil
}
func (db *DBWrapper) calculateStats(params QueryParams, shipTypeIDs []int64, killmailIDs []int64, stats *FitStatistics, total int64, flog *logger.Logger) error {
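	// NOTE: disabled; the flag-based slot aggregation below is kept commented
	// out for reference.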
// if total == 0 {
// return nil
// }
// ctx := context.Background()
// var query string
// var args []interface{}
// if len(killmailIDs) > 0 && len(killmailIDs) < 100000 {
// placeholders := make([]string, len(killmailIDs))
// for i := range killmailIDs {
// placeholders[i] = "?"
// args = append(args, killmailIDs[i])
// }
// query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules WHERE killmail_id IN (" + strings.Join(placeholders, ",") + ")"
// } else {
// var shipPlaceholders []string
// if len(shipTypeIDs) > 0 {
// shipPlaceholders = make([]string, len(shipTypeIDs))
// for i, shipID := range shipTypeIDs {
// shipPlaceholders[i] = "?"
// args = append(args, shipID)
// }
// } else if params.Ship != 0 {
// shipPlaceholders = []string{"?"}
// args = []interface{}{params.Ship}
// }
// if len(shipPlaceholders) > 0 {
// query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules WHERE victim_ship_type_id IN (" + strings.Join(shipPlaceholders, ",") + ")"
// } else {
// query = "SELECT item_type_id, flag, count(DISTINCT killmail_id) as count FROM fitted_modules"
// }
// if len(params.Systems) > 0 {
// sysPlaceholders := make([]string, len(params.Systems))
// for i := range params.Systems {
// sysPlaceholders[i] = "?"
// args = append(args, params.Systems[i])
// }
// if len(shipPlaceholders) > 0 {
// query += " AND solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
// } else {
// query += " WHERE solar_system_id IN (" + strings.Join(sysPlaceholders, ",") + ")"
// }
// }
// }
// query += " GROUP BY item_type_id, flag"
// rows, err := db.ch.Query(ctx, query, args...)
// if err != nil {
// flog.Error("Failed to query module stats: %v", err)
// return err
// }
// defer rows.Close()
// // Map to aggregate counts per item_type_id (not per flag)
// itemCounts := make(map[int64]uint64)
// itemFlags := make(map[int64]int64)
// for rows.Next() {
// var itemTypeID, flag int64
// var count uint64
// if err := rows.Scan(&itemTypeID, &flag, &count); err != nil {
// flog.Error("Failed to scan module stat: %v", err)
// return err
// }
// // Only count fitted modules - ignore cargo (flag 5) and other non-module flags
// if flag < 11 || (flag > 34 && flag != 87 && (flag < 92 || flag > 99)) {
// continue
// }
// // Aggregate: if we've seen this item_type_id before, use the max count (should be same, but just in case)
// if existing, exists := itemCounts[itemTypeID]; !exists || count > existing {
// itemCounts[itemTypeID] = count
// itemFlags[itemTypeID] = flag
// }
// }
// // For filtered modules, they should be in 100% of fits - ADD THEM FIRST
// filteredModules := make(map[int64]bool)
// moduleSlots := make(map[int64]string)
// if len(params.Modules) > 0 {
// slots, err := db.getModuleSlots(params.Modules)
// if err == nil {
// moduleSlots = slots
// }
// for _, moduleID := range params.Modules {
// filteredModules[moduleID] = true
// // Add filtered modules immediately with 100%
// Stats := Stats{
// Count: total,
// Percentage: 100.0,
// }
// slot, _ := moduleSlots[moduleID]
// switch slot {
// case "low":
// stats.LowSlotModules[int32(moduleID)] = Stats
// case "mid":
// stats.MidSlotModules[int32(moduleID)] = Stats
// case "high":
// stats.HighSlotModules[int32(moduleID)] = Stats
// case "rig":
// stats.Rigs[int32(moduleID)] = Stats
// case "drone":
// stats.Drones[int32(moduleID)] = Stats
// default:
// stats.HighSlotModules[int32(moduleID)] = Stats
// }
// }
// }
// // Add all modules from query results (filtered ones already added with 100%)
// for itemTypeID, count := range itemCounts {
// if filteredModules[itemTypeID] {
// continue
// }
// percentage := float64(count) / float64(total) * 100.0
// Stats := Stats{
// Count: int64(count),
// Percentage: percentage,
// }
// flag := itemFlags[itemTypeID]
// switch {
// case flag >= 11 && flag <= 18:
// stats.LowSlotModules[int32(itemTypeID)] = Stats
// case flag >= 19 && flag <= 26:
// stats.MidSlotModules[int32(itemTypeID)] = Stats
// case flag >= 27 && flag <= 34:
// stats.HighSlotModules[int32(itemTypeID)] = Stats
// case flag >= 92 && flag <= 99:
// stats.Rigs[int32(itemTypeID)] = Stats
// case flag == 87:
// stats.Drones[int32(itemTypeID)] = Stats
// }
// }
// return nil
return nil
}
func (db *DBWrapper) CacheSet(key string, data []byte) error {
flog := logger.Default.WithPrefix("CacheSet").WithPrefix(key)
flog.Dump("data", data)
err := db.CacheClean()
if err != nil {
flog.Debug("Failed to clean cache: %v", err)
return err
}
cacheEntry := CacheEntry{
Key: key,
Data: data,
CreatedAt: time.Now(),
}
flog.Debug("Creating cache entry")
flog.Dump("cacheEntry", cacheEntry)
return db.db.Create(&cacheEntry).Error
}
var ErrCacheMiss = gorm.ErrRecordNotFound
func (db *DBWrapper) CacheGet(key string) ([]byte, error) {
flog := logger.Default.WithPrefix("CacheGet").WithPrefix(key)
flog.Dump("key", key)
err := db.CacheClean()
if err != nil {
flog.Debug("Failed to clean cache: %v", err)
return nil, err
}
cacheEntry := CacheEntry{Key: key}
res := db.db.Model(&CacheEntry{}).
Where(cacheEntry).
First(&cacheEntry)
flog.Debug("Found cache entry")
flog.Dump("cacheEntry", cacheEntry)
flog.Dump("res", res)
return cacheEntry.Data, res.Error
}
func (db *DBWrapper) CacheClean() error {
flog := logger.Default.WithPrefix("CacheClean")
threshold := time.Now().Add(-72 * time.Hour)
flog.Dump("threshold", threshold)
return db.db.
Where("created_at < ?", threshold).
Delete(&CacheEntry{}).Error
}
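// Example (illustrative sketch): read-through use of the cache API above;
// the cache key and fetch step are placeholders.
//
//	data, err := d.CacheGet("esi:character:12345")
//	if errors.Is(err, ErrCacheMiss) {
//		data = fetchFromUpstream() // hypothetical fetch
//		if err := d.CacheSet("esi:character:12345", data); err != nil {
//			return err
//		}
//	}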