Hallucinate every fucking thing

commit 73408a2e5f (parent 0e98f7d796)
2026-01-06 20:08:44 +01:00
4 changed files with 51 additions and 74 deletions

api.go (56 changed lines)

@@ -22,7 +22,7 @@ type APIStatisticsRequest struct {
}
type APIItemCount struct {
- ItemId int64 `json:"itemId"`
+ ItemID int64 `json:"itemId"`
Count int64 `json:"count"`
}
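The rename only changes the Go identifier to the idiomatic ItemID; the `json:"itemId"` tag is untouched, so the JSON wire format stays the same. A minimal standalone sketch (not part of api.go) showing that:

package main

import (
	"encoding/json"
	"fmt"
)

type APIItemCount struct {
	ItemID int64 `json:"itemId"`
	Count  int64 `json:"count"`
}

func main() {
	b, _ := json.Marshal(APIItemCount{ItemID: 587, Count: 3})
	fmt.Println(string(b)) // prints {"itemId":587,"count":3}
}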
@@ -90,7 +90,7 @@ func convertMapToItemCounts(m map[int64]int64) []APIItemCount {
result := make([]APIItemCount, 0, len(m))
for id, count := range m {
result = append(result, APIItemCount{
- ItemId: id,
+ ItemID: id,
Count: count,
})
}
@@ -101,7 +101,7 @@ func convertModuleMapToItemCounts(m map[int32]int64) []APIItemCount {
result := make([]APIItemCount, 0, len(m))
for id, count := range m {
result = append(result, APIItemCount{
- ItemId: int64(id),
+ ItemID: int64(id),
Count: count,
})
}
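Both converters flatten a count map into a slice, and since Go randomizes map iteration order the slice comes back in a different order on every call. A small usage sketch (the sort step is a suggestion using the standard sort package, not something these helpers do):

counts := convertMapToItemCounts(map[int64]int64{587: 40, 603: 12})
// Map iteration order is randomized, so sort if the client expects stable output.
sort.Slice(counts, func(i, j int) bool { return counts[i].Count > counts[j].Count })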
@@ -109,79 +109,47 @@ func convertModuleMapToItemCounts(m map[int32]int64) []APIItemCount {
}
func handleStatistics(w http.ResponseWriter, r *http.Request) {
flog := logger.Default.WithPrefix("handleStatistics")
flog.Trace("Request received: %s %s", r.Method, r.URL.Path)
if r.Method != http.MethodPost {
flog.Debug("Method not allowed: %s", r.Method)
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
flog.Debug("Decoding request body")
var req APIStatisticsRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
flog.Error("Failed to decode request body: %v", err)
http.Error(w, "Invalid request body", http.StatusBadRequest)
return
}
flog.Dump("Request", req)
params := QueryParams{}
if req.Ship != nil {
params.Ship = *req.Ship
flog.Debug("Ship filter: %d", params.Ship)
}
params.Systems = req.Systems
if len(params.Systems) > 0 {
flog.Debug("Systems filter: %d systems", len(params.Systems))
}
params.Modules = req.Modules
if len(params.Modules) > 0 {
flog.Debug("Modules filter: %d modules", len(params.Modules))
}
params.Groups = req.Groups
if len(params.Groups) > 0 {
flog.Debug("Groups filter: %d groups", len(params.Groups))
}
// Killmail limit defaults to 20 when not provided or invalid
if req.KillmailLimit != nil && *req.KillmailLimit > 0 {
params.KillmailLimit = *req.KillmailLimit
} else {
params.KillmailLimit = 20
}
flog.Debug("Killmail limit: %d", params.KillmailLimit)
db, err := GetDB()
if err != nil {
flog.Error("Failed to get database: %v", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
flog.Trace("Database connection obtained")
flog.Info("Querying database")
flog.Debug("Executing QueryFits with params: %+v", params)
stats, err := db.QueryFits(params)
if err != nil {
flog.Error("Failed to query fits: %v", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
flog.Info("Query completed: %d total killmails", stats.TotalKillmails)
flog.Dump("Statistics", stats)
flog.Debug("Converting statistics to API format")
apiStats := convertFitStatistics(stats)
flog.Dump("API Statistics", apiStats)
w.Header().Set("Content-Type", "application/json")
flog.Trace("Encoding response")
- if err := json.NewEncoder(w).Encode(apiStats); err != nil {
- flog.Error("Failed to encode response: %v", err)
- return
- }
- flog.Info("Response sent successfully")
+ json.NewEncoder(w).Encode(apiStats)
}
func handleSearch(w http.ResponseWriter, r *http.Request) {
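Back in handleStatistics, the limit rule (a nil or non-positive KillmailLimit falls back to 20) could also be read as a tiny helper; a sketch, assuming KillmailLimit is a plain integer pointer:

// killmailLimitOrDefault mirrors the rule in handleStatistics:
// a nil or non-positive requested value falls back to the default.
func killmailLimitOrDefault(requested *int, def int) int {
	if requested != nil && *requested > 0 {
		return *requested
	}
	return def
}

params.KillmailLimit = killmailLimitOrDefault(req.KillmailLimit, 20) would then replace the if/else, assuming the field types line up.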
@@ -717,6 +685,19 @@ func handleImageBatch(w http.ResponseWriter, r *http.Request) {
return
}
+ // CACHING DISABLED TEMPORARILY FOR TESTING
+ // // Try to get from cache first
+ // cachedData, err := db.CacheGet(cacheKey)
+ // if err == nil {
+ // flog.Info("Returning cached images")
+ // w.Header().Set("Content-Type", "application/json")
+ // w.Write(cachedData)
+ // return
+ // }
+ // if err != ErrCacheMiss {
+ // flog.Debug("Cache get error: %v", err)
+ // }
response := APIImageBatchResponse{
Images: make(map[string]APIImageData),
}
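The commented-out block above is a read-through cache: serve db.CacheGet hits verbatim and only fall back to fetching images on ErrCacheMiss. Re-enabling it would presumably need a matching write once the response is assembled; a hedged sketch (CacheSet is an assumed name, only CacheGet and ErrCacheMiss appear in this diff):

// Hypothetical write-back after the response has been built; the setter's
// name and signature are assumptions, not part of this commit.
payload, err := json.Marshal(response)
if err == nil {
	_ = db.CacheSet(cacheKey, payload)
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
	return
}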
@@ -728,6 +709,7 @@ func handleImageBatch(w http.ResponseWriter, r *http.Request) {
// Fetch image from EVE Online's image service
esiURL := fmt.Sprintf("https://images.evetech.net/types/%d/icon?size=%d", img.TypeID, img.Size)
flog.Debug("Fetching image: %s", esiURL)
resp, err := client.Get(esiURL)
if err != nil {
flog.Debug("Failed to fetch image for typeID %d size %d: %v", img.TypeID, img.Size, err)
@@ -766,6 +748,8 @@ func handleImageBatch(w http.ResponseWriter, r *http.Request) {
}
}
+ // CACHING DISABLED TEMPORARILY FOR TESTING
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(response); err != nil {
flog.Error("Failed to encode response: %v", err)

(statistics API test file)

@@ -49,6 +49,9 @@ func TestAPIStatistics(t *testing.T) {
t.Logf("Total killmails: %d", result.TotalKillmails)
t.Logf("Ships: %d", len(result.Ships))
+ if len(result.Ships) > 0 {
+ t.Logf("First ship: id=%d, count=%d", result.Ships[0].ItemID, result.Ships[0].Count)
+ }
t.Logf("Systems: %d", len(result.SystemBreakdown))
t.Logf("High slots: %d", len(result.HighSlotModules))
t.Logf("Killmail IDs: %d", len(result.KillmailIDs))

db.go (64 changed lines)

@@ -28,7 +28,6 @@ type DB interface {
SearchModules(query string, limit int) ([]models.InvType, error)
SearchGroups(query string, limit int) ([]models.InvGroup, error)
- // Non retarded APIs below
GetItemTypes(itemIDs []int64) ([]models.InvType, error)
GetSolarSystems(systemIDs []int64) ([]models.MapSolarSystem, error)
ExpandGroupsIntoItemTypeIds(groups []int64) ([]int64, error)
@@ -169,26 +168,19 @@ func (db *DBWrapper) Init() error {
func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
ctx := context.Background()
flog := logger.Default.WithPrefix("QueryFits").WithPrefix(fmt.Sprintf("%+v", params))
flog.Info("Starting query")
// Expand groups into item type IDs
newItemTypes, err := db.ExpandGroupsIntoItemTypeIds(params.Groups)
if err != nil {
flog.Error("Failed to expand groups: %v", err)
return nil, err
}
params.Modules = append(params.Modules, newItemTypes...)
modules := deduplicateInt64(params.Modules)
flog.Debug("Deduplicated modules: %d -> %d", len(params.Modules), len(modules))
// Build different filter queries for different aggregations
baseFilterQuery, baseFilterArgs := db.buildBaseFilterQuery(params) // ship/system only
fullFilterQuery, fullFilterArgs := db.buildFilterQuery(params, modules) // includes modules
flog.Debug("Base filter query: %s with %d args", baseFilterQuery, len(baseFilterArgs))
flog.Debug("Full filter query: %s with %d args", fullFilterQuery, len(fullFilterArgs))
stats := &FitStatistics{
ShipBreakdown: make(map[int64]int64),
SystemBreakdown: make(map[int64]int64),
@@ -206,7 +198,6 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
// Query 1: Get total count (base filters only)
go func() {
query := fmt.Sprintf("SELECT COUNT(*) FROM (%s)", baseFilterQuery)
flog.Debug("Total count query: %s", query)
rows, err := db.ch.Query(ctx, query, baseFilterArgs...)
if err != nil {
errChan <- fmt.Errorf("total count query failed: %w", err)
@@ -216,11 +207,12 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
if rows.Next() {
var count uint64
- rows.Scan(&count)
+ if err := rows.Scan(&count); err != nil {
+ errChan <- fmt.Errorf("failed to scan total count: %w", err)
+ return
+ }
stats.TotalKillmails = int64(count)
flog.Debug("Total count result: %d", stats.TotalKillmails)
}
rows.Close()
errChan <- nil
}()
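Two things change in this goroutine: the count destination becomes uint64, which matches how ClickHouse reports COUNT(*) (UInt64, and the Go driver is typically strict about the destination type), and the Scan error is now surfaced instead of silently leaving the count at zero. A compact sketch of the idiom, with an extra rows.Err() check that the diff itself does not add:

rows, err := db.ch.Query(ctx, query, baseFilterArgs...)
if err != nil {
	errChan <- fmt.Errorf("total count query failed: %w", err)
	return
}
defer rows.Close()
var count uint64 // ClickHouse COUNT(*) comes back as UInt64
for rows.Next() {
	if err := rows.Scan(&count); err != nil {
		errChan <- fmt.Errorf("failed to scan total count: %w", err)
		return
	}
}
if err := rows.Err(); err != nil { // errors hit during iteration, not scanning
	errChan <- err
	return
}
stats.TotalKillmails = int64(count)
errChan <- nil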
@@ -236,10 +228,12 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
for rows.Next() {
var id int64
- rows.Scan(&id)
+ if err := rows.Scan(&id); err != nil {
+ errChan <- fmt.Errorf("failed to scan killmail ID: %w", err)
+ return
+ }
stats.KillmailIDs = append(stats.KillmailIDs, id)
}
rows.Close()
errChan <- nil
}()
@@ -254,11 +248,14 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
defer rows.Close()
for rows.Next() {
- var id, count int64
- rows.Scan(&id, &count)
- stats.ShipBreakdown[id] = count
+ var id int64
+ var shipCount uint64
+ if err := rows.Scan(&id, &shipCount); err != nil {
+ errChan <- fmt.Errorf("failed to scan ship breakdown row: %w", err)
+ return
+ }
+ stats.ShipBreakdown[id] = int64(shipCount)
}
rows.Close()
errChan <- nil
}()
@@ -273,11 +270,14 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
defer rows.Close()
for rows.Next() {
- var id, count int64
- rows.Scan(&id, &count)
- stats.SystemBreakdown[id] = count
+ var id int64
+ var count uint64
+ if err := rows.Scan(&id, &count); err != nil {
+ errChan <- fmt.Errorf("failed to scan system breakdown row: %w", err)
+ return
+ }
+ stats.SystemBreakdown[id] = int64(count)
}
rows.Close()
errChan <- nil
}()
@@ -307,7 +307,10 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
var itemID int64
var count uint64
- rows.Scan(&slot, &itemID, &count)
+ if err := rows.Scan(&slot, &itemID, &count); err != nil {
+ errChan <- fmt.Errorf("failed to scan module row: %w", err)
+ return
+ }
int32ID := int32(itemID)
int64Count := int64(count)
@@ -324,29 +327,16 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
stats.Drones[int32ID] = int64Count
}
}
rows.Close()
errChan <- nil
}()
// Wait for all queries to complete
for i := 0; i < 5; i++ {
if err := <-errChan; err != nil {
flog.Error("Query failed: %v", err)
return nil, err
}
}
flog.Info("All queries completed: %d total killmails, %d ships, %d systems, %d high, %d mid, %d low, %d rigs, %d drones, %d killmail IDs",
stats.TotalKillmails,
len(stats.ShipBreakdown),
len(stats.SystemBreakdown),
len(stats.HighSlotModules),
len(stats.MidSlotModules),
len(stats.LowSlotModules),
len(stats.Rigs),
len(stats.Drones),
len(stats.KillmailIDs))
return stats, nil
}
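For reference, QueryFits runs its five aggregation queries concurrently: each goroutine sends exactly one value on errChan and the loop drains five results, bailing out on the first error. That shape only avoids leaking goroutines if errChan is buffered, since its make call sits outside the hunks shown here; a stripped-down sketch of the pattern with illustrative names:

queries := []func() error{
	runTotalCount, runKillmailIDs, runShipBreakdown, runSystemBreakdown, runModules, // illustrative names
}
errChan := make(chan error, len(queries)) // buffered: an early return must not strand senders
for _, q := range queries {
	q := q // capture the loop variable on pre-1.22 Go
	go func() { errChan <- q() }()
}
for range queries {
	if err := <-errChan; err != nil {
		return nil, err // remaining goroutines still finish and send into the buffer
	}
}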
@@ -620,7 +610,7 @@ func parseKillmailIDs(data string, result *[]int64) {
}
}
- // Add methods to QueryParams for template use
+ // HasModules returns true if the query has module filters
func (qp QueryParams) HasModules() bool {
return len(qp.Modules) > 0
}
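The new doc comment points at template use; a sketch of how a template might branch on it (the template text is hypothetical, assuming the standard html/template package and a QueryParams value qp):

const tpl = `{{if .HasModules}}{{len .Modules}} module filters{{else}}no module filters{{end}}`

t := template.Must(template.New("filters").Parse(tpl))
_ = t.Execute(os.Stdout, qp)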