Rehallucinate the API

This commit is contained in:
2026-01-06 18:06:19 +01:00
parent 15dc7983fd
commit 65a3cb587b
5 changed files with 583 additions and 396 deletions

724
api.go
View File

@@ -1,7 +1,13 @@
package main
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"time"
logger "git.site.quack-lab.dev/dave/cylogger"
)
type APIStatisticsRequest struct {
@@ -62,387 +68,382 @@ type APIImageData struct {
const cacheTTL = 24 * time.Hour
// func convertFitStatistics(stats *FitStatistics) *APIFitStatistics {
// api := &APIFitStatistics{
// TotalKillmails: stats.TotalKillmails,
// Ships: convertMapToItemCounts(stats.ShipBreakdown),
// SystemBreakdown: convertMapToItemCounts(stats.SystemBreakdown),
// HighSlotModules: convertModuleMapToItemCounts(stats.HighSlotModules),
// MidSlotModules: convertModuleMapToItemCounts(stats.MidSlotModules),
// LowSlotModules: convertModuleMapToItemCounts(stats.LowSlotModules),
// Rigs: convertModuleMapToItemCounts(stats.Rigs),
// Drones: convertModuleMapToItemCounts(stats.Drones),
// KillmailIDs: stats.KillmailIDs,
// }
// return api
// }
// convertFitStatistics maps the internal FitStatistics aggregate onto the
// wire-format APIFitStatistics, flattening each breakdown map into a slice
// of APIItemCount entries.
func convertFitStatistics(stats *FitStatistics) *APIFitStatistics {
	return &APIFitStatistics{
		TotalKillmails:  stats.TotalKillmails,
		Ships:           convertMapToItemCounts(stats.ShipBreakdown),
		SystemBreakdown: convertMapToItemCounts(stats.SystemBreakdown),
		HighSlotModules: convertModuleMapToItemCounts(stats.HighSlotModules),
		MidSlotModules:  convertModuleMapToItemCounts(stats.MidSlotModules),
		LowSlotModules:  convertModuleMapToItemCounts(stats.LowSlotModules),
		Rigs:            convertModuleMapToItemCounts(stats.Rigs),
		Drones:          convertModuleMapToItemCounts(stats.Drones),
		KillmailIDs:     stats.KillmailIDs,
	}
}
// func convertMapToItemCounts(m map[int64]Stats) []APIItemCount {
// result := make([]APIItemCount, 0, len(m))
// for id, stats := range m {
// result = append(result, APIItemCount{
// ItemId: id,
// Count: stats.Count,
// })
// }
// return result
// }
// convertMapToItemCounts flattens an id->count map into a slice of
// APIItemCount entries. Map iteration order is not specified, so the
// resulting slice order is arbitrary (same as the previous behavior).
func convertMapToItemCounts(m map[int64]int64) []APIItemCount {
	counts := make([]APIItemCount, 0, len(m))
	for typeID, n := range m {
		counts = append(counts, APIItemCount{ItemId: typeID, Count: n})
	}
	return counts
}
// func convertModuleMapToItemCounts(m map[int32]Stats) []APIItemCount {
// result := make([]APIItemCount, 0, len(m))
// for id, stats := range m {
// result = append(result, APIItemCount{
// ItemId: int64(id),
// Count: stats.Count,
// })
// }
// return result
// }
// convertModuleMapToItemCounts flattens an int32-keyed id->count map into a
// slice of APIItemCount entries, widening each key to int64 for the API type.
func convertModuleMapToItemCounts(m map[int32]int64) []APIItemCount {
	counts := make([]APIItemCount, 0, len(m))
	for typeID, n := range m {
		counts = append(counts, APIItemCount{ItemId: int64(typeID), Count: n})
	}
	return counts
}
// func handleStatistics(w http.ResponseWriter, r *http.Request) {
// flog := logger.Default.WithPrefix("handleStatistics")
// flog.Trace("Request received: %s %s", r.Method, r.URL.Path)
// handleStatistics serves POST /api/statistics: it decodes a JSON filter
// request, translates it into QueryParams, runs the fit-statistics query
// against the database and writes the converted result as JSON.
//
// Responses: 405 for non-POST, 400 for an undecodable body, 500 for
// database/query failures.
func handleStatistics(w http.ResponseWriter, r *http.Request) {
	lg := logger.Default.WithPrefix("handleStatistics")
	lg.Trace("Request received: %s %s", r.Method, r.URL.Path)

	// Filters travel in the request body, so only POST makes sense here.
	if r.Method != http.MethodPost {
		lg.Debug("Method not allowed: %s", r.Method)
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	lg.Debug("Decoding request body")
	var body APIStatisticsRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		lg.Error("Failed to decode request body: %v", err)
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	lg.Dump("Request", body)

	// Copy the optional API filters into the query parameter struct.
	var params QueryParams
	if body.Ship != nil {
		params.Ship = *body.Ship
		lg.Debug("Ship filter: %d", params.Ship)
	}
	params.Systems = body.Systems
	if len(params.Systems) > 0 {
		lg.Debug("Systems filter: %d systems", len(params.Systems))
	}
	params.Modules = body.Modules
	if len(params.Modules) > 0 {
		lg.Debug("Modules filter: %d modules", len(params.Modules))
	}
	params.Groups = body.Groups
	if len(params.Groups) > 0 {
		lg.Debug("Groups filter: %d groups", len(params.Groups))
	}

	// Killmail limit defaults to 20 when not provided or non-positive.
	params.KillmailLimit = 20
	if body.KillmailLimit != nil && *body.KillmailLimit > 0 {
		params.KillmailLimit = *body.KillmailLimit
	}
	lg.Debug("Killmail limit: %d", params.KillmailLimit)

	db, err := GetDB()
	if err != nil {
		lg.Error("Failed to get database: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	lg.Trace("Database connection obtained")

	lg.Info("Querying database")
	lg.Debug("Executing QueryFits with params: %+v", params)
	stats, err := db.QueryFits(params)
	if err != nil {
		lg.Error("Failed to query fits: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	lg.Info("Query completed: %d total killmails", stats.TotalKillmails)
	lg.Dump("Statistics", stats)

	lg.Debug("Converting statistics to API format")
	apiStats := convertFitStatistics(stats)
	lg.Dump("API Statistics", apiStats)

	w.Header().Set("Content-Type", "application/json")
	lg.Trace("Encoding response")
	if err := json.NewEncoder(w).Encode(apiStats); err != nil {
		// Headers are already out; nothing useful left to send the client.
		lg.Error("Failed to encode response: %v", err)
		return
	}
	lg.Info("Response sent successfully")
}
// flog.Debug("Converting statistics to API format")
// apiStats := convertFitStatistics(stats)
// flog.Dump("API Statistics", apiStats)
// handleSearch serves GET /api/search?q=...: it runs the query string against
// ships, systems, modules and groups (up to 10 hits per category) and returns
// the combined list as JSON. An empty query yields an empty list, not an
// error; a failing category lookup is logged and skipped so the other
// categories still contribute results.
func handleSearch(w http.ResponseWriter, r *http.Request) {
	lg := logger.Default.WithPrefix("handleSearch")
	lg.Trace("Request received: %s %s", r.Method, r.URL.Path)

	if r.Method != http.MethodGet {
		lg.Debug("Method not allowed: %s", r.Method)
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	query := r.URL.Query().Get("q")
	lg.Debug("Search query: %q", query)
	if query == "" {
		lg.Info("Empty query, returning empty results")
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode([]APISearchResult{})
		return
	}

	db, err := GetDB()
	if err != nil {
		lg.Error("Failed to get database: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	lg.Trace("Database connection obtained")

	hits := []APISearchResult{}

	lg.Debug("Searching ships")
	if ships, err := db.SearchShips(query, 10); err != nil {
		lg.Error("Failed to search ships: %v", err)
	} else {
		lg.Info("Found %d ships", len(ships))
		for _, s := range ships {
			hits = append(hits, APISearchResult{ID: int64(s.TypeID), Name: s.TypeName, Type: "ship"})
		}
	}

	lg.Debug("Searching systems")
	if systems, err := db.SearchSystems(query, 10); err != nil {
		lg.Error("Failed to search systems: %v", err)
	} else {
		lg.Info("Found %d systems", len(systems))
		for _, s := range systems {
			hits = append(hits, APISearchResult{ID: int64(s.SolarSystemID), Name: s.SolarSystemName, Type: "system"})
		}
	}

	lg.Debug("Searching modules")
	if modules, err := db.SearchModules(query, 10); err != nil {
		lg.Error("Failed to search modules: %v", err)
	} else {
		lg.Info("Found %d modules", len(modules))
		for _, m := range modules {
			hits = append(hits, APISearchResult{ID: int64(m.TypeID), Name: m.TypeName, Type: "module"})
		}
	}

	lg.Debug("Searching groups")
	if groups, err := db.SearchGroups(query, 10); err != nil {
		lg.Error("Failed to search groups: %v", err)
	} else {
		lg.Info("Found %d groups", len(groups))
		for _, g := range groups {
			hits = append(hits, APISearchResult{ID: int64(g.GroupID), Name: g.GroupName, Type: "group"})
		}
	}

	lg.Info("Total search results: %d", len(hits))
	lg.Dump("Results", hits)

	w.Header().Set("Content-Type", "application/json")
	lg.Trace("Encoding response")
	if err := json.NewEncoder(w).Encode(hits); err != nil {
		lg.Error("Failed to encode response: %v", err)
		return
	}
	lg.Info("Response sent successfully")
}
// flog.Debug("Searching modules")
// modules, err := db.SearchModules(query, 10)
// if err != nil {
// flog.Error("Failed to search modules: %v", err)
// } else {
// flog.Info("Found %d modules", len(modules))
// for _, module := range modules {
// results = append(results, APISearchResult{
// ID: int64(module.TypeID),
// Name: module.TypeName,
// Type: "module",
// })
// }
// }
// handleItemNames serves GET requests with an ids=1,2,3 query parameter: it
// resolves a comma-separated list of 32-bit type IDs to display names and
// returns a JSON object mapping decimal ID strings to names. Unparseable IDs
// are skipped rather than rejecting the whole request; an empty or fully
// invalid parameter yields an empty map.
func handleItemNames(w http.ResponseWriter, r *http.Request) {
	lg := logger.Default.WithPrefix("handleItemNames")
	lg.Trace("Request received: %s %s", r.Method, r.URL.Path)

	if r.Method != http.MethodGet {
		lg.Debug("Method not allowed: %s", r.Method)
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	idsParam := r.URL.Query().Get("ids")
	lg.Debug("IDs parameter: %q", idsParam)
	if idsParam == "" {
		lg.Info("Empty IDs parameter, returning empty map")
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(APIItemNames{})
		return
	}

	lg.Debug("Parsing IDs")
	tokens := strings.Split(idsParam, ",")
	lg.Trace("ID strings: %v", tokens)
	ids := make([]int32, 0, len(tokens))
	for _, tok := range tokens {
		// Type IDs fit in 32 bits; anything unparseable is dropped.
		v, err := strconv.ParseInt(tok, 10, 32)
		if err != nil {
			lg.Debug("Failed to parse ID %q: %v", tok, err)
			continue
		}
		ids = append(ids, int32(v))
	}
	lg.Info("Parsed %d valid IDs from %d strings", len(ids), len(tokens))

	if len(ids) == 0 {
		lg.Info("No valid IDs, returning empty map")
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(APIItemNames{})
		return
	}

	db, err := GetDB()
	if err != nil {
		lg.Error("Failed to get database: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	lg.Trace("Database connection obtained")

	lg.Debug("Getting item types for %d IDs", len(ids))
	// GetItemTypes takes int64 IDs, so widen the parsed 32-bit values.
	itemIDs := make([]int64, len(ids))
	for i, id := range ids {
		itemIDs[i] = int64(id)
	}
	items, err := db.GetItemTypes(itemIDs)
	if err != nil {
		lg.Error("Failed to get item types: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}

	// Wire format is a map keyed by the decimal type ID string.
	names := make(APIItemNames)
	for _, it := range items {
		names[strconv.FormatInt(int64(it.TypeID), 10)] = it.TypeName
	}
	lg.Info("Total names: %d", len(names))
	lg.Dump("Names", names)

	w.Header().Set("Content-Type", "application/json")
	lg.Trace("Encoding response")
	if err := json.NewEncoder(w).Encode(names); err != nil {
		lg.Error("Failed to encode response: %v", err)
		return
	}
	lg.Info("Response sent successfully")
}
// w.Header().Set("Content-Type", "application/json")
// flog.Trace("Encoding response")
// if err := json.NewEncoder(w).Encode(names); err != nil {
// flog.Error("Failed to encode response: %v", err)
// return
// }
// flog.Info("Response sent successfully")
// }
// handleItemGroup serves GET /api/items/{id}/group: it resolves the group ID
// of a single item type and returns it as JSON.
//
// Responses: 405 for non-GET, 400 for a malformed or empty item ID in the
// path, 404 when the item type does not exist, 500 on database failures.
func handleItemGroup(w http.ResponseWriter, r *http.Request) {
	flog := logger.Default.WithPrefix("handleItemGroup")
	flog.Trace("Request received: %s %s", r.Method, r.URL.Path)
	if r.Method != http.MethodGet {
		flog.Debug("Method not allowed: %s", r.Method)
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	flog.Debug("Parsing item ID from path: %s", r.URL.Path)
	path := strings.TrimPrefix(r.URL.Path, "/api/items/")
	path = strings.TrimSuffix(path, "/group")
	flog.Trace("Trimmed path: %q", path)
	parts := strings.Split(path, "/")
	flog.Trace("Path parts: %v", parts)
	// strings.Split with a non-empty separator never returns an empty slice,
	// so the previous len(parts)==0 guard could not fire; reject an empty ID
	// segment explicitly instead (e.g. a request for "/api/items//group").
	if parts[0] == "" {
		flog.Error("Invalid path: empty item ID segment")
		http.Error(w, "Invalid item ID", http.StatusBadRequest)
		return
	}
	itemID, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		flog.Error("Failed to parse item ID %q: %v", parts[0], err)
		http.Error(w, "Invalid item ID", http.StatusBadRequest)
		return
	}
	flog.Info("Item ID: %d", itemID)
	db, err := GetDB()
	if err != nil {
		flog.Error("Failed to get database: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	flog.Trace("Database connection obtained")
	flog.Debug("Getting item type for itemID: %d", itemID)
	items, err := db.GetItemTypes([]int64{itemID})
	if err != nil {
		flog.Error("Failed to get item type: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}
	// An empty result set means the type ID is unknown.
	if len(items) == 0 {
		flog.Info("Item not found: typeID %d", itemID)
		http.Error(w, "Item not found", http.StatusNotFound)
		return
	}
	item := items[0]
	flog.Info("Found groupID %d for itemID %d", item.GroupID, itemID)
	groupInfo := APIGroupInfo{
		GroupID: int64(item.GroupID),
	}
	flog.Dump("Group Info", groupInfo)
	w.Header().Set("Content-Type", "application/json")
	flog.Trace("Encoding response")
	if err := json.NewEncoder(w).Encode(groupInfo); err != nil {
		flog.Error("Failed to encode response: %v", err)
		return
	}
	flog.Info("Response sent successfully")
}
// if r.Method == http.MethodOptions {
// w.WriteHeader(http.StatusNoContent)
// return
// }
func corsMiddleware(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
// next(w, r)
// }
// }
if r.Method == http.MethodOptions {
w.WriteHeader(http.StatusNoContent)
return
}
next(w, r)
}
}
// func handleImage(w http.ResponseWriter, r *http.Request) {
// flog := logger.Default.WithPrefix("handleImage")
@@ -698,29 +699,30 @@ const cacheTTL = 24 * time.Hour
// json.NewEncoder(w).Encode(APIImageBatchResponse{Images: results})
// }
// func StartAPIServer(port string) {
// flog := logger.Default.WithPrefix("StartAPIServer")
// flog.Info("Initializing API server")
// StartAPIServer registers all API routes and blocks serving HTTP on the
// given port. Errors from ListenAndServe are logged, not returned.
func StartAPIServer(port string) {
	lg := logger.Default.WithPrefix("StartAPIServer")
	lg.Info("Initializing API server")

	// /api/items/ multiplexes two endpoints: paths ending in "/group" go to
	// the group lookup, everything else under the prefix to the name lookup.
	itemsHandler := func(w http.ResponseWriter, r *http.Request) {
		if strings.HasSuffix(r.URL.Path, "/group") {
			handleItemGroup(w, r)
			return
		}
		handleItemNames(w, r)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/api/statistics", corsMiddleware(handleStatistics))
	mux.HandleFunc("/api/search", corsMiddleware(handleSearch))
	mux.HandleFunc("/api/items/", corsMiddleware(itemsHandler))

	lg.Debug("Registered routes:")
	lg.Debug(" POST /api/statistics")
	lg.Debug(" GET /api/search")
	lg.Debug(" GET /api/items/ (names)")
	lg.Debug(" GET /api/items/{id}/group")

	lg.Info("Starting API server on port %s", port)
	lg.Trace("Listening on :%s", port)
	if err := http.ListenAndServe(":"+port, mux); err != nil {
		lg.Error("Failed to start API server: %v", err)
	}
}

View File

@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"strings"
"git.site.quack-lab.dev/dave/cyutils"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
@@ -200,7 +201,15 @@ func (db *DBWrapper) checkExistingKillmails(ctx context.Context, killmailIDs []i
return make(map[int64]struct{}), nil
}
rows, err := db.ch.Query(ctx, "SELECT killmail_id FROM killmails WHERE killmail_id IN ?", killmailIDs)
placeholders := make([]string, len(killmailIDs))
args := make([]interface{}, len(killmailIDs))
for j, id := range killmailIDs {
placeholders[j] = "?"
args[j] = id
}
query := fmt.Sprintf("SELECT killmail_id FROM killmails WHERE killmail_id IN (%s)", strings.Join(placeholders, ","))
rows, err := db.ch.Query(ctx, query, args...)
if err != nil {
return nil, err
}
@@ -213,5 +222,6 @@ func (db *DBWrapper) checkExistingKillmails(ctx context.Context, killmailIDs []i
existing[id] = struct{}{}
}
}
return existing, nil
}

233
db.go
View File

@@ -17,15 +17,6 @@ import (
"gorm.io/gorm/schema"
)
// Add methods to QueryParams for template use
// HasModules reports whether any module filters were supplied.
func (qp QueryParams) HasModules() bool {
	return len(qp.Modules) != 0
}
type ModuleStatsData struct {
KillmailIDs []int64
}
type DB interface {
Init() error
Get() *gorm.DB
@@ -191,15 +182,49 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
modules := deduplicateInt64(params.Modules)
flog.Debug("Deduplicated modules: %d -> %d", len(params.Modules), len(modules))
// Create a copy of params with deduplicated modules for template use
templateParams := params
templateParams.Modules = modules
// Build the base query - start with all killmails
baseQuery := `
SELECT
fk.killmail_id,
fk.solar_system_id,
fk.victim_ship_type_id
FROM killmails fk`
// Generate query using template
query, args, err := ExecuteTemplate(MainQueryTmpl, templateParams)
if err != nil {
flog.Error("Failed to execute main query template: %v", err)
return nil, err
args := []interface{}{}
whereClauses := []string{}
// Apply filters
if params.Ship > 0 {
whereClauses = append(whereClauses, "fk.victim_ship_type_id = ?")
args = append(args, params.Ship)
}
if len(params.Systems) > 0 {
placeholders := make([]string, len(params.Systems))
for i := range params.Systems {
placeholders[i] = "?"
args = append(args, params.Systems[i])
}
whereClauses = append(whereClauses, "fk.solar_system_id IN ("+strings.Join(placeholders, ",")+")")
}
// For module filters, we need to join with modules
var moduleJoin string
if len(modules) > 0 {
placeholders := make([]string, len(modules))
for i := range modules {
placeholders[i] = "?"
args = append(args, modules[i])
}
moduleJoin = `
INNER JOIN modules fm ON fk.killmail_id = fm.killmail_id`
whereClauses = append(whereClauses, "fm.item_type_id IN ("+strings.Join(placeholders, ",")+")")
}
// Build final query
query := baseQuery + moduleJoin
if len(whereClauses) > 0 {
query += " WHERE " + strings.Join(whereClauses, " AND ")
}
flog.Debug("Executing query: %s", query)
@@ -244,14 +269,14 @@ func (db *DBWrapper) QueryFits(params QueryParams) (*FitStatistics, error) {
}
// Calculate ship breakdown from database
shipBreakdown, err := db.calculateShipBreakdown(templateParams, totalKillmails, flog)
shipBreakdown, err := db.calculateShipBreakdown(query, args, totalKillmails, flog)
if err != nil {
flog.Error("Failed to calculate ship breakdown: %v", err)
return nil, err
}
// Calculate system breakdown from database
systemBreakdown, err := db.calculateSystemBreakdown(templateParams, totalKillmails, flog)
systemBreakdown, err := db.calculateSystemBreakdown(query, args, totalKillmails, flog)
if err != nil {
flog.Error("Failed to calculate system breakdown: %v", err)
return nil, err
@@ -362,6 +387,150 @@ func deduplicateInt64(slice []int64) []int64 {
return result
}
// calculateStats is currently a no-op stub: the previous module slot/flag
// aggregation that lived here has been disabled, and the function now ignores
// all of its arguments and reports success so existing callers keep working.
// NOTE(review): either restore a real implementation or remove this method
// and its call sites once the replacement statistics pipeline is in place.
func (db *DBWrapper) calculateStats(params QueryParams, shipTypeIDs []int64, killmailIDs []int64, stats *FitStatistics, total int64, flog *logger.Logger) error {
	return nil
}
func (db *DBWrapper) CacheSet(key string, data []byte) error {
flog := logger.Default.WithPrefix("CacheSet").WithPrefix(key)
flog.Dump("data", data)
@@ -453,14 +622,15 @@ func limitKillmails(killmailIDs []int64, limit int) []int64 {
return killmailIDs[:limit]
}
func (db *DBWrapper) calculateShipBreakdown(templateData QueryParams, totalKillmails int64, flog *cylogger.Logger) (map[int64]int64, error) {
func (db *DBWrapper) calculateShipBreakdown(baseQuery string, args []interface{}, totalKillmails int64, flog *cylogger.Logger) (map[int64]int64, error) {
ctx := context.Background()
// Generate query using template
query, args, err := ExecuteTemplate(ShipBreakdownTmpl, templateData)
if err != nil {
return nil, fmt.Errorf("failed to execute ship breakdown template: %w", err)
}
// Modify the base query to group by ship type
query := strings.Replace(baseQuery, "SELECT\n\t\t\tfk.killmail_id,\n\t\t\tfk.solar_system_id,\n\t\t\tfk.victim_ship_type_id\n\t\tFROM killmails fk",
"SELECT\n\t\t\tfk.victim_ship_type_id,\n\t\t\tCOUNT(*) as count\n\t\tFROM killmails fk", 1)
// Add GROUP BY
query += " GROUP BY fk.victim_ship_type_id ORDER BY count DESC"
flog.Debug("Ship breakdown query: %s", query)
rows, err := db.ch.Query(ctx, query, args...)
@@ -482,14 +652,15 @@ func (db *DBWrapper) calculateShipBreakdown(templateData QueryParams, totalKillm
return shipBreakdown, nil
}
func (db *DBWrapper) calculateSystemBreakdown(templateData QueryParams, totalKillmails int64, flog *cylogger.Logger) (map[int64]int64, error) {
func (db *DBWrapper) calculateSystemBreakdown(baseQuery string, args []interface{}, totalKillmails int64, flog *cylogger.Logger) (map[int64]int64, error) {
ctx := context.Background()
// Generate query using template
query, args, err := ExecuteTemplate(SystemBreakdownTmpl, templateData)
if err != nil {
return nil, fmt.Errorf("failed to execute system breakdown template: %w", err)
}
// Modify the base query to group by solar system
query := strings.Replace(baseQuery, "SELECT\n\t\t\tfk.killmail_id,\n\t\t\tfk.solar_system_id,\n\t\t\tfk.victim_ship_type_id\n\t\tFROM killmails fk",
"SELECT\n\t\t\tfk.solar_system_id,\n\t\t\tCOUNT(*) as count\n\t\tFROM killmails fk", 1)
// Add GROUP BY
query += " GROUP BY fk.solar_system_id ORDER BY count DESC"
flog.Debug("System breakdown query: %s", query)
rows, err := db.ch.Query(ctx, query, args...)

View File

@@ -13,7 +13,7 @@ import (
func main() {
ingest := flag.Bool("ingest", false, "ingest killmails from data directory")
server := flag.Bool("server", false, "start API server")
// port := flag.String("port", "3000", "API server port")
port := flag.String("port", "3000", "API server port")
flag.Parse()
logger.InitFlag()
logger.Default = logger.Default.ToFile("zkill.log")
@@ -31,13 +31,13 @@ func main() {
}
if *server {
// StartAPIServer(*port)
StartAPIServer(*port)
return
}
logger.Info("Querying fits")
params := QueryParams{
Ship: 32872,
Ship: 32872,
}
stats, err := db.QueryFits(params)
if err != nil {

View File

@@ -123,6 +123,10 @@ type QueryParams struct {
KillmailLimit int
}
type ModuleStatsData struct {
KillmailIDs []int64
}
// CacheEntry stores both statistics (JSON) and images (blobs) in unified cache
// For 404s, we store a special marker: []byte{0xFF, 0xFE, 0xFD} (NOT_FOUND_MARKER)
type CacheEntry struct {