Merge branch 'master' into singularity

6  .gitignore  (vendored)
@@ -11,8 +11,12 @@
#Patch files
*.patch

#Personal
/saveddata
saveddata/

#PyCharm
.idea/

#Pyfa file
pyfaFits.html
@@ -23,7 +23,7 @@ from eos.types import MetaData
from eos.db import gamedata_meta

metadata_table = Table("metadata", gamedata_meta,
                       Column("fieldName", String, primary_key=True),
                       Column("fieldValue", String))
                       Column("field_name", String, primary_key=True),
                       Column("field_value", String))

mapper(MetaData, metadata_table)
@@ -28,7 +28,7 @@ import traceback
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

class Effect(EqBase):
    '''
@@ -325,9 +325,7 @@ class Item(EqBase):
        return False

class MetaData(EqBase):
    def __init__(self, name, val=None):
        self.fieldName = name
        self.fieldValue = val
    pass

class EffectInfo(EqBase):
    pass
@@ -33,7 +33,7 @@ import time
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

class Fit(object):
    """Represents a fitting, with modules, ship, implants, etc."""
@@ -1,497 +0,0 @@
#!/usr/bin/env python3
#===============================================================================
# Copyright (C) 2010-2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================


'''
This script is used to compare two different database versions.
It shows removed/changed/new items with list of changed effects,
changed attributes and effects which were renamed
'''

import argparse
import os.path
import re
import sqlite3

parser = argparse.ArgumentParser(description="Compare two databases generated from eve dump to find eos-related differences")
parser.add_argument("-o", "--old", type=str, required=True, help="path to old cache data dump")
parser.add_argument("-n", "--new", type=str, required=True, help="path to new cache data dump")
parser.add_argument("-g", "--nogroups", action="store_false", default=True, dest="groups", help="don't show changed groups")
parser.add_argument("-e", "--noeffects", action="store_false", default=True, dest="effects", help="don't show list of changed effects")
parser.add_argument("-a", "--noattributes", action="store_false", default=True, dest="attributes", help="don't show list of changed attributes")
parser.add_argument("-r", "--norenames", action="store_false", default=True, dest="renames", help="don't show list of renamed data")
args = parser.parse_args()

# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(args.old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(args.new))
new_cursor = new_db.cursor()

# Force some of the items to make them published
FORCEPUB_TYPES = ("Ibis", "Impairor", "Velator", "Reaper")
OVERRIDES_TYPEPUB = 'UPDATE invtypes SET published = 1 WHERE typeName = ?'
for typename in FORCEPUB_TYPES:
    old_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
    new_cursor.execute(OVERRIDES_TYPEPUB, (typename,))

# Initialization of few things used by both changed/renamed effects list
effectspath = os.path.join("..", "..", "effects")
implemented = set()

for filename in os.listdir(effectspath):
    basename, extension = filename.rsplit('.', 1)
    # Ignore non-py files and exclude implementation-specific 'effect'
    if extension == "py" and basename not in ("__init__",):
        implemented.add(basename)

# Effects' names are used w/o any special symbols by eos
stripspec = "[^A-Za-z0-9]"

# Method to get data if effect is implemented in eos or not
def geteffst(effectname):
    eosname = re.sub(stripspec, "", effectname).lower()
    if eosname in implemented:
        impstate = True
    else:
        impstate = False
    return impstate

def findrenames(ren_dict, query, strip=False):

    old_namedata = {}
    new_namedata = {}

    for cursor, dictionary in ((old_cursor, old_namedata), (new_cursor, new_namedata)):
        cursor.execute(query)
        for row in cursor:
            id = row[0]
            name = row[1]
            if strip is True:
                name = re.sub(stripspec, "", name)
            dictionary[id] = name

    for id in set(old_namedata.keys()).intersection(new_namedata.keys()):
        oldname = old_namedata[id]
        newname = new_namedata[id]
        if oldname != newname:
            ren_dict[id] = (oldname, newname)
    return

def printrenames(ren_dict, title, implementedtag=False):
    if len(ren_dict) > 0:
        print('\nRenamed ' + title + ':')
        for id in sorted(ren_dict):
            couple = ren_dict[id]
            if implementedtag:
                print("\n[{0}] \"{1}\"\n[{2}] \"{3}\"".format(geteffst(couple[0]), couple[0], geteffst(couple[1]), couple[1]))
            else:
                print("\n\"{0}\"\n\"{1}\"".format(couple[0], couple[1]))

groupcats = {}
def getgroupcat(grp):
    """Get group category from the new db"""
    if grp in groupcats:
        cat = groupcats[grp]
    else:
        query = 'SELECT categoryID FROM invgroups WHERE groupID = ?'
        new_cursor.execute(query, (grp,))
        cat = 0
        for row in new_cursor:
            cat = row[0]
        groupcats[grp] = cat
    return cat

itemnames = {}
def getitemname(item):
    """Get item name from the new db"""
    if item in itemnames:
        name = itemnames[item]
    else:
        query = 'SELECT typeName FROM invtypes WHERE typeID = ?'
        new_cursor.execute(query, (item,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (item,))
            for row in old_cursor:
                name = row[0]
        itemnames[item] = name
    return name

groupnames = {}
def getgroupname(grp):
    """Get group name from the new db"""
    if grp in groupnames:
        name = groupnames[grp]
    else:
        query = 'SELECT groupName FROM invgroups WHERE groupID = ?'
        new_cursor.execute(query, (grp,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (grp,))
            for row in old_cursor:
                name = row[0]
        groupnames[grp] = name
    return name

effectnames = {}
def geteffectname(effect):
    """Get effect name from the new db"""
    if effect in effectnames:
        name = effectnames[effect]
    else:
        query = 'SELECT effectName FROM dgmeffects WHERE effectID = ?'
        new_cursor.execute(query, (effect,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (effect,))
            for row in old_cursor:
                name = row[0]
        effectnames[effect] = name
    return name

attrnames = {}
def getattrname(attr):
    """Get attribute name from the new db"""
    if attr in attrnames:
        name = attrnames[attr]
    else:
        query = 'SELECT attributeName FROM dgmattribs WHERE attributeID = ?'
        new_cursor.execute(query, (attr,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (attr,))
            for row in old_cursor:
                name = row[0]
        attrnames[attr] = name
    return name

# State table
S = {"unchanged": 0,
     "removed": 1,
     "changed": 2,
     "added": 3 }

if args.effects or args.attributes or args.groups:
    # Format:
    # Key: item id
    # Value: [groupID, set(effects), {attribute id : value}]
    old_itmdata = {}
    new_itmdata = {}

    for cursor, dictionary in ((old_cursor, old_itmdata), (new_cursor, new_itmdata)):
        # Compose list of items we're interested in, filtered by category
        query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryName IN ("Ship", "Module", "Charge", "Skill", "Drone", "Implant", "Subsystem")'
        cursor.execute(query)
        for row in cursor:
            itemid = row[0]
            groupID = row[1]
            # Initialize container for the data for each item with empty stuff besides groupID
            dictionary[itemid] = [groupID, set(), {}]
        # Add items filtered by group
        query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon")'
        cursor.execute(query)
        for row in cursor:
            itemid = row[0]
            groupID = row[1]
            dictionary[itemid] = [groupID, set(), {}]

        if args.effects:
            # Pull all eff
            query = 'SELECT it.typeID, de.effectID FROM invtypes AS it INNER JOIN dgmtypeeffects AS dte ON dte.typeID = it.typeID INNER JOIN dgmeffects AS de ON de.effectID = dte.effectID WHERE it.published = 1'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                effectID = row[1]
                # Process only items we need
                if itemid in dictionary:
                    # Add effect to the set
                    effectSet = dictionary[itemid][1]
                    effectSet.add(effectID)

        if args.attributes:
            # Add base attributes to our data
            query = 'SELECT it.typeID, it.mass, it.capacity, it.volume FROM invtypes AS it'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                if itemid in dictionary:
                    attrdict = dictionary[itemid][2]
                    # Add base attributes: mass (4), capacity (38) and volume (161)
                    attrdict[4] = row[1]
                    attrdict[38] = row[2]
                    attrdict[161] = row[3]

            # Add attribute data for other attributes
            query = 'SELECT dta.typeID, dta.attributeID, dta.value FROM dgmtypeattribs AS dta'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                if itemid in dictionary:
                    attrid = row[1]
                    attrval = row[2]
                    attrdict = dictionary[itemid][2]
                    if attrid in attrdict:
                        print("Warning: base attribute is described in non-base attribute table")
                    else:
                        attrdict[attrid] = attrval

    # Get set of IDs from both dictionaries
    items_old = set(old_itmdata.keys())
    items_new = set(new_itmdata.keys())

    # Format:
    # Key: item state
    # Value: {item id: ((group state, old group, new group), {effect state: set(effects)}, {attribute state: {attributeID: (old value, new value)}})}
    global_itmdata = {}

    # Initialize it
    for state in S:
        global_itmdata[S[state]] = {}


    # Fill all the data for removed items
    for item in items_old.difference(items_new):
        # Set item state to removed
        state = S["removed"]
        # Set only old group for item
        oldgroup = old_itmdata[item][0]
        groupdata = (S["unchanged"], oldgroup, None)
        # Set old set of effects and mark all as unchanged
        effectsdata = {}
        effectsdata[S["unchanged"]] = set()
        if args.effects:
            oldeffects = old_itmdata[item][1]
            effectsdata[S["unchanged"]].update(oldeffects)
        # Set old set of attributes and mark all as unchanged
        attrdata = {}
        attrdata[S["unchanged"]] = {}
        if args.attributes:
            oldattrs = old_itmdata[item][2]
            for attr in oldattrs:
                # NULL will mean there's no such attribute in db
                attrdata[S["unchanged"]][attr] = (oldattrs[attr], "NULL")
        # Fill global dictionary with data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)


    # Now, for added items
    for item in items_new.difference(items_old):
        # Set item state to added
        state = S["added"]
        # Set only new group for item
        newgroup = new_itmdata[item][0]
        groupdata = (S["unchanged"], None, newgroup)
        # Set new set of effects and mark all as unchanged
        effectsdata = {}
        effectsdata[S["unchanged"]] = set()
        if args.effects:
            neweffects = new_itmdata[item][1]
            effectsdata[S["unchanged"]].update(neweffects)
        # Set new set of attributes and mark all as unchanged
        attrdata = {}
        attrdata[S["unchanged"]] = {}
        if args.attributes:
            newattrs = new_itmdata[item][2]
            for attr in newattrs:
                # NULL will mean there's no such attribute in db
                attrdata[S["unchanged"]][attr] = ("NULL", newattrs[attr])
        # Fill global dictionary with data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

    # Now, check all the items which exist in both databases
    for item in items_old.intersection(items_new):
        # Set group data for an item
        oldgroup = old_itmdata[item][0]
        newgroup = new_itmdata[item][0]
        # If we're not asked to compare groups, mark them as unchanged anyway
        groupdata = (S["changed"] if oldgroup != newgroup and args.groups else S["unchanged"], oldgroup, newgroup)
        # Fill effects data into appropriate groups
        effectsdata = {}
        for state in S:
            # We do not have changed effects whatsoever
            if state != "changed":
                effectsdata[S[state]] = set()
        if args.effects:
            oldeffects = old_itmdata[item][1]
            neweffects = new_itmdata[item][1]
            effectsdata[S["unchanged"]].update(oldeffects.intersection(neweffects))
            effectsdata[S["removed"]].update(oldeffects.difference(neweffects))
            effectsdata[S["added"]].update(neweffects.difference(oldeffects))
        # Go through all attributes, filling global data dictionary
        attrdata = {}
        for state in S:
            attrdata[S[state]] = {}
        if args.attributes:
            oldattrs = old_itmdata[item][2]
            newattrs = new_itmdata[item][2]
            for attr in set(oldattrs.keys()).union(newattrs.keys()):
                # NULL will mean there's no such attribute in db
                oldattr = oldattrs.get(attr, "NULL")
                newattr = newattrs.get(attr, "NULL")
                attrstate = S["unchanged"]
                if oldattr == "NULL" and newattr != "NULL":
                    attrstate = S["added"]
                elif oldattr != "NULL" and newattr == "NULL":
                    attrstate = S["removed"]
                elif oldattr != newattr:
                    attrstate = S["changed"]
                attrdata[attrstate][attr] = (oldattr, newattr)
        # Consider item as unchanged by default and set it to change when we see any changes in sub-items
        state = S["unchanged"]
        if state == S["unchanged"] and groupdata[0] != S["unchanged"]:
            state = S["changed"]
        if state == S["unchanged"] and (len(effectsdata[S["removed"]]) > 0 or len(effectsdata[S["added"]]) > 0):
            state = S["changed"]
        if state == S["unchanged"] and (len(attrdata[S["removed"]]) > 0 or len(attrdata[S["changed"]]) > 0 or len(attrdata[S["added"]]) > 0):
            state = S["changed"]
        # Fill global dictionary with data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

# As eos uses names as unique IDs in lot of places, we have to keep track of name changes
if args.renames:
    ren_effects = {}
    query = 'SELECT effectID, effectName FROM dgmeffects'
    findrenames(ren_effects, query, strip = True)

    ren_attributes = {}
    query = 'SELECT attributeID, attributeName FROM dgmattribs'
    findrenames(ren_attributes, query)

    ren_categories = {}
    query = 'SELECT categoryID, categoryName FROM invcategories'
    findrenames(ren_categories, query)

    ren_groups = {}
    query = 'SELECT groupID, groupName FROM invgroups'
    findrenames(ren_groups, query)

    ren_marketgroups = {}
    query = 'SELECT marketGroupID, marketGroupName FROM invmarketgroups'
    findrenames(ren_marketgroups, query)

    ren_items = {}
    query = 'SELECT typeID, typeName FROM invtypes'
    findrenames(ren_items, query)

# Get db metadata
old_meta = {}
new_meta = {}
query = 'SELECT fieldName, fieldValue FROM metadata'
old_cursor.execute(query)
for row in old_cursor:
    old_meta[row[0]] = row[1]
new_cursor.execute(query)
for row in new_cursor:
    new_meta[row[0]] = row[1]

# Print jobs
print("Comparing databases:\n{0}-{1}\n{2}-{3}\n".format(old_meta.get("version"), old_meta.get("release"),
                                                        new_meta.get("version"), new_meta.get("release")))
if args.effects or args.attributes or args.groups:
    # Print legend only when there're any interesting changes
    if len(global_itmdata[S["removed"]]) > 0 or len(global_itmdata[S["changed"]]) > 0 or len(global_itmdata[S["added"]]) > 0:
        genleg = "[+] - new item\n[-] - removed item\n[*] - changed item\n"
        grpleg = "(x => y) - group changes\n" if args.groups else ""
        attreffleg = " [+] - effect or attribute has been added to item\n [-] - effect or attribute has been removed from item\n" if args.attributes or args.effects else ""
        effleg = " [y] - effect is implemented\n [n] - effect is not implemented\n" if args.effects else ""
        print("{0}{1}{2}{3}\nItems:".format(genleg, grpleg, attreffleg, effleg))

    # Make sure our states are sorted
    stateorder = sorted(global_itmdata)

    TG = {S["unchanged"]: "+", S["changed"]: "*",
          S["removed"]: "-",
          S["added"]: "+"}

    # Cycle through states
    for itmstate in stateorder:
        # Skip unchanged items
        if itmstate == S["unchanged"]:
            continue
        items = global_itmdata[itmstate]
        # Sort by name first
        itemorder = sorted(items, key=lambda item: getitemname(item))
        # Then by group id
        itemorder = sorted(itemorder, key=lambda item: items[item][0][2] or items[item][0][1])
        # Then by category id
        itemorder = sorted(itemorder, key=lambda item: getgroupcat(items[item][0][2] or items[item][0][1]))

        for item in itemorder:
            groupdata = items[item][0]
            groupstr = " ({0} => {1})".format(getgroupname(groupdata[1]), getgroupname(groupdata[2])) if groupdata[0] == S["changed"] else ""
            print("\n[{0}] {1}{2}".format(TG[itmstate], getitemname(item), groupstr))

            effdata = items[item][1]
            for effstate in stateorder:
                # Skip unchanged effect sets, but always include them for added or removed ships
                # Also, always skip empty data
                if (effstate == S["unchanged"] and itmstate not in (S["removed"], S["added"])) or effstate not in effdata:
                    continue
                effects = effdata[effstate]
                efforder = sorted(effects, key=lambda eff: geteffectname(eff))
                for eff in efforder:
                    # Take tag from item if item was added or removed
                    tag = TG[effstate] if itmstate not in (S["removed"], S["added"]) else TG[itmstate]
                    print(" [{0}|{1}] {2}".format(tag, "y" if geteffst(geteffectname(eff)) else "n", geteffectname(eff)))

            attrdata = items[item][2]
            for attrstate in stateorder:
                # Skip unchanged and empty attribute sets, also skip attributes display for added and removed items
                if (attrstate == S["unchanged"] and itmstate != S["added"]) or itmstate in (S["removed"], ) or attrstate not in attrdata:
                    continue
                attrs = attrdata[attrstate]
                attrorder = sorted(attrs, key=lambda attr: getattrname(attr))
                for attr in attrorder:
                    valline = ""
                    if attrs[attr][0] == "NULL" or itmstate == S["added"]:
                        valline = "{0}".format(attrs[attr][1] or 0)
                    elif attrs[attr][1] == "NULL":
                        valline = "{0}".format(attrs[attr][0] or 0)
                    else:
                        valline = "{0} => {1}".format(attrs[attr][0] or 0, attrs[attr][1] or 0)
                    print(" [{0}] {1}: {2}".format(TG[attrstate], getattrname(attr), valline))

if args.renames:
    title = 'effects'
    printrenames(ren_effects, title, implementedtag=True)

    title = 'attributes'
    printrenames(ren_attributes, title)

    title = 'categories'
    printrenames(ren_categories, title)

    title = 'groups'
    printrenames(ren_groups, title)

    title = 'market groups'
    printrenames(ren_marketgroups, title)

    title = 'items'
    printrenames(ren_items, title)
@@ -1,127 +0,0 @@
#!/usr/bin/env python

from optparse import OptionParser
import os.path
import shutil
import tempfile
import sys
import tarfile
import datetime
import random
import string

class FileStub():
    def write(self, *args):
        pass

    def flush(self, *args):
        pass

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

if __name__ == "__main__":
    oldstd = sys.stdout
    parser = OptionParser()
    parser.add_option("-s", "--skeleton", dest="skeleton", help="Location of skeleton directory")
    parser.add_option("-b", "--base", dest="base", help="location of the base directory")
    parser.add_option("-d", "--destination", dest="destination", help="where to copy our archive")
    parser.add_option("-t", "--static", dest="static", help="directory containing static files")
    parser.add_option("-q", "--quiet", dest="silent", action="store_true")
    options, args = parser.parse_args()

    if options.skeleton is None or options.base is None or options.destination is None:
        print "Need --skeleton argument as well as --base and --destination argument"
        parser.print_help()
        sys.exit()

    if options.silent:
        sys.stdout = FileStub()

    randomId = id_generator()
    infoDict = {}
    skeleton = os.path.expanduser(options.skeleton)
    info = execfile(os.path.join(skeleton, "info.py"), infoDict)
    now = datetime.datetime.now()
    now = "%04d%02d%02d" % (now.year, now.month, now.day)
    dirName = "nighty-build-%s-%s" % (now, randomId)
    dst = os.path.join(os.getcwd(), dirName)
    tmpFile = os.path.join(os.getcwd(), "nighty-build-%s-%s-%s.tar.bz2" % (now, infoDict["os"], randomId))
    config = os.path.join(skeleton, "config.py")
    destination = os.path.expanduser(options.destination)

    i = 0
    gitData = (".git", ".gitignore", ".gitmodules")
    def loginfo(path, names):
        global i
        i += 1
        if i % 10 == 0:
            sys.stdout.write(".")
            sys.stdout.flush()
        return gitData

    try:
        print "copying skeleton to ", dst
        i = 0
        shutil.copytree(skeleton, dst, ignore=loginfo)
        print ""

        base = os.path.join(dst, infoDict["base"])
        print "copying base to ", base

        i = 0
        for stuff in os.listdir(os.path.expanduser(options.base)):
            currSource = os.path.join(os.path.expanduser(options.base), stuff)
            currDest = os.path.join(base, stuff)
            if stuff in gitData:
                continue
            elif os.path.isdir(currSource):
                shutil.copytree(currSource, currDest, ignore=loginfo)
            else:
                shutil.copy2(currSource, currDest)

        print ""
        if os.path.exists(config):
            print "adding skeleton config file"
            shutil.copy2(config, base)


        if options.static is not None and os.path.exists(os.path.expanduser(options.static)):
            print "copying static data to ", os.path.join(base, "staticdata")
            static = os.path.expanduser(options.static)
            shutil.copytree(static, os.path.join(base, "staticdata"), ignore=loginfo)

        print "removing development data"
        paths = []
        paths.append(os.path.join(base, "eos", "tests"))
        paths.append(os.path.join(base, "eos", "utils", "scripts"))
        for path in paths:
            if os.path.exists(path):
                print path
                shutil.rmtree(path)


        print "copying done, making archive: ", tmpFile
        archive = tarfile.open(tmpFile, "w:bz2")
        print "making archive"
        archive.add(dst, arcname=infoDict["arcname"])
        print "closing"
        archive.close()
        print "copying archive to ", destination
        shutil.move(tmpFile, destination)
    except:
        print "encountered an error"
        raise
    finally:
        print "deleting tmp files"
        try:
            shutil.rmtree(dst)
            os.unlink(tmpFile)
        except:
            pass

        sys.stdout = oldstd
        if os.path.isdir(destination):
            print os.path.join(destination, os.path.split(tmpFile)[1])
        else:
            print destination
@@ -25,7 +25,7 @@ import time
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

cachedBitmapsCount = 0
cachedBitmaps = OrderedDict()

@@ -26,7 +26,7 @@ import locale
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

class TargetingMiscViewFull(StatsView):
    name = "targetingmiscViewFull"

@@ -32,7 +32,7 @@ import config
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

class ItemStatsDialog(wx.Dialog):
    counter = 0
202  scripts/dist.py  (executable file)
@@ -0,0 +1,202 @@
#!/usr/bin/env python
"""
Script for generating distributables based on platform skeletons.

User supplies the path to the pyfa code base, the root skeleton directory, and
where the builds go. The builds are automatically named depending on the pyfa
config values of `version` and `tag`. If it's a Stable release, the naming
convention is:
    pyfa-pyfaversion-expansion-expversion-platform
If it is not Stable (tag=git), we determine if the pyfa code base includes the
git repo to use as an ID. If not, a randomly generated 6-character ID is used.
The unstable naming convention is:
    pyfa-YYYYMMDD-id-platform

dist.py can also build the Windows installer, provided that it has a path to
Inno Setup (and, for generating on non-Windows platforms, that WINE is
installed). To build the EXE file, `win` must be included in the platforms to
be built.
"""

from optparse import OptionParser
import os.path
import shutil
import sys
import tarfile
import datetime
import random
import string
import zipfile
from subprocess import call

class FileStub():
    def write(self, *args):
        pass

    def flush(self, *args):
        pass

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
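    # Return a random 6-character ID (uppercase letters and digits) used to tag unstable builds.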
    return ''.join(random.choice(chars) for x in range(size))

def zipdir(path, zip):
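    # Walk the directory tree under path and add every file to the already-open zip archive.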
    for root, dirs, files in os.walk(path):
        for file in files:
            zip.write(os.path.join(root, file))

skels = ['win', 'mac', 'src']
iscc = "C:\Program Files (x86)\Inno Setup 5\ISCC.exe" # inno script location via wine

if __name__ == "__main__":
    oldstd = sys.stdout
    parser = OptionParser()
    parser.add_option("-s", "--skeleton", dest="skeleton", help="Location of Pyfa-skel directory")
    parser.add_option("-b", "--base", dest="base", help="Location of cleaned read-only base directory")
    parser.add_option("-d", "--destination", dest="destination", help="Where to copy our distributable")
    parser.add_option("-p", "--platforms", dest="platforms", help="Comma-separated list of platforms to build", default="win,src,mac")
    parser.add_option("-t", "--static", dest="static", help="Directory containing static files")
    parser.add_option("-q", "--quiet", dest="silent", action="store_true")
    parser.add_option("-w", "--winexe", dest="winexe", action="store_true", help="Build the Windows installer file (needs Inno Setup). Must include 'win' in platform options")
    parser.add_option("-z", "--zip", dest="zip", action="store_true", help="zip archive instead of tar")

    options, args = parser.parse_args()

    if options.skeleton is None or options.base is None or options.destination is None:
        print "Need --skeleton argument as well as --base and --destination argument"
        parser.print_help()
        sys.exit()

    if options.silent:
        sys.stdout = FileStub()

    options.platforms = options.platforms.split(",")

    sys.path.append(options.base)
    import config as pyfaconfig

    for skel in skels:
        if skel not in options.platforms:
            continue

        print "\n======== %s ========"%skel
        infoDict = {}
        skeleton = os.path.expanduser(os.path.join(options.skeleton, skel))
        info = execfile(os.path.join(skeleton, "info.py"), infoDict)
        dirName = infoDict["arcname"]
        nowdt = datetime.datetime.now()
        now = "%04d%02d%02d" % (nowdt.year, nowdt.month, nowdt.day)
        git = False
        if pyfaconfig.tag.lower() == "git":
            try: # if there is a git repo associated with base, use master commit
                with open(os.path.join(options.base,"..",".git","refs","heads","master"), 'r') as f:
                    id = f.readline()[0:6]
                    git = True
            except: # else, use custom ID
                id = id_generator()
            fileName = "pyfa-%s-%s-%s" % (now, id, infoDict["os"])
        else:
            fileName = "pyfa-%s-%s-%s-%s" % (pyfaconfig.version, pyfaconfig.expansionName.lower(), pyfaconfig.expansionVersion, infoDict["os"])

        archiveName = "%s.%s"%(fileName, "zip" if options.zip else "tar.bz2")
        dst = os.path.join(os.getcwd(), dirName) # tmp directory where files are copied
        tmpFile = os.path.join(os.getcwd(), archiveName)
        config = os.path.join(skeleton, "config.py")
        destination = os.path.expanduser(options.destination)

        i = 0
        gitData = (".git", ".gitignore", ".gitmodules")
        def loginfo(path, names):
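            # copytree ignore callback: print a progress dot every 10 entries and tell copytree to skip git metadata.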
            global i
            i += 1
            if i % 10 == 0:
                sys.stdout.write(".")
                sys.stdout.flush()
            return gitData

        try:
            print "Copying skeleton to ", dst
            i = 0
            shutil.copytree(skeleton, dst, ignore=loginfo)
            print
            base = os.path.join(dst, infoDict["base"])
            print "Copying base to ", base

            i = 0
            for stuff in os.listdir(os.path.expanduser(options.base)):
                currSource = os.path.join(os.path.expanduser(options.base), stuff)
                currDest = os.path.join(base, stuff)
                if stuff in gitData:
                    continue
                elif os.path.isdir(currSource):
                    shutil.copytree(currSource, currDest, ignore=loginfo)
                else:
                    shutil.copy2(currSource, currDest)

            print

            if os.path.exists(config):
                print "Adding skeleton config file"
                shutil.copy2(config, base)


            if options.static is not None and os.path.exists(os.path.expanduser(options.static)):
                print "Copying static data to ", os.path.join(base, "staticdata")
                static = os.path.expanduser(options.static)
                shutil.copytree(static, os.path.join(base, "staticdata"), ignore=loginfo)

            print "Copying done, making archive: ", tmpFile

            if options.zip:
                archive = zipfile.ZipFile(tmpFile, 'w', compression=zipfile.ZIP_DEFLATED)
                zipdir(dirName, archive)
                archive.close()
            else:
                archive = tarfile.open(tmpFile, "w:bz2")
                archive.add(dst, arcname=infoDict["arcname"])
                archive.close()

            print "Moving archive to ", destination
            shutil.move(tmpFile, destination)

            if "win" in skel and options.winexe:
                print "Compiling EXE"

                if pyfaconfig.tag.lower() == "git":
                    if git: # if git repo info available, use git commit
                        expansion = "git-%s"%(id)
                    else: # if there is no git repo, use timestamp
                        expansion = now
                else: # if code is Stable, use expansion name
expansion = "%s %s"%(pyfaconfig.expansionName, pyfaconfig.expansionVersion),

                calllist = ["wine"] if 'win' not in sys.platform else []

                call(calllist + [
                    iscc,
                    os.path.join(os.path.dirname(__file__), "pyfa-setup.iss"),
                    "/dMyAppVersion=%s"%(pyfaconfig.version),
                    "/dMyAppExpansion=%s"%(expansion),
                    "/dMyAppDir=%s"%dst,
                    "/dMyOutputDir=%s"%destination,
                    "/dMyOutputFile=%s"%fileName]) #stdout=devnull, stderr=devnull

                print "EXE completed"

        except Exception as e:
            print "Encountered an error: \n\t", e
            raise
        finally:
            print "Deleting tmp files\n"
            try:
                try:
                    shutil.rmtree("dist") # Inno dir
                except:
                    pass
                shutil.rmtree(dst)
                os.unlink(tmpFile)
            except:
                pass

            sys.stdout = oldstd
            if os.path.isdir(destination):
                print os.path.join(destination, os.path.split(tmpFile)[1])
            else:
                print destination
@@ -57,11 +57,16 @@ import re
import sqlite3
from optparse import OptionParser

script_dir = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))

# Form list of effects for processing
effects_path = os.path.join(script_dir, "..", "eos", "effects")

usage = "usage: %prog --database=DB [--debug=DEBUG]"
parser = OptionParser(usage=usage)
parser.add_option("-d", "--database", help="path to eve cache data dump in \
sqlite format, default pyfa database path is used if none specified",
type="string", default=os.path.join("~", ".pyfa","eve.db"))
sqlite format, default to eve database file included in pyfa (../staticdata/eve.db)",
type="string", default=os.path.join(script_dir, "..", "staticdata", "eve.db"))
parser.add_option("-e", "--effects", help="explicit comma-separated list of \
effects to process", type="string", default="")
parser.add_option("-r", "--remove", help="remove effect files that are not \
@@ -379,8 +384,6 @@ for typeid in publishedtypes:
        (set(), len(typenamesplitted))
    globalmap_typeid_typenamecombtuple[typeid][0].add(typenamecomb)

# Form list of effects for processing
effects_path = os.path.join("..", "..", "effects")
if options.effects:
    effect_list = options.effects.split(",")
else:
@@ -5,8 +5,10 @@ import os.path
import re
import sqlite3

script_dir = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))

# Connect to database and set up cursor
db = sqlite3.connect(os.path.join("..", "..", "..", "staticdata", "eve.db"))
db = sqlite3.connect(os.path.join(script_dir, "..", "staticdata", "eve.db"))
cursor = db.cursor()

# Queries to get raw data
507  scripts/itemDiff.py  (executable file)
@@ -0,0 +1,507 @@
#!/usr/bin/env python3
#===============================================================================
# Copyright (C) 2010-2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================


'''
This script is used to compare two different database versions.
It shows removed/changed/new items with a list of changed effects,
changed attributes, and renamed effects
'''

import argparse
import os.path
import re
import sqlite3
import sys

script_dir = os.path.dirname(__file__)
default_old = os.path.join(script_dir, "..", "staticdata", "eve.db")

def main(old, new, groups=True, effects=True, attributes=True, renames=True):
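    # Open both dumps, diff their items/effects/attributes, and print a report of the differences.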
    # Open both databases and get their cursors
    old_db = sqlite3.connect(os.path.expanduser(old))
    old_cursor = old_db.cursor()
    new_db = sqlite3.connect(os.path.expanduser(new))
    new_cursor = new_db.cursor()

    # Force some of the items to make them published
    FORCEPUB_TYPES = ("Ibis", "Impairor", "Velator", "Reaper")
    OVERRIDES_TYPEPUB = 'UPDATE invtypes SET published = 1 WHERE typeName = ?'
    for typename in FORCEPUB_TYPES:
        old_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
        new_cursor.execute(OVERRIDES_TYPEPUB, (typename,))

    # Initialization of a few things used by both the changed and renamed effects lists
    script_dir = os.path.dirname(__file__)
    effectspath = os.path.join(script_dir, "..", "eos", "effects")
    implemented = set()

    for filename in os.listdir(effectspath):
        basename, extension = filename.rsplit('.', 1)
        # Ignore non-py files and exclude implementation-specific 'effect'
        if extension == "py" and basename not in ("__init__",):
            implemented.add(basename)

    # Effects' names are used w/o any special symbols by eos
    stripspec = "[^A-Za-z0-9]"

    # Method to get data if effect is implemented in eos or not
    def geteffst(effectname):
        eosname = re.sub(stripspec, "", effectname).lower()
        if eosname in implemented:
            impstate = True
        else:
            impstate = False
        return impstate

    def findrenames(ren_dict, query, strip=False):
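        # Fill ren_dict with {id: (old name, new name)} for every id whose name differs between the databases.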

        old_namedata = {}
        new_namedata = {}

        for cursor, dictionary in ((old_cursor, old_namedata), (new_cursor, new_namedata)):
            cursor.execute(query)
            for row in cursor:
                id = row[0]
                name = row[1]
                if strip is True:
                    name = re.sub(stripspec, "", name)
                dictionary[id] = name

        for id in set(old_namedata.keys()).intersection(new_namedata.keys()):
            oldname = old_namedata[id]
            newname = new_namedata[id]
            if oldname != newname:
                ren_dict[id] = (oldname, newname)
        return

    def printrenames(ren_dict, title, implementedtag=False):
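        # Print each renamed pair, optionally tagging names with their implementation state.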
|
||||
if len(ren_dict) > 0:
|
||||
print('\nRenamed ' + title + ':')
|
||||
for id in sorted(ren_dict):
|
||||
couple = ren_dict[id]
|
||||
if implementedtag:
|
||||
print("\n[{0}] \"{1}\"\n[{2}] \"{3}\"".format(geteffst(couple[0]), couple[0], geteffst(couple[1]), couple[1]))
|
||||
else:
|
||||
print("\n\"{0}\"\n\"{1}\"".format(couple[0], couple[1]))
|
||||
|
||||
groupcats = {}
|
||||
def getgroupcat(grp):
|
||||
"""Get group category from the new db"""
|
||||
if grp in groupcats:
|
||||
cat = groupcats[grp]
|
||||
else:
|
||||
query = 'SELECT categoryID FROM invgroups WHERE groupID = ?'
|
||||
new_cursor.execute(query, (grp,))
|
||||
cat = 0
|
||||
for row in new_cursor:
|
||||
cat = row[0]
|
||||
groupcats[grp] = cat
|
||||
return cat
|
||||
|
||||
itemnames = {}
|
||||
def getitemname(item):
|
||||
"""Get item name from the new db"""
|
||||
if item in itemnames:
|
||||
name = itemnames[item]
|
||||
else:
|
||||
query = 'SELECT typeName FROM invtypes WHERE typeID = ?'
|
||||
new_cursor.execute(query, (item,))
|
||||
name = ""
|
||||
for row in new_cursor:
|
||||
name = row[0]
|
||||
if not name:
|
||||
old_cursor.execute(query, (item,))
|
||||
for row in old_cursor:
|
||||
name = row[0]
|
||||
itemnames[item] = name
|
||||
return name
|
||||
|
||||
groupnames = {}
|
||||
def getgroupname(grp):
|
||||
"""Get group name from the new db"""
|
||||
if grp in groupnames:
|
||||
name = groupnames[grp]
|
||||
else:
|
||||
query = 'SELECT groupName FROM invgroups WHERE groupID = ?'
|
||||
new_cursor.execute(query, (grp,))
|
||||
name = ""
|
||||
for row in new_cursor:
|
||||
name = row[0]
|
||||
if not name:
|
||||
old_cursor.execute(query, (grp,))
|
||||
for row in old_cursor:
|
||||
name = row[0]
|
||||
groupnames[grp] = name
|
||||
return name
|
||||
|
||||
effectnames = {}
|
||||
def geteffectname(effect):
|
||||
"""Get effect name from the new db"""
|
||||
if effect in effectnames:
|
||||
name = effectnames[effect]
|
||||
else:
|
||||
query = 'SELECT effectName FROM dgmeffects WHERE effectID = ?'
|
||||
new_cursor.execute(query, (effect,))
|
||||
name = ""
|
||||
for row in new_cursor:
|
||||
name = row[0]
|
||||
if not name:
|
||||
old_cursor.execute(query, (effect,))
|
||||
for row in old_cursor:
|
||||
name = row[0]
|
||||
effectnames[effect] = name
|
||||
return name
|
||||
|
||||
attrnames = {}
|
||||
def getattrname(attr):
|
||||
"""Get attribute name from the new db"""
|
||||
if attr in attrnames:
|
||||
name = attrnames[attr]
|
||||
else:
|
||||
query = 'SELECT attributeName FROM dgmattribs WHERE attributeID = ?'
|
||||
new_cursor.execute(query, (attr,))
|
||||
name = ""
|
||||
for row in new_cursor:
|
||||
name = row[0]
|
||||
if not name:
|
||||
old_cursor.execute(query, (attr,))
|
||||
for row in old_cursor:
|
||||
name = row[0]
|
||||
attrnames[attr] = name
|
||||
return name
|
||||
|
||||
# State table
|
||||
S = {"unchanged": 0,
|
||||
"removed": 1,
|
||||
"changed": 2,
|
||||
"added": 3 }
|
||||
|
||||
if effects or attributes or groups:
|
||||
# Format:
|
||||
# Key: item id
|
||||
# Value: [groupID, set(effects), {attribute id : value}]
|
||||
old_itmdata = {}
|
||||
new_itmdata = {}
|
||||
|
||||
for cursor, dictionary in ((old_cursor, old_itmdata), (new_cursor, new_itmdata)):
|
||||
# Compose list of items we're interested in, filtered by category
|
||||
query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryName IN ("Ship", "Module", "Charge", "Skill", "Drone", "Implant", "Subsystem")'
|
||||
cursor.execute(query)
|
||||
for row in cursor:
|
||||
itemid = row[0]
|
||||
groupID = row[1]
|
||||
# Initialize container for the data for each item with empty stuff besides groupID
|
||||
dictionary[itemid] = [groupID, set(), {}]
|
||||
# Add items filtered by group
|
||||
query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon")'
|
||||
cursor.execute(query)
|
||||
for row in cursor:
|
||||
itemid = row[0]
|
||||
groupID = row[1]
|
||||
dictionary[itemid] = [groupID, set(), {}]
|
||||
|
||||
if effects:
|
||||
# Pull all eff
|
||||
query = 'SELECT it.typeID, de.effectID FROM invtypes AS it INNER JOIN dgmtypeeffects AS dte ON dte.typeID = it.typeID INNER JOIN dgmeffects AS de ON de.effectID = dte.effectID WHERE it.published = 1'
|
||||
cursor.execute(query)
|
||||
for row in cursor:
|
||||
itemid = row[0]
|
||||
effectID = row[1]
|
||||
# Process only items we need
|
||||
if itemid in dictionary:
|
||||
# Add effect to the set
|
||||
effectSet = dictionary[itemid][1]
|
||||
effectSet.add(effectID)
|
||||
|
||||
if attributes:
|
||||
# Add base attributes to our data
|
||||
query = 'SELECT it.typeID, it.mass, it.capacity, it.volume FROM invtypes AS it'
|
||||
cursor.execute(query)
|
||||
for row in cursor:
|
||||
itemid = row[0]
|
||||
if itemid in dictionary:
|
||||
attrdict = dictionary[itemid][2]
|
||||
# Add base attributes: mass (4), capacity (38) and volume (161)
|
||||
attrdict[4] = row[1]
|
||||
attrdict[38] = row[2]
|
||||
attrdict[161] = row[3]
|
||||
|
||||
# Add attribute data for other attributes
|
||||
query = 'SELECT dta.typeID, dta.attributeID, dta.value FROM dgmtypeattribs AS dta'
|
||||
cursor.execute(query)
|
||||
for row in cursor:
|
||||
itemid = row[0]
|
||||
if itemid in dictionary:
|
||||
attrid = row[1]
|
||||
attrval = row[2]
|
||||
attrdict = dictionary[itemid][2]
|
||||
if attrid in attrdict:
|
||||
print("Warning: base attribute is described in non-base attribute table")
|
||||
else:
|
||||
attrdict[attrid] = attrval
|
||||
|
||||
# Get set of IDs from both dictionaries
|
||||
items_old = set(old_itmdata.keys())
|
||||
items_new = set(new_itmdata.keys())
|
||||
|
||||
# Format:
|
||||
# Key: item state
|
||||
# Value: {item id: ((group state, old group, new group), {effect state: set(effects)}, {attribute state: {attributeID: (old value, new value)}})}
|
||||
global_itmdata = {}
|
||||
|
||||
# Initialize it
|
||||
for state in S:
|
||||
global_itmdata[S[state]] = {}
|
||||
|
||||
|
||||
# Fill all the data for removed items
|
||||
for item in items_old.difference(items_new):
|
||||
# Set item state to removed
|
||||
state = S["removed"]
|
||||
# Set only old group for item
|
||||
oldgroup = old_itmdata[item][0]
|
||||
groupdata = (S["unchanged"], oldgroup, None)
|
||||
# Set old set of effects and mark all as unchanged
|
||||
effectsdata = {}
|
||||
effectsdata[S["unchanged"]] = set()
|
||||
if effects:
|
||||
oldeffects = old_itmdata[item][1]
|
||||
effectsdata[S["unchanged"]].update(oldeffects)
|
||||
# Set old set of attributes and mark all as unchanged
|
||||
attrdata = {}
|
||||
attrdata[S["unchanged"]] = {}
|
||||
if attributes:
|
||||
oldattrs = old_itmdata[item][2]
|
||||
for attr in oldattrs:
|
||||
# NULL will mean there's no such attribute in db
|
||||
attrdata[S["unchanged"]][attr] = (oldattrs[attr], "NULL")
|
||||
# Fill global dictionary with data we've got
|
||||
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
|
||||
|
||||
|
||||
# Now, for added items
|
||||
for item in items_new.difference(items_old):
|
||||
# Set item state to added
|
||||
state = S["added"]
|
||||
# Set only new group for item
|
||||
newgroup = new_itmdata[item][0]
|
||||
groupdata = (S["unchanged"], None, newgroup)
|
||||
# Set new set of effects and mark all as unchanged
|
||||
effectsdata = {}
|
||||
effectsdata[S["unchanged"]] = set()
|
||||
if effects:
|
||||
neweffects = new_itmdata[item][1]
|
||||
effectsdata[S["unchanged"]].update(neweffects)
|
||||
# Set new set of attributes and mark all as unchanged
|
||||
attrdata = {}
|
||||
attrdata[S["unchanged"]] = {}
|
||||
if attributes:
|
||||
newattrs = new_itmdata[item][2]
|
||||
for attr in newattrs:
|
||||
# NULL will mean there's no such attribute in db
|
||||
attrdata[S["unchanged"]][attr] = ("NULL", newattrs[attr])
|
||||
# Fill global dictionary with data we've got
|
||||
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
|
||||
|
||||
# Now, check all the items which exist in both databases
|
||||
for item in items_old.intersection(items_new):
|
||||
# Set group data for an item
|
||||
oldgroup = old_itmdata[item][0]
|
||||
newgroup = new_itmdata[item][0]
|
||||
# If we're not asked to compare groups, mark them as unchanged anyway
|
||||
groupdata = (S["changed"] if oldgroup != newgroup and groups else S["unchanged"], oldgroup, newgroup)
|
||||
# Fill effects data into appropriate groups
|
||||
effectsdata = {}
|
||||
for state in S:
|
||||
# We do not have changed effects whatsoever
|
||||
if state != "changed":
|
||||
effectsdata[S[state]] = set()
|
||||
if effects:
|
||||
oldeffects = old_itmdata[item][1]
|
||||
neweffects = new_itmdata[item][1]
|
||||
effectsdata[S["unchanged"]].update(oldeffects.intersection(neweffects))
|
||||
effectsdata[S["removed"]].update(oldeffects.difference(neweffects))
|
||||
effectsdata[S["added"]].update(neweffects.difference(oldeffects))
|
||||
# Go through all attributes, filling global data dictionary
|
||||
attrdata = {}
|
||||
for state in S:
|
||||
attrdata[S[state]] = {}
|
||||
if attributes:
|
||||
oldattrs = old_itmdata[item][2]
|
||||
newattrs = new_itmdata[item][2]
|
||||
for attr in set(oldattrs.keys()).union(newattrs.keys()):
|
||||
# NULL will mean there's no such attribute in db
|
||||
oldattr = oldattrs.get(attr, "NULL")
|
||||
newattr = newattrs.get(attr, "NULL")
|
||||
attrstate = S["unchanged"]
|
||||
if oldattr == "NULL" and newattr != "NULL":
|
||||
attrstate = S["added"]
|
||||
elif oldattr != "NULL" and newattr == "NULL":
|
||||
attrstate = S["removed"]
|
||||
elif oldattr != newattr:
|
||||
attrstate = S["changed"]
|
||||
attrdata[attrstate][attr] = (oldattr, newattr)
|
||||
# Consider item as unchanged by default and set it to change when we see any changes in sub-items
|
||||
state = S["unchanged"]
|
||||
if state == S["unchanged"] and groupdata[0] != S["unchanged"]:
|
||||
state = S["changed"]
|
||||
if state == S["unchanged"] and (len(effectsdata[S["removed"]]) > 0 or len(effectsdata[S["added"]]) > 0):
|
||||
state = S["changed"]
|
||||
if state == S["unchanged"] and (len(attrdata[S["removed"]]) > 0 or len(attrdata[S["changed"]]) > 0 or len(attrdata[S["added"]]) > 0):
|
||||
state = S["changed"]
|
||||
# Fill global dictionary with data we've got
|
||||
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
|
||||
|
||||
# As eos uses names as unique IDs in lot of places, we have to keep track of name changes
|
||||
if renames:
|
||||
ren_effects = {}
|
||||
query = 'SELECT effectID, effectName FROM dgmeffects'
|
||||
findrenames(ren_effects, query, strip = True)
|
||||
|
||||
ren_attributes = {}
|
||||
query = 'SELECT attributeID, attributeName FROM dgmattribs'
|
||||
findrenames(ren_attributes, query)
|
||||
|
||||
ren_categories = {}
|
||||
query = 'SELECT categoryID, categoryName FROM invcategories'
|
||||
findrenames(ren_categories, query)
|
||||
|
||||
ren_groups = {}
|
||||
query = 'SELECT groupID, groupName FROM invgroups'
|
||||
findrenames(ren_groups, query)
|
||||
|
||||
ren_marketgroups = {}
|
||||
query = 'SELECT marketGroupID, marketGroupName FROM invmarketgroups'
|
||||
findrenames(ren_marketgroups, query)
|
||||
|
||||
ren_items = {}
|
||||
query = 'SELECT typeID, typeName FROM invtypes'
|
||||
findrenames(ren_items, query)
|
||||
|
||||
try:
|
||||
# Get db metadata
|
||||
old_meta = {}
|
||||
new_meta = {}
|
||||
query = 'SELECT field_name, field_value FROM metadata WHERE field_name LIKE "client_build"'
|
||||
old_cursor.execute(query)
|
||||
for row in old_cursor:
|
||||
old_meta[row[0]] = row[1]
|
||||
new_cursor.execute(query)
|
||||
for row in new_cursor:
|
||||
new_meta[row[0]] = row[1]
|
||||
except:
|
||||
pass
|
||||
# Print jobs
|
||||
print("Comparing databases:\n{0} -> {1}\n".format(old_meta.get("client_build"), new_meta.get("client_build")))
|
||||
if effects or attributes or groups:
|
||||
# Print legend only when there're any interesting changes
|
||||
if len(global_itmdata[S["removed"]]) > 0 or len(global_itmdata[S["changed"]]) > 0 or len(global_itmdata[S["added"]]) > 0:
|
||||
genleg = "[+] - new item\n[-] - removed item\n[*] - changed item\n"
|
||||
grpleg = "(x => y) - group changes\n" if groups else ""
|
||||
attreffleg = " [+] - effect or attribute has been added to item\n [-] - effect or attribute has been removed from item\n" if attributes or effects else ""
|
||||
effleg = " [y] - effect is implemented\n [n] - effect is not implemented\n" if effects else ""
|
||||
print("{0}{1}{2}{3}\nItems:".format(genleg, grpleg, attreffleg, effleg))
|
||||
|
||||
# Make sure our states are sorted
|
||||
stateorder = sorted(global_itmdata)
|
||||
|
||||
TG = {S["unchanged"]: "+", S["changed"]: "*",
|
||||
S["removed"]: "-",
|
||||
S["added"]: "+"}
|
||||
|
||||
# Cycle through states
|
||||
for itmstate in stateorder:
|
||||
# Skip unchanged items
|
||||
if itmstate == S["unchanged"]:
|
||||
continue
|
||||
items = global_itmdata[itmstate]
|
||||
# Sort by name first
|
||||
itemorder = sorted(items, key=lambda item: getitemname(item))
|
||||
# Then by group id
|
||||
itemorder = sorted(itemorder, key=lambda item: items[item][0][2] or items[item][0][1])
|
||||
# Then by category id
|
||||
itemorder = sorted(itemorder, key=lambda item: getgroupcat(items[item][0][2] or items[item][0][1]))
|
||||
|
||||
for item in itemorder:
|
||||
groupdata = items[item][0]
|
||||
groupstr = " ({0} => {1})".format(getgroupname(groupdata[1]), getgroupname(groupdata[2])) if groupdata[0] == S["changed"] else ""
|
||||
print("\n[{0}] {1}{2}".format(TG[itmstate], getitemname(item), groupstr))
|
||||
|
||||
effdata = items[item][1]
|
||||
for effstate in stateorder:
|
||||
# Skip unchanged effect sets, but always include them for added or removed ships
|
||||
# Also, always skip empty data
|
||||
if (effstate == S["unchanged"] and itmstate not in (S["removed"], S["added"])) or effstate not in effdata:
|
||||
continue
|
||||
effects = effdata[effstate]
|
||||
efforder = sorted(effects, key=lambda eff: geteffectname(eff))
|
||||
for eff in efforder:
|
||||
# Take tag from item if item was added or removed
|
||||
tag = TG[effstate] if itmstate not in (S["removed"], S["added"]) else TG[itmstate]
|
||||
print(" [{0}|{1}] {2}".format(tag, "y" if geteffst(geteffectname(eff)) else "n", geteffectname(eff)))
|
||||
|
||||
attrdata = items[item][2]
|
||||
for attrstate in stateorder:
|
||||
# Skip unchanged and empty attribute sets, also skip attributes display for added and removed items
|
||||
if (attrstate == S["unchanged"] and itmstate != S["added"]) or itmstate in (S["removed"], ) or attrstate not in attrdata:
|
||||
continue
|
||||
attrs = attrdata[attrstate]
|
||||
attrorder = sorted(attrs, key=lambda attr: getattrname(attr))
|
||||
for attr in attrorder:
|
||||
valline = ""
|
||||
if attrs[attr][0] == "NULL" or itmstate == S["added"]:
|
||||
valline = "{0}".format(attrs[attr][1] or 0)
|
||||
elif attrs[attr][1] == "NULL":
|
||||
valline = "{0}".format(attrs[attr][0] or 0)
|
||||
else:
|
||||
valline = "{0} => {1}".format(attrs[attr][0] or 0, attrs[attr][1] or 0)
|
||||
print(" [{0}] {1}: {2}".format(TG[attrstate], getattrname(attr), valline))
|
||||
|
||||
if renames:
|
||||
title = 'effects'
|
||||
printrenames(ren_effects, title, implementedtag=True)
|
||||
|
||||
title = 'attributes'
|
||||
printrenames(ren_attributes, title)
|
||||
|
||||
title = 'categories'
|
||||
printrenames(ren_categories, title)
|
||||
|
||||
title = 'groups'
|
||||
printrenames(ren_groups, title)
|
||||
|
||||
title = 'market groups'
|
||||
printrenames(ren_marketgroups, title)
|
||||
|
||||
title = 'items'
|
||||
printrenames(ren_items, title)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Compare two databases generated from eve dump to find eos-related differences")
|
||||
parser.add_argument("-o", "--old", type=str, help="path to old cache data dump, defaults to current pyfa eve.db", default=default_old)
|
||||
parser.add_argument("-n", "--new", type=str, required=True, help="path to new cache data dump")
|
||||
parser.add_argument("-g", "--nogroups", action="store_false", default=True, dest="groups", help="don't show changed groups")
|
||||
parser.add_argument("-e", "--noeffects", action="store_false", default=True, dest="effects", help="don't show list of changed effects")
|
||||
parser.add_argument("-a", "--noattributes", action="store_false", default=True, dest="attributes", help="don't show list of changed attributes")
|
||||
parser.add_argument("-r", "--norenames", action="store_false", default=True, dest="renames", help="don't show list of renamed data")
|
||||
args = parser.parse_args()
|
||||
|
||||
main(args.old, args.new, args.groups, args.effects, args.attributes, args.renames)
|
||||
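For reference, piecing the legend and format strings above together, the printed report for a single changed item looks roughly like this (the ship, effect, and attribute names here are purely illustrative):

[*] Caracal (Cruiser => Combat Cruiser)
 [+|y] someNewEffect
 [-|n] someOldEffect
 [*] cpuOutput: 350 => 400

A new or removed item gets the [+]/[-] tag instead, and its entire effect list is printed with the item's own tag, since unchanged effect sets are only skipped for changed items.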
@@ -23,22 +23,18 @@ import sys

# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
sys.path.append(os.path.realpath(os.path.join(path, "..", "..", "..")))
sys.path.append(os.path.realpath(os.path.join(path, "..")))

import json
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="This script dumps effects from an sqlite cache dump to mongo")
    parser.add_argument("-d", "--db", required=True, type=str, help="The sqlalchemy connectionstring, example: sqlite:///c:/tq.db")
    parser.add_argument("-j", "--json", required=True, type=str, help="The path to the json dump")
    args = parser.parse_args()
def main(db, json_path):

    jsonPath = os.path.expanduser(args.json)
    jsonPath = os.path.expanduser(json_path)

    # Import eos.config first and change it
    import eos.config
    eos.config.gamedata_connectionstring = args.db
    eos.config.gamedata_connectionstring = db
    eos.config.debug = False

    # Now that's done, we can import the eos modules using the config

@@ -62,6 +58,7 @@ if __name__ == "__main__":
    "invmetatypes": eos.gamedata.MetaType,
    "invtypes": eos.gamedata.Item,
    "phbtraits": eos.gamedata.Traits,
    "phbmetadata": eos.gamedata.MetaData,
    "mapbulk_marketGroups": eos.gamedata.MarketGroup
}

@@ -181,6 +178,14 @@ if __name__ == "__main__":

        eos.db.gamedata_session.add(instance)

    eos.db.gamedata_session.commit()
    eos.db.gamedata_session.commit()

    print("done")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="This script dumps effects from an sqlite cache dump to mongo")
    parser.add_argument("-d", "--db", required=True, type=str, help="The sqlalchemy connectionstring, example: sqlite:///c:/tq.db")
    parser.add_argument("-j", "--json", required=True, type=str, help="The path to the json dump")
    args = parser.parse_args()

    main(args.db, args.json)
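The point of this refactor is that jsonToSql can now be imported and driven from other scripts rather than only from the command line; prep_data.py below relies on exactly that entry point. A minimal sketch of programmatic use, with placeholder paths:

import jsonToSql

# Arguments follow the main(db, json_path) signature introduced above;
# both paths here are placeholders, not pyfa defaults.
jsonToSql.main("sqlite:////tmp/eve.db", "/tmp/phobos_dump")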
94
scripts/prep_data.py
Normal file
@@ -0,0 +1,94 @@
#!/usr/bin/env python

import sys
import os

# Phobos location
phb_path = os.path.expanduser("path/to/phobos")

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-e", "--eve", dest="eve_path", help="Location of EVE directory", required=True)
parser.add_argument("-c", "--cache", dest="cache_path", help="Location of EVE cache directory. If not specified, an attempt will be made to automatically determine the path.")
parser.add_argument("-d", "--dump", dest="dump_path", help="Location of Phobos JSON dump directory", required=True)
parser.add_argument("-p", "--phobos", dest="phb_path", help="Location of Phobos, defaults to path noted in script", default=phb_path)
parser.add_argument("-s", "--singularity", action="store_true", help="Singularity build")
parser.add_argument("-j", "--nojson", dest="nojson", action="store_true", help="Skip Phobos JSON data dump.")

args = parser.parse_args()
eve_path = os.path.expanduser(unicode(args.eve_path, sys.getfilesystemencoding()))
cache_path = os.path.expanduser(unicode(args.cache_path, sys.getfilesystemencoding())) if args.cache_path else None
dump_path = os.path.expanduser(unicode(args.dump_path, sys.getfilesystemencoding()))
script_path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))

### Append Phobos to path
sys.path.append(os.path.expanduser(unicode(args.phb_path, sys.getfilesystemencoding())))

def header(text, subtext=None):
    print
    print "* "*30
    print text.center(60)
    if subtext:
        print subtext.center(60)
    print "* "*30
    print

### Data dump
if not args.nojson:
    header("Dumping Phobos Data", dump_path)

    import reverence
    from flow import FlowManager
    from miner import *
    from translator import Translator
    from writer import *

    # Use the expanded cache_path computed above; None lets reverence auto-detect
    rvr = reverence.blue.EVE(eve_path, cachepath=cache_path, server="singularity" if args.singularity else "tranquility")

    spickle_miner = StuffedPickleMiner(rvr)
    trans = Translator(spickle_miner)
    bulkdata_miner = BulkdataMiner(rvr, trans)

    miners = (
        MetadataMiner(eve_path),
        bulkdata_miner,
        TraitMiner(bulkdata_miner, trans),
        SqliteMiner(eve_path, trans),
        CachedCallsMiner(rvr, trans),
        spickle_miner
    )

    writers = (
        JsonWriter(dump_path, indent=2),
    )

    # Tables to dump (renamed from "list" to avoid shadowing the builtin)
    tables = "dgmexpressions,dgmattribs,dgmeffects,dgmtypeattribs,dgmtypeeffects,"\
             "dgmunits,icons,invcategories,invgroups,invmetagroups,invmetatypes,"\
             "invtypes,mapbulk_marketGroups,phbmetadata,phbtraits"

    FlowManager(miners, writers).run(tables, "multi")

### SQL Convert
import jsonToSql

db_file = os.path.join(dump_path, "eve.db")
header("Converting Data to SQL", db_file)

if os.path.isfile(db_file):
    os.remove(db_file)

jsonToSql.main("sqlite:///"+db_file, dump_path)

### Diff generation
import itemDiff
diff_file = os.path.join(dump_path, "diff.txt")
old_db = os.path.join(script_path, "..", "staticdata", "eve.db")

header("Generating DIFF", diff_file)
old_stdout = sys.stdout
sys.stdout = open(diff_file, 'w')
itemDiff.main(old=old_db, new=db_file)
sys.stdout = old_stdout

print "\nAll done."
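The diff-generation step above captures itemDiff's print output by swapping sys.stdout, but if itemDiff.main() raises, stdout stays redirected. A minimal sketch of the same step wrapped in a context manager (redirected_stdout is a hypothetical helper, not part of pyfa):

import sys
from contextlib import contextmanager

@contextmanager
def redirected_stdout(path):
    # Temporarily point sys.stdout at a file, restoring it even on error.
    old_stdout = sys.stdout
    sys.stdout = open(path, 'w')
    try:
        yield
    finally:
        sys.stdout.close()
        sys.stdout = old_stdout

# Usage:
# with redirected_stdout(diff_file):
#     itemDiff.main(old=old_db, new=db_file)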
104
scripts/pyfa-setup.iss
Normal file
@@ -0,0 +1,104 @@
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!

; Versioning
; we do some #ifdef conditionals because automated compilation passes these as arguments

#ifndef MyAppVersion
#define MyAppVersion "1.3.0"
#endif
#ifndef MyAppExpansion
#define MyAppExpansion "Crius 1.0"
#endif

; Other config

#define MyAppName "pyfa"
#define MyAppPublisher "pyfa"
#define MyAppURL "https://forums.eveonline.com/default.aspx?g=posts&t=247609&p=1"
#define MyAppExeName "pyfa.exe"

#ifndef MyOutputFile
#define MyOutputFile LowerCase(StringChange(MyAppName+'-'+MyAppVersion+'-'+MyAppExpansion+'-win', " ", "-"))
#endif
#ifndef MyAppDir
#define MyAppDir "pyfa"
#endif
#ifndef MyOutputDir
#define MyOutputDir "dist"
#endif

[Setup]
; NOTE: The value of AppId uniquely identifies this application.
; Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{3DA39096-C08D-49CD-90E0-1D177F32C8AA}
AppName={#MyAppName}
AppVersion={#MyAppVersion} ({#MyAppExpansion})
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName={pf}\{#MyAppName}
DefaultGroupName={#MyAppName}
AllowNoIcons=yes
LicenseFile={#MyAppDir}\gpl.txt
OutputDir={#MyOutputDir}
OutputBaseFilename={#MyOutputFile}
SetupIconFile={#MyAppDir}\pyfa.ico
Compression=lzma
SolidCompression=yes
CloseApplications=yes
AppReadmeFile=https://github.com/DarkFenX/Pyfa/blob/v{#MyAppVersion}/readme.txt

[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"

[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
Name: "quicklaunchicon"; Description: "{cm:CreateQuickLaunchIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked; OnlyBelowVersion: 0,6.1

[Files]
Source: "{#MyAppDir}\pyfa.exe"; DestDir: "{app}"; Flags: ignoreversion
Source: "{#MyAppDir}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files

[Icons]
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
Name: "{group}\{cm:UninstallProgram,{#MyAppName}}"; Filename: "{uninstallexe}"
Name: "{commondesktop}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: desktopicon
Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: quicklaunchicon

[Run]
Filename: "{app}\{#MyAppExeName}"; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; Flags: nowait postinstall skipifsilent

[Code]

function IsAppRunning(const FileName : string): Boolean;
var
    FSWbemLocator: Variant;
    FWMIService: Variant;
    FWbemObjectSet: Variant;
begin
    Result := false;
    FSWbemLocator := CreateOleObject('WBEMScripting.SWBEMLocator');
    FWMIService := FSWbemLocator.ConnectServer('', 'root\CIMV2', '', '');
    FWbemObjectSet := FWMIService.ExecQuery(Format('SELECT Name FROM Win32_Process Where Name="%s"', [FileName]));
    Result := (FWbemObjectSet.Count > 0);
    FWbemObjectSet := Unassigned;
    FWMIService := Unassigned;
    FSWbemLocator := Unassigned;
end;

function PrepareToInstall(var NeedsRestart: Boolean): String;
begin
    if IsAppRunning('pyfa.exe') then
    begin
        Result := 'Please close pyfa before continuing. When closed, please go back to the previous step and continue.';
    end
    else
    begin
        Result := '';
    end
end;
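For context, the #ifdef guards at the top of this script exist so that automated builds can pre-define the version symbols on the compiler command line. A hedged example invocation (the version values are illustrative, and /D definitions rely on the ISPP preprocessor bundled with Inno Setup):

iscc /DMyAppVersion=1.3.1 "/DMyAppExpansion=Crius 1.1" scripts/pyfa-setup.iss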
@@ -32,7 +32,7 @@ import service.conversions as conversions
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

# Event which tells threads dependent on Market that it's initialized
mktRdy = threading.Event()

@@ -27,7 +27,7 @@ import service
try:
    from collections import OrderedDict
except ImportError:
    from gui.utils.compat import OrderedDict
    from utils.compat import OrderedDict

FIT_WIN_HEADINGS = ["High power", "Medium power", "Low power", "Rig Slot", "Sub System", "Charges"]
EFT_SLOT_ORDER = [Slot.LOW, Slot.MED, Slot.HIGH, Slot.RIG, Slot.SUBSYSTEM]
30
utils/timer.py
Normal file
@@ -0,0 +1,30 @@
import time

class Timer(object):
    """
    Generic timing class for simple profiling.

    Usage:

        with Timer(verbose=True) as t:
            # code to be timed
            time.sleep(5)

    Output:
        elapsed time: 5000.000 ms

    Can also access time with t.secs
    """
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print 'elapsed time: %f ms' % self.msecs
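A short usage sketch for the class above; do_work is a stand-in for whatever block is being profiled:

from utils.timer import Timer

with Timer(verbose=True) as t:
    do_work()  # stand-in for the code being timed
# timings remain available after the block exits
print 'took %.3f s' % t.secs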