Move scripts directory

scripts/effectUsedBy.py (executable file, 1064 lines): file diff suppressed because it is too large

scripts/findNonMarket.py (executable file, 430 lines)
@@ -0,0 +1,430 @@
#!/usr/bin/env python3

import copy
import os.path
import re
import sqlite3

# Connect to database and set up cursor
db = sqlite3.connect(os.path.join("..", "..", "..", "staticdata", "eve.db"))
cursor = db.cursor()

# Queries to get raw data
QUERY_ALLEFFECTS = 'SELECT effectID, effectName FROM dgmeffects'
# Limit categories to Modules (7), Charges (8), Drones (18),
# Implants (20), Subsystems (32)
QUERY_PUBLISHEDTYPEIDS = 'SELECT it.typeID FROM invtypes AS it INNER JOIN \
invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON \
ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryID IN \
(7, 8, 18, 20, 32)'
QUERY_TYPEID_GROUPID = 'SELECT groupID FROM invtypes WHERE typeID = ? LIMIT 1'
QUERY_GROUPID_CATEGORYID = 'SELECT categoryID FROM invgroups WHERE \
groupID = ? LIMIT 1'
QUERY_TYPEID_PARENTTYPEID = 'SELECT parentTypeID FROM invmetatypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_MARKETGROUPID = 'SELECT marketGroupID FROM invtypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_TYPENAME = 'SELECT typeName FROM invtypes WHERE typeID = ? \
LIMIT 1'
QUERY_MARKETGROUPID_PARENTGROUPID = 'SELECT parentGroupID FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_EFFECTID_TYPEID = 'SELECT typeID FROM dgmtypeeffects WHERE effectID = ?'
# Queries for printing
QUERY_GROUPID_GROUPNAME = 'SELECT groupName FROM invgroups WHERE groupID = ? \
LIMIT 1'
QUERY_CATEGORYID_CATEGORYNAME = 'SELECT categoryName FROM invcategories \
WHERE categoryID = ? LIMIT 1'
QUERY_MARKETGROUPID_MARKETGROUPNAME = 'SELECT marketGroupName FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_TYPEID_ATTRIBS = 'SELECT da.attributeName, dta.value FROM dgmattribs AS \
da INNER JOIN dgmtypeattribs AS dta ON dta.attributeID = da.attributeID WHERE \
dta.typeID = ?'
QUERY_TYPEID_BASEATTRIBS = 'SELECT volume, mass, capacity FROM invtypes WHERE \
typeID = ?'
QUERY_TYPEID_METAGROUPID = 'SELECT metaGroupID FROM invmetatypes WHERE typeID = ?'
QUERY_METAGROUPNAME_METAGROUPID = 'SELECT metaGroupName FROM invmetagroups WHERE metaGroupID = ?'

# Compose a map of effect names with the symbols eos doesn't take into
# consideration stripped out; we'll use it to find proper effect IDs
# from file names
globalmap_effectnameeos_effectid = {}
STRIPSPEC = "[^A-Za-z0-9]"
cursor.execute(QUERY_ALLEFFECTS)
for row in cursor:
    effectid = row[0]
    effectnamedb = row[1]
    effectnameeos = re.sub(STRIPSPEC, "", effectnamedb)
    # There may be different effects with the same name, so form
    # sets of IDs
    if effectnameeos not in globalmap_effectnameeos_effectid:
        globalmap_effectnameeos_effectid[effectnameeos] = set()
    globalmap_effectnameeos_effectid[effectnameeos].add(effectid)

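# For illustration (names invented): a DB effect name "shieldBoosting"
# stays "shieldBoosting", while something like "shield Boosting #2"
# would normalize to "shieldBoosting2":
#     re.sub(STRIPSPEC, "", "shield Boosting #2")  # -> 'shieldBoosting2'
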
# Published types set
publishedtypes = set()
cursor.execute(QUERY_PUBLISHEDTYPEIDS)
for row in cursor:
    publishedtypes.add(row[0])

# Compose group maps
# { groupid : set(typeid) }
globalmap_groupid_typeid = {}
# { typeid : groupid }
globalmap_typeid_groupid = {}
for typeid in publishedtypes:
    groupid = 0
    cursor.execute(QUERY_TYPEID_GROUPID, (typeid,))
    for row in cursor:
        groupid = row[0]
    if groupid not in globalmap_groupid_typeid:
        globalmap_groupid_typeid[groupid] = set()
    globalmap_groupid_typeid[groupid].add(typeid)
    globalmap_typeid_groupid[typeid] = groupid

# Category maps
# { categoryid : set(typeid) }
globalmap_categoryid_typeid = {}
# { typeid : categoryid }
globalmap_typeid_categoryid = {}
for typeid in publishedtypes:
    categoryid = 0
    cursor.execute(QUERY_GROUPID_CATEGORYID,
                   (globalmap_typeid_groupid[typeid],))
    for row in cursor:
        categoryid = row[0]
    if categoryid not in globalmap_categoryid_typeid:
        globalmap_categoryid_typeid[categoryid] = set()
    globalmap_categoryid_typeid[categoryid].add(typeid)
    globalmap_typeid_categoryid[typeid] = categoryid

# Base type maps
# { basetypeid : set(typeid) }
globalmap_basetypeid_typeid = {}
# { typeid : basetypeid }
globalmap_typeid_basetypeid = {}
for typeid in publishedtypes:
    # Not all typeIDs in the database have a parent type, so assign
    # some default value first
    basetypeid = 0
    cursor.execute(QUERY_TYPEID_PARENTTYPEID, (typeid,))
    for row in cursor:
        basetypeid = row[0]
    # If the base type is not published or is not set in the database,
    # consider the item a variation of itself
    if basetypeid not in publishedtypes:
        basetypeid = typeid
    if basetypeid not in globalmap_basetypeid_typeid:
        globalmap_basetypeid_typeid[basetypeid] = set()
    globalmap_basetypeid_typeid[basetypeid].add(typeid)
    globalmap_typeid_basetypeid[typeid] = basetypeid

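# Shape of the variation maps, with made-up IDs: if types 400 and 401
# both resolve to parentTypeID 400 in invmetatypes, we end up with
#     globalmap_basetypeid_typeid == {400: {400, 401}}
#     globalmap_typeid_basetypeid == {400: 400, 401: 400}
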
# Market group maps - we won't use these for further processing, but
# just as helpers for composing other maps
# { marketgroupid : set(typeid) }
globalmap_marketgroupid_typeid = {}
# { typeid : set(marketgroupid) }
globalmap_typeid_marketgroupid = {}
for typeid in publishedtypes:
    marketgroupid = 0
    cursor.execute(QUERY_TYPEID_MARKETGROUPID, (typeid,))
    for row in cursor:
        marketgroupid = row[0]
    if not marketgroupid:
        continue
    if marketgroupid not in globalmap_marketgroupid_typeid:
        globalmap_marketgroupid_typeid[marketgroupid] = set()
    globalmap_marketgroupid_typeid[marketgroupid].add(typeid)
# Copy items to all parent market groups
INITIALMARKETGROUPIDS = tuple(globalmap_marketgroupid_typeid)
for marketgroupid in INITIALMARKETGROUPIDS:
    # Limit depth, in case the database refers to groups in a way
    # that makes a loop
    cyclingmarketgroupid = marketgroupid
    for depth in range(20):
        cursor_parentmarket = db.cursor()
        cursor_parentmarket.execute(QUERY_MARKETGROUPID_PARENTGROUPID,
                                    (cyclingmarketgroupid,))
        for row in cursor_parentmarket:
            cyclingmarketgroupid = row[0]
        if cyclingmarketgroupid:
            if cyclingmarketgroupid not in globalmap_marketgroupid_typeid:
                globalmap_marketgroupid_typeid[cyclingmarketgroupid] = set()
            globalmap_marketgroupid_typeid[cyclingmarketgroupid].update(
                globalmap_marketgroupid_typeid[marketgroupid])
        else:
            break
# Now, make a reverse map
for marketgroupid, typeidset in globalmap_marketgroupid_typeid.items():
    for typeid in typeidset:
        if typeid not in globalmap_typeid_marketgroupid:
            globalmap_typeid_marketgroupid[typeid] = set()
        globalmap_typeid_marketgroupid[typeid].add(marketgroupid)

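# Propagation example with made-up IDs: for a market group chain
# 10 -> 7 -> 3 (child -> parent), every typeID filed under group 10
# also ends up in the sets for groups 7 and 3, so a lookup at any
# level of the tree finds it.
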
# Combine market groups and variations
# { marketgroupid : set(typeidwithvariations) }
globalmap_marketgroupid_typeidwithvariations = \
    copy.deepcopy(globalmap_marketgroupid_typeid)
# { typeidwithvariations : set(marketgroupid) }
globalmap_typeidwithvariations_marketgroupid = {}
for marketgroupid in globalmap_marketgroupid_typeidwithvariations:
    typestoadd = set()
    for typeid in globalmap_marketgroupid_typeidwithvariations[marketgroupid]:
        if typeid in globalmap_basetypeid_typeid:
            for variationid in globalmap_basetypeid_typeid[typeid]:
                # Do not include items which have a market group of
                # their own, even if they're variations
                if variationid not in globalmap_typeid_marketgroupid:
                    typestoadd.add(variationid)
    globalmap_marketgroupid_typeidwithvariations[marketgroupid].update(
        typestoadd)
# Make the reverse map in the same simple way
for marketgroupid, typeidwithvariationsset in \
        globalmap_marketgroupid_typeidwithvariations.items():
    for typeid in typeidwithvariationsset:
        if typeid not in globalmap_typeidwithvariations_marketgroupid:
            globalmap_typeidwithvariations_marketgroupid[typeid] = set()
        globalmap_typeidwithvariations_marketgroupid[typeid].add(marketgroupid)


# Anything published that still has no market group, even via its
# variation parent, is what we're looking for
nonmarket = set()
for typeid in publishedtypes:
    if typeid not in globalmap_typeidwithvariations_marketgroupid:
        nonmarket.add(typeid)

def getItemAttrs(typeid):
    """Gather an attribute name: value map for the given type."""
    attrs = {}
    cursor.execute(QUERY_TYPEID_ATTRIBS, (typeid,))
    for row in cursor:
        attrs[row[0]] = row[1]
    cursor.execute(QUERY_TYPEID_BASEATTRIBS, (typeid,))
    for row in cursor:
        if row[0] is not None:
            attrs["volume"] = row[0]
        if row[1] is not None:
            attrs["mass"] = row[1]
        if row[2] is not None:
            attrs["capacity"] = row[2]
    return attrs

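# Hypothetical illustration of the returned shape:
#     getItemAttrs(12345)  # -> {"cpu": 30.0, "power": 8.0, "volume": 5.0}
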
def suggestMktGrp(typeid, mode="grp"):
    """Suggest a market group by scoring similarity against items which
    share a group ("grp" mode) or a category ("cat" mode)."""
    typecat = globalmap_typeid_categoryid[typeid]
    catname = ""
    cursor.execute(QUERY_CATEGORYID_CATEGORYNAME, (typecat,))
    for row in cursor:
        catname = row[0]
    typename = ""
    cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
    for row in cursor:
        typename = row[0]
    # Civilian modules get a hard-coded market group
    if catname.lower() == "module" and "civilian" in typename.lower():
        return 760
    attrs = getItemAttrs(typeid)
    implantness = None
    boosterness = None
    cpu = None
    power = None
    droneBandwidthUsed = None
    volume = None
    if "implantness" in attrs:
        implantness = attrs["implantness"]
    if "boosterness" in attrs:
        boosterness = attrs["boosterness"]
    if "cpu" in attrs:
        cpu = attrs["cpu"]
    if "power" in attrs:
        power = attrs["power"]
    if "droneBandwidthUsed" in attrs:
        droneBandwidthUsed = attrs["droneBandwidthUsed"]
    if "volume" in attrs:
        volume = attrs["volume"]
    if mode == "grp":
        grp = globalmap_typeid_groupid[typeid]
        comrades = globalmap_groupid_typeid[grp]
    elif mode == "cat":
        cat = globalmap_typeid_categoryid[typeid]
        comrades = globalmap_categoryid_typeid[cat]
    mktgrps_w_cos = {}
    for co in comrades:
        marketgroupid = 0
        cursor.execute(QUERY_TYPEID_MARKETGROUPID, (co,))
        for row in cursor:
            marketgroupid = row[0]
        if not marketgroupid:
            continue
        if marketgroupid not in mktgrps_w_cos:
            mktgrps_w_cos[marketgroupid] = 0.0
        similarity_factor = 1.0
        metagrp = 0
        cursor.execute(QUERY_TYPEID_METAGROUPID, (co,))
        for row in cursor:
            metagrp = row[0]
        if metagrp not in (0, 1, 2, 14):
            similarity_factor *= 0.01
        if implantness or boosterness or cpu or power or droneBandwidthUsed or volume:
            cgrpattrs = getItemAttrs(co)
            if implantness:
                if "implantness" in cgrpattrs:
                    if cgrpattrs["implantness"] != implantness:
                        similarity_factor *= 0.1
                else:
                    similarity_factor *= 0.01
            if boosterness:
                if "boosterness" in cgrpattrs:
                    if cgrpattrs["boosterness"] != boosterness:
                        similarity_factor *= 0.1
                else:
                    similarity_factor *= 0.01
            if cpu:
                if "cpu" in cgrpattrs and cgrpattrs["cpu"]:
                    fct = cpu / cgrpattrs["cpu"]
                    if fct > 1:
                        fct = 1 / fct
                    similarity_factor *= fct
                else:
                    similarity_factor *= 0.01
            if power:
                if "power" in cgrpattrs and cgrpattrs["power"]:
                    fct = power / cgrpattrs["power"]
                    if fct > 1:
                        fct = 1 / fct
                    similarity_factor *= fct
                else:
                    similarity_factor *= 0.01
            if droneBandwidthUsed:
                if "droneBandwidthUsed" in cgrpattrs:
                    fct = droneBandwidthUsed / cgrpattrs["droneBandwidthUsed"]
                    if fct > 1:
                        fct = 1 / fct
                    similarity_factor *= fct
                else:
                    similarity_factor *= 0.01
            if volume:
                if "volume" in cgrpattrs:
                    fct = volume / cgrpattrs["volume"]
                    if fct > 1:
                        fct = 1 / fct
                    similarity_factor *= fct
                else:
                    similarity_factor *= 0.01
        mktgrps_w_cos[marketgroupid] += similarity_factor
    if mktgrps_w_cos:
        winner = max(mktgrps_w_cos.keys(), key=lambda k: mktgrps_w_cos[k])
    else:
        winner = None
    return winner

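# Worked example of the scoring, with made-up numbers: comparing a
# module with cpu=30 against a comrade with cpu=40 gives
# fct = 30/40 = 0.75, so similarity_factor drops to 0.75; a comrade
# lacking the attribute entirely is penalized by 0.01 instead. The
# market group accumulating the highest total wins.
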
def suggestMetaGrp(typeid):
    """Suggest a meta group ID based on metaLevel and name patterns."""
    typename = ""
    cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
    for row in cursor:
        typename = row[0]
    faction_affixes = ("Arch Angel", "Domination", "Blood", "Guristas", "Sansha", "Sanshas", "Shadow", "Guardian", "Serpentis",
                       "Caldari", "Imperial", "Gallente", "Federation", "Republic",
                       "Ammatar", "Khanid", "Thukker", "Syndicate", "Sisters", "Legion", "ORE",
                       "Nugoehuvi")
    deadspace_affixes = ("Gistii", "Gistum", "Gist",
                         "Corpii", "Corpum", "Corpus",
                         "Pithi", "Pithum", "Pith",
                         "Centii", "Centum", "Centus",
                         "Coreli", "Corelum", "Core")
    storyline_names = {"Akemon", "Michi", "Ogdin", "Pashan", "Shaqil", "Whelan Machorin", "Numon"}
    officer_names = ("Ahremen", "Brokara", "Brynn", "Chelm", "Cormack", "Draclira", "Estamel", "Gotan", "Hakim",
                     "Kaikka", "Mizuro", "Raysere", "Selynne", "Setele", "Tairei", "Thon", "Tuvan", "Vizan")
    storyline_pattern_general = "'[A-Za-z ]+'"
    storyline_pattern_names = "|".join("{0}".format(name) for name in storyline_names)
    faction_pattern = "({0}) ".format("|".join(faction_affixes))
    deadspace_pattern = "({0}) ".format("|".join(deadspace_affixes))
    officer_pattern = "({0}) ".format("|".join("{0}'s".format(name) for name in officer_names))

    attrs = getItemAttrs(typeid)
    if attrs.get("metaLevel") is not None:
        mlvl = attrs["metaLevel"]
        if mlvl in (0, 1, 2, 3, 4):
            meta = 1
        elif mlvl == 5:
            meta = 2
        elif mlvl in (6, 7):
            meta = 3
        elif mlvl in (8, 9):
            meta = 4
        elif mlvl in (11, 12, 13, 14):
            if re.search(deadspace_pattern, typename):
                meta = 6
            else:
                meta = 5
        else:
            meta = 1
    elif re.search(officer_pattern, typename):
        meta = 5
    elif re.search(deadspace_pattern, typename):
        meta = 6
    elif re.search(faction_pattern, typename):
        meta = 4
    elif re.search(storyline_pattern_names, typename):
        meta = 3
    elif re.search(storyline_pattern_general, typename) and "Hardwiring" not in typename:
        meta = 3
    else:
        meta = 1

    return meta


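# The numeric results map onto the invmetagroups table; in this database
# metaGroupID 1 is Tech I, 2 Tech II, 3 Storyline, 4 Faction, 5 Officer
# and 6 Deadspace, which is what the branches above encode.
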
map_typeid_stuff = {}
map_typeid_stuff2 = {}

for typeid in nonmarket:
    typename = ""
    cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
    for row in cursor:
        typename = row[0]
    grpname = ""
    cursor.execute(QUERY_GROUPID_GROUPNAME, (globalmap_typeid_groupid[typeid],))
    for row in cursor:
        grpname = row[0]
    mkt = suggestMktGrp(typeid)
    if mkt is None:
        mkt = suggestMktGrp(typeid, mode="cat")
    meta = suggestMetaGrp(typeid)
    attrs = getItemAttrs(typeid)
    if mkt:
        map_typeid_stuff[typeid] = (mkt, meta)
        marketgroupname = ""
        cursor.execute(QUERY_MARKETGROUPID_MARKETGROUPNAME,
                       (mkt,))
        for row in cursor:
            marketgroupname = row[0]
        # Prepend market group name with its parents' names
        prependparentid = mkt
        # Limit depth to avoid looping, as usual
        for depth in range(20):
            cursor_parentmarket = db.cursor()
            cursor_parentmarket.execute(QUERY_MARKETGROUPID_PARENTGROUPID,
                                        (prependparentid,))
            for row in cursor_parentmarket:
                prependparentid = row[0]
            if prependparentid:
                cursor_parentmarket2 = db.cursor()
                cursor_parentmarket2.execute(QUERY_MARKETGROUPID_MARKETGROUPNAME,
                                             (prependparentid,))
                for row in cursor_parentmarket2:
                    marketgroupname = "{0} > {1}".format(row[0],
                                                         marketgroupname)
            else:
                break
    else:
        marketgroupname = "None"

    map_typeid_stuff2[typename] = (mkt, marketgroupname)

    metagroupname = ""
    cursor.execute(QUERY_METAGROUPNAME_METAGROUPID,
                   (meta,))
    for row in cursor:
        metagroupname = row[0]

    #print("---\nItem: {0}\nGroup: {1}\nSuggested market group: {2} ({3})\nMeta group: {4}".format(typename, grpname, marketgroupname, mkt, metagroupname))

#print("\n\nmap = {{ {0} }}".format(", ".join("{0}: ({1}, {2})".format(key, map_typeid_stuff[key][0], map_typeid_stuff[key][1]) for key in sorted(map_typeid_stuff))))
print("---\n{0}".format("\n".join("\"{0}\": {1}, # {2}".format(key, map_typeid_stuff2[key][0], map_typeid_stuff2[key][1]) for key in sorted(map_typeid_stuff2))))
scripts/itemDiff.py (executable file, 497 lines)
@@ -0,0 +1,497 @@
#!/usr/bin/env python3
#===============================================================================
# Copyright (C) 2010-2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================


'''
This script is used to compare two different database versions.
It shows removed/changed/new items along with lists of changed
effects, changed attributes, and renamed data.
'''

import argparse
import os.path
import re
import sqlite3

parser = argparse.ArgumentParser(description="Compare two databases generated from eve dump to find eos-related differences")
parser.add_argument("-o", "--old", type=str, required=True, help="path to old cache data dump")
parser.add_argument("-n", "--new", type=str, required=True, help="path to new cache data dump")
parser.add_argument("-g", "--nogroups", action="store_false", default=True, dest="groups", help="don't show changed groups")
parser.add_argument("-e", "--noeffects", action="store_false", default=True, dest="effects", help="don't show list of changed effects")
parser.add_argument("-a", "--noattributes", action="store_false", default=True, dest="attributes", help="don't show list of changed attributes")
parser.add_argument("-r", "--norenames", action="store_false", default=True, dest="renames", help="don't show list of renamed data")
args = parser.parse_args()

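# Typical invocation (paths are placeholders):
#     ./itemDiff.py --old ~/dumps/eve-old.db --new ~/dumps/eve-new.db
# with -g/-e/-a/-r available to silence individual report sections.
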
# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(args.old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(args.new))
new_cursor = new_db.cursor()

# Force some of the items to be published
FORCEPUB_TYPES = ("Ibis", "Impairor", "Velator", "Reaper")
OVERRIDES_TYPEPUB = 'UPDATE invtypes SET published = 1 WHERE typeName = ?'
for typename in FORCEPUB_TYPES:
    old_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
    new_cursor.execute(OVERRIDES_TYPEPUB, (typename,))

# Initialization of a few things used by both the changed and renamed
# effects lists
effectspath = os.path.join("..", "..", "effects")
implemented = set()

for filename in os.listdir(effectspath):
    basename, extension = filename.rsplit('.', 1)
    # Ignore non-py files and exclude implementation-specific 'effect' files
    if extension == "py" and basename not in ("__init__",):
        implemented.add(basename)

# Effects' names are used w/o any special symbols by eos
stripspec = "[^A-Za-z0-9]"

# Helper which tells whether an effect is implemented in eos
def geteffst(effectname):
    eosname = re.sub(stripspec, "", effectname).lower()
    if eosname in implemented:
        impstate = True
    else:
        impstate = False
    return impstate

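# Illustration (hypothetical effect name): "Shield Boosting" strips and
# lowercases to "shieldboosting", so geteffst returns True exactly when
# an effects/shieldboosting.py module exists.
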
def findrenames(ren_dict, query, strip=False):
    """Fill ren_dict with (old name, new name) pairs for IDs whose
    names differ between the two databases."""
    old_namedata = {}
    new_namedata = {}

    for cursor, dictionary in ((old_cursor, old_namedata), (new_cursor, new_namedata)):
        cursor.execute(query)
        for row in cursor:
            id = row[0]
            name = row[1]
            if strip is True:
                name = re.sub(stripspec, "", name)
            dictionary[id] = name

    for id in set(old_namedata.keys()).intersection(new_namedata.keys()):
        oldname = old_namedata[id]
        newname = new_namedata[id]
        if oldname != newname:
            ren_dict[id] = (oldname, newname)
    return

def printrenames(ren_dict, title, implementedtag=False):
    if len(ren_dict) > 0:
        print('\nRenamed ' + title + ':')
        for id in sorted(ren_dict):
            couple = ren_dict[id]
            if implementedtag:
                print("\n[{0}] \"{1}\"\n[{2}] \"{3}\"".format(geteffst(couple[0]), couple[0], geteffst(couple[1]), couple[1]))
            else:
                print("\n\"{0}\"\n\"{1}\"".format(couple[0], couple[1]))

groupcats = {}
def getgroupcat(grp):
    """Get group category from the new db"""
    if grp in groupcats:
        cat = groupcats[grp]
    else:
        query = 'SELECT categoryID FROM invgroups WHERE groupID = ?'
        new_cursor.execute(query, (grp,))
        cat = 0
        for row in new_cursor:
            cat = row[0]
        groupcats[grp] = cat
    return cat

itemnames = {}
def getitemname(item):
    """Get item name from the new db, falling back to the old one"""
    if item in itemnames:
        name = itemnames[item]
    else:
        query = 'SELECT typeName FROM invtypes WHERE typeID = ?'
        new_cursor.execute(query, (item,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (item,))
            for row in old_cursor:
                name = row[0]
        itemnames[item] = name
    return name

groupnames = {}
def getgroupname(grp):
    """Get group name from the new db, falling back to the old one"""
    if grp in groupnames:
        name = groupnames[grp]
    else:
        query = 'SELECT groupName FROM invgroups WHERE groupID = ?'
        new_cursor.execute(query, (grp,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (grp,))
            for row in old_cursor:
                name = row[0]
        groupnames[grp] = name
    return name

effectnames = {}
def geteffectname(effect):
    """Get effect name from the new db, falling back to the old one"""
    if effect in effectnames:
        name = effectnames[effect]
    else:
        query = 'SELECT effectName FROM dgmeffects WHERE effectID = ?'
        new_cursor.execute(query, (effect,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (effect,))
            for row in old_cursor:
                name = row[0]
        effectnames[effect] = name
    return name

attrnames = {}
def getattrname(attr):
    """Get attribute name from the new db, falling back to the old one"""
    if attr in attrnames:
        name = attrnames[attr]
    else:
        query = 'SELECT attributeName FROM dgmattribs WHERE attributeID = ?'
        new_cursor.execute(query, (attr,))
        name = ""
        for row in new_cursor:
            name = row[0]
        if not name:
            old_cursor.execute(query, (attr,))
            for row in old_cursor:
                name = row[0]
        attrnames[attr] = name
    return name

# State table
S = {"unchanged": 0,
     "removed": 1,
     "changed": 2,
     "added": 3}

if args.effects or args.attributes or args.groups:
    # Format:
    # Key: item id
    # Value: [groupID, set(effects), {attribute id : value}]
    old_itmdata = {}
    new_itmdata = {}

    for cursor, dictionary in ((old_cursor, old_itmdata), (new_cursor, new_itmdata)):
        # Compose list of items we're interested in, filtered by category
        query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryName IN ("Ship", "Module", "Charge", "Skill", "Drone", "Implant", "Subsystem")'
        cursor.execute(query)
        for row in cursor:
            itemid = row[0]
            groupID = row[1]
            # Initialize the data container for each item with empty stuff besides groupID
            dictionary[itemid] = [groupID, set(), {}]
        # Add items filtered by group
        query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon")'
        cursor.execute(query)
        for row in cursor:
            itemid = row[0]
            groupID = row[1]
            dictionary[itemid] = [groupID, set(), {}]

        if args.effects:
            # Pull all effects
            query = 'SELECT it.typeID, de.effectID FROM invtypes AS it INNER JOIN dgmtypeeffects AS dte ON dte.typeID = it.typeID INNER JOIN dgmeffects AS de ON de.effectID = dte.effectID WHERE it.published = 1'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                effectID = row[1]
                # Process only items we need
                if itemid in dictionary:
                    # Add effect to the set
                    effectSet = dictionary[itemid][1]
                    effectSet.add(effectID)

        if args.attributes:
            # Add base attributes to our data
            query = 'SELECT it.typeID, it.mass, it.capacity, it.volume FROM invtypes AS it'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                if itemid in dictionary:
                    attrdict = dictionary[itemid][2]
                    # Add base attributes: mass (4), capacity (38) and volume (161)
                    attrdict[4] = row[1]
                    attrdict[38] = row[2]
                    attrdict[161] = row[3]

            # Add attribute data for other attributes
            query = 'SELECT dta.typeID, dta.attributeID, dta.value FROM dgmtypeattribs AS dta'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                if itemid in dictionary:
                    attrid = row[1]
                    attrval = row[2]
                    attrdict = dictionary[itemid][2]
                    if attrid in attrdict:
                        print("Warning: base attribute is described in non-base attribute table")
                    else:
                        attrdict[attrid] = attrval

    # Get set of IDs from both dictionaries
    items_old = set(old_itmdata.keys())
    items_new = set(new_itmdata.keys())

    # Format:
    # Key: item state
    # Value: {item id: ((group state, old group, new group), {effect state: set(effects)}, {attribute state: {attributeID: (old value, new value)}})}
    global_itmdata = {}

    # Initialize it
    for state in S:
        global_itmdata[S[state]] = {}


    # Fill all the data for removed items
    for item in items_old.difference(items_new):
        # Set item state to removed
        state = S["removed"]
        # Set only the old group for the item
        oldgroup = old_itmdata[item][0]
        groupdata = (S["unchanged"], oldgroup, None)
        # Set the old set of effects and mark all as unchanged
        effectsdata = {}
        effectsdata[S["unchanged"]] = set()
        if args.effects:
            oldeffects = old_itmdata[item][1]
            effectsdata[S["unchanged"]].update(oldeffects)
        # Set the old set of attributes and mark all as unchanged
        attrdata = {}
        attrdata[S["unchanged"]] = {}
        if args.attributes:
            oldattrs = old_itmdata[item][2]
            for attr in oldattrs:
                # NULL means there's no such attribute in the db
                attrdata[S["unchanged"]][attr] = (oldattrs[attr], "NULL")
        # Fill the global dictionary with the data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)


    # Now, for added items
    for item in items_new.difference(items_old):
        # Set item state to added
        state = S["added"]
        # Set only the new group for the item
        newgroup = new_itmdata[item][0]
        groupdata = (S["unchanged"], None, newgroup)
        # Set the new set of effects and mark all as unchanged
        effectsdata = {}
        effectsdata[S["unchanged"]] = set()
        if args.effects:
            neweffects = new_itmdata[item][1]
            effectsdata[S["unchanged"]].update(neweffects)
        # Set the new set of attributes and mark all as unchanged
        attrdata = {}
        attrdata[S["unchanged"]] = {}
        if args.attributes:
            newattrs = new_itmdata[item][2]
            for attr in newattrs:
                # NULL means there's no such attribute in the db
                attrdata[S["unchanged"]][attr] = ("NULL", newattrs[attr])
        # Fill the global dictionary with the data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

    # Now, check all the items which exist in both databases
    for item in items_old.intersection(items_new):
        # Set group data for the item
        oldgroup = old_itmdata[item][0]
        newgroup = new_itmdata[item][0]
        # If we're not asked to compare groups, mark them as unchanged anyway
        groupdata = (S["changed"] if oldgroup != newgroup and args.groups else S["unchanged"], oldgroup, newgroup)
        # Fill effects data into the appropriate buckets
        effectsdata = {}
        for state in S:
            # We do not have changed effects whatsoever
            if state != "changed":
                effectsdata[S[state]] = set()
        if args.effects:
            oldeffects = old_itmdata[item][1]
            neweffects = new_itmdata[item][1]
            effectsdata[S["unchanged"]].update(oldeffects.intersection(neweffects))
            effectsdata[S["removed"]].update(oldeffects.difference(neweffects))
            effectsdata[S["added"]].update(neweffects.difference(oldeffects))
        # Go through all attributes, filling the global data dictionary
        attrdata = {}
        for state in S:
            attrdata[S[state]] = {}
        if args.attributes:
            oldattrs = old_itmdata[item][2]
            newattrs = new_itmdata[item][2]
            for attr in set(oldattrs.keys()).union(newattrs.keys()):
                # NULL means there's no such attribute in the db
                oldattr = oldattrs.get(attr, "NULL")
                newattr = newattrs.get(attr, "NULL")
                attrstate = S["unchanged"]
                if oldattr == "NULL" and newattr != "NULL":
                    attrstate = S["added"]
                elif oldattr != "NULL" and newattr == "NULL":
                    attrstate = S["removed"]
                elif oldattr != newattr:
                    attrstate = S["changed"]
                attrdata[attrstate][attr] = (oldattr, newattr)
        # Consider the item unchanged by default and switch it to changed
        # when we see any changes in its sub-items
        state = S["unchanged"]
        if state == S["unchanged"] and groupdata[0] != S["unchanged"]:
            state = S["changed"]
        if state == S["unchanged"] and (len(effectsdata[S["removed"]]) > 0 or len(effectsdata[S["added"]]) > 0):
            state = S["changed"]
        if state == S["unchanged"] and (len(attrdata[S["removed"]]) > 0 or len(attrdata[S["changed"]]) > 0 or len(attrdata[S["added"]]) > 0):
            state = S["changed"]
        # Fill the global dictionary with the data we've got
        global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

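    # Worked example, with made-up values: an attribute present in both
    # dumps as 10.0 (old) and 12.0 (new) lands in attrdata[S["changed"]]
    # as (10.0, 12.0) and is later printed as "10.0 => 12.0"; one found
    # only in the new dump is stored as ("NULL", value) and tagged added.
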
# As eos uses names as unique IDs in a lot of places, we have to keep
# track of name changes
if args.renames:
    ren_effects = {}
    query = 'SELECT effectID, effectName FROM dgmeffects'
    findrenames(ren_effects, query, strip=True)

    ren_attributes = {}
    query = 'SELECT attributeID, attributeName FROM dgmattribs'
    findrenames(ren_attributes, query)

    ren_categories = {}
    query = 'SELECT categoryID, categoryName FROM invcategories'
    findrenames(ren_categories, query)

    ren_groups = {}
    query = 'SELECT groupID, groupName FROM invgroups'
    findrenames(ren_groups, query)

    ren_marketgroups = {}
    query = 'SELECT marketGroupID, marketGroupName FROM invmarketgroups'
    findrenames(ren_marketgroups, query)

    ren_items = {}
    query = 'SELECT typeID, typeName FROM invtypes'
    findrenames(ren_items, query)

# Get db metadata
old_meta = {}
new_meta = {}
query = 'SELECT fieldName, fieldValue FROM metadata'
old_cursor.execute(query)
for row in old_cursor:
    old_meta[row[0]] = row[1]
new_cursor.execute(query)
for row in new_cursor:
    new_meta[row[0]] = row[1]

# Print jobs
print("Comparing databases:\n{0}-{1}\n{2}-{3}\n".format(old_meta.get("version"), old_meta.get("release"),
                                                        new_meta.get("version"), new_meta.get("release")))
if args.effects or args.attributes or args.groups:
    # Print the legend only when there are any interesting changes
    if len(global_itmdata[S["removed"]]) > 0 or len(global_itmdata[S["changed"]]) > 0 or len(global_itmdata[S["added"]]) > 0:
        genleg = "[+] - new item\n[-] - removed item\n[*] - changed item\n"
        grpleg = "(x => y) - group changes\n" if args.groups else ""
        attreffleg = " [+] - effect or attribute has been added to item\n [-] - effect or attribute has been removed from item\n" if args.attributes or args.effects else ""
        effleg = " [y] - effect is implemented\n [n] - effect is not implemented\n" if args.effects else ""
        print("{0}{1}{2}{3}\nItems:".format(genleg, grpleg, attreffleg, effleg))

    # Make sure our states are sorted
    stateorder = sorted(global_itmdata)

    TG = {S["unchanged"]: "+",
          S["changed"]: "*",
          S["removed"]: "-",
          S["added"]: "+"}

    # Cycle through states
    for itmstate in stateorder:
        # Skip unchanged items
        if itmstate == S["unchanged"]:
            continue
        items = global_itmdata[itmstate]
        # Sort by name first
        itemorder = sorted(items, key=lambda item: getitemname(item))
        # Then by group id
        itemorder = sorted(itemorder, key=lambda item: items[item][0][2] or items[item][0][1])
        # Then by category id
        itemorder = sorted(itemorder, key=lambda item: getgroupcat(items[item][0][2] or items[item][0][1]))

        for item in itemorder:
            groupdata = items[item][0]
            groupstr = " ({0} => {1})".format(getgroupname(groupdata[1]), getgroupname(groupdata[2])) if groupdata[0] == S["changed"] else ""
            print("\n[{0}] {1}{2}".format(TG[itmstate], getitemname(item), groupstr))

            effdata = items[item][1]
            for effstate in stateorder:
                # Skip unchanged effect sets, but always include them for added or removed ships
                # Also, always skip empty data
                if (effstate == S["unchanged"] and itmstate not in (S["removed"], S["added"])) or effstate not in effdata:
                    continue
                effects = effdata[effstate]
                efforder = sorted(effects, key=lambda eff: geteffectname(eff))
                for eff in efforder:
                    # Take the tag from the item if the item was added or removed
                    tag = TG[effstate] if itmstate not in (S["removed"], S["added"]) else TG[itmstate]
                    print(" [{0}|{1}] {2}".format(tag, "y" if geteffst(geteffectname(eff)) else "n", geteffectname(eff)))

            attrdata = items[item][2]
            for attrstate in stateorder:
                # Skip unchanged and empty attribute sets, also skip attribute display for removed items
                if (attrstate == S["unchanged"] and itmstate != S["added"]) or itmstate in (S["removed"],) or attrstate not in attrdata:
                    continue
                attrs = attrdata[attrstate]
                attrorder = sorted(attrs, key=lambda attr: getattrname(attr))
                for attr in attrorder:
                    valline = ""
                    if attrs[attr][0] == "NULL" or itmstate == S["added"]:
                        valline = "{0}".format(attrs[attr][1] or 0)
                    elif attrs[attr][1] == "NULL":
                        valline = "{0}".format(attrs[attr][0] or 0)
                    else:
                        valline = "{0} => {1}".format(attrs[attr][0] or 0, attrs[attr][1] or 0)
                    print(" [{0}] {1}: {2}".format(TG[attrstate], getattrname(attr), valline))

if args.renames:
    title = 'effects'
    printrenames(ren_effects, title, implementedtag=True)

    title = 'attributes'
    printrenames(ren_attributes, title)

    title = 'categories'
    printrenames(ren_categories, title)

    title = 'groups'
    printrenames(ren_groups, title)

    title = 'market groups'
    printrenames(ren_marketgroups, title)

    title = 'items'
    printrenames(ren_items, title)
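For reference, a hypothetical changed item renders like this (names and
values invented for illustration, following the format strings above):

[*] Some Shield Booster
 [+|y] shieldBoosting
 [*] cpu: 30.0 => 32.0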
scripts/jsonToSql.py (executable file, 186 lines)
@@ -0,0 +1,186 @@
#!/usr/bin/env python
#======================================================================
# Copyright (C) 2012 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with eos. If not, see <http://www.gnu.org/licenses/>.
#======================================================================

import os
import sys

# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
sys.path.append(os.path.realpath(os.path.join(path, "..", "..", "..")))

import json
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="This script imports data from a json dump into an sql database")
    parser.add_argument("-d", "--db", required=True, type=str, help="The sqlalchemy connectionstring, example: sqlite:///c:/tq.db")
    parser.add_argument("-j", "--json", required=True, type=str, help="The path to the json dump")
    args = parser.parse_args()

    jsonPath = os.path.expanduser(args.json)

    # Import eos.config first and change it
    import eos.config
    eos.config.gamedata_connectionstring = args.db
    eos.config.debug = False

    # Now that's done, we can import the eos modules using the config
    import eos.db
    import eos.gamedata

    # Create the database tables
    eos.db.gamedata_meta.create_all()

    # Config dict
    tables = {
        "dgmattribs": eos.gamedata.AttributeInfo,
        "dgmeffects": eos.gamedata.EffectInfo,
        "dgmtypeattribs": eos.gamedata.Attribute,
        "dgmtypeeffects": eos.gamedata.Effect,
        "dgmunits": eos.gamedata.Unit,
        "icons": eos.gamedata.Icon,
        "invcategories": eos.gamedata.Category,
        "invgroups": eos.gamedata.Group,
        "invmetagroups": eos.gamedata.MetaGroup,
        "invmetatypes": eos.gamedata.MetaType,
        "invtypes": eos.gamedata.Item,
        "phbtraits": eos.gamedata.Traits,
        "mapbulk_marketGroups": eos.gamedata.MarketGroup
    }

    fieldMapping = {
        "dgmattribs": {
            "displayName_en-us": "displayName"
        },
        "dgmeffects": {
            "displayName_en-us": "displayName",
            "description_en-us": "description"
        },
        "dgmunits": {
            "displayName_en-us": "displayName"
        },
        #icons???
        "invcategories": {
            "categoryName_en-us": "categoryName"
        },
        "invgroups": {
            "groupName_en-us": "groupName"
        },
        "invmetagroups": {
            "metaGroupName_en-us": "metaGroupName"
        },
        "invtypes": {
            "typeName_en-us": "typeName",
            "description_en-us": "description"
        },
        #phbtraits???
        "mapbulk_marketGroups": {
            "marketGroupName_en-us": "marketGroupName",
            "description_en-us": "description"
        }
    }

    def convertIcons(data):
        new = []
        for k, v in data.items():
            v["iconID"] = k
            new.append(v)
        return new

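    # Illustration with made-up data: convertIcons turns the icons dict
    #     {"1": {"iconFile": "foo.png"}}
    # into a list of rows
    #     [{"iconFile": "foo.png", "iconID": "1"}]
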
    def convertTraits(data):

        def convertSection(sectionData):
            sectionLines = []
            headerText = u"<b>{}</b>".format(sectionData["header"])
            sectionLines.append(headerText)
            for bonusData in sectionData["bonuses"]:
                prefix = u"{} ".format(bonusData["number"]) if "number" in bonusData else ""
                bonusText = u"{}{}".format(prefix, bonusData["text"])
                sectionLines.append(bonusText)
            sectionLine = u"<br />\n".join(sectionLines)
            return sectionLine

        newData = []
        for row in data:
            typeLines = []
            typeId = row["typeID"]
            traitData = row["traits_en-us"]
            for skillData in sorted(traitData.get("skills", ()), key=lambda i: i["header"]):
                typeLines.append(convertSection(skillData))
            if "role" in traitData:
                typeLines.append(convertSection(traitData["role"]))
            if "misc" in traitData:
                typeLines.append(convertSection(traitData["misc"]))
            traitLine = u"<br />\n<br />\n".join(typeLines)
            newRow = {"typeID": typeId, "traitText": traitLine}
            newData.append(newRow)
        return newData

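    # Sketch of the transformation on invented input: a row
    #     {"typeID": 1, "traits_en-us": {"role": {"header": "Role Bonus:",
    #      "bonuses": [{"number": "50%", "text": "bonus to X"}]}}}
    # becomes
    #     {"typeID": 1, "traitText": "<b>Role Bonus:</b><br />\n50% bonus to X"}
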
    data = {}

    # Dump all data to memory so we can easily cross-check ignored rows
    for jsonName, cls in tables.iteritems():
        with open(os.path.join(jsonPath, "{}.json".format(jsonName))) as f:
            tableData = json.load(f)
        if jsonName == "icons":
            tableData = convertIcons(tableData)
        if jsonName == "phbtraits":
            tableData = convertTraits(tableData)
        data[jsonName] = tableData

    # Do some preprocessing to make our job easier
    invTypes = set()
    for row in data["invtypes"]:
        if row["published"]:
            invTypes.add(row["typeID"])

    # ignore checker
    def isIgnored(file, row):
        if file == "invtypes" and not row["published"]:
            return True
        elif file == "dgmtypeeffects" and not row["typeID"] in invTypes:
            return True
        elif file == "dgmtypeattribs" and not row["typeID"] in invTypes:
            return True
        elif file == "invmetatypes" and not row["typeID"] in invTypes:
            return True

        return False

    # Loop through each json file and write it away, checking ignored rows
    for jsonName, table in data.iteritems():
        fieldMap = fieldMapping.get(jsonName, {})
        print "processing {}".format(jsonName)
        for row in table:
            # We don't care about some kinds of rows, filter them out
            if not isIgnored(jsonName, row):
                instance = tables[jsonName]()
                # fix for issue 80: strip the icon resource prefix
                if jsonName == "icons" and "res:/UI/Texture/Icons/" in str(row["iconFile"]):
                    row["iconFile"] = row["iconFile"].replace("res:/UI/Texture/Icons/", "").replace(".png", "")
                for k, v in row.iteritems():
                    setattr(instance, fieldMap.get(k, k), v)

                eos.db.gamedata_session.add(instance)

    eos.db.gamedata_session.commit()

    print("done")
scripts/nighty.py (executable file, 127 lines)
@@ -0,0 +1,127 @@
#!/usr/bin/env python

from optparse import OptionParser
import os.path
import shutil
import tempfile
import sys
import tarfile
import datetime
import random
import string

class FileStub():
    """Dummy stdout replacement used for the --quiet mode."""
    def write(self, *args):
        pass

    def flush(self, *args):
        pass

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

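# Example (output is random, shown value invented):
#     id_generator()  # -> 'K3G9ZP'
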
if __name__ == "__main__":
    oldstd = sys.stdout
    parser = OptionParser()
    parser.add_option("-s", "--skeleton", dest="skeleton", help="Location of skeleton directory")
    parser.add_option("-b", "--base", dest="base", help="location of the base directory")
    parser.add_option("-d", "--destination", dest="destination", help="where to copy our archive")
    parser.add_option("-t", "--static", dest="static", help="directory containing static files")
    parser.add_option("-q", "--quiet", dest="silent", action="store_true")
    options, args = parser.parse_args()

    if options.skeleton is None or options.base is None or options.destination is None:
        print "Need --skeleton argument as well as --base and --destination arguments"
        parser.print_help()
        sys.exit()

    if options.silent:
        sys.stdout = FileStub()

    randomId = id_generator()
    infoDict = {}
    skeleton = os.path.expanduser(options.skeleton)
    info = execfile(os.path.join(skeleton, "info.py"), infoDict)
    now = datetime.datetime.now()
    now = "%04d%02d%02d" % (now.year, now.month, now.day)
    dirName = "nighty-build-%s-%s" % (now, randomId)
    dst = os.path.join(os.getcwd(), dirName)
    tmpFile = os.path.join(os.getcwd(), "nighty-build-%s-%s-%s.tar.bz2" % (now, infoDict["os"], randomId))
    config = os.path.join(skeleton, "config.py")
    destination = os.path.expanduser(options.destination)

    i = 0
    gitData = (".git", ".gitignore", ".gitmodules")
    def loginfo(path, names):
        """copytree ignore callback: print progress dots, skip git data"""
        global i
        i += 1
        if i % 10 == 0:
            sys.stdout.write(".")
            sys.stdout.flush()
        return gitData

    try:
        print "copying skeleton to ", dst
        i = 0
        shutil.copytree(skeleton, dst, ignore=loginfo)
        print ""

        base = os.path.join(dst, infoDict["base"])
        print "copying base to ", base

        i = 0
        for stuff in os.listdir(os.path.expanduser(options.base)):
            currSource = os.path.join(os.path.expanduser(options.base), stuff)
            currDest = os.path.join(base, stuff)
            if stuff in gitData:
                continue
            elif os.path.isdir(currSource):
                shutil.copytree(currSource, currDest, ignore=loginfo)
            else:
                shutil.copy2(currSource, currDest)

        print ""
        if os.path.exists(config):
            print "adding skeleton config file"
            shutil.copy2(config, base)

        if options.static is not None and os.path.exists(os.path.expanduser(options.static)):
            print "copying static data to ", os.path.join(base, "staticdata")
            static = os.path.expanduser(options.static)
            shutil.copytree(static, os.path.join(base, "staticdata"), ignore=loginfo)

        print "removing development data"
        paths = []
        paths.append(os.path.join(base, "eos", "tests"))
        paths.append(os.path.join(base, "eos", "utils", "scripts"))
        for path in paths:
            if os.path.exists(path):
                print path
                shutil.rmtree(path)

        print "copying done, making archive: ", tmpFile
        archive = tarfile.open(tmpFile, "w:bz2")
        print "making archive"
        archive.add(dst, arcname=infoDict["arcname"])
        print "closing"
        archive.close()
        print "copying archive to ", destination
        shutil.move(tmpFile, destination)
    except:
        print "encountered an error"
        raise
    finally:
        print "deleting tmp files"
        try:
            shutil.rmtree(dst)
            os.unlink(tmpFile)
        except:
            pass

        sys.stdout = oldstd
        if os.path.isdir(destination):
            print os.path.join(destination, os.path.split(tmpFile)[1])
        else:
            print destination