Replace submodules with actual files

Submodules never were actually useful
DarkPhoenix
2013-06-10 22:12:34 +04:00
parent 91513d7d95
commit fd36a0b172
2940 changed files with 105139 additions and 0 deletions

1055
eos/utils/scripts/effectUsedBy.py Executable file

File diff suppressed because it is too large

350
eos/utils/scripts/eveCacheToDb.py Executable file

@@ -0,0 +1,350 @@
#!/usr/bin/env python
#===============================================================================
# Copyright (C) 2010 Anton Vorobyov
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
'''
This script pulls data out of the EVE cache and makes a database dump. To get most of the
data, you just need to log into the game; however, some special data appears in the cache
only after you execute the corresponding action in game - for example, open the market tree
to get data for the invmarketgroups table.
The Reverence library by Entity is used; see http://wiki.github.com/ntt/reverence/ for info.
As Reverence uses the same Python version as the EVE client (2.x series), this script cannot be converted to Python 3.
Example commands to run the script under Linux with default EVE paths for getting an SQLite dump:
Tranquility: python eveCacheToDb.py --eve="~/.wine/drive_c/Program Files/CCP/EVE" --cache="~/.wine/drive_c/users/"$USER"/Local Settings/Application Data/CCP/EVE/c_program_files_ccp_eve_tranquility/cache" --dump="sqlite:////home/"$USER"/Desktop/eve.db"
Singularity: python eveCacheToDb.py --eve="~/.wine/drive_c/Program Files/CCP/Singularity" --cache="~/.wine/drive_c/users/"$USER"/Local Settings/Application Data/CCP/EVE/c_program_files_ccp_singularity_singularity/cache" --sisi --dump="sqlite:////home/"$USER"/Desktop/evetest.db"
'''
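# A minimal sketch of reading the cache with Reverence (assuming the default
# Tranquility paths and that invtypes behaves as an IndexRowset; the real,
# option-driven invocation lives in __main__ below):
#
#     from reverence import blue
#     eve = blue.EVE(PATH_EVE, cachepath=PATH_CACHE, server="tranquility")
#     cfg = eve.getconfigmgr()
#     for typeID, typeName in cfg.invtypes.Select("typeID", "typeName"):
#         print typeID, typeName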
import os
import sys
# Add eos root path to sys.path
path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
sys.path.append(os.path.realpath(os.path.join(path, "..", "..", "..")))
def get_map():
"""
Return table name - table class map
"""
return {"allianceshortnames": None,
"billtypes": None,
"certificaterelationships": None,
"certificates": None,
"corptickernames": None,
"dgmattribs": AttributeInfo,
"dgmeffects": EffectInfo,
"dgmtypeattribs": Attribute,
"dgmtypeeffects": Effect,
"evegraphics": None,
"evelocations": None,
"eveowners": None,
"eveunits": Unit,
"groupsByCategories": None,
"icons": Icon,
"invbptypes": None,
"invcategories": Category,
"invcontrabandTypesByFaction": None,
"invcontrabandTypesByType": None,
"invgroups": Group,
"invmetagroups": MetaGroup,
"invmarketgroups": MarketGroup,
"invmetatypes": MetaType,
"invmetatypesByTypeID": None,
"invreactiontypes": None,
"invtypes": Item,
"locationscenes": None,
"locationwormholeclasses": None,
"mapcelestialdescriptions": None,
"ownericons": None,
"ramactivities": None,
"ramaltypes": None,
"ramaltypesdetailpercategory": None,
"ramaltypesdetailpergroup": None,
"ramcompletedstatuses": None,
"ramtyperequirements": None,
"schematics": None,
"schematicsByPin": None,
"schematicsByType": None,
"schematicspinmap": None,
"schematicstypemap": None,
"shiptypes": None,
"sounds": None,
"typesByGroups": None,
"typesByMarketGroups": None}
def get_order():
"""
Return order for table processing
"""
return ("icons",
"invmarketgroups",
"eveunits",
"dgmattribs",
"dgmeffects",
"invcategories",
"invgroups",
"invmetagroups",
"invtypes",
"invmetatypes",
"dgmtypeattribs",
"dgmtypeeffects")
def get_customcalls():
"""
Return custom table - call to get data for it map
"""
return {"invmarketgroups": eve.RemoteSvc("marketProxy").GetMarketGroups()}
def process_table(sourcetable, tablename, tableclass):
"""
Get all data from cache and write it to database
"""
# Get data from source and process it
tabledata = get_table_data(sourcetable, tablename, get_source_headers(sourcetable))
# Insert everything into table
insert_table_values(tabledata, tableclass)
return
def get_source_headers(sourcetable):
"""
Pull list of headers from the source table
"""
sourceheaders = None
guid = getattr(sourcetable, "__guid__", "None")
# For IndexRowset and FilterRowset, Reverence provides a list of headers
if guid in ("util.IndexRowset", "util.FilterRowset"):
sourceheaders = tuple(sourcetable.header)
# For IndexedRowLists, we need to compose list ourselves
elif guid == "util.IndexedRowLists":
headerset = set()
for item in sourcetable:
for row in sourcetable[item]:
for headername in row.__header__.Keys():
headerset.add(headername)
sourceheaders = tuple(headerset)
return sourceheaders
def get_table_data(sourcetable, tablename, headers):
"""
Pull data out of source table
"""
# Each row is wrapped into a dictionary; the full table is a list of these dictionaries
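# e.g. (illustrative): [{"typeID": 587, "typeName": u"Rifter"}, ...]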
datarows = []
guid = getattr(sourcetable, "__guid__", "None")
# We have Select method for IndexRowset tables
if guid == "util.IndexRowset":
for values in sourcetable.Select(*headers):
# When Select is asked to find a single value, it is returned in its raw
# form. Convert it to a tuple for proper further processing
if not isinstance(values, (list, tuple, set)):
values = (values,)
headerslen = len(headers)
datarow = {}
# One row value should correspond to one header; if the number of values
# doesn't match the number of headers, something went wrong
if headerslen != len(values):
print "Error: malformed data in source table {0}".format(tablename)
return None
# Fill row dictionary with values and append it to list
for i in xrange(headerslen):
# If we've got a plain byte string, decode it to Unicode
if isinstance(values[i], str):
datarow[headers[i]] = unicode(values[i], 'ISO-8859-1')
else:
datarow[headers[i]] = values[i]
datarows.append(datarow)
# FilterRowset and IndexedRowLists are accessible almost like dictionaries
elif guid in ("util.FilterRowset", "util.IndexedRowLists"):
# Go through all source table elements
for element in sourcetable.iterkeys():
# Go through all rows of an element
for row in sourcetable[element]:
datarow = {}
# Fill row dictionary with values we need and append it to the list
for header in headers:
value = getattr(row, header, None)
# None and zero values are different, and we want to write zero
# values to database
if value or value in (0, 0.0):
datarow[header] = value
datarows.append(datarow)
return datarows
def insert_table_values(tabledata, tableclass):
"""
Insert values into tables and show progress
"""
rows = 0
rows_skipped = 0
# Go through all table rows
for row in tabledata:
instance = tableclass()
# Print a dot every 1k inserted rows
if rows / 1000.0 == int(rows / 1000.0):
sys.stdout.write(".")
sys.stdout.flush()
try:
# Go through all fields of a row, process them and insert
for header in row:
setattr(instance, header, process_value(row[header], tableclass, header))
eos.db.gamedata_session.add(instance)
rows += 1
except ValueError:
rows_skipped += 1
# Print out results and actually commit results to database
print "\nInserted {0} rows. skipped {1} rows".format(rows, rows_skipped)
eos.db.gamedata_session.commit()
def process_value(value, tableclass, header):
# Get column info
info = tableclass._sa_class_manager.mapper.c.get(header)
if info is None:
return
# Null out non-existent foreign key relations
foreign_keys = info.foreign_keys
if len(foreign_keys) > 0:
for key in foreign_keys:
col = key.column
if not query_existence(col, value) and not key.deferrable:
if info.nullable:
return None
else:
raise ValueError("Integrity check failed")
else:
return value
# Turn booleans into actual booleans, don't leave them as integers
elif type(info.type) == Boolean:
return bool(value)
else:
return value
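# Cache of foreign key existence checks: query_existence below memoizes one
# database lookup per (column, table, value) triple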
existence_cache = {}
def query_existence(col, value):
key = (col, col.table, value)
info = existence_cache.get(key)
if info is None:
info = eos.db.gamedata_session.query(col.table).filter(col == value).count() > 0
existence_cache[key] = info
return info
if __name__ == "__main__":
from ConfigParser import ConfigParser
from optparse import OptionParser
from reverence import blue
from sqlalchemy import Boolean
from sqlalchemy.orm import class_mapper, ColumnProperty
import eos.config
# Parse command line options
usage = "usage: %prog --eve=EVE --cache=CACHE --dump=DUMP [--release=RELEASE --sisi]"
parser = OptionParser(usage=usage)
parser.add_option("-e", "--eve", help="path to eve folder")
parser.add_option("-c", "--cache", help="path to eve cache folder")
parser.add_option("-d", "--dump", help="the SQL Alchemy connection string of where we should place our final dump")
parser.add_option("-r", "--release", help="database release number, defaults to 1", default="1")
parser.add_option("-s", "--sisi", action="store_true", dest="singularity", help="if you're going to work with Singularity test server data, use this option", default=False)
(options, args) = parser.parse_args()
# Exit if any of the required options is missing
if not options.eve or not options.cache or not options.dump:
sys.stderr.write("You need to specify paths to eve folder, cache folder and SQL Alchemy connection string. Run script with --help option for further info.\n")
sys.exit()
# We can deal either with singularity or tranquility servers
if options.singularity: server = "singularity"
else: server = "tranquility"
# Set static variables for paths
PATH_EVE = os.path.expanduser(options.eve)
PATH_CACHE = os.path.expanduser(options.cache)
eos.config.gamedata_connectionstring = options.dump
eos.config.debug = False
from eos.gamedata import *
import eos.db
# Get version of EVE client
config = ConfigParser()
config.read(os.path.join(PATH_EVE, "common.ini"))
# Form metadata dictionary for corresponding table
metadata = {}
metadata["version"] = config.getint("main", "build")
metadata["release"] = options.release
# Initialize Reverence cache manager
eve = blue.EVE(PATH_EVE, cachepath=PATH_CACHE, server=server)
cfg = eve.getconfigmgr()
# Create all tables we need
eos.db.gamedata_meta.create_all()
# Add versioning info to the metadata table
for fieldname in metadata:
eos.db.gamedata_session.add(MetaData(fieldname, metadata[fieldname]))
eos.db.gamedata_session.commit()
# Get table map, processing order and special table data
TABLE_MAP = get_map()
TABLE_ORDER = get_order()
CUSTOM_CALLS = get_customcalls()
# Warn about various stuff
for table in cfg.tables:
if not table in TABLE_MAP:
# Warn about new tables in cache which are still not described by table map
print "Warning: unmapped table {0} found in cache".format(table)
for table in TABLE_MAP:
if not table in cfg.tables and not table in CUSTOM_CALLS:
# Warn about mapped tables which are missing in cache
print "Warning: mapped table {0} cannot be found in cache".format(table)
if not table in TABLE_ORDER and TABLE_MAP[table] is not None:
# Warn about mapped tables not specified in processing order
print "Warning: mapped table {0} is missing in processing order".format(table)
for table in TABLE_ORDER:
if not table in TABLE_MAP:
# Warn about unmapped tables in processing order
print "Warning: unmapped table {0} is specified in processing order".format(table)
# Get data from reverence and write it
for tablename in TABLE_ORDER:
tableclass = TABLE_MAP[tablename]
if tableclass is not None:
# Print currently processed table name
print "Processing: {0}".format(tablename)
# Get table object from the Reverence and process it
source_table = getattr(cfg, tablename) if tablename not in CUSTOM_CALLS else CUSTOM_CALLS[tablename]
# Gather data regarding columns for current table in cache and eos
cols_eos = set(prop.key for prop in class_mapper(TABLE_MAP[tablename]).iterate_properties if isinstance(prop, ColumnProperty))
cols_rev = set(get_source_headers(source_table))
notineos = cols_rev.difference(cols_eos)
notinrev = cols_eos.difference(cols_rev)
if notineos:
print "Warning: {0} found in cache but missing in eos definitions: {1}".format("column" if len(notineos) == 1 else "columns", ", ".join(sorted(notineos)))
if notinrev:
print "Warning: {0} found in eos definitions but missing in cache: {1}".format("column" if len(notinrev) == 1 else "columns", ", ".join(sorted(notinrev)))
process_table(source_table, tablename, tableclass)


@@ -0,0 +1,430 @@
#!/usr/bin/env python3
import copy
import os.path
import re
import sqlite3
# Connect to database and set up cursor
db = sqlite3.connect(os.path.join("..", "..", "..", "staticdata", "eve.db"))
cursor = db.cursor()
# Queries to get raw data
QUERY_ALLEFFECTS = 'SELECT effectID, effectName FROM dgmeffects'
# Limit categories to
# Modules (7), Charges (8), Drones (18),
# Implants (20), Subsystems (32)
QUERY_PUBLISHEDTYPEIDS = 'SELECT it.typeID FROM invtypes AS it INNER JOIN \
invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON \
ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryID IN \
(7, 8, 18, 20, 32)'
QUERY_TYPEID_GROUPID = 'SELECT groupID FROM invtypes WHERE typeID = ? LIMIT 1'
QUERY_GROUPID_CATEGORYID = 'SELECT categoryID FROM invgroups WHERE \
groupID = ? LIMIT 1'
QUERY_TYPEID_PARENTTYPEID = 'SELECT parentTypeID FROM invmetatypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_MARKETGROUPID = 'SELECT marketGroupID FROM invtypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_TYPENAME = 'SELECT typeName FROM invtypes WHERE typeID = ? \
LIMIT 1'
QUERY_MARKETGROUPID_PARENTGROUPID = 'SELECT parentGroupID FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_EFFECTID_TYPEID = 'SELECT typeID FROM dgmtypeeffects WHERE effectID = ?'
# Queries for printing
QUERY_GROUPID_GROUPNAME = 'SELECT groupName FROM invgroups WHERE groupID = ? \
LIMIT 1'
QUERY_CATEGORYID_CATEGORYNAME = 'SELECT categoryName FROM invcategories \
WHERE categoryID = ? LIMIT 1'
QUERY_MARKETGROUPID_MARKETGROUPNAME = 'SELECT marketGroupName FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_TYPEID_ATTRIBS = 'SELECT da.attributeName, dta.value FROM dgmattribs AS \
da INNER JOIN dgmtypeattribs AS dta ON dta.attributeID = da.attributeID WHERE \
dta.typeID = ?'
QUERY_TYPEID_BASEATTRIBS = 'SELECT volume, mass, capacity FROM invtypes WHERE \
typeID = ?'
QUERY_TYPEID_METAGROUPID = 'SELECT metaGroupID FROM invmetatypes WHERE typeID = ?'
QUERY_METAGROUPID_METAGROUPNAME = 'SELECT metaGroupName FROM invmetagroups WHERE metaGroupID = ?'
# Compose a map of effect names stripped of the symbols which eos doesn't
# take into consideration; we'll use it to find proper effect IDs from file
# names
globalmap_effectnameeos_effectid = {}
STRIPSPEC = "[^A-Za-z0-9]"
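# e.g. a hypothetical effect name "shield Boosting (heavy)" collapses to
# "shieldBoostingheavy" after stripping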
cursor.execute(QUERY_ALLEFFECTS)
for row in cursor:
effectid = row[0]
effectnamedb = row[1]
effectnameeos = re.sub(STRIPSPEC, "", effectnamedb)
# Different effects may collapse to the same stripped name, so form
# sets of IDs
if not effectnameeos in globalmap_effectnameeos_effectid:
globalmap_effectnameeos_effectid[effectnameeos] = set()
globalmap_effectnameeos_effectid[effectnameeos].add(effectid)
# Published types set
publishedtypes = set()
cursor.execute(QUERY_PUBLISHEDTYPEIDS)
for row in cursor:
publishedtypes.add(row[0])
# Compose group maps
# { groupid : set(typeid) }
globalmap_groupid_typeid = {}
# { typeid : groupid }
globalmap_typeid_groupid = {}
for typeid in publishedtypes:
groupid = 0
cursor.execute(QUERY_TYPEID_GROUPID, (typeid,))
for row in cursor:
groupid = row[0]
if not groupid in globalmap_groupid_typeid:
globalmap_groupid_typeid[groupid] = set()
globalmap_groupid_typeid[groupid].add(typeid)
globalmap_typeid_groupid[typeid] = groupid
# Category maps
# { categoryid : set(typeid) }
globalmap_categoryid_typeid = {}
# { typeid : categoryid }
globalmap_typeid_categoryid = {}
for typeid in publishedtypes:
categoryid = 0
cursor.execute(QUERY_GROUPID_CATEGORYID,
(globalmap_typeid_groupid[typeid],))
for row in cursor:
categoryid = row[0]
if not categoryid in globalmap_categoryid_typeid:
globalmap_categoryid_typeid[categoryid] = set()
globalmap_categoryid_typeid[categoryid].add(typeid)
globalmap_typeid_categoryid[typeid] = categoryid
# Base type maps
# { basetypeid : set(typeid) }
globalmap_basetypeid_typeid = {}
# { typeid : basetypeid }
globalmap_typeid_basetypeid = {}
for typeid in publishedtypes:
# Not all typeIDs in the database have baseTypeID, so assign some
# default value to it
basetypeid = 0
cursor.execute(QUERY_TYPEID_PARENTTYPEID, (typeid,))
for row in cursor:
basetypeid = row[0]
# If base type is not published or is not set in database, consider
# item as variation of self
if basetypeid not in publishedtypes:
basetypeid = typeid
if not basetypeid in globalmap_basetypeid_typeid:
globalmap_basetypeid_typeid[basetypeid] = set()
globalmap_basetypeid_typeid[basetypeid].add(typeid)
globalmap_typeid_basetypeid[typeid] = basetypeid
# Market group maps - we won't use these for further processing, but
# just as helper for composing other maps
# { marketgroupid : set(typeid) }
globalmap_marketgroupid_typeid = {}
# { typeid : set(marketgroupid) }
globalmap_typeid_marketgroupid = {}
for typeid in publishedtypes:
marketgroupid = 0
cursor.execute(QUERY_TYPEID_MARKETGROUPID, (typeid,))
for row in cursor:
marketgroupid = row[0]
if not marketgroupid:
continue
if not marketgroupid in globalmap_marketgroupid_typeid:
globalmap_marketgroupid_typeid[marketgroupid] = set()
globalmap_marketgroupid_typeid[marketgroupid].add(typeid)
# Copy items to all parent market groups
INITIALMARKETGROUPIDS = tuple(globalmap_marketgroupid_typeid)
for marketgroupid in INITIALMARKETGROUPIDS:
# Limit depth in case the database refers to groups which form
# a loop
cyclingmarketgroupid = marketgroupid
for depth in range(20):
cursor_parentmarket = db.cursor()
cursor_parentmarket.execute(QUERY_MARKETGROUPID_PARENTGROUPID,
(cyclingmarketgroupid,))
for row in cursor_parentmarket:
cyclingmarketgroupid = row[0]
if cyclingmarketgroupid:
if not cyclingmarketgroupid in globalmap_marketgroupid_typeid:
globalmap_marketgroupid_typeid[cyclingmarketgroupid] = set()
globalmap_marketgroupid_typeid[cyclingmarketgroupid].update\
(globalmap_marketgroupid_typeid[marketgroupid])
else: break
# Now, make a reverse map
for marketgroupid, typeidset in globalmap_marketgroupid_typeid.items():
for typeid in typeidset:
if not typeid in globalmap_typeid_marketgroupid:
globalmap_typeid_marketgroupid[typeid] = set()
globalmap_typeid_marketgroupid[typeid].add(marketgroupid)
# Combine market groups and variations
# { marketgroupid : set(typeidwithvariations) }
globalmap_marketgroupid_typeidwithvariations = \
copy.deepcopy(globalmap_marketgroupid_typeid)
# { typeidwithvariations : set(marketgroupid) }
globalmap_typeidwithvariations_marketgroupid = {}
for marketgroupid in globalmap_marketgroupid_typeidwithvariations:
typestoadd = set()
for typeid in globalmap_marketgroupid_typeidwithvariations[marketgroupid]:
if typeid in globalmap_basetypeid_typeid:
for variationid in globalmap_basetypeid_typeid[typeid]:
# Do not include items which have a market group of their own, even
# if they're variations
if not variationid in globalmap_typeid_marketgroupid:
typestoadd.add(variationid)
globalmap_marketgroupid_typeidwithvariations[marketgroupid].update\
(typestoadd)
# Make the reverse map the same simple way
for marketgroupid, typeidwithvariationsset in \
globalmap_marketgroupid_typeidwithvariations.items():
for typeid in typeidwithvariationsset:
if not typeid in globalmap_typeidwithvariations_marketgroupid:
globalmap_typeidwithvariations_marketgroupid[typeid] = set()
globalmap_typeidwithvariations_marketgroupid[typeid].add(marketgroupid)
nonmarket = set()
for typeid in publishedtypes:
if not typeid in globalmap_typeidwithvariations_marketgroupid:
nonmarket.add(typeid)
def getItemAttrs(typeid):
attrs = {}
cursor.execute(QUERY_TYPEID_ATTRIBS, (typeid,))
for row in cursor:
attrs[row[0]] = row[1]
cursor.execute(QUERY_TYPEID_BASEATTRIBS, (typeid,))
for row in cursor:
if row[0] is not None:
attrs["volume"] = row[0]
if row[1] is not None:
attrs["mass"] = row[1]
if row[2] is not None:
attrs["capacity"] = row[2]
return attrs
def suggestMktGrp(typeid, mode="grp"):
typecat = globalmap_typeid_categoryid[typeid]
catname = ""
cursor.execute(QUERY_CATEGORYID_CATEGORYNAME, (typecat,))
for row in cursor:
catname = row[0]
typename = ""
cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
for row in cursor:
typename = row[0]
if catname.lower() == "module" and "civilian" in typename.lower():
return 760
attrs = getItemAttrs(typeid)
implantness = None
boosterness = None
cpu = None
power = None
droneBandwidthUsed = None
volume = None
if "implantness" in attrs:
implantness = attrs["implantness"]
if "boosterness" in attrs:
boosterness = attrs["boosterness"]
if "cpu" in attrs:
cpu = attrs["cpu"]
if "power" in attrs:
power = attrs["power"]
if "droneBandwidthUsed" in attrs:
droneBandwidthUsed = attrs["droneBandwidthUsed"]
if "volume" in attrs:
volume = attrs["volume"]
if mode == "grp":
grp = globalmap_typeid_groupid[typeid]
comrades = globalmap_groupid_typeid[grp]
elif mode == "cat":
cat = globalmap_typeid_categoryid[typeid]
comrades = globalmap_categoryid_typeid[cat]
mktgrps_w_cos = {}
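# Illustrative walk-through of the scoring below (made-up numbers): for a
# candidate with cpu=30, a comrade in market group X with cpu=20 and a common
# meta group adds 20/30 ~= 0.67 to that group's score, while an officer-meta
# comrade with identical attributes adds only 1.0 * 0.01. The market group
# accumulating the highest total wins.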
for co in comrades:
marketgroupid = 0
cursor.execute(QUERY_TYPEID_MARKETGROUPID, (co,))
for row in cursor:
marketgroupid = row[0]
if not marketgroupid:
continue
if not marketgroupid in mktgrps_w_cos:
mktgrps_w_cos[marketgroupid] = 0.0
similarity_factor = 1.0
metagrp = 0
cursor.execute(QUERY_TYPEID_METAGROUPID, (co,))
for row in cursor:
metagrp = row[0]
if not metagrp in (0,1,2,14):
similarity_factor *= 0.01
if implantness or boosterness or cpu or power or droneBandwidthUsed or volume:
cgrpattrs = getItemAttrs(co)
if implantness:
if "implantness" in cgrpattrs:
if cgrpattrs["implantness"] != implantness:
similarity_factor *= 0.1
else:
similarity_factor *= 0.01
if boosterness:
if "boosterness" in cgrpattrs:
if cgrpattrs["boosterness"] != boosterness:
similarity_factor *= 0.1
else:
similarity_factor *= 0.01
if cpu:
if "cpu" in cgrpattrs and cgrpattrs["cpu"]:
fct = cpu / cgrpattrs["cpu"]
if fct > 1:
fct = 1 / fct
similarity_factor *= fct
else:
similarity_factor *= 0.01
if power:
if "power" in cgrpattrs and cgrpattrs["power"]:
fct = power / cgrpattrs["power"]
if fct > 1:
fct = 1 / fct
similarity_factor *= fct
else:
similarity_factor *= 0.01
if droneBandwidthUsed:
if "droneBandwidthUsed" in cgrpattrs:
fct = droneBandwidthUsed / cgrpattrs["droneBandwidthUsed"]
if fct > 1:
fct = 1 / fct
similarity_factor *= fct
else:
similarity_factor *= 0.01
if volume:
if "volume" in cgrpattrs:
fct = volume / cgrpattrs["volume"]
if fct > 1:
fct = 1 / fct
similarity_factor *= fct
else:
similarity_factor *= 0.01
mktgrps_w_cos[marketgroupid] += similarity_factor
if mktgrps_w_cos:
winner = max(mktgrps_w_cos.keys(), key=lambda k: mktgrps_w_cos[k])
else:
winner = None
return winner
def suggestMetaGrp(typeid):
typename = ""
cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
for row in cursor:
typename = row[0]
faction_affixes = ("Arch Angel", "Domination", "Blood", "Guristas", "Sansha", "Sanshas", "Shadow", "Guardian", "Serpentis",
"Caldari", "Imperial", "Gallente", "Federation", "Republic",
"Ammatar", "Khanid", "Thukker", "Syndicate", "Sisters", "Legion", "ORE",
"Nugoehuvi")
deadspace_affixes = ("Gistii", "Gistum", "Gist",
"Corpii", "Corpum", "Corpus",
"Pithi", "Pithum", "Pith",
"Centii", "Centum", "Centus",
"Coreli", "Corelum", "Core")
storyline_names = {"Akemon", "Michi", "Ogdin", "Pashan", "Shaqil", "Whelan Machorin", "Numon"}
officer_names = ("Ahremen", "Brokara", "Brynn", "Chelm", "Cormack", "Draclira", "Estamel", "Gotan", "Hakim",
"Kaikka", "Mizuro", "Raysere", "Selynne", "Setele", "Tairei", "Thon", "Tuvan", "Vizan")
storyline_pattern_general = "'[A-Za-z ]+'"
storyline_pattern_names = "|".join("{0}".format(name) for name in storyline_names)
faction_pattern = "({0}) ".format("|".join(faction_affixes))
deadspace_pattern = "({0}) ".format("|".join(deadspace_affixes))
officer_pattern = "({0}) ".format("|".join("{0}'s".format(name) for name in officer_names))
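# Rough mapping implemented below (assuming standard EVE meta group IDs:
# 1 Tech I, 2 Tech II, 3 Storyline, 4 Faction, 5 Officer, 6 Deadspace):
# metaLevel 0-4 -> 1, 5 -> 2, 6-7 -> 3, 8-9 -> 4, 11-14 -> 6 for deadspace
# name prefixes else 5; the name patterns are the fallback when metaLevel
# is absent.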
attrs = getItemAttrs(typeid)
if attrs.get("metaLevel") is not None:
mlvl = attrs["metaLevel"]
if mlvl in (0, 1, 2, 3, 4):
meta = 1
elif mlvl == 5:
meta = 2
elif mlvl in (6, 7):
meta = 3
elif mlvl in (8, 9):
meta = 4
elif mlvl in (11, 12, 13, 14):
if re.search(deadspace_pattern, typename):
meta = 6
else:
meta = 5
else:
meta = 1
elif re.search(officer_pattern, typename):
meta = 5
elif re.search(deadspace_pattern, typename):
meta = 6
elif re.search(faction_pattern, typename):
meta = 4
elif re.search(storyline_pattern_names, typename):
meta = 3
elif re.search(storyline_pattern_general, typename) and not "Hardwiring" in typename:
meta = 3
else:
meta = 1
return meta
map_typeid_stuff = {}
map_typeid_stuff2 = {}
for typeid in nonmarket:
typename = ""
cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
for row in cursor:
typename = row[0]
grpname = ""
cursor.execute(QUERY_GROUPID_GROUPNAME, (globalmap_typeid_groupid[typeid],))
for row in cursor:
grpname = row[0]
mkt = suggestMktGrp(typeid)
if mkt is None:
mkt = suggestMktGrp(typeid, mode="cat")
meta = suggestMetaGrp(typeid)
attrs = getItemAttrs(typeid)
if mkt:
map_typeid_stuff[typeid] = (mkt, meta)
marketgroupname = ""
cursor.execute(QUERY_MARKETGROUPID_MARKETGROUPNAME,
(mkt,))
for row in cursor:
marketgroupname = row[0]
# Prepend market group name with its parents' names
prependparentid = mkt
# Limit depth to avoid looping, as usual
for depth in range(20):
cursor_parentmarket = db.cursor()
cursor_parentmarket.execute(QUERY_MARKETGROUPID_PARENTGROUPID,
(prependparentid,))
for row in cursor_parentmarket:
prependparentid = row[0]
if prependparentid:
cursor_parentmarket2 = db.cursor()
cursor_parentmarket2.execute(QUERY_MARKETGROUPID_MARKETGROUPNAME,
(prependparentid,))
for row in cursor_parentmarket2:
marketgroupname = "{0} > {1}".format(row[0],
marketgroupname)
else:
break
else:
marketgroupname = "None"
map_typeid_stuff2[typename] = (mkt, marketgroupname)
metagroupname = ""
cursor.execute(QUERY_METAGROUPID_METAGROUPNAME,
(meta,))
for row in cursor:
metagroupname = row[0]
#print("---\nItem: {0}\nGroup: {1}\nSuggested market group: {2} ({3})\nMeta group: {4}".format(typename, grpname, marketgroupname, mkt, metagroupname))
#print("\n\nmap = {{ {0} }}".format(", ".join("{0}: ({1}, {2})".format(key, map_typeid_stuff[key][0], map_typeid_stuff[key][1]) for key in sorted(map_typeid_stuff))))
print("---\n{0}".format("\n".join("\"{0}\": {1}, # {2}".format(key, map_typeid_stuff2[key][0], map_typeid_stuff2[key][1]) for key in sorted(map_typeid_stuff2))))

497
eos/utils/scripts/itemDiff.py Executable file

@@ -0,0 +1,497 @@
#!/usr/bin/env python3
#===============================================================================
# Copyright (C) 2010-2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
'''
This script compares two different database versions.
It shows removed/changed/new items with lists of changed effects and
changed attributes, plus effects, attributes, groups and items which were renamed
'''
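# Example invocation (paths are illustrative):
#   python3 itemDiff.py --old=~/eve_old.db --new=~/eve_new.db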
import argparse
import os.path
import re
import sqlite3
parser = argparse.ArgumentParser(description="Compare two databases generated from eve dump to find eos-related differences")
parser.add_argument("-o", "--old", type=str, required=True, help="path to old cache data dump")
parser.add_argument("-n", "--new", type=str, required=True, help="path to new cache data dump")
parser.add_argument("-g", "--nogroups", action="store_false", default=True, dest="groups", help="don't show changed groups")
parser.add_argument("-e", "--noeffects", action="store_false", default=True, dest="effects", help="don't show list of changed effects")
parser.add_argument("-a", "--noattributes", action="store_false", default=True, dest="attributes", help="don't show list of changed attributes")
parser.add_argument("-r", "--norenames", action="store_false", default=True, dest="renames", help="don't show list of renamed data")
args = parser.parse_args()
# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(args.old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(args.new))
new_cursor = new_db.cursor()
# Force some of the items to be published
FORCEPUB_TYPES = ("Ibis", "Impairor", "Velator", "Reaper")
OVERRIDES_TYPEPUB = 'UPDATE invtypes SET published = 1 WHERE typeName = ?'
for typename in FORCEPUB_TYPES:
old_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
new_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
# Initialize a few things used by both the changed and renamed effects lists
effectspath = os.path.join("..", "..", "effects")
implemented = set()
for filename in os.listdir(effectspath):
basename, extension = filename.rsplit('.', 1)
# Ignore non-py files and exclude the package __init__ file
if extension == "py" and basename not in ("__init__",):
implemented.add(basename)
# Effects' names are used w/o any special symbols by eos
stripspec = "[^A-Za-z0-9]"
# Helper to check whether an effect is implemented in eos
def geteffst(effectname):
eosname = re.sub(stripspec, "", effectname).lower()
return eosname in implemented
def findrenames(ren_dict, query, strip=False):
old_namedata = {}
new_namedata = {}
for cursor, dictionary in ((old_cursor, old_namedata), (new_cursor, new_namedata)):
cursor.execute(query)
for row in cursor:
id = row[0]
name = row[1]
if strip is True:
name = re.sub(stripspec, "", name)
dictionary[id] = name
for id in set(old_namedata.keys()).intersection(new_namedata.keys()):
oldname = old_namedata[id]
newname = new_namedata[id]
if oldname != newname:
ren_dict[id] = (oldname, newname)
return
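# e.g. (illustrative) if effect 999 is named "shieldBoost" in the old db and
# "shieldBoosting" in the new one, findrenames records
# ren_dict[999] == ("shieldBoost", "shieldBoosting")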
def printrenames(ren_dict, title, implementedtag=False):
if len(ren_dict) > 0:
print('\nRenamed ' + title + ':')
for id in sorted(ren_dict):
couple = ren_dict[id]
if implementedtag:
print("\n[{0}] \"{1}\"\n[{2}] \"{3}\"".format(geteffst(couple[0]), couple[0], geteffst(couple[1]), couple[1]))
else:
print("\n\"{0}\"\n\"{1}\"".format(couple[0], couple[1]))
groupcats = {}
def getgroupcat(grp):
"""Get group category from the new db"""
if grp in groupcats:
cat = groupcats[grp]
else:
query = 'SELECT categoryID FROM invgroups WHERE groupID = ?'
new_cursor.execute(query, (grp,))
cat = 0
for row in new_cursor:
cat = row[0]
groupcats[grp] = cat
return cat
itemnames = {}
def getitemname(item):
"""Get item name from the new db"""
if item in itemnames:
name = itemnames[item]
else:
query = 'SELECT typeName FROM invtypes WHERE typeID = ?'
new_cursor.execute(query, (item,))
name = ""
for row in new_cursor:
name = row[0]
if not name:
old_cursor.execute(query, (item,))
for row in old_cursor:
name = row[0]
itemnames[item] = name
return name
groupnames = {}
def getgroupname(grp):
"""Get group name from the new db"""
if grp in groupnames:
name = groupnames[grp]
else:
query = 'SELECT groupName FROM invgroups WHERE groupID = ?'
new_cursor.execute(query, (grp,))
name = ""
for row in new_cursor:
name = row[0]
if not name:
old_cursor.execute(query, (grp,))
for row in old_cursor:
name = row[0]
groupnames[grp] = name
return name
effectnames = {}
def geteffectname(effect):
"""Get effect name from the new db"""
if effect in effectnames:
name = effectnames[effect]
else:
query = 'SELECT effectName FROM dgmeffects WHERE effectID = ?'
new_cursor.execute(query, (effect,))
name = ""
for row in new_cursor:
name = row[0]
if not name:
old_cursor.execute(query, (effect,))
for row in old_cursor:
name = row[0]
effectnames[effect] = name
return name
attrnames = {}
def getattrname(attr):
"""Get attribute name from the new db"""
if attr in attrnames:
name = attrnames[attr]
else:
query = 'SELECT attributeName FROM dgmattribs WHERE attributeID = ?'
new_cursor.execute(query, (attr,))
name = ""
for row in new_cursor:
name = row[0]
if not name:
old_cursor.execute(query, (attr,))
for row in old_cursor:
name = row[0]
attrnames[attr] = name
return name
# State table
S = {"unchanged": 0,
"removed": 1,
"changed": 2,
"added": 3 }
if args.effects or args.attributes or args.groups:
# Format:
# Key: item id
# Value: [groupID, set(effects), {attribute id : value}]
old_itmdata = {}
new_itmdata = {}
for cursor, dictionary in ((old_cursor, old_itmdata), (new_cursor, new_itmdata)):
# Compose list of items we're interested in, filtered by category
query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryName IN ("Ship", "Module", "Charge", "Skill", "Drone", "Implant", "Subsystem")'
cursor.execute(query)
for row in cursor:
itemid = row[0]
groupID = row[1]
# Initialize each item's data container: groupID plus empty effect and attribute stores
dictionary[itemid] = [groupID, set(), {}]
# Add items filtered by group
query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon")'
cursor.execute(query)
for row in cursor:
itemid = row[0]
groupID = row[1]
dictionary[itemid] = [groupID, set(), {}]
if args.effects:
# Pull all effects
query = 'SELECT it.typeID, de.effectID FROM invtypes AS it INNER JOIN dgmtypeeffects AS dte ON dte.typeID = it.typeID INNER JOIN dgmeffects AS de ON de.effectID = dte.effectID WHERE it.published = 1'
cursor.execute(query)
for row in cursor:
itemid = row[0]
effectID = row[1]
# Process only items we need
if itemid in dictionary:
# Add effect to the set
effectSet = dictionary[itemid][1]
effectSet.add(effectID)
if args.attributes:
# Add base attributes to our data
query = 'SELECT it.typeID, it.mass, it.capacity, it.volume FROM invtypes AS it'
cursor.execute(query)
for row in cursor:
itemid = row[0]
if itemid in dictionary:
attrdict = dictionary[itemid][2]
# Add base attributes: mass (4), capacity (38) and volume (161)
attrdict[4] = row[1]
attrdict[38] = row[2]
attrdict[161] = row[3]
# Add attribute data for other attributes
query = 'SELECT dta.typeID, dta.attributeID, dta.value FROM dgmtypeattribs AS dta'
cursor.execute(query)
for row in cursor:
itemid = row[0]
if itemid in dictionary:
attrid = row[1]
attrval = row[2]
attrdict = dictionary[itemid][2]
if attrid in attrdict:
print("Warning: base attribute is described in non-base attribute table")
else:
attrdict[attrid] = attrval
# Get set of IDs from both dictionaries
items_old = set(old_itmdata.keys())
items_new = set(new_itmdata.keys())
# Format:
# Key: item state
# Value: {item id: ((group state, old group, new group), {effect state: set(effects)}, {attribute state: {attributeID: (old value, new value)}})}
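# e.g. (illustrative) an item whose attribute 38 changed from 450.0 to 500.0:
# global_itmdata[S["changed"]][12345] ==
#     ((S["unchanged"], 831, 831), {S["unchanged"]: set(), ...}, {S["changed"]: {38: (450.0, 500.0)}})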
global_itmdata = {}
# Initialize it
for state in S:
global_itmdata[S[state]] = {}
# Fill all the data for removed items
for item in items_old.difference(items_new):
# Set item state to removed
state = S["removed"]
# Set only old group for item
oldgroup = old_itmdata[item][0]
groupdata = (S["unchanged"], oldgroup, None)
# Set old set of effects and mark all as unchanged
effectsdata = {}
effectsdata[S["unchanged"]] = set()
if args.effects:
oldeffects = old_itmdata[item][1]
effectsdata[S["unchanged"]].update(oldeffects)
# Set old set of attributes and mark all as unchanged
attrdata = {}
attrdata[S["unchanged"]] = {}
if args.attributes:
oldattrs = old_itmdata[item][2]
for attr in oldattrs:
# NULL will mean there's no such attribute in db
attrdata[S["unchanged"]][attr] = (oldattrs[attr], "NULL")
# Fill global dictionary with data we've got
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
# Now, for added items
for item in items_new.difference(items_old):
# Set item state to added
state = S["added"]
# Set only new group for item
newgroup = new_itmdata[item][0]
groupdata = (S["unchanged"], None, newgroup)
# Set new set of effects and mark all as unchanged
effectsdata = {}
effectsdata[S["unchanged"]] = set()
if args.effects:
neweffects = new_itmdata[item][1]
effectsdata[S["unchanged"]].update(neweffects)
# Set new set of attributes and mark all as unchanged
attrdata = {}
attrdata[S["unchanged"]] = {}
if args.attributes:
newattrs = new_itmdata[item][2]
for attr in newattrs:
# NULL will mean there's no such attribute in db
attrdata[S["unchanged"]][attr] = ("NULL", newattrs[attr])
# Fill global dictionary with data we've got
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
# Now, check all the items which exist in both databases
for item in items_old.intersection(items_new):
# Set group data for an item
oldgroup = old_itmdata[item][0]
newgroup = new_itmdata[item][0]
# If we're not asked to compare groups, mark them as unchanged anyway
groupdata = (S["changed"] if oldgroup != newgroup and args.groups else S["unchanged"], oldgroup, newgroup)
# Fill effects data into appropriate groups
effectsdata = {}
for state in S:
# We do not have changed effects whatsoever
if state != "changed":
effectsdata[S[state]] = set()
if args.effects:
oldeffects = old_itmdata[item][1]
neweffects = new_itmdata[item][1]
effectsdata[S["unchanged"]].update(oldeffects.intersection(neweffects))
effectsdata[S["removed"]].update(oldeffects.difference(neweffects))
effectsdata[S["added"]].update(neweffects.difference(oldeffects))
# Go through all attributes, filling global data dictionary
attrdata = {}
for state in S:
attrdata[S[state]] = {}
if args.attributes:
oldattrs = old_itmdata[item][2]
newattrs = new_itmdata[item][2]
for attr in set(oldattrs.keys()).union(newattrs.keys()):
# NULL will mean there's no such attribute in db
oldattr = oldattrs.get(attr, "NULL")
newattr = newattrs.get(attr, "NULL")
attrstate = S["unchanged"]
if oldattr == "NULL" and newattr != "NULL":
attrstate = S["added"]
elif oldattr != "NULL" and newattr == "NULL":
attrstate = S["removed"]
elif oldattr != newattr:
attrstate = S["changed"]
attrdata[attrstate][attr] = (oldattr, newattr)
# Consider the item unchanged by default and mark it changed when we see any changes in its data
state = S["unchanged"]
if state == S["unchanged"] and groupdata[0] != S["unchanged"]:
state = S["changed"]
if state == S["unchanged"] and (len(effectsdata[S["removed"]]) > 0 or len(effectsdata[S["added"]]) > 0):
state = S["changed"]
if state == S["unchanged"] and (len(attrdata[S["removed"]]) > 0 or len(attrdata[S["changed"]]) > 0 or len(attrdata[S["added"]]) > 0):
state = S["changed"]
# Fill global dictionary with data we've got
global_itmdata[state][item] = (groupdata, effectsdata, attrdata)
# As eos uses names as unique IDs in a lot of places, we have to keep track of name changes
if args.renames:
ren_effects = {}
query = 'SELECT effectID, effectName FROM dgmeffects'
findrenames(ren_effects, query, strip = True)
ren_attributes = {}
query = 'SELECT attributeID, attributeName FROM dgmattribs'
findrenames(ren_attributes, query)
ren_categories = {}
query = 'SELECT categoryID, categoryName FROM invcategories'
findrenames(ren_categories, query)
ren_groups = {}
query = 'SELECT groupID, groupName FROM invgroups'
findrenames(ren_groups, query)
ren_marketgroups = {}
query = 'SELECT marketGroupID, marketGroupName FROM invmarketgroups'
findrenames(ren_marketgroups, query)
ren_items = {}
query = 'SELECT typeID, typeName FROM invtypes'
findrenames(ren_items, query)
# Get db metadata
old_meta = {}
new_meta = {}
query = 'SELECT fieldName, fieldValue FROM metadata'
old_cursor.execute(query)
for row in old_cursor:
old_meta[row[0]] = row[1]
new_cursor.execute(query)
for row in new_cursor:
new_meta[row[0]] = row[1]
# Print jobs
print("Comparing databases:\n{0}-{1}\n{2}-{3}\n".format(old_meta.get("version"), old_meta.get("release"),
new_meta.get("version"), new_meta.get("release")))
if args.effects or args.attributes or args.groups:
# Print the legend only when there are any interesting changes
if len(global_itmdata[S["removed"]]) > 0 or len(global_itmdata[S["changed"]]) > 0 or len(global_itmdata[S["added"]]) > 0:
genleg = "[+] - new item\n[-] - removed item\n[*] - changed item\n"
grpleg = "(x => y) - group changes\n" if args.groups else ""
attreffleg = " [+] - effect or attribute has been added to item\n [-] - effect or attribute has been removed from item\n" if args.attributes or args.effects else ""
effleg = " [y] - effect is implemented\n [n] - effect is not implemented\n" if args.effects else ""
print("{0}{1}{2}{3}\nItems:".format(genleg, grpleg, attreffleg, effleg))
# Make sure our states are sorted
stateorder = sorted(global_itmdata)
TG = {S["unchanged"]: "+", S["changed"]: "*",
S["removed"]: "-",
S["added"]: "+"}
# Cycle through states
for itmstate in stateorder:
# Skip unchanged items
if itmstate == S["unchanged"]:
continue
items = global_itmdata[itmstate]
# Sort by name first
itemorder = sorted(items, key=lambda item: getitemname(item))
# Then by group id
itemorder = sorted(itemorder, key=lambda item: items[item][0][2] or items[item][0][1])
# Then by category id
itemorder = sorted(itemorder, key=lambda item: getgroupcat(items[item][0][2] or items[item][0][1]))
for item in itemorder:
groupdata = items[item][0]
groupstr = " ({0} => {1})".format(getgroupname(groupdata[1]), getgroupname(groupdata[2])) if groupdata[0] == S["changed"] else ""
print("\n[{0}] {1}{2}".format(TG[itmstate], getitemname(item), groupstr))
effdata = items[item][1]
for effstate in stateorder:
# Skip unchanged effect sets, but always include them for added or removed items
# Also, always skip empty data
if (effstate == S["unchanged"] and itmstate not in (S["removed"], S["added"])) or effstate not in effdata:
continue
effects = effdata[effstate]
efforder = sorted(effects, key=lambda eff: geteffectname(eff))
for eff in efforder:
# Take tag from item if item was added or removed
tag = TG[effstate] if itmstate not in (S["removed"], S["added"]) else TG[itmstate]
print(" [{0}|{1}] {2}".format(tag, "y" if geteffst(geteffectname(eff)) else "n", geteffectname(eff)))
attrdata = items[item][2]
for attrstate in stateorder:
# Skip unchanged and empty attribute sets, also skip attributes display for added and removed items
if (attrstate == S["unchanged"] and itmstate != S["added"]) or itmstate in (S["removed"], ) or attrstate not in attrdata:
continue
attrs = attrdata[attrstate]
attrorder = sorted(attrs, key=lambda attr: getattrname(attr))
for attr in attrorder:
valline = ""
if attrs[attr][0] == "NULL" or itmstate == S["added"]:
valline = "{0}".format(attrs[attr][1] or 0)
elif attrs[attr][1] == "NULL":
valline = "{0}".format(attrs[attr][0] or 0)
else:
valline = "{0} => {1}".format(attrs[attr][0] or 0, attrs[attr][1] or 0)
print(" [{0}] {1}: {2}".format(TG[attrstate], getattrname(attr), valline))
if args.renames:
title = 'effects'
printrenames(ren_effects, title, implementedtag=True)
title = 'attributes'
printrenames(ren_attributes, title)
title = 'categories'
printrenames(ren_categories, title)
title = 'groups'
printrenames(ren_groups, title)
title = 'market groups'
printrenames(ren_marketgroups, title)
title = 'items'
printrenames(ren_items, title)

106
eos/utils/scripts/jsonToSql.py Executable file

@@ -0,0 +1,106 @@
#!/usr/bin/env python
#======================================================================
# Copyright (C) 2012 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with eos. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
import os
import sys
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
sys.path.append(os.path.realpath(os.path.join(path, "..", "..", "..")))
import sqlite3
import json
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="This script imports data from a json cache dump into a database via sqlalchemy")
parser.add_argument("-d", "--db", required=True, type=str, help="The sqlalchemy connectionstring, example: sqlite:///c:/tq.db")
parser.add_argument("-j", "--json", required=True, type=str, help="The path to the json dum")
args = parser.parse_args()
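# Example invocation (paths are illustrative):
#   python jsonToSql.py --db=sqlite:///eve.db --json=./cache_dump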
# Import eos.config first and change it
import eos.config
eos.config.gamedata_connectionstring = args.db
eos.config.debug = False
# Now that's done, we can import the eos modules using the config
import eos.db
import eos.gamedata
# Create the database tables
eos.db.gamedata_meta.create_all()
# Config dict
tables = {"dgmattribs": eos.gamedata.AttributeInfo,
"dgmeffects": eos.gamedata.EffectInfo,
"dgmtypeattribs": eos.gamedata.Attribute,
"dgmtypeeffects": eos.gamedata.Effect,
"dgmunits": eos.gamedata.Unit,
"icons": eos.gamedata.Icon,
"invcategories": eos.gamedata.Category,
"invgroups": eos.gamedata.Group,
"invmetagroups": eos.gamedata.MetaGroup,
"invmetatypes": eos.gamedata.MetaType,
"invtypes": eos.gamedata.Item,
"marketProxy_GetMarketGroups": eos.gamedata.MarketGroup}
fieldMapping = {"icons": {"id": "iconID"}}
data = {}
# Dump all data to memory so we can easily cross-check ignored rows
for jsonName, cls in tables.iteritems():
f = open(os.path.join(args.json, "{}.json".format(jsonName)))
data[jsonName] = json.load(f, encoding='cp1252')
# Do some preprocessing to make our job easier
invTypes = set()
for row in data["invtypes"]:
if row["published"]:
invTypes.add(row["typeID"])
# ignore checker
def isIgnored(file, row):
if file == "invtypes" and not row["published"]:
return True
elif file == "dgmtypeeffects" and not row["typeID"] in invTypes:
return True
elif file == "dgmtypeattribs" and not row["typeID"] in invTypes:
return True
elif file == "invmetatypes" and not row["typeID"] in invTypes:
return True
return False
# Loop through each json file and write it away, checking ignored rows
for jsonName, table in data.iteritems():
fieldMap = fieldMapping.get(jsonName, {})
print "processing {}".format(jsonName)
for row in table:
# We don't care about some kinds of rows; filter them out
if not isIgnored(jsonName, row):
instance = tables[jsonName]()
for k, v in row.iteritems():
setattr(instance, fieldMap.get(k, k), v)
eos.db.gamedata_session.add(instance)
eos.db.gamedata_session.commit()
print("done")

127
eos/utils/scripts/nighty.py Executable file

@@ -0,0 +1,127 @@
#!/usr/bin/env python
from optparse import OptionParser
import os.path
import shutil
import tempfile
import sys
import tarfile
import datetime
import random
import string
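# FileStub is a null writer: it swallows stdout writes so that --quiet can
# silence the progress output below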
class FileStub():
def write(self, *args):
pass
def flush(self, *args):
pass
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
if __name__ == "__main__":
oldstd = sys.stdout
parser = OptionParser()
parser.add_option("-s", "--skeleton", dest="skeleton", help="Location of skeleton directory")
parser.add_option("-b", "--base", dest="base", help="location of the base directory")
parser.add_option("-d", "--destination", dest="destination", help="where to copy our archive")
parser.add_option("-t", "--static", dest="static", help="directory containing static files")
parser.add_option("-q", "--quiet", dest="silent", action="store_true")
options, args = parser.parse_args()
if options.skeleton is None or options.base is None or options.destination is None:
print "Need --skeleton argument as well as --base and --destination argument"
parser.print_help()
sys.exit()
if options.silent:
sys.stdout = FileStub()
randomId = id_generator()
infoDict = {}
skeleton = os.path.expanduser(options.skeleton)
info = execfile(os.path.join(skeleton, "info.py"), infoDict)
now = datetime.datetime.now()
now = "%04d%02d%02d" % (now.year, now.month, now.day)
dirName = "nighty-build-%s-%s" % (now, randomId)
dst = os.path.join(os.getcwd(), dirName)
tmpFile = os.path.join(os.getcwd(), "nighty-build-%s-%s-%s.tar.bz2" % (now, infoDict["os"], randomId))
config = os.path.join(skeleton, "config.py")
destination = os.path.expanduser(options.destination)
i = 0
gitData = (".git", ".gitignore", ".gitmodules")
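# copytree() ignore callback: prints a progress dot every 10 calls and
# returns the git metadata names so they get skipped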
def loginfo(path, names):
global i
i += 1
if i % 10 == 0:
sys.stdout.write(".")
sys.stdout.flush()
return gitData
try:
print "copying skeleton to ", dst
i = 0
shutil.copytree(skeleton, dst, ignore=loginfo)
print ""
base = os.path.join(dst, infoDict["base"])
print "copying base to ", base
i = 0
for stuff in os.listdir(os.path.expanduser(options.base)):
currSource = os.path.join(os.path.expanduser(options.base), stuff)
currDest = os.path.join(base, stuff)
if stuff in gitData:
continue
elif os.path.isdir(currSource):
shutil.copytree(currSource, currDest, ignore=loginfo)
else:
shutil.copy2(currSource, currDest)
print ""
if os.path.exists(config):
print "adding skeleton config file"
shutil.copy2(config, base)
if options.static is not None and os.path.exists(os.path.expanduser(options.static)):
print "copying static data to ", os.path.join(base, "staticdata")
static = os.path.expanduser(options.static)
shutil.copytree(static, os.path.join(base, "staticdata"), ignore=loginfo)
print "removing development data"
paths = []
paths.append(os.path.join(base, "eos", "tests"))
paths.append(os.path.join(base, "eos", "utils", "scripts"))
for path in paths:
if os.path.exists(path):
print path
shutil.rmtree(path)
print "copying done, making archive: ", tmpFile
archive = tarfile.open(tmpFile, "w:bz2")
print "making archive"
archive.add(dst, arcname=infoDict["arcname"])
print "closing"
archive.close()
print "copying archive to ", destination
shutil.move(tmpFile, destination)
except:
print "encountered an error"
raise
finally:
print "deleting tmp files"
try:
shutil.rmtree(dst)
os.unlink(tmpFile)
except:
pass
sys.stdout = oldstd
if os.path.isdir(destination):
print os.path.join(destination, os.path.split(tmpFile)[1])
else:
print destination

565
eos/utils/scripts/riskItems.py Executable file

@@ -0,0 +1,565 @@
"""
This is ugly, tricky and unreadable script which helps to detect which items should be tested,
based on how its current effects work.
"""
import sqlite3
import os.path
import copy
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--database", help="path to eve cache data dump in \
sqlite format, default eos database path is used if none specified",
type="string", default=os.path.join("~", ".pyfa","eve.db"))
parser.add_option("-a", "--attr", help="find items with all of these attributes",
type="string", default="")
parser.add_option("-s", "--srq", help="find items with any of these skill requirements",
type="string", default="")
parser.add_option("-g", "--grp", help="find items from any of these groups",
type="string", default="")
parser.add_option("-z", "--nozero", action="store_true", help="ignore attributes with zero values",
default=False)
parser.add_option("-o", "--noone", action="store_true", help="ignore attributes with value equal to 1",
default=False)
parser.add_option("-t", "--tech12", action="store_true", help="show only t12 items (with exception for items with no t1 variations)",
default=False)
(options, args) = parser.parse_args()
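# Example invocation (attribute names are illustrative):
#   python riskItems.py --attr=armorHP,shieldCapacity --tech12 --nozero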
if not options.attr:
import sys
sys.stderr.write("You need to specify an attribute name.\n")
sys.exit()
# Connect to database and set up cursor
db = sqlite3.connect(os.path.expanduser(options.database))
cursor = db.cursor()
# As we don't rely on eos's overrides, we need to set them manually
OVERRIDES = '''
UPDATE invtypes SET published = '1' WHERE typeName = 'Freki';
UPDATE invtypes SET published = '1' WHERE typeName = 'Mimir';
UPDATE invtypes SET published = '1' WHERE typeName = 'Utu';
UPDATE invtypes SET published = '1' WHERE typeName = 'Adrestia';
'''
for statement in OVERRIDES.split(";\n"):
cursor.execute(statement)
# Queries to get raw data
# Limit categories to Celestials (2, only for wormhole effects),
# Ships (6), Modules (7), Charges (8), Skills (16), Drones (18),
# Implants (20), Subsystems (32)
QUERY_PUBLISHEDTYPEIDS = 'SELECT it.typeID FROM invtypes AS it INNER JOIN \
invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON \
ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryID IN \
(2, 6, 7, 8, 16, 18, 20, 32)'
QUERY_ATTRIBUTEID_TYPEID = "SELECT it.typeID, dta.value FROM invtypes AS it INNER JOIN \
dgmtypeattribs AS dta ON it.typeID = dta.typeID INNER JOIN dgmattribs AS da \
ON dta.attributeID = da.attributeID WHERE da.attributeID = ?"
QUERY_TYPEID_GROUPID = 'SELECT groupID FROM invtypes WHERE typeID = ? LIMIT 1'
QUERY_GROUPID_CATEGORYID = 'SELECT categoryID FROM invgroups WHERE \
groupID = ? LIMIT 1'
QUERY_TYPEID_PARENTTYPEID = 'SELECT parentTypeID FROM invmetatypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_METAGROUPID = 'SELECT metaGroupID FROM invmetatypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_SKILLRQ = 'SELECT dta.value FROM dgmtypeattribs AS dta INNER JOIN \
dgmattribs AS da ON da.attributeID = dta.attributeID WHERE (da.attributeName = \
"requiredSkill1" OR da.attributeName = "requiredSkill2" OR da.attributeName = \
"requiredSkill3") AND dta.typeID = ?'
QUERY_TYPEID_MARKETGROUPID = 'SELECT marketGroupID FROM invtypes WHERE \
typeID = ? LIMIT 1'
QUERY_TYPEID_TYPENAME = 'SELECT typeName FROM invtypes WHERE typeID = ? \
LIMIT 1'
QUERY_MARKETGROUPID_PARENTGROUPID = 'SELECT parentGroupID FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_EFFECTID_TYPEID = 'SELECT typeID FROM dgmtypeeffects WHERE effectID = ?'
# Queries for printing
QUERY_GROUPID_GROUPNAME = 'SELECT groupName FROM invgroups WHERE groupID = ? \
LIMIT 1'
QUERY_CATEGORYID_CATEGORYNAME = 'SELECT categoryName FROM invcategories \
WHERE categoryID = ? LIMIT 1'
QUERY_MARKETGROUPID_MARKETGROUPNAME = 'SELECT marketGroupName FROM \
invmarketgroups WHERE marketGroupID = ? LIMIT 1'
QUERY_ATTRIBUTENAME_ATTRIBUTEID = 'SELECT attributeID FROM dgmattribs WHERE attributeName = ?'
QUERY_TYPENAME_TYPEID = 'SELECT typeID FROM invtypes WHERE typeName = ?'
QUERY_GROUPNAME_GROUPID = 'SELECT groupID FROM invgroups WHERE groupName = ?'
if options.srq:
global_skillrqids = set()
for srq in options.srq.split(","):
srqid = 0
cursor.execute(QUERY_TYPENAME_TYPEID, (srq,))
for row in cursor:
srqid = row[0]
if not srqid:
import sys
sys.stderr.write("You need to specify proper skill requirement name.\n")
sys.exit()
else:
global_skillrqids.add(srqid)
if options.grp:
global_groupids = set()
for grp in options.grp.split(","):
grouplist = []
cursor.execute(QUERY_GROUPNAME_GROUPID, (grp,))
for row in cursor:
grouplist.append(row[0])
if len(grouplist) > 1:
print("Warning: multiple groups found, using ID", grouplist[0])
elif len(grouplist) == 0:
import sys
sys.stderr.write("You need to specify proper group name.\n")
sys.exit()
global_groupids.add(grouplist[0])
# Published types set
publishedtypes = set()
cursor.execute(QUERY_PUBLISHEDTYPEIDS)
for row in cursor:
publishedtypes.add(row[0])
# We'll use the list of items with the given attributes as the base for all operations
# Below, the term 'item' means an item with the given attribute
typeswithattr = set()
first = True
for attr in options.attr.split(","):
tmp = set()
cursor.execute(QUERY_ATTRIBUTENAME_ATTRIBUTEID, (attr,))
noattr = True
for row in cursor:
noattr = False
attrid = row[0]
if noattr:
import sys
sys.stderr.write("No \"{0}\" attribute found.\n".format(attr))
sys.exit()
cursor.execute(QUERY_ATTRIBUTEID_TYPEID, (attrid,))
for row in cursor:
if options.nozero:
if row[0] in publishedtypes and row[1] not in (None, 0, 0.0):
tmp.add(row[0])
elif options.noone:
if row[0] in publishedtypes and row[1] != 1.0:
tmp.add(row[0])
else:
if row[0] in publishedtypes:
tmp.add(row[0])
if first:
first = False
        typeswithattr = set(tmp)
else:
typeswithattr.intersection_update(tmp)
if len(typeswithattr) == 0:
import sys
sys.stderr.write("No items found with all of supplied attributes.\n")
sys.exit()
# Base type maps
# { basetypeid : set(typeid) }
map_basetypeid_typeid = {}
# { typeid : basetypeid }
map_typeid_basetypeid = {}
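# For example (hypothetical IDs): if invmetatypes maps types 1001 and 1002
# to parent 1000 and all three carry the attribute, we end up with
# map_basetypeid_typeid == {1000: set([1000, 1001, 1002])} and
# map_typeid_basetypeid == {1000: 1000, 1001: 1000, 1002: 1000}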
for typeid in typeswithattr:
    # Not all typeIDs in the database have a parent type, so assign a
    # default value to it first
    basetypeid = 0
    cursor.execute(QUERY_TYPEID_PARENTTYPEID, (typeid,))
    for row in cursor:
        basetypeid = row[0]
    # If the parent type didn't make it into our filtered set (not set in
    # the database, unpublished or lacking the attribute), consider the
    # item a variation of itself
    if basetypeid not in typeswithattr:
        basetypeid = typeid
    map_basetypeid_typeid.setdefault(basetypeid, set()).add(typeid)
    map_typeid_basetypeid[typeid] = basetypeid
# Meta group maps
# { metagroupid : set(typeid) }
map_metagroupid_typeid = {}
# { typeid : metagroupid }
map_typeid_metagroupid = {}
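# In EVE data metaGroupID 1 is Tech I and 2 is Tech II; the options.tech12
# filter below relies on this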
for typeid in typeswithattr:
# Assume items are tech 1 by default
metagroupid = 1
cursor.execute(QUERY_TYPEID_METAGROUPID, (typeid,))
for row in cursor:
metagroupid = row[0]
    map_metagroupid_typeid.setdefault(metagroupid, set()).add(typeid)
map_typeid_metagroupid[typeid] = metagroupid
# Filter out non-T1/T2 items if we're asked to do so: an item survives only
# if it is its own base type or belongs to meta group 2 (Tech II)
if options.tech12:
    toremove = set()
    for typeid in typeswithattr:
        if map_typeid_basetypeid[typeid] != typeid and map_typeid_metagroupid[typeid] != 2:
            toremove.add(typeid)
    typeswithattr.difference_update(toremove)
print("Attributes:")
for attr in sorted(options.attr.split(",")):
print(attr)
print("")
# Compose group maps
# { groupid : set(typeid) }
map_groupid_typeid = {}
# { typeid : groupid }
map_typeid_groupid = {}
for typeid in typeswithattr:
groupid = 0
cursor.execute(QUERY_TYPEID_GROUPID, (typeid,))
for row in cursor:
groupid = row[0]
    map_groupid_typeid.setdefault(groupid, set()).add(typeid)
map_typeid_groupid[typeid] = groupid
# Category maps
# { categoryid : set(typeid) }
map_categoryid_typeid = {}
# { typeid : categoryid }
map_typeid_categoryid = {}
for typeid in typeswithattr:
categoryid = 0
cursor.execute(QUERY_GROUPID_CATEGORYID,
(map_typeid_groupid[typeid],))
for row in cursor:
categoryid = row[0]
    map_categoryid_typeid.setdefault(categoryid, set()).add(typeid)
map_typeid_categoryid[typeid] = categoryid
# { categoryid : set(groupid) }
map_categoryid_groupid = {}
# { groupid : categoryid }
map_groupid_categoryid = {}
for groupid in map_groupid_typeid:
categoryid = 0
cursor.execute(QUERY_GROUPID_CATEGORYID,
(groupid,))
for row in cursor:
categoryid = row[0]
    map_categoryid_groupid.setdefault(categoryid, set()).add(groupid)
map_groupid_categoryid[groupid] = categoryid
# Skill required maps
# { skillid : set(typeid) }
map_skillrq_typeid = {}
# { typeid : set(skillid) }
map_typeid_skillrq = {}
# set of typeIDs without skill requirements
set_typeid_noskillrq = set()
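# QUERY_TYPEID_SKILLRQ only looks at the requiredSkill1..requiredSkill3
# attributes, so each item contributes at most three skill IDs here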
for typeid in typeswithattr:
map_typeid_skillrq[typeid] = set()
cursor.execute(QUERY_TYPEID_SKILLRQ, (typeid,))
no_rqs = True
for row in cursor:
no_rqs = False
skillid = row[0]
        map_skillrq_typeid.setdefault(skillid, set()).add(typeid)
map_typeid_skillrq[typeid].add(skillid)
if no_rqs:
set_typeid_noskillrq.add(typeid)
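# Small name-lookup helpers; each returns an empty string when the ID is
# unknown, which keeps the sorted(..., key=...) calls below from failing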
def gettypename(typeid):
typename = ""
cursor.execute(QUERY_TYPEID_TYPENAME, (typeid,))
for row in cursor:
typename = row[0]
return typename
def getgroupname(grpid):
grpname = ""
cursor.execute(QUERY_GROUPID_GROUPNAME, (grpid,))
for row in cursor:
grpname = row[0]
return grpname
def getcatname(catid):
catname = ""
cursor.execute(QUERY_CATEGORYID_CATEGORYNAME, (catid,))
for row in cursor:
catname = row[0]
return catname
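# Four report layouts follow, depending on which filters were supplied:
# both group and skill requirement, group only, skill requirement only,
# or neither (plain dump of the assumed item set)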
if options.grp and options.srq:
# Set of items which are supposed to be affected
targetitems = set()
for groupid in global_groupids:
for srqid in global_skillrqids:
if groupid in map_groupid_typeid and srqid in map_skillrq_typeid:
targetitems.update(map_groupid_typeid[groupid].intersection(map_skillrq_typeid[srqid]))
targetitems_noskillrqs = targetitems.intersection(set_typeid_noskillrq)
# All skill requirements of items which are supposed to be affected
targetitems_skillrqs = set()
for itemid in targetitems:
targetitems_skillrqs.update(map_typeid_skillrq[itemid])
    # Remove the skill requirements supplied as script arguments: they are
    # common to all target items and already appear in the section headers,
    # so repeating them per item would just be noise
    for srqid in global_skillrqids:
        targetitems_skillrqs.discard(srqid)
if targetitems:
# Print items which are supposed to be affected
print("Affected items:")
for groupid in sorted(global_groupids, key=lambda grid: getgroupname(grid)):
targetitems_grp = targetitems.intersection(map_groupid_typeid[groupid])
print(" Items from {0} group:".format(getgroupname(groupid)))
# Cycle through all required skills
            targetitems_skillrqs_withgiven = targetitems_skillrqs.union(global_skillrqids)
for skillrq in sorted(targetitems_skillrqs_withgiven, key=lambda sk: gettypename(sk)):
targetitems_grp_srq = targetitems_grp.intersection(map_skillrq_typeid[skillrq])
if targetitems_grp_srq:
print(" Items requiring {0} skill:".format(gettypename(skillrq)))
for item in sorted(targetitems_grp_srq, key=lambda item: gettypename(item)):
                        # If the item has skill requirements besides the ones
                        # supplied as arguments and the one in the header of
                        # the current section, mention them
                        if len(map_typeid_skillrq[item]) in (2, 3):
                            otherskillrq = set(map_typeid_skillrq[item])
otherskillrq.discard(skillrq)
print(" {0} ({1})".format(gettypename(item), ", ".join(sorted(gettypename(id) for id in otherskillrq))))
# Just print item names if there's only 1 skill requirement
elif len(map_typeid_skillrq[item]) == 1:
print(" {0}".format(gettypename(item)))
else:
print("WARNING: Bad things happened, we never should get here")
print("\nUnaffected items")
items_in_groups = set()
for groupid in global_groupids:
items_in_groups.update(map_groupid_typeid[groupid])
items_with_skillrqs = set()
for srqid in global_skillrqids:
items_with_skillrqs.update(map_skillrq_typeid[srqid])
# List items which do not belong to given group, but have given skill requirement
wskill = typeswithattr.intersection(items_with_skillrqs)
wogroup = typeswithattr.difference(items_in_groups)
nontarget_wskill_wogroup = wskill.intersection(wogroup)
if nontarget_wskill_wogroup:
print(" With {0} skill requirements, not belonging to {1} groups:".format(", ".join(sorted(gettypename(id) for id in global_skillrqids)), ", ".join(sorted(getgroupname(grid) for grid in global_groupids))))
for item in sorted(nontarget_wskill_wogroup, key=lambda item: gettypename(item)):
print(" {0}".format(gettypename(item)))
# List items which belong to given group, but do not have given skill requirement
woskill = typeswithattr.difference(items_with_skillrqs)
wgroup = typeswithattr.intersection(items_in_groups)
nontarget_woskill_wgroup = woskill.intersection(wgroup)
if nontarget_woskill_wgroup:
print(" Without {0} skill requirement, belonging to {1} group:".format(", ".join(sorted(gettypename(id) for id in global_skillrqids)), ", ".join(sorted(getgroupname(grid) for grid in global_groupids))))
for item in sorted(nontarget_woskill_wgroup, key=lambda item: gettypename(item)):
print(" {0}".format(gettypename(item)))
        # If either of the above lists came out empty, also print the
        # remaining unaffected items as a plain list
if not nontarget_wskill_wogroup or not nontarget_woskill_wgroup:
nontarget = typeswithattr.difference(items_in_groups)
for srqid in global_skillrqids:
nontarget.difference_update(map_skillrq_typeid[srqid])
if nontarget_wskill_wogroup:
nontarget.difference_update(nontarget_wskill_wogroup)
if nontarget_woskill_wgroup:
nontarget.difference_update(nontarget_woskill_wgroup)
nontarget_groups = set()
nontarget_cats = set()
print(" Plain list:")
for item in sorted(nontarget, key=lambda item: gettypename(item)):
nontarget_groups.add(map_typeid_groupid[item])
print(" {0} ({1})".format(gettypename(item), getgroupname(map_typeid_groupid[item])))
#print(" Groups:")
#for group in sorted(nontarget_groups, key=lambda grp: getgroupname(grp)):
# nontarget_cats.add(map_groupid_categoryid[group])
# print(" {0} ({1})".format(getgroupname(group), getcatname(map_groupid_categoryid[group])))
#print(" Categories:")
#for cat in sorted(nontarget_cats, key=lambda cat: getcatname(cat)):
# print(" {0}".format(getcatname(cat)))
elif options.grp:
# Set of items which are supposed to be affected
targetitems = set()
for groupid in global_groupids:
if groupid in map_groupid_typeid:
targetitems.update(map_groupid_typeid[groupid])
# All skill requirements of items which are supposed to be affected
targetitems_skillrqs = set()
for itemid in targetitems:
targetitems_skillrqs.update(map_typeid_skillrq[itemid])
targetitems_noskillrqs = targetitems.intersection(set_typeid_noskillrq)
if targetitems:
# Print items which are supposed to be affected
print("Affected items:")
for groupid in sorted(global_groupids, key=lambda grid: getgroupname(grid)):
print(" From {0} group:".format(getgroupname(groupid)))
targetitems_grp = targetitems.intersection(map_groupid_typeid[groupid])
targetitems_noskillrqs_grp = targetitems_noskillrqs.intersection(map_groupid_typeid[groupid])
# Cycle through all required skills
for skillrq in sorted(targetitems_skillrqs, key=lambda sk: gettypename(sk)):
items_grpsrq = targetitems_grp.intersection(map_skillrq_typeid[skillrq])
if items_grpsrq:
print(" Requiring {0} skill:".format(gettypename(skillrq)))
for item in sorted(items_grpsrq, key=lambda item: gettypename(item)):
# If item has other skill requirements, print them
                        if len(map_typeid_skillrq[item]) in (2, 3):
                            otherskillrq = set(map_typeid_skillrq[item])
otherskillrq.discard(skillrq)
print(" {0} ({1})".format(gettypename(item), ", ".join(sorted(gettypename(id) for id in otherskillrq))))
                        # Just print the item name if this is its only skill requirement
                        elif len(map_typeid_skillrq[item]) == 1:
print(" {0}".format(gettypename(item)))
else:
print("WARNING: Bad things happened, we never should get here")
            if targetitems_noskillrqs_grp:
print(" Requiring no skills:")
for item in sorted(targetitems_noskillrqs_grp, key=lambda item: gettypename(item)):
print(" {0}".format(gettypename(item)))
print("\nUnaffected items")
# List items which are supposed to be unaffected
nontarget = typeswithattr.difference(targetitems)
nontarget_groups = set()
nontarget_cats = set()
print(" Not belonging to groups {0}:".format(", ".join(getgroupname(id) for id in global_groupids)))
removeitms = set()
    # List unaffected items under each skill requirement for which some items were affected
for skillrq in sorted(targetitems_skillrqs, key=lambda srq: gettypename(srq)):
if nontarget.intersection(map_skillrq_typeid[skillrq]):
print(" With {0} skill requirement:".format(gettypename(skillrq)))
for item in sorted(nontarget.intersection(map_skillrq_typeid[skillrq]), key=lambda item: gettypename(item)):
print(" {0}".format(gettypename(item)))
removeitms.update(map_skillrq_typeid[skillrq])
nontarget.difference_update(removeitms)
print(" With other or no skill requirements:")
for item in sorted(nontarget, key=lambda item: gettypename(item)):
nontarget_groups.add(map_typeid_groupid[item])
print(" {0} ({1})".format(gettypename(item), getgroupname(map_typeid_groupid[item])))
#print(" Groups:")
#for group in sorted(nontarget_groups, key=lambda grp: getgroupname(grp)):
# nontarget_cats.add(map_groupid_categoryid[group])
# print(" {0} ({1})".format(getgroupname(group), getcatname(map_groupid_categoryid[group])))
#print(" Categories:")
#for cat in sorted(nontarget_cats, key=lambda cat: getcatname(cat)):
# print(" {0}".format(getcatname(cat)))
elif options.srq:
# Set of items which are supposed to be affected
targetitems = set()
for srqid in global_skillrqids:
if srqid in map_skillrq_typeid:
targetitems.update(map_skillrq_typeid[srqid])
# All groups of items which are supposed to be affected
targetitems_groups = set()
targetitems_srqs = set()
targetitems_cats = set()
for itemid in targetitems:
targetitems_groups.add(map_typeid_groupid[itemid])
targetitems_srqs.update(map_typeid_skillrq[itemid])
targetitems_cats.add(map_typeid_categoryid[itemid])
if targetitems:
# Print items which are supposed to be affected
print("Affected items:")
for srqid in sorted(global_skillrqids, key=lambda itm: gettypename(itm)):
print(" With {0} skill requirements:".format(gettypename(srqid)))
targetitems_srq = targetitems.intersection(map_skillrq_typeid[srqid])
targetitems_srq_groups = set()
targetitems_srq_cats = set()
for itemid in targetitems_srq:
targetitems_srq_groups.add(map_typeid_groupid[itemid])
targetitems_srq_cats.add(map_typeid_categoryid[itemid])
# Cycle through groups
for groupid in sorted(targetitems_srq_groups, key=lambda grp: getgroupname(grp)):
print(" From {0} group:".format(getgroupname(groupid)))
for item in sorted(targetitems_srq.intersection(map_groupid_typeid[groupid]), key=lambda item: gettypename(item)):
print(" {0} ({1})".format(gettypename(item), ", ".join(sorted(gettypename(itm) for itm in map_typeid_skillrq[item].difference(global_skillrqids))) or "None"))
print("\nUnaffected items")
# List items which are supposed to be unaffected
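    # Unaffected items are reported as a cascade: first per group which had
    # affected items, then per category, then whatever remains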
nontarget = typeswithattr.difference(targetitems)
nontarget_groups = set()
nontarget_cats = set()
print(" Without {0} skills requirement:".format(", ".join(gettypename(id) for id in global_skillrqids)))
removeitms = set()
    # List unaffected items from each group where some items were affected
for groupid in sorted(targetitems_groups, key=lambda grp: getgroupname(grp)):
if nontarget.intersection(map_groupid_typeid[groupid]):
print(" From {0} group:".format(getgroupname(groupid)))
for skillrqid in sorted(targetitems_srqs.difference(global_skillrqids), key=lambda srq: gettypename(srq)):
itmset = nontarget.intersection(map_groupid_typeid[groupid]).intersection(map_skillrq_typeid[skillrqid])
if itmset:
print(" Items with {0} skill requirement:".format(gettypename(skillrqid)))
for item in sorted(itmset, key=lambda itm: gettypename(itm)):
otherskrqs = map_typeid_skillrq[item].difference(global_skillrqids)
otherskrqs.remove(skillrqid)
print(" {0} ({1})".format(gettypename(item), ", ".join(sorted(gettypename(itm) for itm in otherskrqs)) or "None"))
removeitms.update(itmset)
nontarget.difference_update(removeitms)
otsk = nontarget.intersection(map_groupid_typeid[groupid]).difference(set_typeid_noskillrq)
if otsk:
print(" Items with other skill requirements:")
for item in sorted(otsk, key=lambda itm: gettypename(itm)):
print(" {0} (None)".format(gettypename(item)))
removeitms.update(otsk)
nosk = nontarget.intersection(map_groupid_typeid[groupid]).intersection(set_typeid_noskillrq)
if nosk:
print(" Items with no skill requirement:")
for item in sorted(nosk, key=lambda itm: gettypename(itm)):
print(" {0} (None)".format(gettypename(item)))
removeitms.update(nosk)
nontarget.difference_update(removeitms)
for catid in sorted(targetitems_cats, key=lambda cat: getcatname(cat)):
if nontarget.intersection(map_categoryid_typeid[catid]):
print(" From {0} category:".format(getcatname(catid)))
for item in sorted(nontarget.intersection(map_categoryid_typeid[catid]), key=lambda item: gettypename(item)):
print(" {0}".format(gettypename(item)))
removeitms.update(map_categoryid_typeid[catid])
nontarget.difference_update(removeitms)
if nontarget:
# Check any other unaffected item
print(" Remaining items:")
for item in sorted(nontarget, key=lambda item: gettypename(item)):
nontarget_groups.add(map_typeid_groupid[item])
print(" {0} ({1})".format(gettypename(item), getgroupname(map_typeid_groupid[item])))
#print(" Groups:")
#for group in sorted(nontarget_groups, key=lambda grp: getgroupname(grp)):
# nontarget_cats.add(map_groupid_categoryid[group])
# print(" {0} ({1})".format(getgroupname(group), getcatname(map_groupid_categoryid[group])))
#print(" Categories:")
#for cat in sorted(nontarget_cats, key=lambda cat: getcatname(cat)):
# print(" {0}".format(getcatname(cat)))
else:
print("Affected items")
targetitems = typeswithattr
targetitems_groups = set()
targetitems_cats = set()
print(" Assumed set of items:")
for item in sorted(targetitems, key=lambda item: gettypename(item)):
targetitems_groups.add(map_typeid_groupid[item])
print(" {0} ({1})".format(gettypename(item), getgroupname(map_typeid_groupid[item])))
print(" Groups:")
for group in sorted(targetitems_groups, key=lambda grp: getgroupname(grp)):
targetitems_cats.add(map_groupid_categoryid[group])
print(" {0} ({1})".format(getgroupname(group), getcatname(map_groupid_categoryid[group])))
print(" Categories:")
for cat in sorted(targetitems_cats, key=lambda cat: getcatname(cat)):
print(" {0}".format(getcatname(cat)))