Merge branch 'master' into singularity

This commit is contained in:
DarkPhoenix
2018-08-21 19:40:39 +03:00
1963 changed files with 2602 additions and 523 deletions

@@ -0,0 +1,80 @@
#!/usr/bin/env python2.7
"""
This script generates a dynamicattributes.json file from the EVE client's res files
"""
import argparse
import json
import os
from shutil import copyfile
parser = argparse.ArgumentParser(description='This script generates dynamicattributes.json for pyfa')
parser.add_argument('-e', '--eve', required=True, type=str, help='path to EVE\'s shared cache folder')
parser.add_argument('-s', '--server', required=False, default='tq', type=str, help='which server to use (defaults to tq)')
args = parser.parse_args()
LOADER_FILE = 'app:/bin/dynamicItemAttributesLoader.pyd'
RES_FILE = 'res:/staticdata/dynamicitemattributes.fsdbinary'
binaryfile = os.path.split(RES_FILE)[1]
eve_path = os.path.join(args.eve, 'index_{}.txt'.format(args.server))
with open(eve_path, 'r') as f:
    lines = f.readlines()
file_index = {x.split(',')[0]: x.split(',') for x in lines}
resfileindex = file_index['app:/resfileindex.txt']
res_cache = os.path.join(args.eve, 'ResFiles')
with open(os.path.join(res_cache, resfileindex[1]), 'r') as f:
    lines = f.readlines()
res_index = {x.split(',')[0].lower(): x.split(',') for x in lines}
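# Each line of these index files is assumed (from the parsing above; the format
# itself is undocumented) to be a comma-separated record whose first field is
# the resource path and whose second field is the relative path of the cached
# file inside ResFiles, e.g. (illustrative, not a real entry):
#   res:/staticdata/dynamicitemattributes.fsdbinary,aa/aabbccdd_deadbeef.fsdbinary,...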
# Need to copy the loader .pyd to our current directory so it can be imported
attribute_loader_file = os.path.join(res_cache, file_index[LOADER_FILE][1])
to_path = os.path.dirname(os.path.abspath(__file__))
copyfile(attribute_loader_file, os.path.join(to_path, os.path.split(LOADER_FILE)[1]))
# The loader expects the data file to keep its original filename, so copy that file as well
dynattribute_file = os.path.join(res_cache, res_index[RES_FILE.lower()][1])
copyfile(dynattribute_file, os.path.join(to_path, binaryfile))
# Import happens only now, after the .pyd has been copied next to this script
import dynamicItemAttributesLoader
attributes = dynamicItemAttributesLoader.load(os.path.join(to_path, binaryfile))
attributes_obj = {}
# convert top level to dict
attributes = dict(attributes)
# This is a brute-force conversion. TODO: generate it recursively by inspecting the objects (see the sketch after this loop)
for k, v in attributes.items():
    attributes_obj[k] = {
        'attributeIDs': dict(v.attributeIDs),
        'inputOutputMapping': list(v.inputOutputMapping)
    }
    for i, x in enumerate(v.inputOutputMapping):
        attributes_obj[k]['inputOutputMapping'][i] = {
            'resultingType': x.resultingType,
            'applicableTypes': list(x.applicableTypes)
        }
    for k2, v2 in v.attributeIDs.items():
        attributes_obj[k]['attributeIDs'][k2] = {
            'min': v2.min,
            'max': v2.max
        }
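# A sketch of the recursive conversion the TODO above asks for; it is left
# unused here and assumes (unverified) that the loader's composite objects
# are either dict()-convertible, iterable, or expose plain attributes.
def to_plain(obj):
    if obj is None or isinstance(obj, (bool, int, long, float, basestring)):
        return obj
    try:
        # Mappings and sequences of key/value pairs
        return {k: to_plain(v) for k, v in dict(obj).items()}
    except (TypeError, ValueError):
        pass
    try:
        # Plain sequences
        return [to_plain(x) for x in obj]
    except TypeError:
        # Fall back to public, non-callable attributes
        return {a: to_plain(getattr(obj, a)) for a in dir(obj)
                if not a.startswith('_') and not callable(getattr(obj, a))}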
with open('dynamicattributes.json', 'w') as outfile:
    json.dump(attributes_obj, outfile)
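# The resulting JSON is expected to look roughly like this (illustrative IDs
# and values, not real data):
# {
#   "47820": {
#     "attributeIDs": {"4": {"min": 0.9, "max": 1.1}},
#     "inputOutputMapping": [
#       {"resultingType": 47821, "applicableTypes": [620, 621]}
#     ]
#   }
# }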

File diff suppressed because one or more lines are too long

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python2.7
"""
This script updates only market/item icons.
@@ -9,50 +9,66 @@ import argparse
import os
import re
import sqlite3
import json
from PIL import Image
from shutil import copyfile
parser = argparse.ArgumentParser(description='This script updates module icons for pyfa')
-parser.add_argument('-i', '--icons', required=True, type=str, help='path to unpacked Icons folder from CCP\'s image export')
+parser.add_argument('-e', '--eve', required=True, type=str, help='path to EVE\'s shared cache folder')
+parser.add_argument('-s', '--server', required=False, default='tq', type=str, help='which server to use (defaults to tq)')
+parser.add_argument('-i', '--icons', required=True, type=str, help='path to icons .json')
args = parser.parse_args()
script_dir = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.abspath(os.path.join(script_dir, '..', 'eve.db'))
icons_dir = os.path.abspath(os.path.join(script_dir, '..', 'imgs', 'icons'))
-export_dir = os.path.abspath(os.path.expanduser(os.path.join(args.icons, 'items')))
-dirs = [export_dir, os.path.join(export_dir, "Modules")]
render_dir = os.path.abspath(os.path.join(script_dir, '..', 'imgs', 'renders'))
db = sqlite3.connect(db_path)
cursor = db.cursor()
ICON_SIZE = (16, 16)
RENDER_SIZE = (32, 32)
ITEM_CATEGORIES = (
    2,   # Celestial
    6,   # Ship
    7,   # Module
    8,   # Charge
    16,  # Skill
    18,  # Drone
    20,  # Implant
    32,  # Subsystem
    66,  # Structure Module
)
with open(args.icons, 'r') as f:
    icon_json = json.load(f)
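# icon_json maps str(iconID) -> {'iconFile': <res:/ path>}; it is assumed to be
# the icons.json produced by the iconIDs.yaml converter script further below.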
MARKET_ROOTS = {
    9,     # Modules
    1111,  # Rigs
    157,   # Drones
    11,    # Ammo
    1112,  # Subsystems
    24,    # Implants & Boosters
    404,   # Deployables
    2202,  # Structure Equipment
    2203   # Structure Modifications (Rigs)
}
eve_path = os.path.join(args.eve, 'index_{}.txt'.format(args.server))
with open(eve_path, 'r') as f:
    lines = f.readlines()
file_index = {x.split(',')[0]: x.split(',') for x in lines}
resfileindex = file_index['app:/resfileindex.txt']
res_cache = os.path.join(args.eve, 'ResFiles')
with open(os.path.join(res_cache, resfileindex[1]), 'r') as f:
    lines = f.readlines()
res_index = {x.split(',')[0].lower(): x.split(',') for x in lines}
# Need to copy the loader .pyd to our current directory so it can be imported
graphics_loader_file = os.path.join(res_cache, file_index['app:/bin/graphicIDsLoader.pyd'][1])
to_path = os.path.dirname(os.path.abspath(__file__))
copyfile(graphics_loader_file, os.path.join(to_path, 'graphicIDsLoader.pyd'))
# The loader expects the data file to keep its original filename, so copy that file as well
graphics_file = os.path.join(res_cache, res_index['res:/staticdata/graphicIDs.fsdbinary'.lower()][1])
copyfile(graphics_file, os.path.join(to_path, 'graphicIDs.fsdbinary'))
# Import happens only now, after the .pyd has been copied next to this script
import graphicIDsLoader
graphics = graphicIDsLoader.load(os.path.join(to_path, 'graphicIDs.fsdbinary'))
graphics_py_ob = {}
for x, v in graphics.items():
    if hasattr(v, 'iconFolder'):
        graphics_py_ob[x] = v.iconFolder
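# graphics_py_ob now maps graphicID -> icon folder res path. Judging from the
# render handling further below, render files are assumed to live at
# '<iconFolder>/<graphicID>_64.png' inside that folder.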
# Add children to market group list
# {parent: {children}}
@@ -75,101 +91,35 @@ def get_children(parent):
return children
market_groups = set()
for root in MARKET_ROOTS:
    market_groups.add(root)
    market_groups.update(get_children(root))
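# get_children's body is elided by the diff above; it is presumably a recursive
# walk over invmarketgroups.parentGroupID, roughly:
#   def get_children(parent):
#       children = set()
#       for row in cursor.execute('select marketGroupID from invmarketgroups where parentGroupID = ?', (parent,)):
#           children.add(row[0])
#           children.update(get_children(row[0]))
#       return children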
-query_items = 'select distinct i.iconFile from icons as i inner join invtypes as it on it.iconID = i.iconID inner join invgroups as ig on it.groupID = ig.groupID where ig.categoryID in ({})'.format(', '.join(str(i) for i in ITEM_CATEGORIES))
-query_groups = 'select distinct i.iconFile from icons as i inner join invgroups as ig on ig.iconID = i.iconID where ig.categoryID in ({})'.format(', '.join(str(i) for i in ITEM_CATEGORIES))
-query_cats = 'select distinct i.iconFile from icons as i inner join invcategories as ic on ic.iconID = i.iconID where ic.categoryID in ({})'.format(', '.join(str(i) for i in ITEM_CATEGORIES))
-query_market = 'select distinct i.iconFile from icons as i inner join invmarketgroups as img on img.iconID = i.iconID where img.marketGroupID in ({})'.format(', '.join(str(i) for i in market_groups))
-query_attrib = 'select distinct i.iconFile from icons as i inner join dgmattribs as da on da.iconID = i.iconID'
+query_items = 'select distinct iconID from invtypes'
+query_groups = 'select distinct iconID from invgroups'
+query_cats = 'select distinct iconID from invcategories'
+query_market = 'select distinct iconID from invmarketgroups'
+query_attrib = 'select distinct iconID from dgmattribs'
query_ships = 'select it.graphicID from invtypes as it inner join invgroups as ig on it.groupID = ig.groupID where ig.categoryID in (6, 65)'
needed = set()
existing = set()
export = {}
def strip_path(fname):
    """
    Extract the 'core' of an icon name. The path and
    extension are sometimes specified in the database,
    but we don't need them.
    """
    # Path before the icon file name
    fname = fname.split('/')[-1]
    # Extension
    fname = fname.rsplit('.', 1)[0]
    return fname
def unzero(fname):
    """
    Get rid of leading zeros in the triplet. They are often specified in the DB
    but almost never in the actual files.
    """
    m = re.match(r'^(?P<prefix>[^_\.]+)_((?P<size>\d+)_)?(?P<suffix>[^_\.]+)(?P<tail>\..*)?$', fname)
    if m:
        prefix = m.group('prefix')
        size = m.group('size')
        suffix = m.group('suffix')
        tail = m.group('tail')
        try:
            prefix = int(prefix)
        except (TypeError, ValueError):
            pass
        try:
            size = int(size)
        except (TypeError, ValueError):
            pass
        try:
            suffix = int(suffix)
        except (TypeError, ValueError):
            pass
        if size is None:
            fname = '{}_{}'.format(prefix, suffix)
        else:
            fname = '{}_{}_{}'.format(prefix, size, suffix)
        return fname
    else:
        return fname
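# For example (derived from the regex above):
#   unzero('icon')          -> 'icon' (no underscore, no match, returned untouched)
#   unzero('01_02')         -> '1_2'
#   unzero('01_64_02.png')  -> '1_64_2' (note: the captured tail is dropped)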
# Get a list of needed icons based on the items / attributes / etc from the database
for query in (query_items, query_groups, query_cats, query_market, query_attrib):
    for row in cursor.execute(query):
        fname = row[0]
-        if not fname:
+        if fname is None:
            continue
-        fname = strip_path(fname)
        needed.add(fname)
# Get a list of all the icons we currently have
for fname in os.listdir(icons_dir):
    if not os.path.isfile(os.path.join(icons_dir, fname)):
        continue
-    fname = strip_path(fname)
+    fname = os.path.splitext(fname)[0]
    # Get rid of the "icon" prefix as well
    #fname = re.sub('^icon', '', fname)
    print('{} exists'.format(fname))
    existing.add(fname)
-# Get a list of all the icons currently available in export
-for dir in dirs:
-    for fname in os.listdir(dir):
-        if not os.path.isfile(os.path.join(dir, fname)):
-            continue
-        stripped = strip_path(fname)
-        stripped = unzero(stripped)
-        # Icons in export often specify size in their name, but references often use
-        # the convention without size specification
-        sizeless = re.sub('^(?P<prefix>[^_]+)_(?P<size>\d+)_(?P<suffix>[^_]+)$', r'\1_\3', stripped)
-        # Often items are referred to in 01_01 format
-        fnames = export.setdefault(sizeless.lower(), set())
-        fnames.add(fname)
def crop_image(img):
    w, h = img.size
    if h == w:
@@ -185,38 +135,25 @@ def crop_image(img):
    return img.crop(box)
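# The elided middle of crop_image presumably computes a centered square crop
# box over the longer dimension (PIL box = (left, upper, right, lower)), e.g.:
#   if w > h: box = ((w - h) // 2, 0, (w + h) // 2, h)
#   else:     box = (0, (h - w) // 2, w, (h + w) // 2)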
-def get_icon_file(request):
+def get_icon_file(res_path, size):
    """
    Get the iconFile field value and find the proper
    icon for it. Return it as a PIL image object,
    downscaled for use in pyfa.
    """
-    rq = strip_path(request)
-    rq = unzero(rq)
-    try:
-        fnames = export[rq]
-    except KeyError:
-        return None
+    if res_path not in res_index:
+        return None
-    # {(h, w): source full path}
-    sizes = {}
-    for dir in dirs:
-        for fname in fnames:
-            fullpath = os.path.join(dir, fname)
-            if not os.path.isfile(fullpath):
-                continue
-            img = Image.open(fullpath)
-            sizes[img.size] = fullpath
-    # Try to return an image which is already in the necessary format
-    try:
-        fullpath = sizes[ICON_SIZE]
-    # Otherwise, convert the biggest image
-    except KeyError:
-        fullpath = sizes[max(sizes)]
-        img = Image.open(fullpath)
-        img = crop_image(img)
-        img.thumbnail(ICON_SIZE, Image.ANTIALIAS)
-    else:
-        img = Image.open(fullpath)
+    res_icon = res_index[res_path]
+    icon_path = res_icon[1]
+    fullpath = os.path.join(res_cache, icon_path)
+    if not os.path.isfile(fullpath):
+        return None
+    img = Image.open(fullpath)
+    img = crop_image(img)
+    img.thumbnail(size, Image.ANTIALIAS)
    # Strip all additional image info (mostly ICC color
    # profiles, see issue #337)
    img.info.clear()
@@ -232,7 +169,6 @@ if toremove:
    print('Some icons are not used and will be removed:')
    for fname in sorted(toremove):
        fullname = '{}.png'.format(fname)
        print('  {}'.format(fullname))
        fullpath = os.path.join(icons_dir, fullname)
        os.remove(fullpath)
@@ -256,7 +192,9 @@ if toadd:
    print('Adding {} icons...'.format(len(toadd)))
    missing = set()
    for fname in sorted(toadd):
-        icon = get_icon_file(fname)
+        icon = icon_json[str(fname)]
+        key = icon['iconFile'].lower()
+        icon = get_icon_file(key, ICON_SIZE)
        if icon is None:
            missing.add(fname)
            continue
@@ -267,3 +205,36 @@ if toadd:
        print(' {} icons are missing in export:'.format(len(missing)))
        for fname in sorted(missing):
            print('  {}'.format(fname))
print(missing)
print("Doing renders")
needed.clear()
existing.clear()
toremove.clear()
for row in cursor.execute(query_ships):
    needed.add(row[0])
toremove = existing.difference(needed)
toupdate = existing.intersection(needed)
toadd = needed.difference(existing)
if toadd:
    print('Adding {} icons...'.format(len(toadd)))
    missing = set()
    for fname in sorted(toadd):
        icon = graphics_py_ob[int(fname)]
        icon = '{}/{}_64.png'.format(icon, fname)
        # res_index keys are lowercased above, so normalize before the lookup
        icon = get_icon_file(icon.lower(), RENDER_SIZE)
        if icon is None:
            missing.add(fname)
            continue
        fullname = '{}.png'.format(fname)
        fullpath = os.path.join(render_dir, fullname)
        icon.save(fullpath, 'png')
    if missing:
        print(' {} icons are missing in export:'.format(len(missing)))
        for fname in sorted(missing):
            print('  {}'.format(fname))
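# Example invocation (script name and paths are illustrative, not confirmed):
#   python2.7 update_icons.py -e /path/to/EVE/SharedCache -i icons.json
# where icons.json is the output of the iconIDs.yaml converter script below.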

@@ -216,7 +216,7 @@ def main(old, new, groups=True, effects=True, attributes=True, renames=True):
        # Initialize container for the data for each item with empty stuff besides groupID
        dictionary[itemid] = [groupID, set(), {}]
    # Add items filtered by group
-    query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon", "Ship Modifiers", "Mutaplasmids", "MassiveEnvironments", "Uninteractable Localized Effect Beacon", "Non-Interactable Object")'
+    query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon", "Ship Modifiers", "Mutaplasmids", "MassiveEnvironments", "Abyssal Hazards", "Non-Interactable Object")'
    cursor.execute(query)
    for row in cursor:
        itemid = row[0]

@@ -25,11 +25,19 @@ import re
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(__file__)
sys.path.insert(0, os.path.realpath(os.path.join(path, '..')))
import json
import argparse
CATEGORIES_TO_REMOVE = [
    30  # Apparel
]
def main(db, json_path):
    if os.path.isfile(db):
        os.remove(db)
@@ -50,6 +58,7 @@ def main(db, json_path):
    # Config dict
    tables = {
        "clonegrades": eos.gamedata.AlphaCloneSkill,
        "dgmattribs": eos.gamedata.AttributeInfo,
        "dgmeffects": eos.gamedata.Effect,
        "dgmtypeattribs": eos.gamedata.Attribute,
        "dgmtypeeffects": eos.gamedata.ItemEffect,
        "dgmunits": eos.gamedata.Unit,
        "evecategories": eos.gamedata.Category,
        "evegroups": eos.gamedata.Group,
        "invmetagroups": eos.gamedata.MetaGroup,
        "invmetatypes": eos.gamedata.MetaType,
        "evetypes": eos.gamedata.Item,
        "phbtraits": eos.gamedata.Traits,
        "phbmetadata": eos.gamedata.MetaData,
        "mapbulk_marketGroups": eos.gamedata.MarketGroup,
    }
    fieldMapping = {
@@ -184,7 +209,11 @@ def main(db, json_path):
            tableData = convertIcons(tableData)
        if jsonName == 'phbtraits':
            tableData = convertTraits(tableData)
        if jsonName == 'clonegrades':
            tableData = convertClones(tableData)
        data[jsonName] = tableData
@@ -252,6 +281,29 @@ def main(db, json_path):
        eos.db.gamedata_session.add(instance)
    # quick and dirty hack to get this data in
    with open(os.path.join(jsonPath, "dynamicAttributes.json"), encoding="utf-8") as f:
        bulkdata = json.load(f)
    for mutaID, data in bulkdata.items():
        muta = eos.gamedata.DynamicItem()
        muta.typeID = mutaID
        muta.resultingTypeID = data['inputOutputMapping'][0]['resultingType']
        eos.db.gamedata_session.add(muta)
        for x in data['inputOutputMapping'][0]['applicableTypes']:
            item = eos.gamedata.DynamicItemItem()
            item.typeID = mutaID
            item.applicableTypeID = x
            eos.db.gamedata_session.add(item)
        for attrID, attrData in data['attributeIDs'].items():
            attr = eos.gamedata.DynamicItemAttribute()
            attr.typeID = mutaID
            attr.attributeID = attrID
            attr.min = attrData['min']
            attr.max = attrData['max']
            eos.db.gamedata_session.add(attr)
    eos.db.gamedata_session.commit()
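    # The dynamicAttributes.json consumed above is assumed to be the output of
    # the dynamic attributes generator script earlier in this commit (note that
    # the generator writes 'dynamicattributes.json'; the names only match on a
    # case-insensitive filesystem).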
# CCP still has 5 subsystems assigned to T3Cs, even though only 4 are available / usable. They probably have some
@@ -259,8 +311,23 @@ def main(db, json_path):
    # pyfa, we can do it here as a post-processing step
    eos.db.gamedata_engine.execute('UPDATE dgmtypeattribs SET value = 4.0 WHERE attributeID = ?', (1367,))
    eos.db.gamedata_engine.execute("UPDATE invtypes SET published = 0 WHERE typeName LIKE '%abyssal%'")
    print()
    for x in CATEGORIES_TO_REMOVE:
        cat = eos.db.gamedata_session.query(eos.gamedata.Category).filter(eos.gamedata.Category.ID == x).first()
        print("Removing category: {}".format(cat.name))
        eos.db.gamedata_session.delete(cat)
    eos.db.gamedata_session.commit()
    eos.db.gamedata_engine.execute("VACUUM")
    print("done")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='This script builds the eos gamedata database from a json data dump')
@@ -269,3 +336,4 @@ if __name__ == '__main__':
    args = parser.parse_args()
    main(args.db, args.json)

@@ -9,14 +9,14 @@ import json
iconDict = {}
stream = open(r"C:\Users\Ryan\Sync\Git\blitzmann\Pyfa\scripts\iconIDs.yaml", "r")
stream = open('iconIDs.yaml', 'r')
docs = yaml.load_all(stream)
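# Each iconIDs.yaml document is assumed to map iconID -> metadata with at
# least an 'iconFile' key, roughly:
#   1001:
#     iconFile: res:/ui/texture/icons/somefile.png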
for doc in docs:
    for k, v in list(doc.items()):
-        iconDict[str(k)] = {"iconFile": v['iconFile']}
+        iconDict[str(k)] = {'iconFile': v['iconFile']}
with open('icons.json', 'w') as outfile:
    json.dump(iconDict, outfile)
print("done")
print('done')