Merge branch 'master' into singularity

This commit is contained in:
DarkPhoenix
2014-08-26 22:47:50 +04:00
29 changed files with 703 additions and 566 deletions

View File

@@ -2,5 +2,9 @@
# Modules from group: ECM (44 of 44)
# Drones named like: EC (3 of 3)
type = "projected", "active"
def handler(fit, container, context):
pass
def handler(fit, module, context):
if "projected" in context:
# jam formula: 1 - (1- (jammer str/ship str))^(# of jam mods with same str))
strModifier = 1 - module.getModifiedItemAttr("scan{0}StrengthBonus".format(fit.scanType))/fit.scanStrength
fit.ecmProjectedStr *= strModifier

View File

@@ -3,7 +3,8 @@
# Modules from group: ECM Burst (7 of 7)
type = "overheat"
def handler(fit, module, context):
for scanType in ("Gravimetric", "Magnetometric", "Radar", "Ladar"):
module.boostItemAttr("scan{0}StrengthBonus".format(scanType),
module.getModifiedItemAttr("overloadECMStrengthBonus"),
stackingPenalties = True)
if "projected" not in context:
for scanType in ("Gravimetric", "Magnetometric", "Radar", "Ladar"):
module.boostItemAttr("scan{0}StrengthBonus".format(scanType),
module.getModifiedItemAttr("overloadECMStrengthBonus"),
stackingPenalties = True)

View File

@@ -1,884 +0,0 @@
#-----------------------------------------------------------------------------
# eveapi - EVE Online API access
#
# Copyright (c)2007 Jamie "Entity" van den Berge <entity@vapor.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE
#
#-----------------------------------------------------------------------------
# Version: 1.1.9-2 - 30 September 2011
# - merge workaround provided by Entity to make it work with http proxies
# Version: 1.1.9 - 2 September 2011
# - added workaround for row tags with attributes that were not defined
# in their rowset (this should fix AssetList)
#
# Version: 1.1.8 - 1 September 2011
# - fix for inconsistent columns attribute in rowsets.
#
# Version: 1.1.7 - 1 September 2011
# - auth() method updated to work with the new authentication scheme.
#
# Version: 1.1.6 - 27 May 2011
# - Now supports composite keys for IndexRowsets.
# - Fixed calls not working if a path was specified in the root url.
#
# Version: 1.1.5 - 27 Januari 2011
# - Now supports (and defaults to) HTTPS. Non-SSL proxies will still work by
# explicitly specifying http:// in the url.
#
# Version: 1.1.4 - 1 December 2010
# - Empty explicit CDATA tags are now properly handled.
# - _autocast now receives the name of the variable it's trying to typecast,
# enabling custom/future casting functions to make smarter decisions.
#
# Version: 1.1.3 - 6 November 2010
# - Added support for anonymous CDATA inside row tags. This makes the body of
# mails in the rows of char/MailBodies available through the .data attribute.
#
# Version: 1.1.2 - 2 July 2010
# - Fixed __str__ on row objects to work properly with unicode strings.
#
# Version: 1.1.1 - 10 Januari 2010
# - Fixed bug that causes nested tags to not appear in rows of rowsets created
# from normal Elements. This should fix the corp.MemberSecurity method,
# which now returns all data for members. [jehed]
#
# Version: 1.1.0 - 15 Januari 2009
# - Added Select() method to Rowset class. Using it avoids the creation of
# temporary row instances, speeding up iteration considerably.
# - Added ParseXML() function, which can be passed arbitrary API XML file or
# string objects.
# - Added support for proxy servers. A proxy can be specified globally or
# per api connection instance. [suggestion by graalman]
# - Some minor refactoring.
# - Fixed deprecation warning when using Python 2.6.
#
# Version: 1.0.7 - 14 November 2008
# - Added workaround for rowsets that are missing the (required!) columns
# attribute. If missing, it will use the columns found in the first row.
# Note that this is will still break when expecting columns, if the rowset
# is empty. [Flux/Entity]
#
# Version: 1.0.6 - 18 July 2008
# - Enabled expat text buffering to avoid content breaking up. [BigWhale]
#
# Version: 1.0.5 - 03 February 2008
# - Added workaround to make broken XML responses (like the "row:name" bug in
# eve/CharacterID) work as intended.
# - Bogus datestamps before the epoch in XML responses are now set to 0 to
# avoid breaking certain date/time functions. [Anathema Matou]
#
# Version: 1.0.4 - 23 December 2007
# - Changed _autocast() to use timegm() instead of mktime(). [Invisible Hand]
# - Fixed missing attributes of elements inside rows. [Elandra Tenari]
#
# Version: 1.0.3 - 13 December 2007
# - Fixed keyless columns bugging out the parser (in CorporationSheet for ex.)
#
# Version: 1.0.2 - 12 December 2007
# - Fixed parser not working with indented XML.
#
# Version: 1.0.1
# - Some micro optimizations
#
# Version: 1.0
# - Initial release
#
# Requirements:
# Python 2.4+
#
#-----------------------------------------------------------------------------
import httplib
import urlparse
import urllib
import copy
from xml.parsers import expat
from time import strptime
from calendar import timegm
# Module-level default proxy as a (host, port) tuple; EVEAPIConnection falls
# back to this when no per-connection proxy is given.
proxy = None
#-----------------------------------------------------------------------------
class Error(StandardError):
    # API-level error response from the EVE API server.  Carries the numeric
    # error code; args holds the message with any trailing period removed.
    def __init__(self, code, message):
        self.code = code
        trimmed = message.rstrip(".")
        self.args = (trimmed,)
def EVEAPIConnection(url="api.eveonline.com", cacheHandler=None, proxy=None):
    # Creates an API object through which you can call remote functions.
    #
    # The following optional arguments may be provided:
    #
    # url - root location of the EVEAPI server
    #
    # proxy - (host,port) specifying a proxy server through which to request
    #   the API pages. Specifying a proxy overrides the module default.
    #
    # cacheHandler - an object which must support the following interface:
    #
    #   retrieve(host, path, params)
    #     Called when eveapi wants to fetch a document.  MUST return one of:
    #       None               - cache miss
    #       str/unicode        - parsed as XML
    #       Element            - previously stored object given to store()
    #       file-like object   - XML is read() from the stream
    #
    #   store(host, path, params, doc, obj)
    #     Called when eveapi wants you to cache this item.  doc is the XML
    #     the object was generated from; only called if retrieve() returned
    #     None for this object.
    #
    if not url.startswith("http"):
        url = "https://" + url
    p = urlparse.urlparse(url, "https")
    # Fix: urlparse returns an immutable result object, so the old in-place
    # "p.path = p.path[:-1]" raised AttributeError for any root url with a
    # trailing slash.  Trim the path in a local variable instead.
    rootPath = p.path
    if rootPath and rootPath[-1] == "/":
        rootPath = rootPath[:-1]
    ctx = _RootContext(None, rootPath, {}, {})
    ctx._handler = cacheHandler
    ctx._scheme = p.scheme
    ctx._host = p.netloc
    # explicit proxy argument wins over the module-level default
    ctx._proxy = proxy or globals()["proxy"]
    return ctx
def ParseXML(file_or_string):
    # Public entry point: parse an arbitrary API XML document supplied as a
    # string or as a file-like object (see _ParseXML for the real work).
    try:
        parsed = _ParseXML(file_or_string, False, None)
    except TypeError:
        raise TypeError("XML data must be provided as string or file-like object")
    return parsed
def _ParseXML(response, fromContext, storeFunc):
    # Normalize `response` (Element, string, or stream) into a parsed API
    # document, raise on API-level errors, and optionally hand the document
    # to the cache-store callback.
    if fromContext and isinstance(response, Element):
        doc = response
    elif type(response) in (str, unicode):
        doc = _Parser().Parse(response, False)
    elif hasattr(response, "read"):
        doc = _Parser().Parse(response, True)
    else:
        raise TypeError("retrieve method must return None, string, file-like object or an Element instance")

    # an <error> element means the API rejected the call; surface it
    err = getattr(doc, "error", False)
    if err:
        raise Error(err.code, err.data)

    result = getattr(doc, "result", False)
    if not result:
        raise RuntimeError("API object does not contain result")

    if fromContext and storeFunc:
        # call the cache handler to store this object
        storeFunc(doc)

    # expose document metadata (cachedUntil, currentTime, ...) to the caller
    result._meta = doc
    return result
#-----------------------------------------------------------------------------
# API Classes
#-----------------------------------------------------------------------------
# Argument types that __call__ flattens into comma-separated strings before
# sending them to the API.
_listtypes = (list, tuple, dict)
# Unique sentinel meaning "no default supplied" (see FilterRowset.get).
_unspecified = []
class _Context(object):
def __init__(self, root, path, parentDict, newKeywords=None):
self._root = root or self
self._path = path
if newKeywords:
if parentDict:
self.parameters = parentDict.copy()
else:
self.parameters = {}
self.parameters.update(newKeywords)
else:
self.parameters = parentDict or {}
def context(self, *args, **kw):
if kw or args:
path = self._path
if args:
path += "/" + "/".join(args)
return self.__class__(self._root, path, self.parameters, kw)
else:
return self
def __getattr__(self, this):
# perform arcane attribute majick trick
return _Context(self._root, self._path + "/" + this, self.parameters)
def __call__(self, **kw):
if kw:
# specified keywords override contextual ones
for k, v in self.parameters.iteritems():
if k not in kw:
kw[k] = v
else:
# no keywords provided, just update with contextual ones.
kw.update(self.parameters)
# now let the root context handle it further
return self._root(self._path, **kw)
class _AuthContext(_Context):
    # Context with credentials (keyID/vCode or legacy userID/apiKey) bound.

    def _scoped(self, folder, characterID):
        # Build a child context under `folder` with characterID attached.
        return _Context(self._root, self._path + folder, self.parameters, {"characterID": characterID})

    def character(self, characterID):
        # Returns a copy of this connection object; every call made through
        # it adds "/char" to the url and characterID to the parameters.
        return self._scoped("/char", characterID)

    def corporation(self, characterID):
        # same as character except for the folder "/corp"
        return self._scoped("/corp", characterID)
class _RootContext(_Context):
def auth(self, **kw):
if len(kw) == 2 and (("keyID" in kw and "vCode" in kw) or ("userID" in kw and "apiKey" in kw)):
return _AuthContext(self._root, self._path, self.parameters, kw)
raise ValueError("Must specify keyID and vCode")
def setcachehandler(self, handler):
self._root._handler = handler
def __call__(self, path, **kw):
# convert list type arguments to something the API likes
for k, v in kw.iteritems():
if isinstance(v, _listtypes):
kw[k] = ','.join(map(str, list(v)))
cache = self._root._handler
# now send the request
path += ".xml.aspx"
if cache:
response = cache.retrieve(self._host, path, kw)
else:
response = None
if response is None:
if self._scheme == "https":
connectionclass = httplib.HTTPSConnection
else:
connectionclass = httplib.HTTPConnection
if self._proxy is None:
if self._scheme == "https":
connectionclass = httplib.HTTPSConnection
else:
connectionclass = httplib.HTTPConnection
http = connectionclass(self._host)
if kw:
http.request("POST", path, urllib.urlencode(kw), {"Content-type": "application/x-www-form-urlencoded"})
else:
http.request("GET", path)
else:
connectionclass = httplib.HTTPConnection
http = connectionclass(*self._proxy)
if kw:
http.request("POST", self._scheme+'://'+self._host+path, urllib.urlencode(kw), {"Content-type": "application/x-www-form-urlencoded"})
else:
http.request("GET", self._scheme+'://'+self._host+path)
response = http.getresponse()
if response.status != 200:
if response.status == httplib.NOT_FOUND:
raise AttributeError("'%s' not available on API server (404 Not Found)" % path)
else:
raise RuntimeError("'%s' request failed (%d %s)" % (path, response.status, response.reason))
if cache:
store = True
response = response.read()
else:
store = False
else:
store = False
retrieve_fallback = cache and getattr(cache, "retrieve_fallback", False)
if retrieve_fallback:
# implementor is handling fallbacks...
try:
return _ParseXML(response, True, store and (lambda obj: cache.store(self._host, path, kw, response, obj)))
except Error, reason:
response = retrieve_fallback(self._host, path, kw, reason=e)
if response is not None:
return response
raise
else:
# implementor is not handling fallbacks...
return _ParseXML(response, True, store and (lambda obj: cache.store(self._host, path, kw, response, obj)))
#-----------------------------------------------------------------------------
# XML Parser
#-----------------------------------------------------------------------------
def _autocast(key, value):
# attempts to cast an XML string to the most probable type.
try:
if value.strip("-").isdigit():
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if len(value) == 19 and value[10] == ' ':
# it could be a date string
try:
return max(0, int(timegm(strptime(value, "%Y-%m-%d %H:%M:%S"))))
except OverflowError:
pass
except ValueError:
pass
# couldn't cast. return string unchanged.
return value
class _Parser(object):
# Expat-driven parser that converts an EVE API XML document into the
# Element / Rowset / IndexRowset container tree defined further below.
# NOTE: attributes written as this.__catch / this.__parent are name-mangled
# to _Parser__catch / _Parser__parent; every read also happens inside this
# class, so the mangling is self-consistent.
def Parse(self, data, isStream=False):
# Parse `data` (a string, or a file-like object when isStream=True) and
# return the root Element of the resulting tree.
self.container = self.root = None
self._cdata = False
p = expat.ParserCreate()
p.StartElementHandler = self.tag_start
p.CharacterDataHandler = self.tag_cdata
p.StartCdataSectionHandler = self.tag_cdatasection_enter
p.EndCdataSectionHandler = self.tag_cdatasection_exit
p.EndElementHandler = self.tag_end
# ordered_attributes makes expat deliver [name1, value1, name2, value2,...]
# lists, which all handlers below rely on; buffer_text keeps character
# data from being split across multiple events.
p.ordered_attributes = True
p.buffer_text = True
if isStream:
p.ParseFile(data)
else:
p.Parse(data, True)
return self.root
def tag_cdatasection_enter(self):
# encountered an explicit CDATA tag.
self._cdata = True
def tag_cdatasection_exit(self):
if self._cdata:
# explicit CDATA without actual data. expat doesn't seem
# to trigger an event for this case, so do it manually.
# (_cdata is set False by this call)
self.tag_cdata("")
else:
self._cdata = False
def tag_start(self, name, attributes):
# Open-tag handler: create the appropriate container (Rowset/Element),
# link it to the current parent, and auto-append row tags to rowsets.
# <hack>
# If there's a colon in the tag name, cut off the name from the colon
# onward. This is a workaround to make certain bugged XML responses
# (such as eve/CharacterID.xml.aspx) work.
if ":" in name:
name = name[:name.index(":")]
# </hack>
if name == "rowset":
# for rowsets, use the given name
try:
columns = attributes[attributes.index('columns')+1].replace(" ", "").split(",")
except ValueError:
# rowset did not have columns tag set (this is a bug in API)
# columns will be extracted from first row instead.
columns = []
try:
priKey = attributes[attributes.index('key')+1]
this = IndexRowset(cols=columns, key=priKey)
except ValueError:
this = Rowset(cols=columns)
this._name = attributes[attributes.index('name')+1]
this.__catch = "row" # tag to auto-add to rowset.
else:
this = Element()
this._name = name
this.__parent = self.container
if self.root is None:
# We're at the root. The first tag has to be "eveapi" or we can't
# really assume the rest of the xml is going to be what we expect.
if name != "eveapi":
raise RuntimeError("Invalid API response")
self.root = this
if isinstance(self.container, Rowset) and (self.container.__catch == this._name):
# <hack>
# - check for missing columns attribute (see above)
# - check for extra attributes that were not defined in the rowset,
# such as rawQuantity in the assets lists.
# In either case the tag is assumed to be correct and the rowset's
# columns are overwritten with the tag's version.
if not self.container._cols or (len(attributes)/2 > len(self.container._cols)):
self.container._cols = attributes[0::2]
# </hack>
self.container.append([_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)])
this._isrow = True
this._attributes = this._attributes2 = None
else:
this._isrow = False
this._attributes = attributes
this._attributes2 = []
self.container = this
def tag_cdata(self, data):
# Character-data handler; routing depends on whether we are inside an
# explicit CDATA section, inside a row, or inside a plain tag.
if self._cdata:
# unset cdata flag to indicate it's been handled.
self._cdata = False
else:
# ignore pure newline/indentation text nodes
if data in ("\r\n", "\n") or data.strip() != data:
return
this = self.container
data = _autocast(this._name, data)
if this._isrow:
# sigh. anonymous data inside rows makes Entity cry.
# for the love of Jove, CCP, learn how to use rowsets.
parent = this.__parent
_row = parent._rows[-1]
_row.append(data)
if len(parent._cols) < len(_row):
parent._cols.append("data")
elif this._attributes:
# this tag has attributes, so we can't simply assign the cdata
# as an attribute to the parent tag, as we'll lose the current
# tag's attributes then. instead, we'll assign the data as
# attribute of this tag.
this.data = data
else:
# this was a simple <tag>data</tag> without attributes.
# we won't be doing anything with this actual tag so we can just
# bind it to its parent (done by __tag_end)
setattr(this.__parent, this._name, data)
def tag_end(self, name):
# Close-tag handler: pop the current container and attach it (or its
# data) to the parent, merging duplicate sibling tags into Rowsets.
this = self.container
if this is self.root:
del this._attributes
#this.__dict__.pop("_attributes", None)
return
# we're done with current tag, so we can pop it off. This means that
# self.container will now point to the container of element 'this'.
self.container = this.__parent
del this.__parent
attributes = this.__dict__.pop("_attributes")
attributes2 = this.__dict__.pop("_attributes2")
if attributes is None:
# already processed this tag's closure early, in tag_start()
return
if self.container._isrow:
# Special case here. tags inside a row! Such tags have to be
# added as attributes of the row.
parent = self.container.__parent
# get the row line for this element from its parent rowset
_row = parent._rows[-1]
# add this tag's value to the end of the row
_row.append(getattr(self.container, this._name, this))
# fix columns if neccessary.
if len(parent._cols) < len(_row):
parent._cols.append(this._name)
else:
# see if there's already an attribute with this name (this shouldn't
# really happen, but it doesn't hurt to handle this case!
sibling = getattr(self.container, this._name, None)
if sibling is None:
self.container._attributes2.append(this._name)
setattr(self.container, this._name, this)
# Note: there aren't supposed to be any NON-rowset tags containing
# multiples of some tag or attribute. Code below handles this case.
elif isinstance(sibling, Rowset):
# its doppelganger is a rowset, append this as a row to that.
row = [_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)]
row.extend([getattr(this, col) for col in attributes2])
sibling.append(row)
elif isinstance(sibling, Element):
# parent attribute is an element. This means we're dealing
# with multiple of the same sub-tag. Change the attribute
# into a Rowset, adding the sibling element and this one.
rs = Rowset()
rs.__catch = rs._name = this._name
row = [_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)]+[getattr(this, col) for col in attributes2]
rs.append(row)
row = [getattr(sibling, attributes[i]) for i in xrange(0, len(attributes), 2)]+[getattr(sibling, col) for col in attributes2]
rs.append(row)
rs._cols = [attributes[i] for i in xrange(0, len(attributes), 2)]+[col for col in attributes2]
setattr(self.container, this._name, rs)
else:
# something else must have set this attribute already.
# (typically the <tag>data</tag> case in tag_data())
pass
# Now fix up the attributes and be done with it.
for i in xrange(0, len(attributes), 2):
this.__dict__[attributes[i]] = _autocast(attributes[i], attributes[i+1])
return
#-----------------------------------------------------------------------------
# XML Data Containers
#-----------------------------------------------------------------------------
# The following classes are the various container types the XML data is
# unpacked into.
#
# Note that objects returned by API calls are to be treated as read-only. This
# is not enforced, but you have been warned.
#-----------------------------------------------------------------------------
class Element(object):
    # Plain namespace node: the parser attaches tag attributes and nested
    # child tags to instances of this class as ordinary attributes.
    def __str__(self):
        label = self._name
        return "<Element '%s'>" % (label,)
# helper bound-method used by Row.__str__ to render "col:value" pairs
_fmt = u"%s:%s".__mod__


class Row(object):
    # A Row is a single database record associated with a Rowset.
    # The fields in the record are accessed as attributes by their respective
    # column name.
    #
    # To conserve resources, Row objects are only created on-demand. This is
    # typically done by Rowsets (e.g. when iterating over the rowset).

    def __init__(self, cols=None, row=None):
        self._cols = cols or []
        self._row = row or []

    def __nonzero__(self):
        # a Row is always truthy, even when empty
        return True

    def __ne__(self, other):
        # Fix: coerce the cmp() result to a proper boolean instead of
        # returning the raw -1/0/1 integer.
        return self.__cmp__(other) != 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __cmp__(self, other):
        if type(other) != type(self):
            raise TypeError("Incompatible comparison type")
        return cmp(self._cols, other._cols) or cmp(self._row, other._row)

    def __getattr__(self, this):
        # map attribute access onto the column of the same name
        try:
            return self._row[self._cols.index(this)]
        except (ValueError, IndexError):
            # Fix: catch only the two lookup failures (unknown column /
            # short row) instead of a bare except, and use call-style raise.
            raise AttributeError(this)

    def __getitem__(self, this):
        return self._row[self._cols.index(this)]

    def __str__(self):
        return "Row(" + ','.join(map(_fmt, zip(self._cols, self._row))) + ")"
class Rowset(object):
    # Rowsets are collections of Row objects, supporting most of the list
    # interface (iteration, indexing and slicing) plus the query helpers
    # documented on each method below.  Objects returned by API calls are
    # to be treated as read-only.

    def IndexedBy(self, column):
        # Returns an IndexRowset keyed on given column. Requires the column
        # to be usable as primary key.
        return IndexRowset(self._cols, self._rows, column)

    def GroupedBy(self, column):
        # Returns a FilterRowset keyed on given column; FilterRowsets are
        # accessed like dicts (see FilterRowset class below).
        return FilterRowset(self._cols, self._rows, column)

    def SortBy(self, column, reverse=False):
        # Sorts the rowset in-place on the given column.
        ix = self._cols.index(column)
        self.sort(key=lambda e: e[ix], reverse=reverse)

    def SortedBy(self, column, reverse=False):
        # Same as SortBy, except this returns a new sorted rowset.
        rs = self[:]
        rs.SortBy(column, reverse)
        return rs

    def Select(self, *columns, **options):
        # Yields a column value (one column) or a list of values (several
        # columns) for each row; with row=True each result is paired with
        # the entire row.
        withRow = options.get("row", False)
        if len(columns) == 1:
            i = self._cols.index(columns[0])
            if withRow:
                for line in self._rows:
                    yield (line, line[i])
            else:
                for line in self._rows:
                    yield line[i]
        else:
            # Fix: materialize the index list.  The old code used map(),
            # which under Python 3 is a one-shot iterator and would yield
            # empty lists from the second row onward.
            i = [self._cols.index(col) for col in columns]
            if withRow:
                for line in self._rows:
                    yield line, [line[x] for x in i]
            else:
                for line in self._rows:
                    yield [line[x] for x in i]

    # -------------

    def __init__(self, cols=None, rows=None):
        self._cols = cols or []
        self._rows = rows or []

    def append(self, row):
        if isinstance(row, list):
            self._rows.append(row)
        elif isinstance(row, Row) and len(row._cols) == len(self._cols):
            self._rows.append(row._row)
        else:
            raise TypeError("incompatible row type")

    def __add__(self, other):
        # Fix: the old version fell through to the raise even after a
        # successful merge (and otherwise returned None implicitly); now
        # merge and return self, raising only for incompatible operands.
        if isinstance(other, Rowset) and len(other._cols) == len(self._cols):
            self._rows += other._rows
            return self
        raise TypeError("rowset instance expected")

    def __nonzero__(self):
        return not not self._rows

    def __len__(self):
        return len(self._rows)

    def copy(self):
        return self[:]

    def __getitem__(self, ix):
        if type(ix) is slice:
            return Rowset(self._cols, self._rows[ix])
        return Row(self._cols, self._rows[ix])

    def sort(self, *args, **kw):
        self._rows.sort(*args, **kw)

    def __str__(self):
        return ("Rowset(columns=[%s], rows=%d)" % (','.join(self._cols), len(self)))

    def __getstate__(self):
        return (self._cols, self._rows)

    def __setstate__(self, state):
        self._cols, self._rows = state
class IndexRowset(Rowset):
    # A Rowset that additionally maintains a key -> row index; the key may
    # be a single column name or a comma-separated composite key.
    #
    # Get(key [, default])
    #   Returns the Row mapped to the provided key; raises KeyError for
    #   unknown keys unless a default value was specified.

    def Get(self, key, *default):
        row = self._items.get(key, None)
        if row is None:
            if default:
                return default[0]
            raise KeyError(key)
        return Row(self._cols, row)

    # -------------

    def __init__(self, cols=None, rows=None, key=None):
        try:
            if "," in key:
                # composite key: index rows on a tuple of column values
                self._ki = ki = [cols.index(k) for k in key.split(",")]
                self.composite = True
            else:
                self._ki = ki = cols.index(key)
                self.composite = False
        except ValueError:
            # Fix: list.index raises ValueError (not IndexError) for an
            # unknown column, so this friendly message was unreachable.
            raise ValueError("Rowset has no column %s" % key)
        Rowset.__init__(self, cols, rows)
        self._key = key
        if self.composite:
            self._items = dict((tuple([row[k] for k in ki]), row) for row in self._rows)
        else:
            self._items = dict((row[ki], row) for row in self._rows)

    def __getitem__(self, ix):
        if type(ix) is slice:
            return IndexRowset(self._cols, self._rows[ix], self._key)
        return Rowset.__getitem__(self, ix)

    def append(self, row):
        # keep the index in sync with the underlying row list
        Rowset.append(self, row)
        if self.composite:
            self._items[tuple([row[k] for k in self._ki])] = row
        else:
            self._items[row[self._ki]] = row

    def __getstate__(self):
        return (Rowset.__getstate__(self), self._items, self._ki)

    def __setstate__(self, state):
        state, self._items, self._ki = state
        Rowset.__setstate__(self, state)
class FilterRowset(object):
    # A FilterRowset works much like an IndexRowset, with the following
    # differences:
    # - FilterRowsets are accessed much like dicts
    # - Each key maps to a Rowset, containing only the rows where the value
    #   of the column this FilterRowset was made on matches the key.

    def __init__(self, cols=None, rows=None, key=None, key2=None, dict=None):
        # NOTE: `dict` shadows the builtin, but it is part of the public
        # signature (copy() passes it by keyword), so the name must stay.
        if dict is not None:
            self._items = items = dict
        elif cols is not None:
            self._items = items = {}
            idfield = cols.index(key)
            if not key2:
                # single-level grouping: key value -> list of rows
                for row in rows:
                    id = row[idfield]
                    if id in items:
                        items[id].append(row)
                    else:
                        items[id] = [row]
            else:
                # two-level grouping: key value -> {key2 value: row}
                idfield2 = cols.index(key2)
                for row in rows:
                    id = row[idfield]
                    if id in items:
                        items[id][row[idfield2]] = row
                    else:
                        items[id] = {row[idfield2]: row}
        self._cols = cols
        self.key = key
        self.key2 = key2
        self._bind()

    def _bind(self):
        # expose the underlying dict's interface directly on the instance
        items = self._items
        self.keys = items.keys
        self.iterkeys = items.iterkeys
        self.__contains__ = items.__contains__
        self.has_key = items.has_key
        self.__len__ = items.__len__
        self.__iter__ = items.__iter__

    def copy(self):
        return FilterRowset(self._cols[:], None, self.key, self.key2, dict=copy.deepcopy(self._items))

    def get(self, key, default=_unspecified):
        try:
            return self[key]
        except KeyError:
            if default is _unspecified:
                raise
            return default

    def __getitem__(self, i):
        if self.key2:
            # NOTE(review): this passes four positional arguments to
            # IndexRowset(cols, rows, key), which only takes three -- the
            # key2 path looks broken upstream; confirm before relying on it.
            return IndexRowset(self._cols, None, self.key2, self._items.get(i, {}))
        return Rowset(self._cols, self._items[i])

    def __getstate__(self):
        # Fix: the old implementation included self._rows, an attribute this
        # class never sets, so every pickle attempt raised AttributeError.
        return (self._cols, self._items, self.key, self.key2)

    def __setstate__(self, state):
        self._cols, self._items, self.key, self.key2 = state
        self._bind()

View File

@@ -195,13 +195,13 @@ class ModifiedAttributeDict(collections.MutableMapping):
affs = self.__affectedBy[attributeName]
# If there's no set for current fit in dictionary, create it
if self.fit not in affs:
affs[self.fit] = set()
# Reassign alias to set
affs[self.fit] = []
# Reassign alias to list
affs = affs[self.fit]
# Get modifier which helps to compose 'Affected by' map
modifier = self.fit.getModifier()
# Add current affliction to set
affs.add((modifier, operation, bonus, used))
# Add current affliction to list
affs.append((modifier, operation, bonus, used))
def preAssign(self, attributeName, value):
"""Overwrites original value of the entity with given one, allowing further modification"""

View File

@@ -18,12 +18,9 @@
#===============================================================================
import urllib2
from sqlalchemy.orm import validates, reconstructor
from eos.effectHandlerHelpers import HandledItem
from sqlalchemy.orm import validates, reconstructor
import sqlalchemy.orm.exc as exc
from eos import eveapi
import eos
class Character(object):
@@ -108,27 +105,6 @@ class Character(object):
for skill in self.__skills:
self.__skillIdMap[skill.itemID] = skill
def apiCharList(self, proxy=None):
# Return the character names available to this API key as unicode strings.
# NOTE(review): relies on self.apiID / self.apiKey being set beforehand.
api = eveapi.EVEAPIConnection(proxy=proxy)
auth = api.auth(keyID=self.apiID, vCode=self.apiKey)
apiResult = auth.account.Characters()
return map(lambda c: unicode(c.name), apiResult.characters)
def apiFetch(self, charName, proxy=None):
# Fetch the character sheet for `charName` from the EVE API and apply it
# via apiUpdateCharSheet; silently returns when the name is not found on
# the account.
api = eveapi.EVEAPIConnection(proxy=proxy)
auth = api.auth(keyID=self.apiID, vCode=self.apiKey)
apiResult = auth.account.Characters()
charID = None
for char in apiResult.characters:
if char.name == charName:
charID = char.characterID
# NOTE(review): "== None" should be "is None"; left as-is in this doc edit.
if charID == None:
return
sheet = auth.character(charID).CharacterSheet()
self.apiUpdateCharSheet(sheet)
def apiUpdateCharSheet(self, sheet):
del self.__skills[:]
self.__skillIdMap.clear()

View File

@@ -65,6 +65,7 @@ class Fit(object):
self.boostsFits = set()
self.gangBoosts = None
self.timestamp = time.time()
self.ecmProjectedStr = 1
self.build()
@reconstructor
@@ -93,6 +94,7 @@ class Fit(object):
self.fleet = None
self.boostsFits = set()
self.gangBoosts = None
self.ecmProjectedStr = 1
self.extraAttributes = ModifiedAttributeDict(self)
self.extraAttributes.original = self.EXTRA_ATTRIBUTES
self.ship = Ship(db.getItem(self.shipID)) if self.shipID is not None else None
@@ -226,6 +228,10 @@ class Fit(object):
return type
@property
def jamChance(self):
return (1-self.ecmProjectedStr)*100
@property
def alignTime(self):
agility = self.ship.getModifiedItemAttr("agility")
@@ -269,6 +275,7 @@ class Fit(object):
self.__capState = None
self.__capUsed = None
self.__capRecharge = None
self.ecmProjectedStr = 1
del self.__calculatedTargets[:]
del self.__extraDrains[:]
@@ -341,7 +348,7 @@ class Fit(object):
else:
c = chain((self.character, self.ship), self.drones, self.boosters, self.appliedImplants, self.modules,
self.projectedDrones, self.projectedModules)
if self.gangBoosts is not None:
contextMap = {Skill: "skill",
Ship: "ship",
@@ -366,7 +373,7 @@ class Fit(object):
effect.handler(self, thing, context)
except:
pass
for item in c:
# Registering the item about to affect the fit allows us to track "Affected By" relations correctly
if item is not None:
@@ -375,7 +382,7 @@ class Fit(object):
if forceProjected is True:
targetFit.register(item)
item.calculateModifiedAttributes(targetFit, runTime, True)
for fit in self.projectedFits:
fit.calculateModifiedAttributes(self, withBoosters=withBoosters, dirtyStorage=dirtyStorage)

View File

@@ -19,18 +19,9 @@
#===============================================================================
import time
import urllib2
from xml.dom import minidom
from sqlalchemy.orm import reconstructor
import eos.db
class Price(object):
# Price validity period, 24 hours
VALIDITY = 24*60*60
# Re-request delay for failed fetches, 4 hours
REREQUEST = 4*60*60
def __init__(self, typeID):
self.typeID = typeID
@@ -42,307 +33,6 @@ class Price(object):
def init(self):
self.__item = None
def isValid(self, rqtime=None):
    """Return True if this price record is still usable.

    rqtime -- timestamp of the request batch; defaults to the current time.
    (Fix: the old signature used rqtime=time.time() as a default, which is
    evaluated once at import time, silently freezing the default request
    time for the whole process lifetime.)
    """
    if rqtime is None:
        rqtime = time.time()
    updateAge = rqtime - self.time
    # Mark price as invalid if it is expired
    validity = updateAge <= self.VALIDITY
    # Price is considered as valid, if it's expired but we had failed
    # fetch attempt recently
    if validity is False and self.failed is not None:
        failedAge = rqtime - self.failed
        validity = failedAge <= self.REREQUEST
    # If it's already invalid, it can't get any better
    if validity is False:
        return validity
    # If failed timestamp refers to future relatively to current
    # system clock, mark price as invalid (guard against failed=None,
    # which modern Python refuses to compare against a number)
    if self.failed is not None and self.failed > rqtime:
        return False
    # Do the same for last updated timestamp
    if self.time > rqtime:
        return False
    return validity
@classmethod
def fetchPrices(cls, prices, proxy=None):
"""Fetch all prices passed to this method"""
# Set time of the request
# We have to pass this time to all of our used methods and validity checks
# Using time of check instead can make extremely rare edge-case bugs to appear
# (e.g. when item price is already considered as outdated, but c0rp fetch is still
# valid, just because their update time has been set using slightly older timestamp)
rqtime = time.time()
# Dictionary for our price objects
priceMap = {}
# Check all provided price objects, and add invalid ones to dictionary
for price in prices:
if not price.isValid(rqtime=rqtime):
priceMap[price.typeID] = price
# List our price service methods
# (each entry is (callable, positional args, keyword args))
services = ((cls.fetchEveCentral, (priceMap,), {"rqtime": rqtime, "proxy": proxy}),
(cls.fetchC0rporation, (priceMap,), {"rqtime": rqtime, "proxy": proxy}))
# Cycle through services
for svc, args, kwargs in services:
# Stop cycling if we don't need price data anymore
if len(priceMap) == 0:
break
# Request prices and get some feedback
# NOTE(review): each service appears to mutate priceMap entries in place
# and return (noData, abortedData) sets -- confirm against the service
# implementations, which are outside this view.
noData, abortedData = svc(*args, **kwargs)
# Mark items with some failure occurred during fetching
for typeID in abortedData:
priceMap[typeID].failed = rqtime
# Clear map from the fetched and failed items, leaving only items
# for which we've got no data
# (collect first, then delete: a dict can't change size while iterated)
toRemove = set()
for typeID in priceMap:
if typeID not in noData:
toRemove.add(typeID)
for typeID in toRemove:
del priceMap[typeID]
# After we've checked all possible services, assign zero price for items
# which were not found on any service to avoid re-fetches during validity
# period
for typeID in priceMap:
priceobj = priceMap[typeID]
priceobj.price = 0
priceobj.time = rqtime
priceobj.failed = None
@classmethod
def fetchEveCentral(cls, priceMap, rqtime=time.time(), proxy=None):
"""Use Eve-Central price service provider"""
# This set will contain typeIDs which were requested but no data has been fetched for them
noData = set()
# This set will contain items for which data fetch was aborted due to technical reasons
abortedData = set()
# Set of items which are still to be requested from this service
toRequestSvc = set()
# Compose list of items we're going to request
for typeID in priceMap:
# Get item object
item = eos.db.getItem(typeID)
# We're not going to request items only with market group, as eve-central
# doesn't provide any data for items not on the market
# Items w/o market group will be added to noData in the very end
if item.marketGroupID:
toRequestSvc.add(typeID)
# Do not waste our time if all items are not on the market
if len(toRequestSvc) == 0:
noData.update(priceMap.iterkeys())
return (noData, abortedData)
# This set will contain typeIDs for which we've got useful data
fetchedTypeIDs = set()
# Base request URL
baseurl = "http://api.eve-central.com/api/marketstat"
# Area limitation list
areas = ("usesystem=30000142", # Jita
None) # Global
# Fetch prices from Jita market, if no data was available - check global data
for area in areas:
# Append area limitations to base URL
areaurl = "{0}&{1}".format(baseurl, area) if area else baseurl
# Set which contains IDs of items which we will fetch for given area
toRequestArea = toRequestSvc.difference(fetchedTypeIDs).difference(abortedData)
# As length of URL is limited, make a loop to make sure we request all data
while(len(toRequestArea) > 0):
# Set of items we're requesting during this cycle
requestedThisUrl = set()
# Always start composing our URL from area-limited URL
requrl = areaurl
# Generate final URL, making sure it isn't longer than 255 characters
for typeID in toRequestArea:
# Try to add new typeID argument
newrequrl = "{0}&typeid={1}".format(requrl, typeID)
# If we didn't exceed our limits
if len(newrequrl) <= 255:
# Accept new URL
requrl = newrequrl
# Fill the set for the utility needs
requestedThisUrl.add(typeID)
# Use previously generated URL if new is out of bounds
else:
break
# Do not request same items from the same area
toRequestArea.difference_update(requestedThisUrl)
# Replace first ampersand with question mark to separate arguments
# from URL itself
requrl = requrl.replace("&", "?", 1)
# Make the request object
request = urllib2.Request(requrl, headers={"User-Agent" : "eos"})
# Attempt to send request and process it
try:
if proxy is not None:
proxyHandler = urllib2.ProxyHandler({"http": proxy})
opener = urllib2.build_opener(proxyHandler)
urllib2.install_opener(opener)
data = urllib2.urlopen(request)
xml = minidom.parse(data)
types = xml.getElementsByTagName("marketstat").item(0).getElementsByTagName("type")
# Cycle through all types we've got from request
for type in types:
# Get data out of each typeID details tree
typeID = int(type.getAttribute("id"))
sell = type.getElementsByTagName("sell").item(0)
# If price data wasn't there, set price to zero
try:
percprice = float(sell.getElementsByTagName("percentile").item(0).firstChild.data)
except (TypeError, ValueError):
percprice = 0
# Eve-central returns zero price if there was no data, thus modify price
# object only if we've got non-zero price
if percprice:
# Add item id to list of fetched items
fetchedTypeIDs.add(typeID)
# Fill price data
priceobj = priceMap[typeID]
priceobj.price = percprice
priceobj.time = rqtime
priceobj.failed = None
# If getting or processing data returned any errors
except:
# Consider fetch as aborted
abortedData.update(requestedThisUrl)
# Get actual list of items for which we didn't get data; it includes all requested items
# (even those which didn't pass filter), excluding items which had problems during fetching
# and items for which we've successfully fetched price
noData.update(set(priceMap.iterkeys()).difference(fetchedTypeIDs).difference(abortedData))
# And return it for future use
return (noData, abortedData)
@classmethod
def fetchC0rporation(cls, priceMap, rqtime=None, proxy=None):
    """Use c0rporation.com price service provider.

    priceMap -- dict of typeID -> Price objects to fill in place
    rqtime -- UNIX timestamp of the request; defaults to now (the old
    signature evaluated time.time() once at class-definition time)
    proxy -- optional http proxy address

    Returns a (noData, abortedData) pair of typeID sets, same contract
    as fetchEveCentral. Unlike eve-central, this provider serves one
    whole XML, so freshness/failure is tracked per-service in MiscData
    rather than per-item.
    """
    # Local import: eos.types may not be fully initialized at module
    # import time (MiscData would be missing)
    from eos.types import MiscData
    if rqtime is None:
        rqtime = time.time()
    # Requested items w/o any data returned
    noData = set()
    # Items which had errors during fetching
    abortedData = set()
    # Types for which we've got data
    fetchedTypeIDs = set()
    # Prices we'll re-request from eve-central afterwards
    eveCentralUpdate = {}
    # Check when we updated prices from this service last time
    fieldName = "priceC0rpTime"
    lastUpdatedField = eos.db.getMiscData(fieldName)
    # If this field isn't available, create and add it to session
    if lastUpdatedField is None:
        lastUpdatedField = MiscData(fieldName)
        eos.db.add(lastUpdatedField)
    # Convert field value to float, falling back to zero on any errors
    try:
        lastUpdated = float(lastUpdatedField.fieldValue)
    except (TypeError, ValueError):
        lastUpdated = 0
    updateAge = rqtime - lastUpdated
    # XML is still fresh only when within validity period and the last
    # update doesn't lie in the future (guards against clock rollbacks)
    c0rpValidityUpd = updateAge <= cls.VALIDITY and lastUpdated <= rqtime
    # If the previously fetched XML is still valid yet we are asked
    # about some items, those items simply are not on the XML (valid
    # items are filtered out before reaching this method)
    if c0rpValidityUpd is True:
        noData.update(set(priceMap))
        return (noData, abortedData)
    # Check when price fetching failed last time
    fieldName = "priceC0rpFailed"
    lastFailedField = eos.db.getMiscData(fieldName)
    # If it doesn't exist, add this one to the session too
    if lastFailedField is None:
        lastFailedField = MiscData(fieldName)
        eos.db.add(lastFailedField)
    # Convert field value to float, falling back to None on any errors
    try:
        lastFailed = float(lastFailedField.fieldValue)
    except (TypeError, ValueError):
        lastFailed = None
    # If we had a failed fetch attempt at some point
    if lastFailed is not None:
        failedAge = rqtime - lastFailed
        # Within re-request cooldown (or failure timestamp refers to
        # the future): do not hit the service at all
        c0rpValidityFail = failedAge <= cls.REREQUEST and lastFailed <= rqtime
        if c0rpValidityFail is True:
            # Consider all requested items as aborted; we don't store
            # which items the XML actually carries, so this includes
            # items normally absent from it too
            abortedData.update(set(priceMap))
            return (noData, abortedData)
    requrl = "http://prices.c0rporation.com/faction.xml"
    request = urllib2.Request(requrl, headers={"User-Agent": "eos"})
    try:
        if proxy is not None:
            proxyHandler = urllib2.ProxyHandler({"http": proxy})
            opener = urllib2.build_opener(proxyHandler)
            urllib2.install_opener(opener)
        data = urllib2.urlopen(request)
        xml = minidom.parse(data)
        rowsets = xml.getElementsByTagName("rowset")
        for rowset in rowsets:
            rows = rowset.getElementsByTagName("row")
            # Single pass over the whole XML: we don't want to fetch
            # and re-parse it once per price request
            for row in rows:
                typeID = int(row.getAttribute("typeID"))
                # Median price field may be absent or empty
                try:
                    medprice = float(row.getAttribute("median"))
                except (TypeError, ValueError):
                    medprice = 0
                # Process only non-zero prices
                if medprice:
                    fetchedTypeIDs.add(typeID)
                    # Prefer the price object we were handed; otherwise
                    # look it up in, or add it to, the database
                    if typeID in priceMap:
                        priceobj = priceMap[typeID]
                    else:
                        priceobj = eos.db.getPrice(typeID)
                    if priceobj is None:
                        priceobj = Price(typeID)
                        eos.db.add(priceobj)
                    priceobj.price = medprice
                    priceobj.time = rqtime
                    priceobj.failed = None
                    # Market items get queued for an eve-central refresh
                    item = eos.db.getItem(typeID)
                    if item is not None and item.marketGroupID:
                        eveCentralUpdate[typeID] = priceobj
        # Re-request market items from eve-central: c0rp returns prices
        # for lots of items, and writing its price would otherwise block
        # further updates from eve-central, which we consider the more
        # accurate source. No feedback needed, we just want the update.
        if eveCentralUpdate:
            cls.fetchEveCentral(eveCentralUpdate, rqtime=rqtime, proxy=proxy)
        # Record successful fetch time and clear the failure marker
        lastUpdatedField.fieldValue = rqtime
        lastFailedField.fieldValue = None
        # Items which were requested but absent from the XML
        noData.update(set(priceMap).difference(fetchedTypeIDs))
    # Was a bare `except:`; any fetch/parse error aborts the whole run
    except Exception:
        abortedData.update(set(priceMap))
        lastFailedField.fieldValue = rqtime
    return (noData, abortedData)
@property
def isValid(self):
return self.time >= time.time()