# Module bootstrap: make the Galaxy lib/, test/ and tool shed API script
# directories importable before any galaxy.* imports are attempted.
import os
import sys
cwd = os.getcwd()
sys.path.append( cwd )
# Prepend the Galaxy library directories so they win over any system-wide copies.
new_path = [ os.path.join( cwd, "lib" ),
             os.path.join( cwd, 'test' ),
             os.path.join( cwd, 'lib', 'tool_shed', 'scripts', 'api' ) ]
new_path.extend( sys.path )
sys.path = new_path
from galaxy import eggs
# Fetch the mercurial and nose eggs before the third-party imports below.
eggs.require( 'mercurial' )
eggs.require( "nose" )
import json
import logging
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import nose
import platform
import string
import time
import tool_shed.repository_types.util as rt_util
import tool_shed.util.shed_util_common as suc
import urllib
from datetime import datetime
from datetime import timedelta
from common import get_api_url
from common import get_latest_downloadable_changeset_revision_via_api
from common import get_repository_dict
from common import json_from_url
from common import submit
from common import update
from galaxy.util import asbool
from galaxy.util import listify
from galaxy.util import unicodify
import galaxy.webapps.tool_shed.model.mapping
from nose.plugins import Plugin
from tool_shed.util import common_util
from tool_shed.util import hg_util
from tool_shed.util import tool_dependency_util
from tool_shed.util.xml_util import parse_xml
from mercurial import hg
from mercurial import ui
log = logging.getLogger(__name__)

# Set up a job_conf.xml that explicitly limits jobs to 10 minutes.
# NOTE(review): the XML template bodies below appear truncated in this copy of
# the file (only text content survives, no tags) -- confirm against the
# original source before relying on them.
job_conf_xml = '''
00:10:00
'''
# Create a blank shed_tool_conf.xml to define the installed repositories.
shed_tool_conf_xml_template = '''
'''
# Since we will be running functional tests we'll need the upload tool, but the rest can be omitted.
tool_conf_xml = '''
'''
# Set up an empty shed_tool_data_table_conf.xml.
tool_data_table_conf_xml_template = '''
'''
# Optionally set the environment variable GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF to the location of a
# tool shed's configuration file that includes the tool shed from which repositories will be installed.
tool_sheds_conf_xml = '''
'''
# Should this serve static resources (scripts, images, styles, etc.)?
STATIC_ENABLED = True
# If we have a tool_data_table_conf.test.xml, set it up to be loaded when the UniverseApplication is started.
# This allows one to specify a set of tool data that is used exclusively for testing, and not loaded into any
# Galaxy instance. By default, this will be in the test-data-repo/location directory generated by buildbot_setup.sh.
if os.path.exists( 'tool_data_table_conf.test.xml' ):
    additional_tool_data_tables = 'tool_data_table_conf.test.xml'
    additional_tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_EXTRA_TOOL_DATA_PATH',
                                                os.path.join( 'test-data-repo', 'location' ) )
else:
    additional_tool_data_tables = None
    additional_tool_data_path = None
tool_data_table_conf = None
# Set up default tool data tables. If a non-sample version is in config/, use that. Otherwise iterate through lower
# priority versions (first existing file in the list wins).
for conf in [ 'tool_data_table_conf.test.xml',
              'config/tool_data_table_conf.xml',
              'config/tool_data_table_conf.xml.sample',
              'tool_data_table_conf.xml',
              'tool_data_table_conf.xml.sample' ]:
    if os.path.exists( conf ):
        tool_data_table_conf = conf
        break
# The GALAXY_INSTALL_TEST_TOOL_SHED_URL and GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY environment variables must be
# set for this script to work correctly. If the value of GALAXY_INSTALL_TEST_TOOL_SHED_URL does not refer to one
# of the defaults, the GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF must refer to a tool shed configuration file that contains
# a definition for that tool shed.
galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', None )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
# Fall back to a fixed development secret when none is supplied, and export it
# so the spawned applications encode ids consistently.
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
    galaxy_encode_secret = 'changethisinproductiontoo'
    os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ] = galaxy_encode_secret
else:
    galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
# Defaults for the embedded Galaxy test server.
default_galaxy_test_port_min = 10000
default_galaxy_test_port_max = 10999
default_galaxy_test_host = '127.0.0.1'
# The following should be an actual value (not None). If developers manually specify their
# tests to use the API it will not work unless a master API key is specified.
default_galaxy_master_api_key = '123456'
# Optionally restrict the run to a single repository identified via environment variables.
testing_single_repository_dict = {}
if 'repository_name' in os.environ and 'repository_owner' in os.environ:
    testing_single_repository_dict[ 'name' ] = str( os.environ[ 'repository_name' ] )
    testing_single_repository_dict[ 'owner' ] = str( os.environ[ 'repository_owner' ] )
    if 'repository_revision' in os.environ:
        testing_single_repository_dict[ 'changeset_revision' ] = str( os.environ[ 'repository_revision' ] )
    else:
        testing_single_repository_dict[ 'changeset_revision' ] = None
# Test frameworks that use this utility module.
REPOSITORIES_WITH_TOOLS = 'repositories_with_tools'
TOOL_DEPENDENCY_DEFINITIONS = 'tool_dependency_definitions'
class ReportResults( Plugin ):
    """Simple Nose plugin that records the IDs of all tool tests run, regardless of success."""
    name = "reportresults"
    # Maps 'owner/name' repository identifiers to the list of passed test IDs.
    passed = {}

    def options( self, parser, env=os.environ ):
        super( ReportResults, self ).options( parser, env=env )

    def configure( self, options, conf ):
        super( ReportResults, self ).configure( options, conf )
        if not self.enabled:
            return

    def addSuccess( self, test ):
        """Record only test IDs that correspond to tool functional tests."""
        raw_id = test.id()
        if 'TestForTool' not in raw_id:
            return
        # Rearrange the ID into the '<method> (<dotted.path>)' format produced
        # in test_results.failures so both sources can be compared directly.
        pieces = raw_id.split( '.' )
        reformatted_id = '%s (%s)' % ( pieces[ -1 ], '.'.join( pieces[ :-1 ] ) )
        path_parts = reformatted_id.split( '/' )
        repo_key = '%s/%s' % ( path_parts[ -4 ], path_parts[ -3 ] )
        self.passed.setdefault( repo_key, [] ).append( reformatted_id )

    def getTestStatus( self, test_identifier ):
        """Return and clear the recorded passed-test IDs for test_identifier."""
        return self.passed.pop( test_identifier, [] )
class RepositoryMetadataApplication( object ):
    """Application that enables updating repository_metadata table records in the Tool Shed."""

    def __init__( self, config ):
        """Wire up the ORM and hgweb config manager from the received config object."""
        self.config = config
        # Fall back to a local sqlite database whenever no explicit connection
        # string was supplied.  The original only checked `is False`, which
        # missed None/'' defaults and then passed them straight to the engine.
        if not self.config.database_connection:
            self.config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % str( config.database )
        log.debug( 'Using database connection: %s' % str( self.config.database_connection ) )
        # Set up the database engine and ORM (tables are assumed to exist already).
        self.model = galaxy.webapps.tool_shed.model.mapping.init( self.config.file_path,
                                                                  self.config.database_connection,
                                                                  engine_options={},
                                                                  create_tables=False )
        self.hgweb_config_manager = self.model.hgweb_config_manager
        self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
        log.debug( 'Using hgweb.config file: %s' % str( self.hgweb_config_manager.hgweb_config ) )

    @property
    def sa_session( self ):
        """Return the current SQLAlchemy session."""
        return self.model.context.current

    def shutdown( self ):
        # Nothing to release; present to satisfy the application interface.
        pass
def display_repositories_by_owner( repository_tups ):
    """Print a summary of ( name, owner, changeset_revision ) tuples grouped by repository owner."""
    grouped_by_owner = {}
    for repo_tup in repository_tups:
        name, owner, changeset_revision = repo_tup
        if not owner:
            continue
        # Deduplicate while preserving first-seen order within each owner.
        bucket = grouped_by_owner.setdefault( owner, [] )
        if repo_tup not in bucket:
            bucket.append( repo_tup )
    # Emit one section per owner.
    for owner, owner_tups in grouped_by_owner.items():
        print( "# " )
        for name, owner, changeset_revision in owner_tups:
            print( "# Revision %s of repository %s owned by %s" % ( changeset_revision, name, owner ) )
def display_tool_dependencies_by_name( tool_dependency_tups ):
    """Print a summary of ( name, type, version ) tool dependency tuples grouped by dependency name."""
    grouped_by_name = {}
    for dep_tup in tool_dependency_tups:
        name, dep_type, version = dep_tup
        if not name:
            continue
        # Deduplicate while preserving first-seen order within each name.
        bucket = grouped_by_name.setdefault( name, [] )
        if dep_tup not in bucket:
            bucket.append( dep_tup )
    # Emit one section per dependency name.
    for name, dep_tups in grouped_by_name.items():
        print( "# " )
        for name, dep_type, version in dep_tups:
            print( "# %s %s version %s" % ( dep_type, name, version ) )
def get_database_version( app ):
    """
    Return the value of the version column from the migrate_version table,
    read through the provided app's SQLAlchemy session.  Given a Galaxy
    UniverseApplication this yields the Galaxy database migration version;
    given a tool shed application it yields the tool shed's version.
    """
    session = app.model.context.current
    rows = session.execute( 'SELECT version FROM migrate_version LIMIT 1' )
    # The result iterates as rows of columns; only the first column of the
    # first row is of interest.
    for first_row in rows:
        version = first_row[ 0 ]
        break
    return version
def get_missing_repository_dependencies( repository, all_missing_repository_dependencies=None ):
    """
    Return the entire list of missing repository dependencies for the received
    repository.  The entire dependency tree is inspected recursively.

    The accumulator list is created on the first call and threaded through the
    recursion; every level mutates and returns the same list object.
    """
    if all_missing_repository_dependencies is None:
        all_missing_repository_dependencies = []
    print( 'Checking revision %s of repository %s owned by %s for missing repository dependencies.' %
           ( repository.changeset_revision, repository.name, repository.owner ) )
    all_missing_repository_dependencies.extend( repository.missing_repository_dependencies )
    for missing_required_repository in repository.missing_repository_dependencies:
        print( 'Revision %s of required repository %s owned by %s has status %s.' %
               ( missing_required_repository.changeset_revision,
                 missing_required_repository.name,
                 missing_required_repository.owner,
                 missing_required_repository.status ) )
    for repository_dependency in repository.repository_dependencies:
        if repository_dependency.missing_repository_dependencies:
            # BUG FIX: the recursive call mutates (and returns) the shared
            # accumulator, so the original's extend() with its return value
            # duplicated every collected entry.
            get_missing_repository_dependencies( repository_dependency,
                                                 all_missing_repository_dependencies )
    return all_missing_repository_dependencies
def get_missing_tool_dependencies( repository, all_missing_tool_dependencies=None ):
    """
    Return the entire list of missing tool dependencies for the received
    repository.  The entire dependency tree is inspected recursively.

    The accumulator list is created on the first call and threaded through the
    recursion; every level mutates and returns the same list object.
    """
    if all_missing_tool_dependencies is None:
        all_missing_tool_dependencies = []
    print( 'Checking revision %s of repository %s owned by %s for missing tool dependencies.' %
           ( repository.changeset_revision, repository.name, repository.owner ) )
    all_missing_tool_dependencies.extend( repository.missing_tool_dependencies )
    for missing_tool_dependency in repository.missing_tool_dependencies:
        print( 'Tool dependency %s version %s has status %s.' %
               ( missing_tool_dependency.name, missing_tool_dependency.version, missing_tool_dependency.status ) )
    for repository_dependency in repository.repository_dependencies:
        if repository_dependency.missing_tool_dependencies:
            # BUG FIX: the recursive call mutates (and returns) the shared
            # accumulator, so the original's extend() with its return value
            # duplicated every collected entry.
            get_missing_tool_dependencies( repository_dependency,
                                           all_missing_tool_dependencies )
    return all_missing_tool_dependencies
def get_repositories_to_install( tool_shed_url, test_framework ):
    """
    Get a list of repository info dicts to install.

    The Tool Shed's repository_revisions API is expected to return a JSON list
    of dicts each containing (at least) changeset_revision,
    encoded_repository_id, name, owner and tool_shed_url.

    Returns a two-tuple ( repository_dicts, error_message ); repository_dicts
    is None when the API could not be reached.
    """
    error_message = ''
    # The -check_all_revisions command line flag widens testing beyond the latest revision.
    latest_revision_only = '-check_all_revisions' not in sys.argv
    if latest_revision_only:
        print 'Testing is restricted to the latest downloadable revision in this test run.'
    repository_dicts = []
    parts = [ 'repository_revisions' ]
    # We'll filter out deprecated repositories from testing since testing them is necessary only if reproducibility
    # is guaranteed and we currently do not guarantee reproducibility.
    if test_framework == REPOSITORIES_WITH_TOOLS:
        params = dict( do_not_test='false',
                       downloadable='true',
                       includes_tools='true',
                       malicious='false',
                       missing_test_components='false',
                       skip_tool_test='false' )
    elif test_framework == TOOL_DEPENDENCY_DEFINITIONS:
        # NOTE(review): params stays unbound if test_framework matches neither
        # constant -- callers must pass one of the two supported frameworks.
        params = dict( do_not_test='false',
                       downloadable='true',
                       malicious='false',
                       skip_tool_test='false' )
    api_url = get_api_url( base=tool_shed_url, parts=parts, params=params )
    baseline_repository_dicts, error_message = json_from_url( api_url )
    if error_message:
        return None, error_message
    for baseline_repository_dict in baseline_repository_dicts:
        # We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
        # module that will generate the install methods.
        repository_dict, error_message = get_repository_dict( galaxy_tool_shed_url, baseline_repository_dict )
        if error_message:
            print 'Error getting additional details from the API: %s' % error_message
        else:
            deprecated = asbool( repository_dict.get( 'deprecated', False ) )
            # Filter deprecated repositories in the initial query. Repositories included in the query may have
            # repository dependencies that are deprecated though.
            if not deprecated:
                changeset_revision = baseline_repository_dict.get( 'changeset_revision', hg_util.INITIAL_CHANGELOG_HASH )
                if changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
                    # If testing repositories of type tool_dependency_definition, filter accordingly.
                    if test_framework == TOOL_DEPENDENCY_DEFINITIONS and \
                        repository_dict.get( 'type', None ) != rt_util.TOOL_DEPENDENCY_DEFINITION:
                        continue
                    # Merge the dictionary returned from /api/repository_revisions with the detailed repository_dict and
                    # append it to the list of repository_dicts to install and test.
                    # (dict( a.items() + b.items() ) is the Python 2 merge idiom.)
                    if latest_revision_only:
                        latest_revision = repository_dict.get( 'latest_revision', None )
                        if changeset_revision == latest_revision:
                            repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
                    else:
                        repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
    if testing_single_repository_dict:
        # A single repository was requested via environment variables; restrict testing to it.
        tsr_name = testing_single_repository_dict.get( 'name', None )
        tsr_owner = testing_single_repository_dict.get( 'owner', None )
        tsr_changeset_revision = testing_single_repository_dict.get( 'changeset_revision', None )
        print 'Testing single repository with name %s and owner %s.' % ( tsr_name, tsr_owner )
        for repository_to_install in repository_dicts:
            rti_name = repository_to_install.get( 'name', None )
            rti_owner = repository_to_install.get( 'owner', None )
            rti_changeset_revision = repository_to_install.get( 'changeset_revision', None )
            if rti_name == tsr_name and rti_owner == tsr_owner:
                if tsr_changeset_revision is None:
                    # No revision requested: install only the matching repository.
                    return [ repository_to_install ], error_message
                else:
                    if tsr_changeset_revision == rti_changeset_revision:
                        return repository_dicts, error_message
        return repository_dicts, error_message
    # Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL
    # environment variable.
    print "The Tool Shed's API url...\n%s" % api_url
    print "...retrieved %d repository revisions for installation and possible testing." % len( repository_dicts )
    print "Repository revisions for testing:"
    for repository_dict in repository_dicts:
        repository_id = str( repository_dict.get( 'repository_id', None ) )
        repository_metadata_id = str( repository_dict.get( 'id', None ) )
        name = str( repository_dict.get( 'name', None ) )
        owner = str( repository_dict.get( 'owner', None ) )
        changeset_revision = str( repository_dict.get( 'changeset_revision', None ) )
        print "Revision %s of repository %s owned by %s with repository_id %s, (repository_metadata) id %s" % \
            ( changeset_revision, name, owner, repository_id, repository_metadata_id )
    return repository_dicts, error_message
def get_repository( name, owner, changeset_revision ):
    """
    Return the installed repository record associated with the received name,
    owner and changeset_revision, or None when no such record exists.
    """
    try:
        return test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
    except Exception:
        # The repository may not have been installed in a previous test; treat
        # lookup failures as "not installed".  (Narrowed from a bare except,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        return None
def get_repository_current_revision( repo_path ):
    """Return the 'rev:hash' identifier of the working directory's parent changeset for the hg repository at repo_path."""
    # Open the repository through the mercurial API.
    hg_repo = hg.repository( ui.ui(), repo_path )
    # repo[None] is the working directory change context; its first parent is
    # the most recent changeset the working directory was updated to.
    parent_ctx = hg_repo[ None ].parents()[ 0 ]
    # Combine the local numeric revision with the changeset hash to form the
    # customary id:hash identifier.
    return '%d:%s' % ( parent_ctx.rev(), str( parent_ctx ) )
def get_repository_dependencies_dicts( tool_shed_url, encoded_repository_metadata_id ):
    """
    Return the list of dictionaries defining all repository dependencies of the
    repository_metadata record identified by encoded_repository_metadata_id,
    fetched via the Tool Shed API, as ( dependencies, error_message ).
    """
    url = get_api_url( base=tool_shed_url,
                       parts=[ 'api', 'repository_revisions', encoded_repository_metadata_id, 'repository_dependencies' ] )
    dependencies, error_message = json_from_url( url )
    if error_message:
        return None, error_message
    return dependencies, error_message
def get_repository_tuple_from_elem( elem ):
    """Return a ( name, owner, changeset_revision ) tuple read from an XML element's attributes (missing values become None)."""
    attrs = elem.attrib
    return ( attrs.get( 'name', None ),
             attrs.get( 'owner', None ),
             attrs.get( 'changeset_revision', None ) )
def get_static_settings():
    """
    Return a dictionary of the settings necessary for a Galaxy application to
    be wrapped in the static middleware: mainly the file system locations of
    url-mapped static resources.
    """
    # TODO: these should be copied from galaxy.ini
    # TODO: static_enabled needed here?
    static_root = os.path.join( os.getcwd(), 'static' )
    return { 'static_enabled': True,
             'static_cache_time': 360,
             'static_dir': static_root,
             'static_images_dir': os.path.join( static_root, 'images', '' ),
             'static_favicon_dir': os.path.join( static_root, 'favicon.ico' ),
             'static_scripts_dir': os.path.join( static_root, 'scripts', '' ),
             'static_style_dir': os.path.join( static_root, 'june_2007_style', 'blue' ),
             'static_robots_txt': os.path.join( static_root, 'robots.txt' ) }
def get_time_last_tested( tool_shed_url, encoded_repository_metadata_id ):
    """
    Return the datetime value stored in the Tool Shed's
    repository_metadata.time_last_tested column, fetched via the Tool Shed API,
    as ( time_last_tested, error_message ).
    """
    api_url = get_api_url( base=tool_shed_url,
                           parts=[ 'api', 'repository_revisions', encoded_repository_metadata_id ] )
    metadata, error_message = json_from_url( api_url )
    if error_message:
        return None, error_message
    if not isinstance( metadata, dict ):
        # The API handed back something other than a metadata dictionary; report it.
        error_message = 'The url %s returned the invalid repository_metadata_dict %s' % ( str( api_url ), str( metadata ) )
        return None, error_message
    return metadata.get( 'time_last_tested', None ), error_message
def get_tool_test_results_dict( tool_test_results_dicts ):
    """
    Select a tool_test_results dictionary that can be reused for the current
    test run, or return a new empty one.

    The first entry of the received list is reusable only when it is mostly
    empty: at most a test_environment entry (check_repositories_for_functional_tests.py
    leaves it in that state), or exactly test_environment plus
    missing_test_components.  A reusable entry is popped from the received
    list because it will be re-inserted after being populated.
    """
    if not tool_test_results_dicts:
        # No previous results at all; start this test run from scratch.
        return {}
    candidate = tool_test_results_dicts[ 0 ]
    if len( candidate ) <= 1:
        # Empty, or only a test_environment entry: safe to reuse.
        return tool_test_results_dicts.pop( 0 )
    only_env_and_missing = ( len( candidate ) == 2 and
                             'test_environment' in candidate and
                             'missing_test_components' in candidate )
    if only_env_and_missing:
        # Some tools are missing test components while others are not; still reusable.
        return tool_test_results_dicts.pop( 0 )
    # The latest entry already holds the results of a completed run; leave it alone.
    return {}
def get_tool_test_results_dicts( tool_shed_url, encoded_repository_metadata_id ):
    """
    Return the list of dictionaries stored in the Tool Shed's
    repository_metadata.tool_test_results column, fetched via the Tool Shed
    API, as ( tool_test_results, error_message ).
    """
    api_url = get_api_url( base=tool_shed_url,
                           parts=[ 'api', 'repository_revisions', encoded_repository_metadata_id ] )
    metadata, error_message = json_from_url( api_url )
    if error_message:
        return None, error_message
    if not isinstance( metadata, dict ):
        # The API handed back something other than a metadata dictionary; report it.
        error_message = 'The url %s returned the invalid repository_metadata_dict %s' % ( str( api_url ), str( metadata ) )
        return None, error_message
    # tool_test_results used to be stored as a single dictionary rather than a
    # list, so normalize whatever was stored into a list.
    return listify( metadata.get( 'tool_test_results', [] ) ), error_message
def get_webapp_global_conf():
    """Return the global_conf dictionary sent as the first argument to app_factory."""
    # Static middleware settings are included only when static serving is enabled.
    return dict( get_static_settings() ) if STATIC_ENABLED else {}
def initialize_install_and_test_statistics_dict():
    """Return a fresh dictionary for the run summary that will be printed to stdout."""
    return { 'total_repositories_processed': 0,
             'successful_repository_installations': [],
             'successful_tool_dependency_installations': [],
             'repositories_with_installation_error': [],
             'tool_dependencies_with_installation_error': [],
             'all_tests_passed': [],
             'at_least_one_test_failed': [] }
def initialize_tool_tests_results_dict( app, tool_test_results_dict ):
    """
    Populate tool_test_results_dict with a test_environment entry plus empty
    result containers for the current run, and return it.
    """
    environment = tool_test_results_dict.get( 'test_environment', {} )
    if not environment:
        # We cannot currently determine tool shed details from here; an API
        # method could eventually supply them, so record empty placeholders.
        environment = dict( tool_shed_database_version='',
                            tool_shed_mercurial_version='',
                            tool_shed_revision='' )
    # Time-stamp this run.  The Tool Shed may be configured to keep multiple
    # test run results, so each must carry its own approximate start time (a
    # similar value also lands in repository_metadata.time_last_tested).
    environment[ 'time_tested' ] = time.strftime( "%Y-%m-%d %H:%M:%S" )
    # Record the platform this run executes on.
    environment[ 'python_version' ] = platform.python_version()
    environment[ 'architecture' ] = platform.machine()
    operating_system, hostname, operating_system_version, uname, arch, processor = platform.uname()
    environment[ 'system' ] = '%s %s' % ( operating_system, operating_system_version )
    # Record the Galaxy environment this run executes in.
    environment[ 'galaxy_database_version' ] = get_database_version( app )
    environment[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
    # Initialize and populate the result containers.
    tool_test_results_dict[ 'test_environment' ] = environment
    tool_test_results_dict[ 'passed_tests' ] = []
    tool_test_results_dict[ 'failed_tests' ] = []
    tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[],
                                                            repository_dependencies=[],
                                                            tool_dependencies=[] )
    tool_test_results_dict[ 'successful_installations' ] = dict( current_repository=[],
                                                                 repository_dependencies=[],
                                                                 tool_dependencies=[] )
    return tool_test_results_dict
def install_repository( app, repository_dict ):
    """
    Install a repository defined by the received repository_dict from the tool
    shed into Galaxy, returning ( repository, error_message ).

    repository is the installed repository's database record, or None (with a
    populated error_message) when the installation left no record behind.
    """
    name = str( repository_dict.get( 'name', None ) )
    owner = str( repository_dict.get( 'owner', None ) )
    changeset_revision = str( repository_dict.get( 'changeset_revision', None ) )
    error_message = ''
    repository = None
    print "Installing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner )
    # Use the repository information dictionary to generate an install method that will install the repository into the
    # embedded Galaxy application, with tool dependencies and repository dependencies, if any.
    test_install_repositories.generate_install_method( repository_dict )
    # Configure nose to run the install method as a test.
    test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
    test_config.configure( sys.argv )
    # Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to
    # install the specified repository with tool and repository dependencies also selected for installation.
    result, _ = run_tests( test_config )
    # Get the repository record now that the tests that install it have completed.
    repository = get_repository( name, owner, changeset_revision )
    if repository is None:
        error_message = 'Error getting revision %s of repository %s owned by %s: An entry for the repository was not found in the database.' % ( changeset_revision, name, owner )
        log.error( error_message )
    return repository, error_message
def is_excluded( exclude_list_dicts, name, owner, changeset_revision, encoded_repository_metadata_id ):
    """
    Return ( True, reason ) if the repository defined by the received name,
    owner and changeset_revision should be excluded from testing for any
    reason, or ( False, None ) otherwise.

    A repository is excluded either because it appears in the exclude list
    itself or because one of its repository dependencies does.
    """
    for exclude_dict in exclude_list_dicts:
        reason = exclude_dict.get( 'reason', '' )
        # 'repositories' entries look like:
        # [ ( name, owner, changeset_revision or None ), ... ]
        # where a None changeset_revision excludes every revision.
        exclude_repositories = exclude_dict.get( 'repositories', None )
        if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
            print( 'Revision %s of repository %s owned by %s is excluded from testing because:\n%s' %
                   ( changeset_revision, name, owner, reason ) )
            return True, reason
        # Skip this repository if it has a repository dependency that is in the exclude list.
        repository_dependency_dicts, error_message = \
            get_repository_dependencies_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
        if error_message:
            print( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' %
                   ( changeset_revision, name, owner ) )
            print( error_message )
        else:
            for repository_dependency_dict in repository_dependency_dicts:
                rd_name = repository_dependency_dict.get( 'name', '' )
                rd_owner = repository_dependency_dict.get( 'owner', '' )
                rd_changeset_revision = repository_dependency_dict.get( 'changeset_revision', '' )
                if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
                    ( rd_name, rd_owner, None ) in exclude_repositories:
                    # BUG FIX: the original passed four arguments to this
                    # three-placeholder format string, raising TypeError.
                    print( 'Revision %s of repository %s owned by %s is excluded from testing because ' %
                           ( changeset_revision, name, owner ) )
                    print( 'it requires revision %s of repository %s owned by %s (which is excluded from testing).' %
                           ( rd_changeset_revision, rd_name, rd_owner ) )
                    reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
                        ( rd_changeset_revision, rd_name, rd_owner )
                    # (Dead `break` after this return removed.)
                    return True, reason
    return False, None
def is_latest_downloadable_revision( url, repository_dict ):
    """
    Return ( is_latest, error_message ) where is_latest indicates whether the
    changeset_revision defined in the received repository_dict is the latest
    installable revision of the repository, or ( None, error_message ) when
    this cannot be determined.
    """
    error_message = ''
    name = repository_dict.get( 'name', None )
    owner = repository_dict.get( 'owner', None )
    changeset_revision = repository_dict.get( 'changeset_revision', None )
    # Guard: without all three values we cannot query the Tool Shed; the
    # original fell through here without an explicit result.
    if name is None or owner is None or changeset_revision is None:
        return None, error_message
    name = str( name )
    owner = str( owner )
    changeset_revision = str( changeset_revision )
    latest_revision, error_message = get_latest_downloadable_changeset_revision_via_api( url, name=name, owner=owner )
    if latest_revision is None or error_message:
        return None, error_message
    return changeset_revision == str( latest_revision ), error_message
def parse_exclude_list( xml_filename ):
    """
    Return a list of repositories to exclude from testing, parsed from the
    received exclude-list XML file.

    The document is expected to contain <repositories tool_shed="..."> sections,
    each holding an optional <text> reason child and any number of
    <repository name="..." owner="..." changeset_revision="..."/> elements
    (an omitted changeset_revision excludes every revision of the repository).

    The returned list has the structure:
    [{ 'reason': the default reason or the reason specified in this section,
       'repositories': [ ( name, owner, changeset_revision or None ), ... ] }]
    """
    exclude_list = []
    exclude_tups = []
    xml_tree, error_message = parse_xml( xml_filename )
    if error_message:
        print 'The exclude file %s is invalid, so no repositories will be excluded from testing: %s' % ( xml_filename, error_message )
        return exclude_list
    tool_sheds = xml_tree.findall( 'repositories' )
    xml_element = []
    exclude_count = 0
    # Keep only the section whose tool_shed attribute matches the tool shed
    # under test (the last matching section wins).
    for tool_shed in tool_sheds:
        if galaxy_tool_shed_url != tool_shed.attrib[ 'tool_shed' ]:
            continue
        else:
            xml_element = tool_shed
    for reason_section in xml_element:
        reason_text = reason_section.find( 'text', None )
        if reason_text is not None:
            reason = str( reason_text.text )
        else:
            reason = 'No reason provided.'
        repositories = reason_section.findall( 'repository' )
        exclude_dict = dict( reason=reason, repositories=[] )
        # Deduplicate repository tuples within each reason section.
        for repository in repositories:
            repository_tuple = get_repository_tuple_from_elem( repository )
            if repository_tuple not in exclude_dict[ 'repositories' ]:
                exclude_tups.append( repository_tuple )
                exclude_count += 1
                exclude_dict[ 'repositories' ].append( repository_tuple )
        exclude_list.append( exclude_dict )
    # Summarize the parsed exclusions for the run log.
    if exclude_tups:
        print 'The exclude file %s defines the following %d repositories to be excluded from testing:' % ( xml_filename, exclude_count )
        for name, owner, changeset_revision in exclude_tups:
            if changeset_revision:
                print 'Revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
            else:
                print 'All revisions of repository %s owned by %s.' % ( name, owner )
    else:
        print 'The exclude file %s defines no repositories to be excluded from testing.' % xml_filename
    return exclude_list
def populate_dependency_install_containers( app, repository, repository_identifier_tup, install_and_test_statistics_dict,
                                            tool_test_results_dict ):
    """
    Populate the installation containers (successful or errors) for the received repository's (which
    itself was successfully installed) immediate repository and tool dependencies.  The entire dependency
    tree is not handled here.

    :param app: the Galaxy application (used only to resolve tool dependency installation directories)
    :param repository: an installed tool_shed_repository record
    :param repository_identifier_tup: ( name, owner, changeset_revision ) identifying the repository
    :param install_and_test_statistics_dict: running statistics for the whole test run, keyed by category
    :param tool_test_results_dict: the tool_test_results entry being built for this repository
    :return: 3-tuple ( params, install_and_test_statistics_dict, tool_test_results_dict ) where params
             carries the test_install_error / do_not_test flags for the Tool Shed API update
    """
    repository_name = str( repository.name )
    repository_owner = str( repository.owner )
    repository_changeset_revision = str( repository.changeset_revision )
    print( 'Populating dependency install containers for revision %s of repository %s owned by %s.' %
           ( repository_changeset_revision, repository_name, repository_owner ) )
    # Use setdefault() rather than get() with a throw-away default so that appending below cannot
    # raise a KeyError if the statistics dictionary was not pre-seeded with this category.
    processed_successful_repository_installations = \
        install_and_test_statistics_dict.setdefault( 'successful_repository_installations', [] )
    if repository_identifier_tup not in processed_successful_repository_installations:
        processed_successful_repository_installations.append( repository_identifier_tup )
    repository_identifier_dict = dict( name=repository_name, owner=repository_owner, changeset_revision=repository_changeset_revision )
    tool_test_results_dict[ 'successful_installations' ][ 'current_repository' ].append( repository_identifier_dict )
    params = dict( test_install_error=False,
                   do_not_test=False )
    if repository.missing_repository_dependencies:
        print( 'The following repository dependencies for revision %s of repository %s owned by %s have installation errors:' %
               ( repository_changeset_revision, repository_name, repository_owner ) )
        params[ 'test_install_error' ] = True
        # Keep statistics for this repository's repository dependencies that resulted in installation errors.
        processed_repositories_with_installation_error = \
            install_and_test_statistics_dict.setdefault( 'repositories_with_installation_error', [] )
        for missing_repository_dependency in repository.missing_repository_dependencies:
            tool_shed = str( missing_repository_dependency.tool_shed )
            name = str( missing_repository_dependency.name )
            owner = str( missing_repository_dependency.owner )
            changeset_revision = str( missing_repository_dependency.changeset_revision )
            error_message = unicodify( missing_repository_dependency.error_message )
            print( 'Revision %s of repository %s owned by %s has the following installation error:' % ( changeset_revision, name, owner ) )
            # Use log.debug here instead of print because print will throw UnicodeEncodeError exceptions.
            log.debug( '%s' % error_message )
            identity_tup = ( name, owner, changeset_revision )
            if identity_tup not in processed_repositories_with_installation_error:
                processed_repositories_with_installation_error.append( identity_tup )
            missing_repository_dependency_info_dict = dict( tool_shed=tool_shed,
                                                            name=name,
                                                            owner=owner,
                                                            changeset_revision=changeset_revision,
                                                            error_message=error_message )
            tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( missing_repository_dependency_info_dict )
    if repository.missing_tool_dependencies:
        print( 'The following tool dependencies for revision %s of repository %s owned by %s have installation errors:' %
               ( repository_changeset_revision, repository_name, repository_owner ) )
        params[ 'test_install_error' ] = True
        # Keep statistics for this repository's tool dependencies that resulted in installation errors.
        processed_tool_dependencies_with_installation_error = \
            install_and_test_statistics_dict.setdefault( 'tool_dependencies_with_installation_error', [] )
        for missing_tool_dependency in repository.missing_tool_dependencies:
            name = str( missing_tool_dependency.name )
            # Local renamed from 'type' to avoid shadowing the builtin; the 'type' key in the
            # info dict below is part of the persisted data shape and is unchanged.
            dependency_type = str( missing_tool_dependency.type )
            version = str( missing_tool_dependency.version )
            error_message = unicodify( missing_tool_dependency.error_message )
            print( 'Version %s of tool dependency %s %s has the following installation error:' % ( version, dependency_type, name ) )
            # Use log.debug here instead of print because print will throw UnicodeEncodeError exceptions.
            log.debug( '%s' % error_message )
            identity_tup = ( name, dependency_type, version )
            if identity_tup not in processed_tool_dependencies_with_installation_error:
                processed_tool_dependencies_with_installation_error.append( identity_tup )
            missing_tool_dependency_info_dict = dict( type=dependency_type,
                                                      name=name,
                                                      version=version,
                                                      error_message=error_message )
            tool_test_results_dict[ 'installation_errors' ][ 'tool_dependencies' ].append( missing_tool_dependency_info_dict )
    if repository.installed_repository_dependencies:
        print( 'The following repository dependencies for revision %s of repository %s owned by %s are installed:' %
               ( repository_changeset_revision, repository_name, repository_owner ) )
        # Keep statistics for this repository's repository dependencies that installed successfully.
        for repository_dependency in repository.installed_repository_dependencies:
            tool_shed = str( repository_dependency.tool_shed )
            name = str( repository_dependency.name )
            owner = str( repository_dependency.owner )
            changeset_revision = str( repository_dependency.changeset_revision )
            print( 'Revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
            identifier_tup = ( name, owner, changeset_revision )
            if identifier_tup not in processed_successful_repository_installations:
                processed_successful_repository_installations.append( identifier_tup )
            repository_dependency_info_dict = dict( tool_shed=tool_shed,
                                                    name=name,
                                                    owner=owner,
                                                    changeset_revision=changeset_revision )
            tool_test_results_dict[ 'successful_installations' ][ 'repository_dependencies' ].append( repository_dependency_info_dict )
    if repository.installed_tool_dependencies:
        print( 'The following tool dependencies for revision %s of repository %s owned by %s are installed:' %
               ( repository_changeset_revision, repository_name, repository_owner ) )
        # Keep statistics for this repository's tool dependencies that installed successfully.
        processed_successful_tool_dependency_installations = \
            install_and_test_statistics_dict.setdefault( 'successful_tool_dependency_installations', [] )
        for tool_dependency in repository.installed_tool_dependencies:
            name = str( tool_dependency.name )
            dependency_type = str( tool_dependency.type )
            version = str( tool_dependency.version )
            installation_directory = tool_dependency.installation_directory( app )
            print( 'Version %s of tool dependency %s %s is installed in: %s' % ( version, dependency_type, name, installation_directory ) )
            identity_tup = ( name, dependency_type, version )
            if identity_tup not in processed_successful_tool_dependency_installations:
                processed_successful_tool_dependency_installations.append( identity_tup )
            tool_dependency_info_dict = dict( type=dependency_type,
                                              name=name,
                                              version=version,
                                              installation_directory=installation_directory )
            tool_test_results_dict[ 'successful_installations' ][ 'tool_dependencies' ].append( tool_dependency_info_dict )
    return params, install_and_test_statistics_dict, tool_test_results_dict
def populate_install_containers_for_repository_dependencies( app, repository, repository_metadata_id, install_and_test_statistics_dict,
                                                             can_update_tool_shed ):
    """
    The handle_repository_dependencies check box is always checked when a repository is installed, so the
    tool_test_results dictionary must be inspected for each dependency to make sure installation containers
    (success or errors) have been populated. Since multiple repositories can depend on the same repository,
    some of the containers may have been populated during a previous installation.
    """
    # Get the list of dictionaries that define the received repository's repository dependencies
    # via the Tool Shed API.
    repository_name = str( repository.name )
    repository_owner = str( repository.owner )
    repository_changeset_revision = str( repository.changeset_revision )
    print 'Potentially populating install containers for repository dependencies of revision %s of repository %s owned by %s.' % \
        ( repository_changeset_revision, repository_name, repository_owner )
    repository_dependencies_dicts, error_message = get_repository_dependencies_dicts( galaxy_tool_shed_url, repository_metadata_id )
    if error_message:
        print 'Cannot check or populate repository dependency install containers for revision %s of repository %s owned by %s ' % \
            ( repository_changeset_revision, repository_name, repository_owner )
        print 'due to the following error getting repository_dependencies_dicts:\n%s' % str( error_message )
    else:
        if not repository_dependencies_dicts:
            print 'Revision %s of repository %s owned by %s has no repository dependencies.' % \
                ( repository_changeset_revision, repository_name, repository_owner )
        for repository_dependencies_dict in repository_dependencies_dicts:
            # Guard against malformed entries in the API response.
            if not isinstance( repository_dependencies_dict, dict ):
                print 'Skipping invalid repository_dependencies_dict: %s' % str( repository_dependencies_dict )
                continue
            name = repository_dependencies_dict.get( 'name', None )
            owner = repository_dependencies_dict.get( 'owner', None )
            changeset_revision = repository_dependencies_dict.get( 'changeset_revision', None )
            if name is None or owner is None or changeset_revision is None:
                print 'Skipping invalid repository_dependencies_dict due to missing name, owner or changeset_revision: %s' % \
                    str( repository_dependencies_dict )
                continue
            name = str( name )
            owner = str( owner )
            changeset_revision = str( changeset_revision )
            print 'Checking installation containers for revision %s of repository dependency %s owned by %s' % \
                ( changeset_revision, name, owner )
            required_repository_metadata_id = repository_dependencies_dict[ 'id' ]
            # Get the current list of tool_test_results dictionaries associated with the repository_metadata
            # record in the tool shed.
            tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url,
                                                                                  required_repository_metadata_id )
            if error_message:
                print 'Cannot check install container for version %s of repository dependency %s owned by %s ' % \
                    ( changeset_revision, name, owner )
                print 'due to the following error getting tool_test_results:\n%s' % str( error_message )
            else:
                # The assumption is that the Tool Shed's install and test framework is executed no more than once per 24 hour
                # period, so check the required repository's time_last_tested value to see if its tool_test_results column
                # has been updated within the past 20 hours to allow for differing test run times (some may be slower than
                # others). The RepositoryMetadata class's to_dict() method returns the value of time_last_tested in
                # datetime.isoformat().
                time_last_tested, error_message = get_time_last_tested( galaxy_tool_shed_url, required_repository_metadata_id )
                print 'Value of time_last_tested: %s' % str( time_last_tested )
                if time_last_tested is None:
                    # The dependency has never been tested; fall through and populate its containers below.
                    print 'The time_last_tested column value is None for version %s of repository dependency %s owned by %s.' % \
                        ( changeset_revision, name, owner )
                else:
                    twenty_hours_ago = ( datetime.utcnow() - timedelta( hours=20 ) ).isoformat()
                    print 'Value of twenty_hours_ago: %s' % str( twenty_hours_ago )
                    # This is counter intuitive because the following check is on strings like this: '2014-01-21T19:46:06.953741',
                    # so if "time_last_tested > twenty_hours_ago" is True, then it implies that the time_last_tested column
                    # was actually updated less than 20 hours ago, and should not be updated again because we're likely processing
                    # another dependent repository, many of which can have the same repository dependency.
                    try:
                        # Be very conservative here. Our default behavior will be to assume containers have not been populated
                        # during the current test run.
                        already_populated = time_last_tested > twenty_hours_ago
                    except Exception, e:
                        log.exception( 'Error attempting to set already_populated: %s' % str( e ) )
                        already_populated = False
                    print 'Value of already_populated: %s' % str( already_populated )
                    if already_populated:
                        print 'The install containers for version %s of repository dependency %s owned by %s have been ' % \
                            ( changeset_revision, name, owner )
                        print 'populated within the past 20 hours (likely in this test run), so skipping this check.'
                        continue
                    else:
                        print 'Version %s of repository dependency %s owned by %s was last tested more than 20 hours ago.' % \
                            ( changeset_revision, name, owner )
                # Inspect the tool_test_results_dict for the last test run to see if it has not yet been populated.
                if len( tool_test_results_dicts ) == 0:
                    tool_test_results_dict = {}
                else:
                    tool_test_results_dict = tool_test_results_dicts[ 0 ]
                    if len( tool_test_results_dict ) <= 1:
                        # A mostly-empty most recent entry can be re-used for this run.  NOTE(review):
                        # pop( 0 ) mutates tool_test_results_dicts; save_test_results_for_changeset_revision()
                        # later re-inserts the re-populated dict at index 0 before updating the Tool Shed.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    elif len( tool_test_results_dict ) == 2 and \
                            'test_environment' in tool_test_results_dict and \
                            'missing_test_components' in tool_test_results_dict:
                        # Only the environment and missing-components entries are present, so this
                        # entry is also safe to re-use for the current run.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    else:
                        # The most recent entry holds results from a prior completed run; start fresh
                        # and leave the prior entry in place.
                        tool_test_results_dict = {}
                # Make sure all expected entries are available in the tool_test_results_dict.
                tool_test_results_dict = initialize_tool_tests_results_dict( app, tool_test_results_dict )
                # Get the installed repository record from the Galaxy database.
                cleaned_tool_shed_url = remove_protocol_from_tool_shed_url( galaxy_tool_shed_url )
                required_repository = \
                    suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( app,
                                                                                       cleaned_tool_shed_url,
                                                                                       name,
                                                                                       owner,
                                                                                       changeset_revision )
                if required_repository is not None:
                    repository_identifier_tup = ( name, owner, changeset_revision )
                    if required_repository.is_installed:
                        # The required_repository was successfully installed, so populate the installation
                        # containers (success and error) for the repository's immediate dependencies.
                        params, install_and_test_statistics_dict, tool_test_results_dict = \
                            populate_dependency_install_containers( app,
                                                                    required_repository,
                                                                    repository_identifier_tup,
                                                                    install_and_test_statistics_dict,
                                                                    tool_test_results_dict )
                        save_test_results_for_changeset_revision( galaxy_tool_shed_url,
                                                                  tool_test_results_dicts,
                                                                  tool_test_results_dict,
                                                                  repository_dependencies_dict,
                                                                  params,
                                                                  can_update_tool_shed )
                    else:
                        # The required repository's installation failed.
                        required_repository_installation_error_dict = dict( tool_shed=galaxy_tool_shed_url,
                                                                            name=name,
                                                                            owner=owner,
                                                                            changeset_revision=changeset_revision,
                                                                            error_message=required_repository.error_message )
                        tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( required_repository_installation_error_dict )
                        params = dict( test_install_error=True,
                                       do_not_test=False )
                        save_test_results_for_changeset_revision( galaxy_tool_shed_url,
                                                                  tool_test_results_dicts,
                                                                  tool_test_results_dict,
                                                                  repository_dependencies_dict,
                                                                  params,
                                                                  can_update_tool_shed )
                else:
                    print 'Cannot retrieve revision %s of required repository %s owned by %s from the database ' % \
                        ( changeset_revision, name, owner )
                    print 'so tool_test_results cannot be saved at this time.'
                    print 'The attributes used to retrieve the record are:'
                    print 'tool_shed: %s name: %s owner: %s changeset_revision: %s' % \
                        ( cleaned_tool_shed_url, name, owner, changeset_revision )
def populate_shed_conf_file( shed_conf_file, tool_path, xml_elems=None ):
    """
    Populate the file defined by shed_conf_file with xml_elems or initialize it with a template string.

    :param shed_conf_file: path of the shed tool conf file to (over)write
    :param tool_path: substituted for $shed_tool_path in the template when xml_elems is None
    :param xml_elems: optional pre-rendered file contents; when None the module-level
                      shed_tool_conf_xml_template is rendered instead
    """
    if xml_elems is None:
        tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
        xml_elems = tool_conf_template_parser.safe_substitute( shed_tool_path=tool_path )
    # Use a context manager so the handle is closed deterministically; the previous
    # file( ... ).write( ... ) call leaked the open handle.
    with open( shed_conf_file, 'w' ) as handle:
        handle.write( xml_elems )
def populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ):
    """
    Populate the file defined by galaxy_shed_tools_dict_file with the contents of the shed_tools_dict
    dictionary serialized as JSON.  An empty dictionary is written when shed_tools_dict is None.
    """
    if shed_tools_dict is None:
        shed_tools_dict = {}
    # Use a context manager so the handle is closed deterministically; the previous
    # file( ... ).write( ... ) call leaked the open handle.
    with open( galaxy_shed_tools_dict_file, 'w' ) as handle:
        handle.write( json.dumps( shed_tools_dict ) )
def print_install_and_test_results( install_stage_type, install_and_test_statistics_dict, error_message ):
    """
    Print statistics for the current test run.

    :param install_stage_type: label for the stage being summarized (used in the header line)
    :param install_and_test_statistics_dict: per-category statistics gathered during the run
    :param error_message: when truthy, only the error is printed and statistics are skipped
    """
    if error_message:
        print( "Error returned from install_and_test_repositories:" )
        print( error_message )
    elif isinstance( install_and_test_statistics_dict, dict ):
        # Any of these categories can legitimately be missing (None) or empty when nothing
        # was recorded for it; empty sections are simply not printed.
        all_tests_passed = install_and_test_statistics_dict.get( 'all_tests_passed', None )
        at_least_one_test_failed = install_and_test_statistics_dict.get( 'at_least_one_test_failed', None )
        repositories_with_installation_error = \
            install_and_test_statistics_dict.get( 'repositories_with_installation_error', None )
        successful_repository_installations = \
            install_and_test_statistics_dict.get( 'successful_repository_installations', None )
        successful_tool_dependency_installations = \
            install_and_test_statistics_dict.get( 'successful_tool_dependency_installations', None )
        tool_dependencies_with_installation_error = \
            install_and_test_statistics_dict.get( 'tool_dependencies_with_installation_error', None )
        total_repositories_processed = install_and_test_statistics_dict.get( 'total_repositories_processed', None )
        now = time.strftime( "%Y-%m-%d %H:%M:%S" )
        print( "####################################################################################" )
        print( "# %s - installation script for %s completed." % ( now, install_stage_type ) )
        print( "# Repository revisions processed: %s" % str( total_repositories_processed ) )
        if successful_repository_installations:
            print( "# ----------------------------------------------------------------------------------" )
            print( "# The following %d revisions were successfully installed:" % len( successful_repository_installations ) )
            display_repositories_by_owner( successful_repository_installations )
        if repositories_with_installation_error:
            print( "# ----------------------------------------------------------------------------------" )
            print( "# The following %d revisions have installation errors:" % len( repositories_with_installation_error ) )
            display_repositories_by_owner( repositories_with_installation_error )
        if successful_tool_dependency_installations:
            print( "# ----------------------------------------------------------------------------------" )
            print( "# The following %d tool dependencies were successfully installed:" % len( successful_tool_dependency_installations ) )
            display_tool_dependencies_by_name( successful_tool_dependency_installations )
        if tool_dependencies_with_installation_error:
            print( "# ----------------------------------------------------------------------------------" )
            print( "# The following %d tool dependencies have installation errors:" % len( tool_dependencies_with_installation_error ) )
            display_tool_dependencies_by_name( tool_dependencies_with_installation_error )
        if all_tests_passed:
            print( '# ----------------------------------------------------------------------------------' )
            print( "# The following %d revisions successfully passed all functional tests:" % len( all_tests_passed ) )
            display_repositories_by_owner( all_tests_passed )
        if at_least_one_test_failed:
            print( '# ----------------------------------------------------------------------------------' )
            print( "# The following %d revisions failed at least 1 functional test:" % len( at_least_one_test_failed ) )
            display_repositories_by_owner( at_least_one_test_failed )
        print( "####################################################################################" )
def remove_protocol_from_tool_shed_url( base_url ):
    """
    Eliminate the protocol from the received base_url and return the possibly altered url.

    The tool_shed value stored in the tool_shed_repository record does not include the
    protocol, but does include the port if one exists.  Trailing slashes are stripped.
    Falsy and malformed values are returned unchanged.
    """
    if base_url:
        if '://' in base_url:
            try:
                protocol, base = base_url.split( '://' )
            except ValueError:
                # More than one '://', so the received base_url must be an invalid url;
                # return it unchanged.
                log.debug( "Returning unchanged invalid base_url from remove_protocol_from_tool_shed_url: %s" % str( base_url ) )
                return base_url
            return base.rstrip( '/' )
        return base_url.rstrip( '/' )
    log.debug( "Returning base_url from remove_protocol_from_tool_shed_url: %s" % str( base_url ) )
    return base_url
def run_tests( test_config ):
    """Load and run the tests named by test_config, returning the nose result and the active plugins."""
    ## TODO: replace whole method with...
    # from base import nose_util
    # result = nose_util.run( test_config, plugins=[ new ReportResults() ] )
    # return result, test_config.plugins._plugins
    base_loader = nose.loader.TestLoader( config=test_config )
    test_config.plugins.addPlugin( ReportResults() )
    # Give plugins a chance to wrap or replace the test loader.
    wrapped_loader = test_config.plugins.prepareTestLoader( base_loader )
    active_loader = base_loader if wrapped_loader is None else wrapped_loader
    tests = active_loader.loadTestsFromNames( test_config.testNames )
    base_runner = nose.core.TextTestRunner( stream=test_config.stream,
                                            verbosity=test_config.verbosity,
                                            config=test_config )
    # Likewise allow plugins to wrap or replace the test runner.
    wrapped_runner = test_config.plugins.prepareTestRunner( base_runner )
    active_runner = base_runner if wrapped_runner is None else wrapped_runner
    result = active_runner.run( tests )
    return result, test_config.plugins._plugins
def save_test_results_for_changeset_revision( url, tool_test_results_dicts, tool_test_results_dict, repository_dict,
                                              params, can_update_tool_shed ):
    """
    Update the repository metadata tool_test_results and appropriate flags using the Tool Shed API. This method
    updates tool_test_results with the received tool_test_results_dict, sets the do_not_test and tools_functionally
    correct flags to the appropriate values and updates the time_last_tested field.
    """
    # NOTE(review): the received url parameter is reassigned below before it is ever read,
    # so the caller-supplied value is effectively ignored.
    if can_update_tool_shed:
        metadata_revision_id = repository_dict.get( 'id', None )
        if metadata_revision_id is not None:
            name = repository_dict.get( 'name', None )
            owner = repository_dict.get( 'owner', None )
            changeset_revision = repository_dict.get( 'changeset_revision', None )
            if name is None or owner is None or changeset_revision is None:
                # NOTE(review): the trailing "% repository_dict" has no conversion specifier, so the
                # dict is silently dropped from the message (no exception is raised because the right
                # operand is a mapping); this was probably intended to be
                # ' missing from repository_dict: %s' % repository_dict.
                print 'Entries for name: ', name, ' owner: ', owner, ' or changeset_revision: ', changeset_revision, \
                    ' missing from repository_dict:' % repository_dict
            else:
                name = str( name )
                owner = str( owner )
                changeset_revision = str( changeset_revision )
                # With regard to certification level one, the status of this repository may or may not have changed between
                # this install and test run and the previous install and test run. Rather than attempting to determine if
                # anything has changed here, we'll let the Tool Shed's repository registry handle the process of proper
                # categorization. To enable this, we'll just remove entries from the Tool Shed's repository registry and
                # then add them back. This will ensure proper categorization for this repository.
                registry_params = dict( tool_shed_url=galaxy_tool_shed_url, name=name, owner=owner )
                print "Removing entries for repository ", name, " owned by ", owner, "from the Tool Shed's repository registry."
                url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repositories', 'remove_repository_registry_entry' ) )
                response_dict = submit( url, registry_params, api_key=tool_shed_api_key, return_formatted=False )
                # A missing status key is treated as success; registry errors are reported but not fatal.
                status = response_dict.get( 'status', 'ok' )
                if status == 'error':
                    default_message = 'An unknown error occurred attempting to remove entries from the repository registry.'
                    error_message = response_dict.get( 'message', default_message )
                    print error_message
                print "Adding entries for repository ", name, " owned by ", owner, "into the Tool Shed's repository registry."
                url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repositories', 'add_repository_registry_entry' ) )
                response_dict = submit( url, registry_params, api_key=tool_shed_api_key, return_formatted=False )
                status = response_dict.get( 'status', 'ok' )
                if status == 'error':
                    default_message = 'An unknown error occurred attempting to add entries into the repository registry.'
                    error_message = response_dict.get( 'message', default_message )
                    print error_message
                print '\n=============================================================\n'
                print 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
                    ( changeset_revision, name, owner, str( tool_test_results_dict ) )
                print 'Updating tool_test_results for repository_metadata id %s.' % metadata_revision_id
                # The new results become the most recent (index 0) entry in the persisted list.
                tool_test_results_dicts.insert( 0, tool_test_results_dict )
                params[ 'tool_test_results' ] = tool_test_results_dicts
                # Set the time_last_tested entry so that the repository_metadata.time_last_tested will be set in the tool shed.
                params[ 'time_last_tested' ] = 'This entry will result in this value being set via the Tool Shed API.'
                url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', str( metadata_revision_id ) ) )
                print 'url: ', url
                print 'params: ', params
                try:
                    response_from_update = update( tool_shed_api_key, url, params, return_formatted=False )
                    print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
                        ( changeset_revision, name, owner, str( response_from_update ) )
                    print '\n=============================================================\n'
                except Exception, e:
                    log.exception( 'Error updating tool_test_results for repository_metadata id %s:\n%s' % \
                        ( str( metadata_revision_id ), str( e ) ) )