diff --git a/oncrawl/code-env/python/desc.json b/oncrawl/code-env/python/desc.json new file mode 100644 index 00000000..a233995d --- /dev/null +++ b/oncrawl/code-env/python/desc.json @@ -0,0 +1,7 @@ +{ + "pythonInterpreter": "PYTHON36", + "acceptedPythonInterpreters": ["PYTHON36"], + "forceConda": false, + "installCorePackages": true, + "installJupyterSupport": false +} \ No newline at end of file diff --git a/oncrawl/code-env/python/spec/requirements.txt b/oncrawl/code-env/python/spec/requirements.txt new file mode 100644 index 00000000..c1905881 --- /dev/null +++ b/oncrawl/code-env/python/spec/requirements.txt @@ -0,0 +1,2 @@ +prison==0.1.3 +pendulum==2.1.0 diff --git a/oncrawl/custom-recipes/data_queries/recipe.json b/oncrawl/custom-recipes/data_queries/recipe.json new file mode 100644 index 00000000..66c05036 --- /dev/null +++ b/oncrawl/custom-recipes/data_queries/recipe.json @@ -0,0 +1,30 @@ +{ + "meta": { + "label": "OnCrawl data queries", + "description": "
Export URLs or aggregations from crawls or log monitoring events.
Contact us to get your API key.
", + "icon": "icon-globe", + "iconColor": "sky" + }, + + "kind": "PYTHON", + + "inputRoles": [], + + "outputRoles": [ + { + "name": "output", + "label": "output", + "description": "output", + "arity": "UNARY", + "required": true, + "acceptsDataset": true + } + ], + + "paramsModule" : "oncrawl-data_queries.module", + "paramsPythonSetup" : "data_queries.py", + "paramsTemplate" : "data_queries.html", + + "params": [] + +} diff --git a/oncrawl/custom-recipes/data_queries/recipe.py b/oncrawl/custom-recipes/data_queries/recipe.py new file mode 100644 index 00000000..30ec2a51 --- /dev/null +++ b/oncrawl/custom-recipes/data_queries/recipe.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +import dataiku +from dataiku.customrecipe import get_output_names_for_role, get_recipe_config +import oncrawl as oc +from oncrawl import oncrawlDataAPI as ocd +from oncrawl import oncrawlProjectAPI as ocp + +output_names = get_output_names_for_role('output') +output_datasets = [dataiku.Dataset(name) for name in output_names] +output = output_datasets[0] + +#------------------------------config & vars +config = get_recipe_config() + +#config checker to raise better error +e = None +if 'api_key' not in config.keys(): + e = 'Please add your API key' + +if 'list_projects_id_name' not in config.keys() or len(config['list_projects_id_name'].keys()) == 0: + e = 'Your Oncrawl account seems to have no projects available. Please check with your Oncrawl account.' + +if 'list_configs_crawls' not in config.keys() or len(config['list_configs_crawls'].keys()) == 0 or 'list_crawls_project' not in config.keys() or len(config['list_crawls_project'].keys()) == 0: + e = 'Your Oncrawl account seems to have no crawls available. Please check the choosen project and date range with your Oncrawl account.' 
+ +if e is not None: + raise Exception(e) + +headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'Authorization' : 'Bearer {}'.format(config['api_key']) +} + +#list project ids +p_ids = [] + +#if getting all projects : rebuild an up to date ids list +if config['projects_id'] == 'all': + try: + p_ids_uptodate = ocp.get_projects(config['api_key']) + + for p in p_ids_uptodate: + config['list_projects_id_name'][p['id']] = p['name'] + p_ids.append(p['id']) + + except Exception as e: + raise Exception(p_ids_uptodate) +else: + p_ids = [config['projects_id'].split(',')[0]] + +#--list ids to get data : according config['index'], ids are related to projects or crawls - each id represents a crawl when index = pages or links and a project when index = logs +if config['index'] == 'logs': + + ids = p_ids + +else: + + if config['crawls_id'] not in ['all', 'last']: + ids = [config['crawls_id']] + + else: + + #if getting all or last crawls : rebuild an up to date ids list + try: + dates = oc.build_date_range(config) + date_start_yyyy_mm_dd = dates['start'] + date_end_yyyy_mm_dd = dates['end'] + + crawl_start_timestamp = oc.datestring_to_miltimestamp_with_tz(dates['start']) + crawl_end_timestamp = oc.datestring_to_miltimestamp_with_tz(dates['end']) + + limit = None + + c_ids_uptodate = ocp.get_live_crawls(projects_id=p_ids, config=config, timestamp_range={'start': crawl_start_timestamp, 'end': crawl_end_timestamp}, limit=limit) + + ids = [] + count_crawls_by_projects = [] + for c in c_ids_uptodate: + if c['config_name'] == config['crawl_config']: + if (config['crawls_id'] == 'last' and c['project_id'] not in count_crawls_by_projects) or config['crawls_id'] != 'last': + count_crawls_by_projects.append(c['project_id']) + ids.append(c['id']) + + except Exception as e: + raise + + +#------------------------------schema +#fields not returned by oncrawl API +metadata = { + 'project_id': 'string', + 'project_name': 'string', + 'crawl_id': 'string', + 'config_name': 'string', + 'crawl_start_timestamp': 'bigint' + } + +metadata_fields = ocd.build_schema_from_metadata(config, metadata) + +schema = { + 'dataset_schema': metadata_fields['schema'], + 'dataset_schema_field_list': metadata_fields['list'] +} +fields_to_request_by_ids = {} + +for i, id in enumerate(ids): + + progress = '#{} {} {}/{}'.format(id, 'crawls' if config['index'] != 'logs' else 'projects', (i+1), len(ids)) + + #when aggregate data, all items have same schema + if config['data_action'] == 'aggs': + + if i == 0: + f = ocd.build_schema_from_config(config=config) + schema['dataset_schema'] = schema['dataset_schema'] + f + print('############################\r\n############################\r\nBuil dataset schema with: ', progress) + else: + break + else: + + print('############################\r\n############################\r\nBuil dataset schema with: ', progress) + #when export data, for many reasons all items could not have same schema + #return new fields to add to dataset schema and all fields to request for this item + f = ocd.build_schema_from_oncrawl(config=config, id=id, headers=headers, schema=schema) + + if 'item_schema' not in f.keys() or len(f['item_schema']) == 0: + continue + + schema['dataset_schema'] = schema['dataset_schema'] + f['dataset_schema'] + schema['dataset_schema_field_list'] = schema['dataset_schema_field_list'] + f['dataset_schema_field_list'] + + fields_to_request_by_ids[id] = f['item_schema'] + +output.write_schema(schema['dataset_schema']) + +#------------------------------data & writer 
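+#for each crawl/project id: fetch rows (export) or aggregated values (aggs) from the Oncrawl API, align them with the dataset schema and stream them through the dataset writer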
+total_count = 0 +with output.get_writer() as writer: + + for i, id in enumerate(ids): + + #this case happend when project has no log feature or unexpected ES issue + if config['data_action'] == 'export' and id not in fields_to_request_by_ids.keys(): + continue + + progress = '#{} {} {}/{}'.format(id, 'crawls' if config['index'] != 'logs' else 'projects', (i+1), len(ids)) + print('############################\r\n############################\r\nGet data for: ', progress) + + metadata_value = ocd.fill_metadata(config, id) + if config['data_action'] == 'export': + data = ocd.export(config_oql=config['oql'], fields=fields_to_request_by_ids[id], config_index=config['index'], id=id, headers=headers) + else: + data = ocd.aggs(config_oql=config['oql'], config_index=config['index'], id=id, headers=headers) + + count_result = 0 + try: + for json_line in data: + + row = metadata_value + [] + + if config['data_action'] == 'export': + #oncrawl export api send values not in the same order as schema... + for field in schema['dataset_schema_field_list']: + if field not in list(metadata.keys()): + if field in list(json_line.keys()): + if field in ['title', 'meta_description', 'h1'] and json_line[field] is not None: + row.append(json_line[field].encode(encoding = 'utf8', errors = 'replace')) + else: + row.append(json_line[field]) + else: + row.append(None) + else: + row = row + list(json_line.values()) + + writer.write_row_array(row) + count_result += 1 + print(progress, 'row: ',count_result) + print(progress, ': total row recorded: ', count_result, '\r\n############################\r\n############################') + + except Exception as e: + raise Exception('{}'.format(e)) + + total_count += 1 + diff --git a/oncrawl/js/data_queries_controller.js b/oncrawl/js/data_queries_controller.js new file mode 100644 index 00000000..c2cd9903 --- /dev/null +++ b/oncrawl/js/data_queries_controller.js @@ -0,0 +1,237 @@ +var app = angular.module('oncrawl-data_queries.module', []); + +app.controller('oncrawl_data_queries', function($scope) { + + //Behave.js is a lightweight library for adding IDE style behaviors to plain text areas, making it much more enjoyable to write code in. 
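+    //attach Behave to the OQL textarea so editing the query gets IDE-like indentation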
+ var editor = new Behave({ + textarea: document.getElementById('oql') + }); + + //init default vars + $scope.api_error = null + $scope.oql_error = null + + if(!$scope.config.date_kind) + { + $scope.config.date_kind = 'relative'; + } + $scope.toggle_date = false; + if($scope.config.date_kind == 'absolute') + $scope.toggle_date = true; + + if(!$scope.config.date_filter_time_cursor) + { + $scope.config.date_filter_time_cursor = 'current'; + $scope.config.date_filter_unit = 'month'; + $scope.config.date_filter_include_today = true; + $scope.config.date_filter_type = true; + + } + if(!$scope.config.date_filter_num_unit) + { + $scope.config.date_filter_num_unit = 1; + } + if(!$scope.config.data_action) + { + $scope.config.data_action = 'aggs'; + } + $scope.toggle_action = false; + if($scope.config.data_action == 'export') + $scope.toggle_action = true; + + if(!$scope.config.index) + { + $scope.config.index = 'pages'; + } + + $scope.selectDefaultCrawls = function() + { + $scope.config.crawls_id = selectDefaultCrawls($scope); + } + $scope.$watchGroup(['date_start_yyyy_mm_dd', 'date_end_yyyy_mm_dd'], updateDatesRange); + function updateDatesRange(o, n) + { + if (!$scope.date_start_yyyy_mm_dd && !$scope.date_end_yyyy_mm_dd) + return + + $scope.config.oql = build_oql($scope); + if($scope.config.index != 'logs') + { + $scope.get_crawls(); + } + } + $scope.build_date_range = function() + { + $scope.config.date_kind ='relative' + if($scope.toggle_date) + { + $scope.config.date_kind = 'absolute' + } + + $scope.date_start_yyyy_mm_dd = ""; + $scope.date_end_yyyy_mm_dd = ""; + + + if($scope.config.date_kind == 'absolute') + { + if(!$scope.config.override_date_start_yyyy_mm_dd || !$scope.config.override_date_end_yyyy_mm_dd) + { + return; + } + } + if($scope.config.date_kind == 'relative') + { + if(!$scope.config.date_filter_num_unit) + { + return; + } + } + + $scope.callPythonDo({'method': 'build_date_range' + }).then(function(response) { + try + { + $scope.date_start_yyyy_mm_dd = response.start + $scope.date_end_yyyy_mm_dd = response.end + } + catch(e) { + $scope.api_error = response.error + } + + }, function(response) { + $scope.api_error = "Unexpected error occurred" + }); + } + $scope.build_date_range(); + + + $scope.build_oql = function(reset=false) + { + $scope.config.data_action ='aggs' + if($scope.toggle_action) + { + $scope.config.data_action = 'export' + } + + $scope.config.oql = build_oql($scope, reset) + } + $scope.check_oql = function() + { + $scope.oql_error = null; + $scope.config.oql = document.getElementById('oql').value + try + { + if($scope.config.oql) + { + JSON.parse($scope.config.oql) + } + + //build oql if empty and add default required missing fields + $scope.config.oql = prettyPrint(build_oql($scope)) + document.getElementById('oql').value = $scope.config.oql + } + catch(e) + { + $scope.oql_error = e; + } + } + + + if($scope.config.list_projects_id_name) + { + $scope.num_projects = Object.keys($scope.config.list_projects_id_name).length; + } + + if($scope.config.list_configs_crawls) + { + $scope.num_configs = Object.keys($scope.config.list_configs_crawls).length; + } + + + $scope.get_projects = function() + { + + $scope.callPythonDo({'method': 'get_projects', + 'offset': $scope.config.projects_filter_offset || 0, + 'limit': $scope.config.projects_filter_limit || null, + 'sort': $scope.config.projects_filter_sort || 'name:asc' + }).then(function(response) { + try + { + + $scope.api_error = null + + $scope.config.list_projects_id_name = response.projects; + + $scope.num_projects = 
Object.keys($scope.config.list_projects_id_name).length; + + if(Object.keys(response.projects).length > 1 && !$scope.config.projects_id) + { + $scope.config.projects_id = 'all'; + } + + $scope.get_crawls(); + + } + catch(e) + { + $scope.api_error = response.error + } + },function(response) { + $scope.api_error = "Unexpected error occurred" + }); + } + + + $scope.get_crawls = function() + { + + if(!$scope.config.projects_id) + { + return; + } + + if(!$scope.date_start_yyyy_mm_dd || !$scope.date_end_yyyy_mm_dd) + { + return; + } + + if($scope.config.index == 'logs') + { + return; + } + + $scope.callPythonDo({'method': 'get_crawls', + 'projects_id': $scope.config.projects_id, + 'date_start_yyyy_mm_dd' : $scope.date_start_yyyy_mm_dd, + 'date_end_yyyy_mm_dd' : $scope.date_end_yyyy_mm_dd, + 'index': $scope.config.index + }).then(function(response) { + try + { + + $scope.config.list_configs_crawls = response.configs; + //list_crawls_project to allow recipe to build crawls metadata (project_id, start_date...) + $scope.config.list_crawls_project = response.crawls; + $scope.num_configs = Object.keys($scope.config.list_configs_crawls).length; + + //by default take first crawl config... + if(!$scope.config.crawl_config) + { + $scope.config.crawl_config = Object.keys(response.configs)[0]; + } + //... and return 'all' or a crawl id if there is only 1 crawl fr the choosen config + $scope.selectDefaultCrawls(); + + } + catch(e) { + $scope.api_error = response.error + } + + }, function(response) { + $scope.api_error = "Unexpected error occurred " + }); + + } + +}); \ No newline at end of file diff --git a/oncrawl/plugin.json b/oncrawl/plugin.json new file mode 100644 index 00000000..0dbb323b --- /dev/null +++ b/oncrawl/plugin.json @@ -0,0 +1,15 @@ +{ + "id": "oncrawl", + "version": "1.0.0", + + "meta": { + + "label": "Oncrawl", + "description": "Export URLs or aggregations from crawls or log monitoring events", + "author": "Cogniteev", + "icon": "icon-globe", + "tags": ["API", "Cloud", "Logs"], + "url": "https://www.dataiku.com/product/plugins/oncrawl/", + "licenseInfo": "Apache 2" + } +} diff --git a/oncrawl/python-lib/oncrawl/__init__.py b/oncrawl/python-lib/oncrawl/__init__.py new file mode 100644 index 00000000..008b7227 --- /dev/null +++ b/oncrawl/python-lib/oncrawl/__init__.py @@ -0,0 +1,69 @@ +#from datetime import datetime, timedelta +from calendar import monthrange +import pendulum +import json + +def build_date_range(config): + + # work with date string to support manual date override + # do not forget that range requested is [[ => always add 1 day !! 
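+    # e.g. with a half-open [start, end) range, a "current month" run on 2024-01-15 yields start=2024-01-01 and end=2024-02-01, so every day of January is covered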
+ date_start_yyyy_mm_dd = "" + date_end_yyyy_mm_dd = "" + + if config['date_kind'] == 'relative': + + # use user timezone + datetime_reference = pendulum.now() + #user_tz = datetime_reference.timezone.name + + datetime_reference_first_day_month = datetime_reference.start_of('month') + datetime_reference_last_day_month = datetime_reference.end_of('month') + + datetime_reference_first_day_week = datetime_reference.start_of('week') + datetime_reference_last_day_week = datetime_reference.end_of('week') + + if config['date_filter_time_cursor'] == 'current': + if config['date_filter_unit'] == 'month': + date_start_yyyy_mm_dd = datetime_reference_first_day_month.strftime('%Y-%m-%d') + date_end_yyyy_mm_dd = datetime_reference_last_day_month.add(days=1).strftime('%Y-%m-%d') + + if config['date_filter_unit'] == 'day': + date_start_yyyy_mm_dd = datetime_reference.strftime('%Y-%m-%d') + date_end_yyyy_mm_dd = datetime_reference.add(days=1).strftime('%Y-%m-%d') + + if config['date_filter_time_cursor'] == 'previous': + if config['date_filter_unit'] == 'month': + date_start_yyyy_mm_dd = datetime_reference_first_day_month.subtract(months=config['date_filter_num_unit']).strftime('%Y-%m-%d') + date_end_yyyy_mm_dd = datetime_reference_first_day_month.strftime('%Y-%m-%d') + + if config['date_filter_include_today']: + date_end_yyyy_mm_dd = datetime_reference_last_day_month.add(days=1).strftime('%Y-%m-%d') + + if config['date_filter_unit'] == 'day': + date_start_yyyy_mm_dd = datetime_reference.subtract(days=config['date_filter_num_unit']).strftime('%Y-%m-%d') + date_end_yyyy_mm_dd = datetime_reference.strftime('%Y-%m-%d') + + if config['date_filter_include_today']: + date_end_yyyy_mm_dd = datetime_reference.add(days=1).strftime('%Y-%m-%d') + + else: + + date_start_yyyy_mm_dd = config['override_date_start_yyyy_mm_dd'] + date_end_yyyy_mm_dd = config['override_date_end_yyyy_mm_dd'] + + return {'start': date_start_yyyy_mm_dd, 'end': date_end_yyyy_mm_dd} + + +def datestring_to_miltimestamp_with_tz(date): + + user_tz = pendulum.now().timezone.name + + d = pendulum.parse(date, tz=user_tz).timestamp() * 1000 + + return d + + + + + + diff --git a/oncrawl/python-lib/oncrawl/oncrawlDataAPI.py b/oncrawl/python-lib/oncrawl/oncrawlDataAPI.py new file mode 100644 index 00000000..8c3971c6 --- /dev/null +++ b/oncrawl/python-lib/oncrawl/oncrawlDataAPI.py @@ -0,0 +1,202 @@ +import requests +import json + +endpoint_by_index = { + 'pages': {'end_point' : 'crawl/__id__/pages'}, + 'links': {'end_point' : 'crawl/__id__/links'}, + 'logs': {'end_point' : 'project/__id__/log_monitoring/events'} +} + +def map_dss_storage_type(field_type): + + mapping = { + 'bool':'boolean', + 'float':'double', + 'ratio':'double', + 'object':'object' + } + + dss_type = mapping.get(field_type, 'string') + + return dss_type + +def fill_metadata(config, id): + + p_id = id + if config['index'] != 'logs': + p_id = config['list_crawls_project'][id]['project_id'] + + v = [p_id, config['list_projects_id_name'][p_id]] + if config['index'] != 'logs': + v = v + [id, config['crawl_config'], config['list_crawls_project'][id]['created_at']] + + return v + + +def build_schema_from_metadata(config, metadata): + + fields = list(metadata.keys()) + if config['index'] == 'logs': + fields = list(metadata.keys())[:2] + + f = { + 'list': fields, + 'schema': [{'name': field, 'type': metadata[field]} for field in fields] + } + + return f + +def build_schema_from_oncrawl(config, id, headers, schema): + + f = {'dataset_schema': [], 'dataset_schema_field_list': [], 'item_schema': []} 
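+    #item_schema will list every displayable field for this crawl/project; dataset_schema / dataset_schema_field_list only collect fields not already present in the dataset schema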
+ + fields = get_fields(config_index=config['index'], id=id, headers=headers) + + try: + for field in fields['fields']: + + if field['can_display']: + + #item field list + f['item_schema'].append(field['name']) + + #look for new fields to add to dataset schema + if field['name'] not in schema['dataset_schema_field_list']: + field_type = map_dss_storage_type(field['type']) + + f['dataset_schema'].append({ + "name": field['name'], + "type": field_type, + }) + + f['dataset_schema_field_list'].append(field['name']) + + except Exception as e: + print('############################\r\nProject {} has no logs monitoring feature\n\r############################'.format(id)) + + return f + +def build_schema_from_config(config): + + f = [] + oql = json.loads(config['oql']) + + for i, agg in enumerate(oql['aggs']): + + field_type = 'bigint' + if 'fields' in agg.keys(): + field_type = 'object' + + f.append({ + "name":agg['name'], + "type": field_type, + }) + + return f + +def get_fields(id, config_index, headers): + + endpoint = endpoint_by_index[config_index]['end_point'].replace('__id__', id) + + try: + + #get fields = dataset cols + get_fields = requests.request('GET', 'https://app.oncrawl.com/api/v2/data/{}/fields'.format(endpoint), headers=headers) + get_fields.raise_for_status() + + fields = get_fields.json() + + return fields + + except requests.exceptions.HTTPError as e: + if config_index != 'logs' and get_fields.status_code != 403: + raise Exception('{}-{}'.format(str(e), get_fields.text)) + else: + return + + except Exception as e: + raise Exception(e) + + +def export(config_oql, fields, config_index, id, headers): + + endpoint = endpoint_by_index[config_index]['end_point'].replace('__id__', id) + + #oql = oncrawl query language - interface to query our ES + oql = json.loads(config_oql)['oql'] + body = { + + 'oql' : oql, + 'fields' : fields, + 'file_type':'json' + } + + #get urls = dataset rows + try: + export = requests.request('POST', 'https://app.oncrawl.com/api/v2/data/{}?export=true'.format(endpoint), json=body, headers=headers, stream=True) + export.raise_for_status() + + for line in export.iter_lines(): + json_line = json.loads(line) + + yield json_line + + except requests.exceptions.HTTPError as e: + if config_index != 'logs' and export.status_code != 403: + raise Exception('{}-{}'.format(str(e), export.text)) + + except Exception as e: + raise Exception(e) + + +def aggs(config_oql, config_index, id, headers): + + endpoint = endpoint_by_index[config_index]['end_point'].replace('__id__', id) + + oql = json.loads(config_oql)['aggs'] + + body = { + + 'aggs' : oql + } + + try: + + get_data = requests.request('POST', 'https://app.oncrawl.com/api/v2/data/{}/aggs?fmt=row_objects'.format(endpoint), json=body, headers=headers) + get_data.raise_for_status() + + data = get_data.json() + + agg_value = {} + for j, agg in enumerate(data['aggs']): + cols = agg['cols'] + col_name = cols[-1] + agg_name = oql[j].get('name') if oql[j].get('name') else col_name + + if agg_name and agg_name in agg_value: + agg_name = '{}_{}'.format(agg_name,j) + + if len(agg['rows']) == 1: + agg_value[agg_name] = agg['rows'][0][col_name] + else: + agg_value[agg_name] = agg['rows'] + + json_line = agg_value + + yield json_line + + except requests.exceptions.HTTPError as e: + if config_index != 'logs' and get_data.status_code != 403: + raise Exception('{}-{}'.format(str(e), get_data.text)) + + except Exception as e: + error = e + + if data.get('aggs')[0].get('error'): + error = data.get('aggs')[0].get('error') + + if 
config_index != 'logs': + raise Exception(error) + + + \ No newline at end of file diff --git a/oncrawl/python-lib/oncrawl/oncrawlProjectAPI.py b/oncrawl/python-lib/oncrawl/oncrawlProjectAPI.py new file mode 100644 index 00000000..0f4f3f9a --- /dev/null +++ b/oncrawl/python-lib/oncrawl/oncrawlProjectAPI.py @@ -0,0 +1,112 @@ +import requests +import prison + +headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', +} + +def build_human_error(response): + + r = 'Please apologize, something bad happened' + + if response.status_code == 401: + r = 'Your API key seems to be invalid. Please check it and contact us if the error persists.' + + return r + +def get_projects(api_key): + + headers['Authorization'] = 'Bearer {}'.format(api_key) + + offset = 0 + limit = 1000 + sort = 'name:asc' + + response = [] + + while offset is not None: + + try: + + r = requests.get('https://app.oncrawl.com/api/v2/projects?&offset={}&limit={}&sort={}'.format(offset, limit, sort), headers = headers) + r.raise_for_status() + items = r.json() + + offset = items['meta']['offset'] + items['meta']['limit'] + + for item in items['projects']: + response.append({'id': item['id'], 'name':item['name']}) + + assert offset <= items['meta']['total'] + + except AssertionError: + offset = None + + except requests.exceptions.HTTPError: + offset = None + response = {'error': build_human_error(r)} + + except Exception as e: + offset = None + response = {'error' : e} + + return response + +def get_live_crawls(config, projects_id, timestamp_range, limit=None): + + headers['Authorization'] = 'Bearer {}'.format(config['api_key']) + + offset = 0 + + if limit is None: + limit = 1000 + + filters = { + "and" : [ + {"field": ["status", "equals", "done"]}, + {"field": ["created_at", "gte", timestamp_range['start']]}, + {"field": ["created_at", "lt", timestamp_range['end']]}, + {"field": ["project_id", "one_of", projects_id]} + ] + } + + try: + + response = [] + while offset is not None: + + r = requests.get('https://app.oncrawl.com/api/v2/crawls?filters={}&offset={}&limit={}&sort=created_at:desc'.format(prison.dumps(filters), offset, limit), headers = headers) + r.raise_for_status() + + items = r.json() + offset = items['meta']['offset'] + items['meta']['limit'] + + for item in items['crawls']: + + if ( config['index'] == 'pages' and item['status'] == 'done') or (config['index'] == 'links' and item['link_status'] == 'live'): + + response.append( + { + 'id': item['id'], + 'config_name': item['crawl_config']['name'], + 'project_id': item['project_id'], + 'created_at': item['created_at'], + 'ended_at': item['ended_at'] + } + ) + + assert offset <= items['meta']['total'] + + except AssertionError: + offset = None + + except requests.exceptions.HTTPError as e: + offset = None + response = {'error': build_human_error(r)} + + except Exception as e: + response = {'error' : e} + + return response + diff --git a/oncrawl/resource/behave.js b/oncrawl/resource/behave.js new file mode 100644 index 00000000..2d735bf6 --- /dev/null +++ b/oncrawl/resource/behave.js @@ -0,0 +1,673 @@ +/* + * Behave.js + * + * Copyright 2013, Jacob Kelley - http://jakiestfu.com/ + * Released under the MIT Licence + * http://opensource.org/licenses/MIT + * + * Github: http://github.com/jakiestfu/Behave.js/ + * Version: 1.5 + */ + + +(function(undefined){ + + 'use strict'; + + var BehaveHooks = BehaveHooks || (function(){ + var hooks = {}; + + return { + add: function(hookName, fn){ + if(typeof hookName == "object"){ + var i; + for(i=0; i