From 84dd1dd267803a3beecdbe945e20235b293a9581 Mon Sep 17 00:00:00 2001
From: Tobias Urdin
Date: Thu, 3 Aug 2023 13:37:33 +0000
Subject: [PATCH] cleanup: remove six

---
 gnocchi/amqp1d.py                             |   3 +-
 gnocchi/archive_policy.py                     |   7 +-
 gnocchi/carbonara.py                          |  57 +++--
 gnocchi/chef.py                               |   3 +-
 gnocchi/cli/manage.py                         |   3 +-
 gnocchi/common/redis.py                       |   5 +-
 gnocchi/common/swift.py                       |   2 +-
 gnocchi/gendoc.py                             |  20 +-
 gnocchi/incoming/__init__.py                  |   5 +-
 gnocchi/incoming/ceph.py                      |   9 +-
 gnocchi/incoming/file.py                      |   9 +-
 gnocchi/incoming/redis.py                     |   9 +-
 gnocchi/incoming/swift.py                     |   6 +-
 gnocchi/indexer/__init__.py                   |   7 +-
 ...397987e38570_no_more_slash_and_reencode.py |   3 -
 gnocchi/indexer/sqlalchemy.py                 |   9 +-
 gnocchi/indexer/sqlalchemy_base.py            |   5 +-
 gnocchi/json.py                               |  10 +-
 gnocchi/resource_type.py                      |  11 +-
 gnocchi/rest/aggregates/api.py                |  23 +-
 gnocchi/rest/aggregates/processor.py          |   9 +-
 gnocchi/rest/api.py                           | 222 +++++++++---------
 gnocchi/rest/exceptions.py                    |   3 +-
 gnocchi/rest/influxdb.py                      |   6 +-
 gnocchi/service.py                            |   2 +-
 gnocchi/statsd.py                             |   7 +-
 gnocchi/storage/__init__.py                   |  53 ++---
 gnocchi/storage/ceph.py                       |  11 +-
 gnocchi/storage/file.py                       |   3 +-
 gnocchi/storage/redis.py                      |  24 +-
 gnocchi/storage/swift.py                      |   3 +-
 gnocchi/tests/base.py                         |  12 +-
 .../tests/functional_live/test_gabbi_live.py  |   2 +-
 .../indexer/sqlalchemy/test_migrations.py     |   8 +-
 gnocchi/tests/test_carbonara.py               |  11 +-
 gnocchi/tests/test_rest.py                    |   9 +-
 gnocchi/tests/test_storage.py                 |  17 +-
 gnocchi/utils.py                              |  11 +-
 setup.cfg                                     |   1 -
 setup.py                                      |   1 -
 tools/measures_injector.py                    |   7 +-
 41 files changed, 280 insertions(+), 348 deletions(-)

diff --git a/gnocchi/amqp1d.py b/gnocchi/amqp1d.py
index 86e53e159..3267477ad 100644
--- a/gnocchi/amqp1d.py
+++ b/gnocchi/amqp1d.py
@@ -19,7 +19,6 @@
 import daiquiri
 import proton.handlers
 import proton.reactor
-import six
 import ujson
 
 from gnocchi import incoming
@@ -61,7 +60,7 @@ def flush(self):
 
     def _flush(self):
         archive_policies = {}
         resources = self._get_resources(self._measures.keys())
-        for host_id, measures_by_names in six.iteritems(self._measures):
+        for host_id, measures_by_names in self._measures.items():
             resource = resources[host_id]
             names = set(measures_by_names.keys())
 
diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py
index e92773312..74aa02668 100644
--- a/gnocchi/archive_policy.py
+++ b/gnocchi/archive_policy.py
@@ -21,7 +21,6 @@
 import numpy
 from oslo_config import cfg
 from oslo_config import types
-import six
 
 from gnocchi import carbonara
 from gnocchi import utils
@@ -40,7 +39,7 @@ class ArchivePolicy(object):
     VALID_AGGREGATION_METHODS = set(
         ('mean', 'sum', 'last', 'max', 'min',
          'std', 'median', 'first', 'count')).union(
-            set((str(i) + 'pct' for i in six.moves.range(1, 100))))
+            set((str(i) + 'pct' for i in range(1, 100))))
 
     VALID_AGGREGATION_METHODS = VALID_AGGREGATION_METHODS.union(
         set(map(lambda s: "rate:" + s,
@@ -240,13 +239,13 @@ def timespan(self):
     def jsonify(self):
         """Return a dict representation with human readable values."""
         return {
-            'timespan': six.text_type(
+            'timespan': str(
                 datetime.timedelta(
                     seconds=utils.timespan_total_seconds(
                         self.timespan)))
             if self.timespan is not None else None,
-            'granularity': six.text_type(
+            'granularity': str(
                 datetime.timedelta(
                     seconds=utils.timespan_total_seconds(
                         self.granularity))),
diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py
index 22ec38e00..65c14e5d9 100644
--- a/gnocchi/carbonara.py
+++ b/gnocchi/carbonara.py
@@ -27,7 +27,6 @@
 
 import lz4.block
 import numpy
-import six
 
 from gnocchi import calendar
 
@@ -232,7 +231,7 @@ def __init__(self, ts=None):
         self.ts = ts
 
     def __iter__(self):
-        return six.moves.zip(self.ts['timestamps'], self.ts['values'])
+        return zip(self.ts['timestamps'], self.ts['values'])
 
     @classmethod
     def from_data(cls, timestamps=None, values=None):
@@ -411,34 +410,34 @@ def benchmark(cls):
         now = numpy.datetime64("2015-04-03 23:11")
         timestamps = numpy.sort(numpy.array(
             [now + numpy.timedelta64(random.randint(1000000, 10000000), 'us')
-             for i in six.moves.range(points)]))
+             for i in range(points)]))
 
         print(cls.__name__)
         print("=" * len(cls.__name__))
 
         for title, values in [
-                ("Simple continuous range", six.moves.range(points)),
+                ("Simple continuous range", range(points)),
                 ("All 0", [float(0)] * points),
                 ("All 1", [float(1)] * points),
                 ("0 and 1", [0, 1] * (points // 2)),
                 ("1 and 0 random", [random.randint(0, 1)
-                                    for x in six.moves.range(points)]),
+                                    for x in range(points)]),
                 ("Small number random pos/neg",
                  [random.randint(-100000, 10000)
-                  for x in six.moves.range(points)]),
+                  for x in range(points)]),
                 ("Small number random pos",
-                 [random.randint(0, 20000) for x in six.moves.range(points)]),
+                 [random.randint(0, 20000) for x in range(points)]),
                 ("Small number random neg",
-                 [random.randint(-20000, 0) for x in six.moves.range(points)]),
-                ("Sin(x)", list(map(math.sin, six.moves.range(points)))),
+                 [random.randint(-20000, 0) for x in range(points)]),
+                ("Sin(x)", list(map(math.sin, range(points)))),
                 ("random ", [random.random()
-                             for x in six.moves.range(points)]),
+                             for x in range(points)]),
         ]:
             print(title)
             ts = cls.from_data(timestamps, values)
             t0 = time.time()
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 s = ts.serialize()
             t1 = time.time()
             print("  Serialization speed: %.2f MB/s"
@@ -447,7 +446,7 @@
             print("  Bytes per point: %.2f" % (len(s) / float(points)))
 
             t0 = time.time()
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 cls.unserialize(s, ONE_SECOND, 1)
             t1 = time.time()
             print("  Unserialization speed: %.2f MB/s"
                   % (((points * 16) / ((t1 - t0) / serialize_times))
                      / (1024.0 * 1024.0)))
@@ -637,7 +636,7 @@ def split(self):
             round_timestamp(self.timestamps, freq),
             return_counts=True)
         start = 0
-        for key, count in six.moves.zip(keys, counts):
+        for key, count in zip(keys, counts):
             end = start + count
             yield (SplitKey(key, self.aggregation.granularity),
                    AggregatedTimeSerie(self.aggregation, self[start:end]))
@@ -680,7 +679,7 @@ def __repr__(self):
 
     @staticmethod
     def is_compressed(serialized_data):
         """Check whatever the data was serialized with compression."""
-        return six.indexbytes(serialized_data, 0) == ord("c")
+        return serialized_data[0] == ord("c")
 
     @classmethod
     def unserialize(cls, data, key, aggregation):
@@ -813,29 +812,29 @@ def benchmark(cls):
         now = numpy.datetime64("2015-04-03 23:11")
         timestamps = numpy.sort(numpy.array(
             [now + i * sampling
-             for i in six.moves.range(points)]))
+             for i in range(points)]))
 
         print(cls.__name__)
         print("=" * len(cls.__name__))
 
         for title, values in [
-                ("Simple continuous range", six.moves.range(points)),
+                ("Simple continuous range", range(points)),
                 ("All 0", [float(0)] * points),
                 ("All 1", [float(1)] * points),
                 ("0 and 1", [0, 1] * (points // 2)),
                 ("1 and 0 random", [random.randint(0, 1)
-                                    for x in six.moves.range(points)]),
+                                    for x in range(points)]),
                 ("Small number random pos/neg",
                  [random.randint(-100000, 10000)
-                  for x in six.moves.range(points)]),
+                  for x in range(points)]),
                 ("Small number random pos",
-                 [random.randint(0, 20000) for x in six.moves.range(points)]),
+                 [random.randint(0, 20000) for x in range(points)]),
                 ("Small number random neg",
-                 [random.randint(-20000, 0) for x in six.moves.range(points)]),
-                ("Sin(x)", list(map(math.sin, six.moves.range(points)))),
+                 [random.randint(-20000, 0) for x in range(points)]),
+                ("Sin(x)", list(map(math.sin, range(points)))),
                 ("random ", [random.random()
-                             for x in six.moves.range(points)]),
+                             for x in range(points)]),
         ]:
             print(title)
             serialize_times = 50
             aggregation = Aggregation(agg, sampling, None)
             ts = cls.from_data(aggregation, timestamps, values)
             t0 = time.time()
             key = ts.get_split_key()
@@ -843,7 +842,7 @@
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 e, s = ts.serialize(key, compressed=False)
             t1 = time.time()
             print("  Uncompressed serialization speed: %.2f MB/s"
                   % (((points * 16) / ((t1 - t0) / serialize_times))
                      / (1024.0 * 1024.0)))
             print("  Bytes per point: %.2f" % (len(s) / float(points)))
 
             t0 = time.time()
@@ -852,7 +851,7 @@
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 cls.unserialize(s, key, 'mean')
             t1 = time.time()
             print("  Unserialization speed: %.2f MB/s"
                   % (((points * 16) / ((t1 - t0) / serialize_times))
                      / (1024.0 * 1024.0)))
 
             t0 = time.time()
@@ -860,7 +859,7 @@
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 o, s = ts.serialize(key, compressed=True)
             t1 = time.time()
             print("  Compressed serialization speed: %.2f MB/s"
                   % (((points * 16) / ((t1 - t0) / serialize_times))
                      / (1024.0 * 1024.0)))
             print("  Bytes per point: %.2f" % (len(s) / float(points)))
 
             t0 = time.time()
@@ -869,7 +868,7 @@
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 cls.unserialize(s, key, 'mean')
             t1 = time.time()
             print("  Uncompression speed: %.2f MB/s"
@@ -880,7 +879,7 @@
             def per_sec(t1, t0):
                 return 1 / ((t1 - t0) / serialize_times)
 
             t0 = time.time()
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 list(ts.split())
             t1 = time.time()
             print("  split() speed: %.2f Hz" % per_sec(t1, t0))
@@ -894,7 +893,7 @@ def per_sec(t1, t0):
             )
 
             t0 = time.time()
-            for i in six.moves.range(serialize_times):
+            for i in range(serialize_times):
                 ts.merge(tsbis)
             t1 = time.time()
             print("  merge() speed %.2f Hz" % per_sec(t1, t0))
@@ -904,7 +903,7 @@ def per_sec(t1, t0):
                 serialize_times = 3 if agg.endswith('pct') else 10
                 ts = cls(ts=pts, aggregation=aggregation)
                 t0 = time.time()
-                for i in six.moves.range(serialize_times):
+                for i in range(serialize_times):
                     ts.resample(resample)
                 t1 = time.time()
                 print("  resample(%s) speed: %.2f Hz"
diff --git a/gnocchi/chef.py b/gnocchi/chef.py
index 7d963bd51..a51f88ad1 100644
--- a/gnocchi/chef.py
+++ b/gnocchi/chef.py
@@ -17,7 +17,6 @@
 import hashlib
 
 import daiquiri
-import six
 
 from gnocchi import indexer
 
@@ -124,7 +123,7 @@ def refresh_metrics(self, metrics, timeout=None, sync=False):
                 self.storage.add_measures_to_metrics({
                     metrics_by_id[metric_id]: measures
                     for metric_id, measures
-                    in six.iteritems(metrics_and_measures)
+                    in metrics_and_measures.items()
                 })
                 LOG.debug("Measures for %d metrics processed",
                           len(metric_ids))
diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py
index c5220ef9e..aa4c42eff 100644
--- a/gnocchi/cli/manage.py
+++ b/gnocchi/cli/manage.py
@@ -20,7 +20,6 @@
 import daiquiri
 from oslo_config import cfg
 from oslo_config import generator
-import six
 
 from gnocchi import archive_policy
 from gnocchi import incoming
@@ -81,7 +80,7 @@ def upgrade():
             and not index.list_archive_policy_rules()):
         if conf.skip_index:
             index = indexer.get_driver(conf)
-        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
+        for name, ap in archive_policy.DEFAULT_ARCHIVE_POLICIES.items():
             index.create_archive_policy(ap)
         index.create_archive_policy_rule("default", "*", "low")
diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py
index 9ccec2864..ba41a8c74 100644
--- a/gnocchi/common/redis.py
+++ b/gnocchi/common/redis.py
@@ -17,8 +17,7 @@
 from __future__ import absolute_import
 
 from oslo_config import cfg
-import six
-from six.moves.urllib import parse
+from urllib import parse
 
 try:
     import redis
@@ -173,7 +172,7 @@ def get_client(conf, scripts=None):
     if scripts is not None:
         scripts = {
             name: client.register_script(code)
-            for name, code in six.iteritems(scripts)
+            for name, code in scripts.items()
         }
 
     return client, scripts
diff --git a/gnocchi/common/swift.py b/gnocchi/common/swift.py
index 7fc0efbdf..5fa802383 100644
--- a/gnocchi/common/swift.py
+++ b/gnocchi/common/swift.py
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 import daiquiri
-from six.moves.urllib.parse import quote
+from urllib.parse import quote
 
 try:
     from swiftclient import client as swclient
diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py
index d44e024b3..954302220 100644
--- a/gnocchi/gendoc.py
+++ b/gnocchi/gendoc.py
@@ -14,6 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 from __future__ import absolute_import
+import io
 import json
 import os
 import subprocess
@@ -22,8 +23,6 @@
 
 import jinja2
 from oslo_config import generator
-import six
-import six.moves
 from sphinx.util import logging
 import webob.request
 import yaml
@@ -49,10 +48,7 @@ def _extract_body(req_or_resp):
     if not req_or_resp.text:
         return ""
 
-    if six.PY2:
-        body = req_or_resp.body
-    else:
-        body = req_or_resp.text
+    body = req_or_resp.text
     if req_or_resp.content_type.startswith("application/json"):
         body = _format_json(body)
     return "\n    ".join(body.split("\n"))
@@ -61,7 +57,7 @@ def _extract_body(req_or_resp):
 def _format_headers(headers):
     return "\n".join(
         "    %s: %s" % (k, v)
-        for k, v in six.iteritems(headers))
+        for k, v in headers.items())
 
 
 def _response_to_httpdomain(response):
@@ -209,10 +205,8 @@ def setup(app):
                 scenarios=scenarios)
 
         template = jinja2.Template(entry['request'])
-        fake_file = six.moves.cStringIO()
+        fake_file = io.StringIO()
         content = template.render(scenarios=scenarios)
-        if six.PY2:
-            content = content.encode('utf-8')
         fake_file.write(content)
         fake_file.seek(0)
         request = webapp.RequestClass.from_file(fake_file)
@@ -228,7 +222,7 @@ def setup(app):
             request.body = fake_file.read(clen)
 
         LOG.info("Doing request %s: %s",
-                 entry['name'], six.text_type(request))
+                 entry['name'], str(request))
         with webapp.use_admin_user():
             response = webapp.request(request)
         entry['response'] = response
@@ -238,13 +232,9 @@ def setup(app):
     test.tearDownClass()
     with open("doc/source/rest.j2", "r") as f:
         content = f.read()
-        if six.PY2:
-            content = content.decode("utf-8")
     template = jinja2.Template(content)
     with open("doc/source/rest.rst", "w") as f:
         content = template.render(scenarios=scenarios)
-        if six.PY2:
-            content = content.encode("utf-8")
         f.write(content)
 
     config_output_file = 'doc/source/gnocchi.conf.sample'
diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py
index d3620e991..1079b23cf 100644
--- a/gnocchi/incoming/__init__.py
+++ b/gnocchi/incoming/__init__.py
@@ -21,7 +21,6 @@
 
 import daiquiri
 import numpy
-import six
 
 from gnocchi.carbonara import TIMESERIES_ARRAY_DTYPE
 from gnocchi import exceptions
@@ -194,7 +193,7 @@ def add_measures_batch(self, metrics_and_measures):
         self.MAP_METHOD(self._store_new_measures,
                         ((metric_id,
                           self._encode_measures(measures))
                          for metric_id, measures
-                         in six.iteritems(metrics_and_measures)))
+                         in metrics_and_measures.items()))
 
     @staticmethod
     def _store_new_measures(metric_id, data):
@@ -245,7 +244,7 @@ def sack_for_metric(self, metric_id):
         return self._make_sack(metric_id.int % self.NUM_SACKS)
 
     def iter_sacks(self):
-        return (self._make_sack(i) for i in six.moves.range(self.NUM_SACKS))
+        return (self._make_sack(i) for i in range(self.NUM_SACKS))
 
     @staticmethod
     def iter_on_sacks_to_process():
diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py
index 8a39fa8e4..127205035 100644
--- a/gnocchi/incoming/ceph.py
+++ b/gnocchi/incoming/ceph.py
@@ -19,7 +19,6 @@
 import uuid
 
 import numpy
-import six
 
 from gnocchi.common import ceph
 from gnocchi import incoming
@@ -82,7 +81,7 @@ def remove_sacks(self):
 
     def add_measures_batch(self, metrics_and_measures):
         data_by_sack = defaultdict(lambda: defaultdict(list))
-        for metric_id, measures in six.iteritems(metrics_and_measures):
+        for metric_id, measures in metrics_and_measures.items():
             name = "_".join((
                 self.MEASURE_PREFIX,
                 str(metric_id),
@@ -188,7 +187,7 @@ def process_measure_for_metrics(self, metric_ids):
                 processed_keys[sack] = self._list_keys_to_process(
                     sack, prefix=self.MEASURE_PREFIX + "_" + str(metric_id))
                 m = self._make_measures_array()
-                for k, v in six.iteritems(processed_keys[sack]):
+                for k, v in processed_keys[sack].items():
                     m = numpy.concatenate(
                         (m, self._unserialize_measures(k, v)))
 
@@ -198,7 +197,7 @@ def process_measure_for_metrics(self, metric_ids):
 
         # Now clean omap
         with rados.WriteOpCtx() as op:
-            for sack, keys in six.iteritems(processed_keys):
+            for sack, keys in processed_keys.items():
                 # NOTE(sileht): come on Ceph, no return code
                 # for this operation ?!!
                 self.ioctx.remove_omap_keys(op, tuple(keys.keys()))
@@ -210,7 +209,7 @@ def process_measures_for_sack(self, sack):
         measures = defaultdict(self._make_measures_array)
         omaps = self._list_keys_to_process(
             sack, prefix=self.MEASURE_PREFIX + "_")
-        for k, v in six.iteritems(omaps):
+        for k, v in omaps.items():
             try:
                 metric_id = uuid.UUID(k.split("_")[1])
             except (ValueError, IndexError):
diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py
index 446807e83..b21a0cb66 100644
--- a/gnocchi/incoming/file.py
+++ b/gnocchi/incoming/file.py
@@ -22,7 +22,6 @@
 
 import daiquiri
 import numpy
-import six
 
 from gnocchi import incoming
 from gnocchi import utils
@@ -62,7 +61,7 @@ def _sack_path(self, sack):
         return os.path.join(self.basepath, str(sack))
 
     def _measure_path(self, sack, metric_id):
-        return os.path.join(self._sack_path(sack), six.text_type(metric_id))
+        return os.path.join(self._sack_path(sack), str(metric_id))
 
     def _build_measure_path(self, metric_id, random_id=None):
         sack = self.sack_for_metric(metric_id)
@@ -70,7 +69,7 @@ def _build_measure_path(self, metric_id, random_id=None):
         if random_id:
             if random_id is True:
                 now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
-                random_id = six.text_type(uuid.uuid4()) + now
+                random_id = str(uuid.uuid4()) + now
             return os.path.join(path, random_id)
         return path
 
@@ -177,7 +176,7 @@ def process_measure_for_metrics(self, metric_ids):
 
         yield measures
 
-        for metric_id, files in six.iteritems(processed_files):
+        for metric_id, files in processed_files.items():
             self._delete_measures_files_for_metric(metric_id, files)
 
     @contextlib.contextmanager
@@ -204,5 +203,5 @@ def process_measures_for_sack(self, sack):
 
         yield measures
 
-        for metric_id, files in six.iteritems(processed_files):
+        for metric_id, files in processed_files.items():
             self._delete_measures_files_for_metric(metric_id, files)
diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py
index 28859fc77..f52390960 100644
--- a/gnocchi/incoming/redis.py
+++ b/gnocchi/incoming/redis.py
@@ -18,7 +18,6 @@
 
 import daiquiri
 from redis.exceptions import ConnectionError
-import six
 import tenacity
 
 from gnocchi.common import redis
@@ -86,7 +85,7 @@ def _build_measure_path(self, metric_id):
     def add_measures_batch(self, metrics_and_measures):
         notified_sacks = set()
         pipe = self._client.pipeline(transaction=False)
-        for metric_id, measures in six.iteritems(metrics_and_measures):
+        for metric_id, measures in metrics_and_measures.items():
             sack_name = str(self.sack_for_metric(metric_id))
             path = self._build_measure_path_with_sack(metric_id, sack_name)
             pipe.rpush(path, self._encode_measures(measures))
@@ -108,7 +107,7 @@ def update_report(results, m_list):
             report_vars['measures'] += sum(results)
             if details:
                 report_vars['metric_details'].update(
-                    dict(six.moves.zip(m_list, results)))
+                    dict(zip(m_list, results)))
 
         match = redis.SEP.join([self._get_sack_name("*").encode(), b"*"])
         metrics = 0
@@ -149,12 +148,12 @@ def process_measure_for_metrics(self, metric_ids):
             )
         results = pipe.execute()
 
-        for metric_id, (item_len, data) in six.moves.zip(metric_ids, results):
+        for metric_id, (item_len, data) in zip(metric_ids, results):
             measures[metric_id] = self._unserialize_measures(metric_id, data)
 
         yield measures
 
-        for metric_id, (item_len, data) in six.moves.zip(metric_ids, results):
+        for metric_id, (item_len, data) in zip(metric_ids, results):
             key = self._build_measure_path(metric_id)
             # ltrim is inclusive, bump 1 to remove up to and including nth item
             pipe.ltrim(key, item_len + 1, -1)
diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py
index b232bfd72..bc1429282 100644
--- a/gnocchi/incoming/swift.py
+++ b/gnocchi/incoming/swift.py
@@ -18,8 +18,6 @@
 import json
 import uuid
 
-import six
-
 from gnocchi.common import swift
 from gnocchi import incoming
 from gnocchi import utils
@@ -85,7 +83,7 @@ def _build_report(self, details):
 
     def _list_measure_files_for_metric(self, sack, metric_id):
         headers, files = self.swift.get_container(
-            str(sack), path=six.text_type(metric_id),
+            str(sack), path=str(metric_id),
             full_listing=True)
         return files
 
@@ -118,7 +116,7 @@ def process_measure_for_metrics(self, metric_ids):
         yield measures
 
         # Now clean objects
-        for sack_name, files in six.iteritems(all_files):
+        for sack_name, files in all_files.items():
             swift.bulk_delete(self.swift, sack_name, files)
 
     @contextlib.contextmanager
diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py
index 55cba32da..9704a1f3a 100644
--- a/gnocchi/indexer/__init__.py
+++ b/gnocchi/indexer/__init__.py
@@ -19,9 +19,8 @@
 
 import iso8601
 from oslo_config import cfg
-import six
-from six.moves.urllib import parse
 from stevedore import driver
+from urllib import parse
 
 from gnocchi import exceptions
 from gnocchi import utils
@@ -59,8 +58,8 @@ def __eq__(self, other):
     @property
     def etag(self):
         etag = hashlib.sha1()
-        etag.update(six.text_type(self.id).encode('utf-8'))
-        etag.update(six.text_type(
+        etag.update(str(self.id).encode('utf-8'))
+        etag.update(str(
             self.revision_start.isoformat()).encode('utf-8'))
         return etag.hexdigest()
 
diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
index a671fc1db..d004c556b 100644
--- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
+++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
@@ -23,7 +23,6 @@
 import uuid
 
 from alembic import op
-import six
 import sqlalchemy as sa
 import sqlalchemy_utils
 
@@ -119,8 +118,6 @@ def upgrade():
 
         new_original_resource_id = resource.original_resource_id.replace(
             '/', '_')
-        if six.PY2:
-            new_original_resource_id = new_original_resource_id.encode('utf-8')
         new_id = sa.literal(uuidtype.process_bind_param(
             str(utils.ResourceUUID(
                 new_original_resource_id, resource.creator)),
diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py
index 32c12eb66..2b4d22365 100644
--- a/gnocchi/indexer/sqlalchemy.py
+++ b/gnocchi/indexer/sqlalchemy.py
@@ -37,13 +37,12 @@
     import pymysql.err
 except ImportError:
     pymysql = None
-import six
-from six.moves.urllib import parse as urlparse
 import sqlalchemy
 from sqlalchemy.engine import url as sqlalchemy_url
 import sqlalchemy.exc
 from sqlalchemy import types as sa_types
 import sqlalchemy_utils
+from urllib import parse as urlparse
 
 from gnocchi import exceptions
 from gnocchi import indexer
@@ -960,7 +959,7 @@ def update_resource(self, resource_type, resource_id,
                     r.ended_at = ended_at
 
             if kwargs:
-                for attribute, value in six.iteritems(kwargs):
+                for attribute, value in kwargs.items():
                     if hasattr(r, attribute):
                         setattr(r, attribute, value)
                     else:
@@ -991,7 +990,7 @@ def update_resource(self, resource_type, resource_id,
 
     @staticmethod
     def _set_metrics_for_resource(session, r, metrics):
-        for name, value in six.iteritems(metrics):
+        for name, value in metrics.items():
             if isinstance(value, uuid.UUID):
                 try:
                     update = session.query(Metric).filter(
@@ -1327,7 +1326,7 @@ class QueryTransformer(object):
 
     converters = (
         (types.TimestampUTC, utils.to_datetime),
-        (sa_types.String, six.text_type),
+        (sa_types.String, str),
         (sa_types.Integer, int),
         (sa_types.Numeric, float),
     )
diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py
index bcb11fd08..97a74c9de 100644
--- a/gnocchi/indexer/sqlalchemy_base.py
+++ b/gnocchi/indexer/sqlalchemy_base.py
@@ -17,7 +17,6 @@
 from __future__ import absolute_import
 
 from oslo_db.sqlalchemy import models
-import six
 import sqlalchemy
 from sqlalchemy.ext import declarative
 import sqlalchemy_utils
@@ -209,7 +208,7 @@ def jsonify(self, attrs=None):
         d = dict(self)
         del d['revision']
         if 'metrics' not in sqlalchemy.inspect(self).unloaded:
-            d['metrics'] = dict((m.name, six.text_type(m.id))
+            d['metrics'] = dict((m.name, str(m.id))
                                 for m in self.metrics)
 
         if self.creator is None:
@@ -358,7 +357,7 @@ def __next__(self):
         # NOTE(sileht): Our custom resource attribute columns don't
         # have the same name in database than in sqlalchemy model
         # so remove the additional "f_" for the model name
-        n = six.advance_iterator(self.i)
+        n = next(self.i)
         model_attr = n[2:] if n[:2] == "f_" else n
         return model_attr, getattr(self.model, n)
 
diff --git a/gnocchi/json.py b/gnocchi/json.py
index 3147d38db..7150dc859 100644
--- a/gnocchi/json.py
+++ b/gnocchi/json.py
@@ -17,17 +17,16 @@
 import uuid
 
 import numpy
-import six
 import ujson
 
 
 def to_primitive(obj):
-    if isinstance(obj, ((six.text_type,)
-                        + six.integer_types
+    if isinstance(obj, ((str,)
+                        + (int,)
                         + (type(None), bool, float))):
         return obj
     if isinstance(obj, uuid.UUID):
-        return six.text_type(obj)
+        return str(obj)
     if isinstance(obj, datetime.datetime):
         return obj.isoformat()
     if isinstance(obj, numpy.datetime64):
@@ -43,9 +42,6 @@ def to_primitive(obj):
     if isinstance(obj, dict):
         return {to_primitive(k): to_primitive(v)
                 for k, v in obj.items()}
-    if hasattr(obj, 'iteritems'):
-        return to_primitive(dict(obj.iteritems()))
-    # Python 3 does not have iteritems
     if hasattr(obj, 'items'):
         return to_primitive(dict(obj.items()))
     if hasattr(obj, '__iter__'):
diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py
index 9daec8e6e..e46206ff7 100644
--- a/gnocchi/resource_type.py
+++ b/gnocchi/resource_type.py
@@ -14,7 +14,6 @@
 import numbers
 import re
 
-import six
 import stevedore
 import voluptuous
 
@@ -149,7 +148,7 @@ def __init__(self, min_length, max_length, *args, **kwargs):
 
     @property
     def schema_ext(self):
-        return voluptuous.All(six.text_type,
+        return voluptuous.All(str,
                               voluptuous.Length(
                                   min=self.min_length,
                                   max=self.max_length))
@@ -231,18 +230,18 @@ def __init__(self, *args, **kwargs):
         type_schemas = tuple([ext.plugin.meta_schema()
                               for ext in self.extensions])
         self._schema = voluptuous.Schema({
-            "name": six.text_type,
+            "name": str,
             voluptuous.Required("attributes", default={}): {
-                six.text_type: voluptuous.Any(*tuple(type_schemas))
+                str: voluptuous.Any(*tuple(type_schemas))
             }
         })
 
         type_schemas = tuple([ext.plugin.meta_schema(for_update=True)
                               for ext in self.extensions])
         self._schema_for_update = voluptuous.Schema({
-            "name": six.text_type,
+            "name": str,
             voluptuous.Required("attributes", default={}): {
-                six.text_type: voluptuous.Any(*tuple(type_schemas))
+                str: voluptuous.Any(*tuple(type_schemas))
             }
         })
 
diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py
index 689a9b6d3..aa435fb73 100644
--- a/gnocchi/rest/aggregates/api.py
+++ b/gnocchi/rest/aggregates/api.py
@@ -22,7 +22,6 @@
 import pecan
 from pecan import rest
 import pyparsing
-import six
 import voluptuous
 
 from gnocchi import indexer
@@ -65,10 +64,10 @@ def MetricSchema(v):
         raise voluptuous.Invalid("'%s' operation invalid" % v[0])
 
     return [u"metric"] + voluptuous.Schema(voluptuous.Any(
-        voluptuous.ExactSequence([six.text_type, six.text_type]),
+        voluptuous.ExactSequence([str, str]),
         voluptuous.All(
             voluptuous.Length(min=1),
-            [voluptuous.ExactSequence([six.text_type, six.text_type])],
+            [voluptuous.ExactSequence([str, str])],
         )), required=True)(v[1:])
 
 
@@ -128,14 +127,14 @@ def MetricSchema(v):
 
 
 def OperationsSchema(v):
-    if isinstance(v, six.text_type):
+    if isinstance(v, str):
         try:
             v = pyparsing.OneOrMore(
                 pyparsing.nestedExpr()).parseString(v).asList()[0]
         except pyparsing.ParseException as e:
             api.abort(400, {"cause": "Invalid operations",
                             "reason": "Fail to parse the operations string",
-                            "detail": six.text_type(e)})
+                            "detail": str(e)})
     return voluptuous.Schema(voluptuous.Any(*OperationsSchemaBase),
                              required=True)(v)
 
@@ -507,9 +506,9 @@ def post(self, start=None, stop=None, granularity=None,
                     references, resources, start, stop, groupby)
 
             except indexer.NoSuchMetric as e:
-                api.abort(404, six.text_type(e))
+                api.abort(404, str(e))
             except indexer.IndexerException as e:
-                api.abort(400, six.text_type(e))
+                api.abort(400, str(e))
             except Exception as e:
                 LOG.exception(e)
                 raise e
@@ -517,23 +516,23 @@ def post(self, start=None, stop=None, granularity=None,
             if not results:
                 all_metrics_not_found = list(set((m for (m, a) in references)))
                 all_metrics_not_found.sort()
-                api.abort(404, six.text_type(
+                api.abort(404, str(
                     indexer.NoSuchMetric(all_metrics_not_found)))
             return results
         else:
             try:
-                metric_ids = set(six.text_type(utils.UUID(m))
+                metric_ids = set(str(utils.UUID(m))
                                  for (m, a) in references)
             except ValueError as e:
                 api.abort(400, {"cause": "Invalid metric references",
-                                "reason": six.text_type(e),
+                                "reason": str(e),
                                 "detail": references})
 
             metrics = pecan.request.indexer.list_metrics(
                 attribute_filter={"in": {"id": metric_ids}})
             missing_metric_ids = (set(metric_ids)
-                                  - set(six.text_type(m.id) for m in metrics))
+                                  - set(str(m.id) for m in metrics))
             if missing_metric_ids:
                 api.abort(404, {"cause": "Unknown metrics",
                                 "reason": "Provided metrics don't exists",
@@ -546,7 +545,7 @@ def post(self, start=None, stop=None, granularity=None,
             for metric in metrics:
                 api.enforce("get metric", metric)
 
-            metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics)
+            metrics_by_ids = dict((str(m.id), m) for m in metrics)
             references = [processor.MetricReference(metrics_by_ids[m], a)
                           for (m, a) in references]
 
diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py
index e837df22e..2af385034 100644
--- a/gnocchi/rest/aggregates/processor.py
+++ b/gnocchi/rest/aggregates/processor.py
@@ -19,7 +19,6 @@
 
 import daiquiri
 import numpy
-import six
 
 from gnocchi import carbonara
 from gnocchi.rest.aggregates import exceptions
@@ -88,8 +87,8 @@ def get_measures(storage, references, operations,
     # granularities_in_common
     granularities = [
         g
-        for g, occurrence in six.iteritems(
-            collections.Counter(all_granularities))
+        for g, occurrence in collections.Counter(
+            all_granularities).items()
         if occurrence == len(references)
     ]
 
@@ -210,7 +209,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None,
             v = values[0]
             t = times
             g = [granularity] * len(t)
-            output["aggregated"].extend(six.moves.zip(t, g, v))
+            output["aggregated"].extend(zip(t, g, v))
         return output
     else:
         r_output = collections.defaultdict(
@@ -230,7 +229,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None,
                 v = values[i]
                 t = times
                 g = [granularity] * len(t)
-                measures = six.moves.zip(t, g, v)
+                measures = zip(t, g, v)
                 if ref.resource is None:
                     m_output[ref.name][ref.aggregation].extend(measures)
                 else:
diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py
index 5cc65dfd1..6e23a4c8e 100644
--- a/gnocchi/rest/api.py
+++ b/gnocchi/rest/api.py
@@ -25,10 +25,9 @@
 import pecan
 from pecan import rest
 import pyparsing
-import six
-from six.moves.urllib import parse as urllib_parse
 import tenacity
 import tooz
+from urllib import parse as urllib_parse
 import voluptuous
 import werkzeug.http
 
@@ -75,8 +74,8 @@ def abort(status_code, detail=''):
     if isinstance(detail, voluptuous.Invalid):
         detail = {
             'cause': 'Invalid input',
-            'reason': six.text_type(detail),
-            'detail': [six.text_type(path) for path in detail.path],
+            'reason': str(detail),
+            'detail': [str(path) for path in detail.path],
         }
     elif isinstance(detail, Exception):
         detail = detail.jsonify()
@@ -90,7 +89,7 @@ def flatten_dict_to_keypairs(d, separator=':'):
     :param d: dictionaries which may be nested
     :param separator: symbol between names
     """
-    for name, value in sorted(six.iteritems(d)):
+    for name, value in sorted(d.items()):
         if isinstance(value, dict):
             for subname, subvalue in flatten_dict_to_keypairs(value,
                                                               separator):
@@ -123,12 +122,6 @@ def enforce(rule, target):
 
 def set_resp_location_hdr(location):
     location = '%s%s' % (pecan.request.script_name, location)
-    # NOTE(sileht): according the pep-3333 the headers must be
-    # str in py2 and py3 even this is not the same thing in both
-    # version
-    # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues
-    if six.PY2 and isinstance(location, six.text_type):
-        location = location.encode('utf-8')
     location = urllib_parse.quote(location)
     pecan.response.headers['Location'] = location
@@ -198,7 +191,7 @@ def strtobool(varname, v):
     try:
         return utils.strtobool(v)
     except ValueError as e:
-        abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e)))
+        abort(400, "Unable to parse `%s': %s" % (varname, str(e)))
 
 
 RESOURCE_DEFAULT_PAGINATION = [u'revision_start:asc',
@@ -216,11 +209,11 @@ def get_pagination_options(params, default):
                 voluptuous.Range(min=1),
                 voluptuous.Clamp(
                     min=1, max=pecan.request.conf.api.max_limit)),
-            "marker": six.text_type,
+            "marker": str,
             voluptuous.Required("sort", default=default):
             voluptuous.All(
                 voluptuous.Coerce(arg_to_list),
-                [six.text_type]),
+                [str]),
         }, extra=voluptuous.REMOVE_EXTRA)(params)
     except voluptuous.Invalid as e:
         abort(400, {"cause": "Argument value error",
@@ -252,14 +245,14 @@ def get(self):
         if ap:
             enforce("get archive policy", ap)
             return ap
-        abort(404, six.text_type(
+        abort(404, str(
             indexer.NoSuchArchivePolicy(self.archive_policy)))
 
     @pecan.expose('json')
     def patch(self):
         ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
         if not ap:
-            abort(404, six.text_type(
+            abort(404, str(
                 indexer.NoSuchArchivePolicy(self.archive_policy)))
         enforce("update archive policy", ap)
 
@@ -273,14 +266,14 @@ def patch(self):
             ap_items = [archive_policy.ArchivePolicyItem(**item)
                         for item in body['definition']]
         except ValueError as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         try:
             return pecan.request.indexer.update_archive_policy(
                 self.archive_policy, ap_items,
                 back_window=body.get('back_window'))
         except indexer.UnsupportedArchivePolicyChange as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
     @pecan.expose('json')
     def delete(self):
@@ -291,9 +284,9 @@ def delete(self):
         try:
             pecan.request.indexer.delete_archive_policy(self.archive_policy)
         except indexer.NoSuchArchivePolicy as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         except indexer.ArchivePolicyInUse as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 class ArchivePoliciesController(rest.RestController):
@@ -310,7 +303,7 @@ def post(self):
             archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES
         )
         ArchivePolicySchema = voluptuous.Schema({
-            voluptuous.Required("name"): six.text_type,
+            voluptuous.Required("name"): str,
             voluptuous.Required("back_window", default=0): voluptuous.All(
                 voluptuous.Coerce(int),
                 voluptuous.Range(min=0),
@@ -327,12 +320,12 @@ def post(self):
         try:
             ap = archive_policy.ArchivePolicy.from_dict(body)
         except ValueError as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         enforce("create archive policy", ap)
         try:
             ap = pecan.request.indexer.create_archive_policy(ap)
         except indexer.ArchivePolicyAlreadyExists as e:
-            abort(409, six.text_type(e))
+            abort(409, str(e))
 
         location = "/archive_policy/" + ap.name
         set_resp_location_hdr(location)
@@ -353,16 +346,16 @@ def _lookup(self, archive_policy_rule, *remainder):
         )
         if apr:
             return ArchivePolicyRuleController(apr), remainder
-        abort(404, six.text_type(
+        abort(404, str(
             indexer.NoSuchArchivePolicyRule(archive_policy_rule)))
 
     @pecan.expose('json')
     def post(self):
         enforce("create archive policy rule", {})
         ArchivePolicyRuleSchema = voluptuous.Schema({
-            voluptuous.Required("name"): six.text_type,
-            voluptuous.Required("metric_pattern"): six.text_type,
-            voluptuous.Required("archive_policy_name"): six.text_type,
+            voluptuous.Required("name"): str,
+            voluptuous.Required("metric_pattern"): str,
+            voluptuous.Required("archive_policy_name"): str,
         })
         body = deserialize_and_validate(ArchivePolicyRuleSchema)
 
@@ -373,7 +366,7 @@ def post(self):
                 body['archive_policy_name']
             )
         except indexer.ArchivePolicyRuleAlreadyExists as e:
-            abort(409, six.text_type(e))
+            abort(409, str(e))
         except indexer.NoSuchArchivePolicy as e:
             abort(400, e)
 
@@ -400,7 +393,7 @@ def get(self):
     @pecan.expose('json')
     def patch(self):
         ArchivePolicyRuleSchema = voluptuous.Schema({
-            voluptuous.Required("name"): six.text_type,
+            voluptuous.Required("name"): str,
         })
         body = deserialize_and_validate(ArchivePolicyRuleSchema)
         enforce("update archive policy rule", {})
@@ -408,7 +401,7 @@ def patch(self):
             return pecan.request.indexer.update_archive_policy_rule(
                 self.archive_policy_rule.name, body["name"])
         except indexer.UnsupportedArchivePolicyRuleChange as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
     @pecan.expose('json')
     def delete(self):
@@ -421,7 +414,7 @@ def delete(self):
                 self.archive_policy_rule.name
             )
         except indexer.NoSuchArchivePolicyRule as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
 
 def MeasuresListSchema(measures):
@@ -437,7 +430,7 @@ def MeasuresListSchema(measures):
     except Exception:
         raise voluptuous.Invalid("unexpected measures value")
 
-    return (incoming.Measure(t, v) for t, v in six.moves.zip(times, values))
+    return (incoming.Measure(t, v) for t, v in zip(times, values))
 
 
 class MetricController(rest.RestController):
@@ -477,7 +470,7 @@ def get_measures(self, start=None, stop=None, aggregation='mean',
             resample = (resample if calendar.GROUPINGS.get(resample) else
                         utils.to_timespan(resample))
         except ValueError as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         if granularity is None:
             granularity = [d.granularity
@@ -502,7 +495,7 @@ def get_measures(self, start=None, stop=None, aggregation='mean',
                 agg = self.metric.archive_policy.get_aggregation(
                     aggregation, g)
                 if agg is None:
-                    abort(404, six.text_type(
+                    abort(404, str(
                         storage.AggregationDoesNotExist(
                             self.metric, aggregation, g)
                     ))
@@ -526,7 +519,7 @@ def get_measures(self, start=None, stop=None, aggregation='mean',
                                       reverse=True)
                     for timestamp, value in results[key]]
         except storage.AggregationDoesNotExist as e:
            abort(404, str(e))
        except storage.MetricDoesNotExist:
            return []
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
     @pecan.expose('json')
     def delete(self):
         try:
             pecan.request.indexer.delete_metric(self.metric.id)
         except indexer.NoSuchMetric as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
 
 class MetricsController(rest.RestController):
@@ -546,13 +539,13 @@ def _lookup(self, id, *remainder):
         try:
             metric_id = uuid.UUID(id)
         except ValueError:
-            abort(404, six.text_type(indexer.NoSuchMetric(id)))
+            abort(404, str(indexer.NoSuchMetric(id)))
 
         # Load details for ACL
         metrics = pecan.request.indexer.list_metrics(
             attribute_filter={"=": {"id": metric_id}}, details=True)
         if not metrics:
-            abort(404, six.text_type(indexer.NoSuchMetric(id)))
+            abort(404, str(indexer.NoSuchMetric(id)))
         return MetricController(metrics[0]), remainder
 
     # NOTE(jd) Define this method as it was a voluptuous schema – it's just a
@@ -564,11 +557,11 @@ def MetricSchema(definition):
 
         # First basic validation
         schema = voluptuous.Schema({
-            "archive_policy_name": six.text_type,
+            "archive_policy_name": str,
             "resource_id": functools.partial(ResourceID, creator=creator),
-            "name": six.text_type,
+            "name": str,
             voluptuous.Optional("unit"):
-                voluptuous.All(six.text_type, voluptuous.Length(max=31)),
+                voluptuous.All(str, voluptuous.Length(max=31)),
         })
         definition = schema(definition)
         archive_policy_name = definition.get('archive_policy_name')
@@ -633,7 +626,7 @@ def post(self):
                 unit=body.get('unit'),
                 archive_policy_name=body['archive_policy_name'])
         except indexer.NoSuchArchivePolicy as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         except indexer.NamedMetricAlreadyExists as e:
             abort(400, e)
         set_resp_location_hdr("/metric/" + str(m.id))
@@ -641,13 +634,13 @@ def post(self):
         return m
 
     MetricListSchema = voluptuous.Schema({
-        "user_id": six.text_type,
-        "project_id": six.text_type,
-        "creator": six.text_type,
-        "name": six.text_type,
-        "id": six.text_type,
-        "unit": six.text_type,
-        "archive_policy_name": six.text_type,
+        "user_id": str,
+        "project_id": str,
+        "creator": str,
+        "name": str,
+        "id": str,
+        "unit": str,
+        "archive_policy_name": str,
         "status": voluptuous.Any("active", "delete"),
     }, extra=voluptuous.REMOVE_EXTRA)
 
@@ -677,7 +670,7 @@ def get_all(cls, **kwargs):
         if provided_creator is not None:
             attr_filters.append({"=": {"creator": provided_creator}})
 
-        for k, v in six.iteritems(filtering):
+        for k, v in filtering.items():
             attr_filters.append({"=": {k: v}})
 
         policy_filter = pecan.request.auth_helper.get_metric_policy_filter(
@@ -698,12 +691,11 @@ def get_all(cls, **kwargs):
                 set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts)
             return metrics
         except indexer.InvalidPagination as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 _MetricsSchema = voluptuous.Schema({
-    six.text_type: voluptuous.Any(utils.UUID,
-                                  MetricsController.MetricSchema),
+    str: voluptuous.Any(utils.UUID, MetricsController.MetricSchema),
 })
 
 
@@ -713,7 +705,7 @@ def MetricsSchema(data):
    # available when doing the metric validation with its own MetricSchema,
    # and so we can do things such as applying archive policy rules.
    if isinstance(data, dict):
-        for metric_name, metric_def in six.iteritems(data):
+        for metric_name, metric_def in data.items():
            if isinstance(metric_def, dict):
                metric_def['name'] = metric_name
    return _MetricsSchema(data)
@@ -738,16 +730,16 @@ def _lookup(self, name, *remainder):
         resource = pecan.request.indexer.get_resource(self.resource_type,
                                                       self.resource_id)
         if resource:
-            abort(404, six.text_type(indexer.NoSuchMetric(name)))
+            abort(404, str(indexer.NoSuchMetric(name)))
         else:
-            abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
+            abort(404, str(indexer.NoSuchResource(self.resource_id)))
 
     @pecan.expose('json')
     def post(self):
         resource = pecan.request.indexer.get_resource(
             self.resource_type, self.resource_id)
         if not resource:
-            abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
+            abort(404, str(indexer.NoSuchResource(self.resource_id)))
         enforce("update resource", resource)
         metrics = deserialize_and_validate(MetricsSchema)
         try:
@@ -760,11 +752,11 @@ def post(self):
         except (indexer.NoSuchMetric,
                 indexer.NoSuchArchivePolicy,
                 ValueError) as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         except indexer.NamedMetricAlreadyExists as e:
-            abort(409, six.text_type(e))
+            abort(409, str(e))
         except indexer.NoSuchResource as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
         return r.metrics
 
@@ -773,7 +765,7 @@ def get_all(self):
         resource = pecan.request.indexer.get_resource(
             self.resource_type, self.resource_id)
         if not resource:
-            abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
+            abort(404, str(indexer.NoSuchResource(self.resource_id)))
         enforce("get resource", resource)
         return pecan.request.indexer.list_metrics(
             attribute_filter={"=": {"resource_id": self.resource_id}})
@@ -793,7 +785,7 @@ def get(self, **kwargs):
         resource = pecan.request.indexer.get_resource(
             self.resource_type, self.resource_id)
         if not resource:
-            abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
+            abort(404, str(indexer.NoSuchResource(self.resource_id)))
 
         enforce("get resource", resource)
 
@@ -810,7 +802,7 @@ def get(self, **kwargs):
                 set_resp_link_hdr(marker, kwargs, pagination_opts)
             return resources
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 def etag_precondition_check(obj):
@@ -866,7 +858,7 @@ def get(self):
         try:
             rt = pecan.request.indexer.get_resource_type(self._name)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         enforce("get resource type", rt)
         return rt
 
@@ -878,7 +870,7 @@ def patch(self):
         try:
             rt = pecan.request.indexer.get_resource_type(self._name)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         enforce("update resource type", rt)
 
         # Ensure this is a valid jsonpatch dict
@@ -891,7 +883,7 @@ def patch(self):
         try:
             rt_json_next = jsonpatch.apply_patch(rt_json_current, patch)
         except jsonpatch.JsonPatchException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         del rt_json_next['state']
 
         # Validate that the whole new resource_type is valid
@@ -930,7 +922,7 @@ def patch(self):
                 self._name, add_attributes=add_attrs,
                 del_attributes=del_attrs, update_attributes=update_attrs)
         except indexer.NoSuchResourceType as e:
-                abort(400, six.text_type(e))
+                abort(400, str(e))
 
     def create_update_attrs(self, schema, update_attrs):
         new_attrs = dict(map(lambda entry: (entry[0], entry[1][1]),
@@ -978,13 +970,13 @@ def delete(self):
         try:
             pecan.request.indexer.get_resource_type(self._name)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         enforce("delete resource type", resource_type)
         try:
             pecan.request.indexer.delete_resource_type(self._name)
         except (indexer.NoSuchResourceType,
                 indexer.ResourceTypeInUse) as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 class ResourceTypesController(rest.RestController):
@@ -1008,7 +1000,7 @@ def post(self):
         try:
             rt = pecan.request.indexer.create_resource_type(rt)
         except indexer.ResourceTypeAlreadyExists as e:
-            abort(409, six.text_type(e))
+            abort(409, str(e))
         set_resp_location_hdr("/resource_type/" + rt.name)
         pecan.response.status = 201
         return rt
@@ -1019,15 +1011,15 @@ def get_all(self, **kwargs):
         try:
             return pecan.request.indexer.list_resource_types()
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 def ResourceSchema(schema):
     base_schema = {
         voluptuous.Optional('started_at'): utils.to_datetime,
         voluptuous.Optional('ended_at'): utils.to_datetime,
-        voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type),
-        voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type),
+        voluptuous.Optional('user_id'): voluptuous.Any(None, str),
+        voluptuous.Optional('project_id'): voluptuous.Any(None, str),
         voluptuous.Optional('metrics'): MetricsSchema,
     }
     base_schema.update(schema)
@@ -1043,7 +1035,7 @@ def __init__(self, resource_type, id):
         try:
             self.id = utils.ResourceUUID(id, creator)
         except ValueError:
-            abort(404, six.text_type(indexer.NoSuchResource(id)))
+            abort(404, str(indexer.NoSuchResource(id)))
         self.metric = NamedMetricController(str(self.id), self._resource_type)
         self.history = ResourceHistoryController(str(self.id),
                                                  self._resource_type)
@@ -1057,14 +1049,14 @@ def get(self):
             etag_precondition_check(resource)
             etag_set_headers(resource)
             return resource
-        abort(404, six.text_type(indexer.NoSuchResource(self.id)))
+        abort(404, str(indexer.NoSuchResource(self.id)))
 
     @pecan.expose('json')
     def patch(self):
         resource = pecan.request.indexer.get_resource(
             self._resource_type, self.id, with_metrics=True)
         if not resource:
-            abort(404, six.text_type(indexer.NoSuchResource(self.id)))
+            abort(404, str(indexer.NoSuchResource(self.id)))
         enforce("update resource", resource)
         etag_precondition_check(resource)
 
@@ -1091,9 +1083,9 @@ def patch(self):
         except (indexer.NoSuchMetric,
                 indexer.NoSuchArchivePolicy,
                 ValueError) as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         except indexer.NoSuchResource as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         etag_set_headers(resource)
         return resource
 
@@ -1102,13 +1094,13 @@ def delete(self):
         resource = pecan.request.indexer.get_resource(
             self._resource_type, self.id)
         if not resource:
-            abort(404, six.text_type(indexer.NoSuchResource(self.id)))
+            abort(404, str(indexer.NoSuchResource(self.id)))
         enforce("delete resource", resource)
         etag_precondition_check(resource)
         try:
             pecan.request.indexer.delete_resource(self.id)
         except indexer.NoSuchResource as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
 
 def schema_for(resource_type):
@@ -1128,7 +1120,7 @@ def ResourceID(value, creator):
     :return: A tuple (original_resource_id, resource_id)
     """
-    return (six.text_type(value), ResourceUUID(value, creator))
+    return (str(value), ResourceUUID(value, creator))
 
 
 class ResourcesController(rest.RestController):
@@ -1165,12 +1157,12 @@ def post(self):
         except (ValueError,
                 indexer.NoSuchMetric,
                 indexer.NoSuchArchivePolicy) as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
         except indexer.ResourceAlreadyExists as e:
-            abort(409, six.text_type(e))
+            abort(409, str(e))
         set_resp_location_hdr("/resource/"
                               + self._resource_type + "/"
-                              + six.text_type(resource.id))
+                              + str(resource.id))
         etag_set_headers(resource)
         pecan.response.status = 201
         return resource
@@ -1204,7 +1196,7 @@ def get_all(self, **kwargs):
                 set_resp_link_hdr(marker, kwargs, pagination_opts)
             return [r.jsonify(json_attrs) for r in resources]
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
     @pecan.expose('json')
     def delete(self, **kwargs):
@@ -1234,7 +1226,7 @@ def delete(self, **kwargs):
             delete_num = pecan.request.indexer.delete_resources(
                 self._resource_type, attribute_filter=attr_filter)
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         return {"deleted": delete_num}
 
@@ -1252,7 +1244,7 @@ def _lookup(self, resource_type, *remainder):
         try:
             pecan.request.indexer.get_resource_type(resource_type)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         return ResourcesController(resource_type), remainder
 
 
@@ -1346,10 +1338,10 @@ def ResourceSearchSchema(v):
 # NOTE(sileht): indexer will cast this type to the real attribute
 # type, here we just want to be sure this is not a dict or a list
 ResourceSearchSchemaAttributeValue = voluptuous.Any(
-    six.text_type, float, int, bool, None)
+    str, float, int, bool, None)
 
 
-NotIDKey = voluptuous.All(six.text_type, voluptuous.NotIn(["id"]))
+NotIDKey = voluptuous.All(str, voluptuous.NotIn(["id"]))
 
 
 def _ResourceSearchSchema():
@@ -1447,7 +1439,7 @@ def post(self, **kwargs):
         try:
             return [r.jsonify(json_attrs) for r in self._search(**kwargs)]
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
 
 class SearchResourceController(rest.RestController):
@@ -1456,7 +1448,7 @@ def _lookup(self, resource_type, *remainder):
         try:
             pecan.request.indexer.get_resource_type(resource_type)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         return SearchResourceTypeController(resource_type), remainder
 
@@ -1641,7 +1633,7 @@ def post(self, metric_id, start=None, stop=None, aggregation='mean',
         try:
             predicate = self.MeasureQuery(query)
         except self.MeasureQuery.InvalidQuery as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         if granularity is not None:
             granularity = sorted(
@@ -1676,11 +1668,11 @@ def post(self, metric_id, start=None, stop=None, aggregation='mean',
         return {
             str(metric.id): [
                 (timestamp, aggregation.granularity, value)
-                for aggregation, ts in six.iteritems(aggregations_and_ts)
+                for aggregation, ts in aggregations_and_ts.items()
                 for timestamp, value in ts
                 if predicate(value)
             ]
-            for metric, aggregations_and_ts in six.iteritems(timeseries)
+            for metric, aggregations_and_ts in timeseries.items()
         }
 
 
@@ -1691,9 +1683,9 @@ def BackwardCompatibleMeasuresList(v):
         v = voluptuous.Schema(
             voluptuous.Any(MeasuresListSchema,
                            {voluptuous.Optional("archive_policy_name"):
-                            six.text_type,
+                            str,
                             voluptuous.Optional("unit"):
-                            six.text_type,
+                            str,
                             "measures": MeasuresListSchema}),
             required=True)(v)
         if isinstance(v, dict):
@@ -1708,7 +1700,7 @@ def post(self, create_metrics=False):
             pecan.request)
         MeasuresBatchSchema = voluptuous.Schema(
             {functools.partial(ResourceID, creator=creator):
-             {six.text_type: self.BackwardCompatibleMeasuresList}})
+             {str: self.BackwardCompatibleMeasuresList}})
         body = deserialize_and_validate(MeasuresBatchSchema)
 
         known_metrics = []
@@ -1762,13 +1754,13 @@ def post(self, create_metrics=False):
                             already_exists_names.append(e.metric_name)
                         except indexer.NoSuchResource:
                             unknown_resources.append({
-                                'resource_id': six.text_type(resource_id),
+                                'resource_id': str(resource_id),
                                 'original_resource_id': original_resource_id})
                             break
                         except indexer.IndexerException as e:
                             # This catch NoSuchArchivePolicy, which is unlikely
                             # be still possible
-                            abort(400, six.text_type(e))
+                            abort(400, str(e))
                         else:
                             known_metrics.append(m)
 
@@ -1784,7 +1776,7 @@ def post(self, create_metrics=False):
 
             elif len(names) != len(metrics):
                 unknown_metrics.extend(
-                    ["%s/%s" % (six.text_type(resource_id), m)
+                    ["%s/%s" % (str(resource_id), m)
                      for m in names if m not in known_names])
 
             known_metrics.extend(metrics)
@@ -1828,7 +1820,7 @@ def post(self):
         if len(metrics) != len(body):
             missing_metrics = sorted(set(body) - set(m.id for m in metrics))
             abort(400, "Unknown metrics: %s" % ", ".join(
-                six.moves.map(str, missing_metrics)))
+                map(str, missing_metrics)))
 
         for metric in metrics:
             enforce("post measures", metric)
@@ -1868,7 +1860,7 @@ def post(self, start=None, stop=None, aggregation='mean',
         except indexer.InvalidPagination:
             abort(400, "Invalid groupby attribute")
         except indexer.IndexerException as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         if resources is None:
             return []
@@ -1936,7 +1928,7 @@ def validate_qs(start=None, stop=None, granularity=None,
         except ValueError as e:
             abort(400, {"cause": "Argument value error",
                         "detail": "granularity",
-                        "reason": six.text_type(e)})
+                        "reason": str(e)})
 
     if fill is not None:
         try:
@@ -1964,7 +1956,7 @@ def _lookup(self, object_type, resource_type, key, metric_name,
         try:
             pecan.request.indexer.get_resource_type(resource_type)
         except indexer.NoSuchResourceType as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
         return AggregationResourceController(resource_type,
                                              metric_name), remainder
 
@@ -1995,7 +1987,7 @@ def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
             resample = (resample if calendar.GROUPINGS.get(resample) else
                         utils.to_timespan(resample))
         except ValueError as e:
-            abort(400, six.text_type(e))
+            abort(400, str(e))
 
         if granularity is None:
             granularities = (
@@ -2006,8 +1998,8 @@ def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
             # granularities_in_common
             granularity = [
                 g
-                for g, occurrence in six.iteritems(
-                    collections.Counter(granularities))
+                for g, occurrence in collections.Counter(
+                    granularities).items()
                 if occurrence == len(metrics)
             ]
 
@@ -2023,7 +2015,7 @@ def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
                     agg = metric.archive_policy.get_aggregation(
                         aggregation, g)
                     if agg is None:
-                        abort(404, six.text_type(
+                        abort(404, str(
                             storage.AggregationDoesNotExist(metric,
                                                             aggregation, g)
                         ))
                    aggregations.add(agg)
@@ -2088,7 +2080,7 @@ def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
         except exceptions.UnAggregableTimeseries as e:
             abort(400, e)
         except storage.AggregationDoesNotExist as e:
-            abort(404, six.text_type(e))
+            abort(404, str(e))
 
     MetricIDsSchema = [utils.UUID]
 
@@ -2107,15 +2099,15 @@ def get_metric(self, metric=None, start=None, stop=None,
             self._workaround_pecan_issue_88()
             metric_ids = deserialize_and_validate(self.MetricIDsSchema)
 
-        metric_ids = [six.text_type(m) for m in metric_ids]
+        metric_ids = [str(m) for m in metric_ids]
         # Check RBAC policy
         metrics = pecan.request.indexer.list_metrics(
             attribute_filter={"in": {"id": metric_ids}})
         missing_metric_ids = (set(metric_ids)
-                              - set(six.text_type(m.id) for m in metrics))
+                              - set(str(m.id) for m in metrics))
         if missing_metric_ids:
             # Return one of the missing one in the error
-            abort(404, six.text_type(storage.MetricDoesNotExist(
+            abort(404, str(storage.MetricDoesNotExist(
                 missing_metric_ids.pop())))
         return self.get_cross_metric_measures_from_objs(
             metrics, start, stop, aggregation, reaggregation,
@@ -2167,10 +2159,10 @@ def get(details=True):
                 member.decode() for member in members
             ]
             members_data = {}
-            for member, cap in six.moves.zip(members, caps):
+            for member, cap in zip(members, caps):
                 caps_data = {
-                    six.ensure_str(k): v
-                    for k, v in six.iteritems(cap.get())
+                    str(k): v
+                    for k, v in cap.get().items()
                 }
                 members_data[member.decode()] = caps_data
             report_dict['metricd']['statistics'] = members_data
diff --git a/gnocchi/rest/exceptions.py b/gnocchi/rest/exceptions.py
index 1cc5eb88a..43005ff4b 100644
--- a/gnocchi/rest/exceptions.py
+++ b/gnocchi/rest/exceptions.py
@@ -13,12 +13,11 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-import six
 
 
 class UnableToDecodeBody(Exception):
     def __init__(self, exception, body):
-        self.reason = six.text_type(exception)
+        self.reason = str(exception)
         self.body = body
         super(UnableToDecodeBody, self).__init__(body)
 
diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py
index 3af25a6c7..dd6b723ec 100644
--- a/gnocchi/rest/influxdb.py
+++ b/gnocchi/rest/influxdb.py
@@ -27,7 +27,6 @@
 import pecan
 from pecan import rest
 import pyparsing
-import six
 import tenacity
 try:
     import uwsgi
@@ -211,7 +210,7 @@ def post_write(self, db="influxdb"):
                     ",".join(("%s=%s" % (k, tags[k]))
                              for k in sorted(tags)))
 
-            for field_name, field_value in six.iteritems(fields):
+            for field_name, field_value in fields.items():
                 if isinstance(field_value, str):
                     # We do not support field value that are not numerical
                     continue
@@ -229,8 +228,7 @@ def post_write(self, db="influxdb"):
                     incoming.Measure(timestamp, field_value))
 
         measures_to_batch = {}
-        for resource_name, metrics_and_measures in six.iteritems(
-                resources):
+        for resource_name, metrics_and_measures in resources.items():
             resource_name = resource_name
             resource_id = utils.ResourceUUID(
                 resource_name, creator=creator)
diff --git a/gnocchi/service.py b/gnocchi/service.py
index cdcc9ec06..c3f7dc77c 100644
--- a/gnocchi/service.py
+++ b/gnocchi/service.py
@@ -19,7 +19,7 @@
 import daiquiri
 from oslo_config import cfg
 from oslo_db import options as db_options
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 import gnocchi
 from gnocchi import archive_policy
diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py
index 95bb3cc8a..42b6deeae 100644
--- a/gnocchi/statsd.py
+++ b/gnocchi/statsd.py
@@ -21,7 +21,6 @@
     import trollius as asyncio
 import daiquiri
 from oslo_config import cfg
-import six
 
 from gnocchi import incoming
 from gnocchi import indexer
@@ -95,9 +94,9 @@ def treat_metric(self, metric_name, metric_type, value, sampling):
 
     def flush(self):
         for metric_name, measure in itertools.chain(
-                six.iteritems(self.gauges),
-                six.iteritems(self.counters),
-                six.iteritems(self.times)):
+                self.gauges.items(),
+                self.counters.items(),
+                self.times.items()):
             try:
                 # NOTE(jd) We avoid considering any concurrency here as statsd
                 # is not designed to run in parallel and we do not envision
diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py
index 936710a73..cc3b5fc87 100644
--- a/gnocchi/storage/__init__.py
+++ b/gnocchi/storage/__init__.py
@@ -21,7 +21,6 @@
 import daiquiri
 import numpy
 from oslo_config import cfg
-import six
 
 from gnocchi import carbonara
 from gnocchi import utils
@@ -143,9 +142,9 @@ def _get_splits(self, metrics_aggregations_keys, version=3):
                 lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)),  # noqa
                 ((metric, key, aggregation, version)
                  for metric, aggregations_and_keys
                 in six.iteritems(metrics_aggregations_keys)
-                 in six.iteritems(metrics_aggregations_keys)
+                 in metrics_aggregations_keys.items()
                  for aggregation, keys
-                 in six.iteritems(aggregations_and_keys)
+                 in aggregations_and_keys.items()
                  for key in keys)):
             results[metric][aggregation].append(split)
         return results
@@ -174,7 +173,7 @@ def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
         :param version: The storage format version number.
""" return dict( - six.moves.zip( + zip( metrics, self.MAP_METHOD( utils.return_none_on_failure( @@ -233,7 +232,7 @@ def _store_metric_splits(self, metrics_keys_aggregations_data_offset, self._store_metric_splits_unbatched, ((metric, key, aggregation, data, offset, version) for metric, keys_aggregations_data_offset - in six.iteritems(metrics_keys_aggregations_data_offset) + in metrics_keys_aggregations_data_offset.items() for key, aggregation, data, offset in keys_aggregations_data_offset)) @@ -268,7 +267,7 @@ def _list_split_keys(self, metrics_and_aggregations, version=3): for metric in metrics)) return { metric: results - for metric, results in six.moves.zip(metrics, r) + for metric, results in zip(metrics, r) } @staticmethod @@ -293,8 +292,8 @@ def get_aggregated_measures(self, metrics_and_aggregations, """ metrics_aggs_keys = self._list_split_keys(metrics_and_aggregations) - for metric, aggregations_keys in six.iteritems(metrics_aggs_keys): - for aggregation, keys in six.iteritems(aggregations_keys): + for metric, aggregations_keys in metrics_aggs_keys.items(): + for aggregation, keys in aggregations_keys.items(): start = ( carbonara.SplitKey.from_timestamp_and_sampling( from_timestamp, aggregation.granularity) @@ -316,7 +315,7 @@ def get_aggregated_measures(self, metrics_and_aggregations, metrics_aggs_keys) results = collections.defaultdict(dict) - for metric, aggregations in six.iteritems(metrics_and_aggregations): + for metric, aggregations in metrics_and_aggregations.items(): for aggregation in aggregations: ts = carbonara.AggregatedTimeSerie.from_timeseries( metrics_aggregations_splits[metric][aggregation], @@ -350,9 +349,9 @@ def _get_splits_and_unserialize(self, metrics_aggregations_keys): raw_measures = self._get_splits(metrics_aggregations_keys) results = collections.defaultdict( lambda: collections.defaultdict(list)) - for metric, aggregations_and_raws in six.iteritems(raw_measures): - for aggregation, raws in six.iteritems(aggregations_and_raws): - for key, raw in six.moves.zip( + for metric, aggregations_and_raws in raw_measures.items(): + for aggregation, raws in aggregations_and_raws.items(): + for key, raw in zip( metrics_aggregations_keys[metric][aggregation], raws): try: ts = carbonara.AggregatedTimeSerie.unserialize( @@ -386,10 +385,10 @@ def _update_metric_splits(self, metrics_keys_aggregations_splits): lambda: collections.defaultdict(list)) for metric, (keys_and_aggregations_and_splits, - oldest_mutable_timestamp) in six.iteritems( - metrics_keys_aggregations_splits): - for (key, aggregation), split in six.iteritems( - keys_and_aggregations_and_splits): + oldest_mutable_timestamp) in ( + metrics_keys_aggregations_splits.items()): + for (key, aggregation), split in ( + keys_and_aggregations_and_splits.items()): # NOTE(jd) We write the full split only if the driver works # that way (self.WRITE_FULL) or if the oldest_mutable_timestamp # is out of range. 
@@ -403,11 +402,11 @@ def _update_metric_splits(self, metrics_keys_aggregations_splits): existing_data = self._get_splits_and_unserialize(keys_to_get) for metric, (keys_and_aggregations_and_splits, - oldest_mutable_timestamp) in six.iteritems( - metrics_keys_aggregations_splits): - for aggregation, existing_list in six.iteritems( - existing_data[metric]): - for key, split, existing in six.moves.zip( + oldest_mutable_timestamp) in ( + metrics_keys_aggregations_splits.items()): + for aggregation, existing_list in ( + existing_data[metric].items()): + for key, split, existing in zip( keys_to_get[metric][aggregation], splits_to_rewrite[metric][aggregation], existing_list): @@ -416,8 +415,8 @@ def _update_metric_splits(self, metrics_keys_aggregations_splits): (key, split.aggregation)] = existing keys_aggregations_data_offset = [] - for (key, aggregation), split in six.iteritems( - keys_and_aggregations_and_splits): + for (key, aggregation), split in ( + keys_and_aggregations_and_splits.items()): # Do not store the split if it's empty. if split: offset, data = split.serialize( @@ -461,7 +460,7 @@ def _compute_split_operations(self, metric, aggregations_and_timeseries, aggregations_needing_list_of_keys = set() oldest_values = {} - for aggregation, ts in six.iteritems(aggregations_and_timeseries): + for aggregation, ts in aggregations_and_timeseries.items(): # Don't do anything if the timeseries is empty if not ts: continue @@ -497,7 +496,7 @@ def _compute_split_operations(self, metric, aggregations_and_timeseries, keys_and_split_to_store = {} deleted_keys = set() - for aggregation, ts in six.iteritems(aggregations_and_timeseries): + for aggregation, ts in aggregations_and_timeseries.items(): # Don't do anything if the timeseries is empty if not ts: continue @@ -577,7 +576,7 @@ def _delete_metric_splits(self, metrics_keys_aggregations, version=3): utils.return_none_on_failure(self._delete_metric_splits_unbatched), ((metric, key, aggregation) for metric, keys_and_aggregations - in six.iteritems(metrics_keys_aggregations) + in metrics_keys_aggregations.items() for key, aggregation in keys_and_aggregations)) def add_measures_to_metrics(self, metrics_and_measures): @@ -598,7 +597,7 @@ def add_measures_to_metrics(self, metrics_and_measures): splits_to_delete = {} splits_to_update = {} - for metric, measures in six.iteritems(metrics_and_measures): + for metric, measures in metrics_and_measures.items(): measures = numpy.sort(measures, order='timestamps') agg_methods = list(metric.archive_policy.aggregation_methods) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 91793cd76..9b9f93c44 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -17,7 +17,6 @@ import collections from oslo_config import cfg -import six from gnocchi import carbonara from gnocchi.common import ceph @@ -84,8 +83,8 @@ def _create_metric(self, metric): def _store_metric_splits(self, metrics_keys_aggregations_data_offset, version=3): with rados.WriteOpCtx() as op: - for metric, keys_aggregations_data_offset in six.iteritems( - metrics_keys_aggregations_data_offset): + for metric, keys_aggregations_data_offset in ( + metrics_keys_aggregations_data_offset.items()): for key, agg, data, offset in keys_aggregations_data_offset: name = self._get_object_name( metric, key, agg.method, version) @@ -99,8 +98,8 @@ def _store_metric_splits(self, metrics_keys_aggregations_data_offset, def _delete_metric_splits(self, metrics_keys_aggregations, version=3): with rados.WriteOpCtx() as op: - for metric, 
keys_and_aggregations in six.iteritems( - metrics_keys_aggregations): + for metric, keys_and_aggregations in ( + metrics_keys_aggregations.items()): names = tuple( self._get_object_name( metric, key, aggregation.method, version) @@ -189,7 +188,7 @@ def _list_split_keys_unbatched(self, metric, aggregations, version=3): k_methods = zipped[3] k_granularities = list(map(utils.to_timespan, zipped[4])) - for timestamp, method, granularity in six.moves.zip( + for timestamp, method, granularity in zip( k_timestamps, k_methods, k_granularities): for aggregation in aggregations: if (aggregation.method == method diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 1f63c0054..4a341d81a 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -26,7 +26,6 @@ import daiquiri from oslo_config import cfg -import six from gnocchi import carbonara from gnocchi import storage @@ -205,7 +204,7 @@ def _list_split_keys_unbatched(self, metric, aggregations, version=3): k_timestamps = utils.to_timestamps(zipped[0]) k_granularities = list(map(utils.to_timespan, zipped[1])) grouped_aggregations = list(grouped_aggregations) - for timestamp, granularity in six.moves.zip( + for timestamp, granularity in zip( k_timestamps, k_granularities): for agg in grouped_aggregations: if granularity == agg.granularity: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index d65027af3..4936cf982 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -15,8 +15,6 @@ # under the License. import collections -import six - from gnocchi import carbonara from gnocchi.common import redis from gnocchi import storage @@ -93,7 +91,7 @@ def _get_or_create_unaggregated_timeseries(self, metrics, version=3): # Replace "" by None metric: data or None for metric, (created, data) - in six.moves.zip(metrics, utils.grouper(pipe.execute(), 2)) + in zip(metrics, utils.grouper(pipe.execute(), 2)) } return ts @@ -125,7 +123,7 @@ def _list_split_keys(self, metrics_and_aggregations, version=3): start + 1:start + 1 + number_of_aggregations ] start += 1 + number_of_aggregations # 1 for metric_exists_p - for aggregation, k in six.moves.zip( + for aggregation, k in zip( aggregations, keys_for_aggregations): if not k: keys[metric][aggregation] = set() @@ -137,14 +135,14 @@ def _list_split_keys(self, metrics_and_aggregations, version=3): carbonara.SplitKey(timestamp, sampling=granularity) for timestamp, granularity - in six.moves.zip(timestamps, granularities) + in zip(timestamps, granularities) } return keys def _delete_metric_splits(self, metrics_keys_aggregations, version=3): pipe = self._client.pipeline(transaction=False) - for metric, keys_and_aggregations in six.iteritems( - metrics_keys_aggregations): + for metric, keys_and_aggregations in ( + metrics_keys_aggregations.items()): metric_key = self._metric_key(metric) for key, aggregation in keys_and_aggregations: pipe.hdel(metric_key, self._aggregated_field_for_split( @@ -154,8 +152,8 @@ def _delete_metric_splits(self, metrics_keys_aggregations, version=3): def _store_metric_splits(self, metrics_keys_aggregations_data_offset, version=3): pipe = self._client.pipeline(transaction=False) - for metric, keys_aggs_data_offset in six.iteritems( - metrics_keys_aggregations_data_offset): + for metric, keys_aggs_data_offset in ( + metrics_keys_aggregations_data_offset.items()): metric_key = self._metric_key(metric) for key, aggregation, data, offset in keys_aggs_data_offset: key = self._aggregated_field_for_split( @@ -170,9 +168,9 @@ def _get_splits(self, 
metrics_aggregations_keys, version=3): # Use a list of metric and aggregations with a constant sorting metrics_aggregations = [ (metric, aggregation) - for metric, aggregation_and_keys in six.iteritems( - metrics_aggregations_keys) - for aggregation, keys in six.iteritems(aggregation_and_keys) + for metric, aggregation_and_keys in ( + metrics_aggregations_keys.items()) + for aggregation, keys in aggregation_and_keys.items() # Do not send any fetch request if keys is empty if keys ] @@ -188,7 +186,7 @@ def _get_splits(self, metrics_aggregations_keys, version=3): results = collections.defaultdict( lambda: collections.defaultdict(list)) - for (metric, aggregation), result in six.moves.zip( + for (metric, aggregation), result in zip( metrics_aggregations, pipe.execute()): results[metric][aggregation] = result diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 509113950..4ade3855a 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -17,7 +17,6 @@ import collections from oslo_config import cfg -import six from gnocchi import carbonara from gnocchi.common import swift @@ -184,7 +183,7 @@ def _list_split_keys_unbatched(self, metric, aggregations, version=3): k_methods = zipped[1] k_granularities = list(map(utils.to_timespan, zipped[2])) - for timestamp, method, granularity in six.moves.zip( + for timestamp, method, granularity in zip( k_timestamps, k_methods, k_granularities): for aggregation in aggregations: if (aggregation.method == method diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 217b21f8b..c059dd4a6 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -24,8 +24,7 @@ import daiquiri import fixtures import numpy -import six -from six.moves.urllib.parse import unquote +from urllib.parse import unquote try: from swiftclient import exceptions as swexc except ImportError: @@ -59,7 +58,7 @@ def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except exceptions.NotImplementedError as e: - raise testcase.TestSkipped(six.text_type(e)) + raise testcase.TestSkipped(str(e)) return skip_if_not_implemented @@ -85,7 +84,7 @@ def get_container(self, container, delimiter=None, files = [] directories = set() - for k, v in six.iteritems(container.copy()): + for k, v in container.copy().items(): if path and not k.startswith(path): continue @@ -221,8 +220,7 @@ def setUp(self): self.useFixture(CaptureOutput()) -@six.add_metaclass(SkipNotImplementedMeta) -class TestCase(BaseTestCase): +class TestCase(BaseTestCase, metaclass=SkipNotImplementedMeta): REDIS_DB_INDEX = 0 REDIS_DB_LOCK = threading.Lock() @@ -303,7 +301,7 @@ def setUp(self): self.index.upgrade() self.archive_policies = self.ARCHIVE_POLICIES.copy() - for name, ap in six.iteritems(self.archive_policies): + for name, ap in self.archive_policies.items(): # Create basic archive policies try: self.index.create_archive_policy(ap) diff --git a/gnocchi/tests/functional_live/test_gabbi_live.py b/gnocchi/tests/functional_live/test_gabbi_live.py index aeed07a88..b2c8ca80a 100644 --- a/gnocchi/tests/functional_live/test_gabbi_live.py +++ b/gnocchi/tests/functional_live/test_gabbi_live.py @@ -18,7 +18,7 @@ import os from gabbi import driver -import six.moves.urllib.parse as urlparse +import urllib.parse as urlparse TESTS_DIR = 'gabbits' diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index e3de27131..b61d815ce 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ 
b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -17,7 +17,6 @@ import fixtures import oslo_db.exception from oslo_db.sqlalchemy import test_migrations -import six import sqlalchemy.schema import sqlalchemy_utils from unittest import mock @@ -32,10 +31,9 @@ class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta): pass -class ModelsMigrationsSync( - six.with_metaclass(ABCSkip, - base.TestCase, - test_migrations.ModelsMigrationsSync)): +class ModelsMigrationsSync(base.TestCase, + test_migrations.ModelsMigrationsSync, + metaclass=ABCSkip): def setUp(self): super(ModelsMigrationsSync, self).setUp() diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 680140fd7..f50b56fcb 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -21,7 +21,6 @@ import fixtures import iso8601 import numpy -import six from gnocchi import carbonara from gnocchi.tests import base @@ -632,7 +631,7 @@ def test_no_truncation(self): ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'} tsb = carbonara.BoundTimeSerie() - for i in six.moves.range(1, 11): + for i in range(1, 11): tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, i, i), float(i))], dtype=carbonara.TIMESERIES_ARRAY_DTYPE), @@ -853,8 +852,8 @@ def test_split(self): points = 100000 ts = carbonara.TimeSerie.from_data( timestamps=list(map(datetime.datetime.utcfromtimestamp, - six.moves.range(points))), - values=list(six.moves.range(points))) + range(points))), + values=list(range(points))) agg = self._resample(ts, sampling, 'mean') grouped_points = list(agg.split()) @@ -876,8 +875,8 @@ def test_from_timeseries(self): points = 100000 ts = carbonara.TimeSerie.from_data( timestamps=list(map(datetime.datetime.utcfromtimestamp, - six.moves.range(points))), - values=list(six.moves.range(points))) + range(points))), + values=list(range(points))) agg = self._resample(ts, sampling, 'mean') split = [t[1] for t in list(agg.split())] diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 392732980..3aa249b7c 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -26,7 +26,6 @@ import fixtures import iso8601 from keystonemiddleware import fixture as ksm_fixture -import six import testscenarios from testtools import testcase from unittest import mock @@ -295,7 +294,7 @@ def test_list_archive_policy(self): # Transform list to set for ap in aps: ap['aggregation_methods'] = set(ap['aggregation_methods']) - for name, ap in six.iteritems(self.archive_policies): + for name, ap in self.archive_policies.items(): apj = ap.jsonify() apj['definition'] = [ archive_policy.ArchivePolicyItem(**d).jsonify() @@ -1144,7 +1143,7 @@ def test_patch_resource_attributes(self, utcnow): result = json.loads(response.text) presult = json.loads(presponse.text) self.assertEqual(result, presult) - for k, v in six.iteritems(self.patchable_attributes): + for k, v in self.patchable_attributes.items(): self.assertEqual(v, result[k]) self.assertIsNone(result['revision_end']) self.assertEqual(result['revision_start'], @@ -1162,7 +1161,7 @@ def test_patch_resource_attributes(self, utcnow): self.assertEqual(result, history[1]) h = history[0] - for k, v in six.iteritems(self.attributes): + for k, v in self.attributes.items(): self.assertEqual(v, h[k]) self.assertEqual(h['revision_end'], "2014-01-02T06:48:00+00:00") @@ -1262,7 +1261,7 @@ def test_delete_resource_with_metrics(self): "/v1/metric", params={'archive_policy_name': "high"}) metric_id = json.loads(metric.text)['id'] - metric_name = 
six.text_type(uuid.uuid4()) + metric_name = str(uuid.uuid4()) self.attributes['metrics'] = {metric_name: metric_id} self.app.get("/v1/metric/" + metric_id, status=200) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 0d9238cec..38f164880 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -17,7 +17,6 @@ import uuid import numpy -import six.moves from unittest import mock from gnocchi import archive_policy @@ -232,7 +231,7 @@ def test_measures_reporting_format(self): def test_measures_reporting(self): m2, __ = self._create_metric('medium') - for i in six.moves.range(60): + for i in range(60): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69), ]) @@ -253,7 +252,7 @@ def test_measures_reporting(self): def test_get_aggregated_measures(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) - for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + for i in range(0, 60) for j in range(0, 60)]) self.trigger_processing([self.metric]) aggregations = self.metric.archive_policy.aggregations @@ -271,11 +270,11 @@ def test_get_aggregated_measures(self): def test_get_aggregated_measures_multiple(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) - for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + for i in range(0, 60) for j in range(0, 60)]) m2, __ = self._create_metric('medium') self.incoming.add_measures(m2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) - for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + for i in range(0, 60) for j in range(0, 60)]) self.trigger_processing([self.metric, m2]) aggregations = self.metric.archive_policy.aggregations @@ -300,7 +299,7 @@ def test_add_measures_big(self): m, __ = self._create_metric('high') self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) - for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + for i in range(0, 60) for j in range(0, 60)]) self.trigger_processing([m]) aggregations = ( @@ -316,7 +315,7 @@ def test_add_measures_update_subset_split(self): m, m_sql = self._create_metric('medium') measures = [ incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) - for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] + for i in range(2) for j in range(0, 60, 2)] self.incoming.add_measures(m.id, measures) self.trigger_processing([m]) @@ -331,7 +330,7 @@ def test_add_measures_update_subset_split(self): for call in c.mock_calls: # policy is 60 points and split is 48. 
should only update 2nd half args = call[1] - for metric, key_agg_data_offset in six.iteritems(args[0]): + for metric, key_agg_data_offset in args[0].items(): if metric.id == m_sql.id: for key, aggregation, data, offset in key_agg_data_offset: if (key.sampling == numpy.timedelta64(1, 'm') @@ -343,7 +342,7 @@ def test_add_measures_update_subset(self): m, m_sql = self._create_metric('medium') measures = [ incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) - for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] + for i in range(2) for j in range(0, 60, 2)] self.incoming.add_measures(m.id, measures) self.trigger_processing([m]) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index ea7c1bf0d..ee2f52820 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -17,6 +17,7 @@ import datetime import distutils.util import errno +import functools import itertools import multiprocessing import os @@ -27,7 +28,6 @@ import iso8601 import numpy import pytimeparse -import six from stevedore import driver import tenacity import time @@ -53,11 +53,6 @@ def ResourceUUID(value, creator): if len(value) <= 255: if creator is None: creator = "\x00" - # value/creator must be str (unicode) in Python 3 and str (bytes) - # in Python 2. It's not logical, I know. - if six.PY2: - value = value.encode('utf-8') - creator = creator.encode('utf-8') return uuid.uuid5(RESOURCE_ID_NAMESPACE, value + "\x00" + creator) raise ValueError( @@ -320,7 +315,7 @@ def return_none_on_failure(f): except AttributeError: fname = f.__name__ - @six.wraps(f) + @functools.wraps(f) def _return_none_on_failure(*args, **kwargs): try: return f(*args, **kwargs) @@ -354,7 +349,7 @@ def retry_on_exception_and_log(msg): def is_resource_revision_needed(resource, request_attributes): - for k, v in six.iteritems(request_attributes): + for k, v in request_attributes.items(): if not hasattr(resource, k): continue diff --git a/setup.cfg b/setup.cfg index 8b40c0e51..54733c6a4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,7 +36,6 @@ install_requires = futures; python_version < '3' jsonpatch cotyledon>=1.5.0 - six stevedore ujson voluptuous>=0.8.10 diff --git a/setup.py b/setup.py index e95fd038c..f53f5885b 100755 --- a/setup.py +++ b/setup.py @@ -47,7 +47,6 @@ """ -# Can't use six in this file it's too early in the bootstrap process PY3 = sys.version_info >= (3,) diff --git a/tools/measures_injector.py b/tools/measures_injector.py index 6a188256f..f79d5801f 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -18,7 +18,6 @@ from concurrent import futures from oslo_config import cfg -import six from gnocchi import incoming from gnocchi import indexer @@ -45,15 +44,15 @@ def todo(): creator=conf.creator, archive_policy_name=conf.archive_policy_name) - for _ in six.moves.range(conf.batch_of_measures): + for _ in range(conf.batch_of_measures): measures = [ incoming.Measure( utils.dt_in_unix_ns(utils.utcnow()), random.random()) - for __ in six.moves.range(conf.measures_per_batch)] + for __ in range(conf.measures_per_batch)] instore.add_measures(metric, measures) with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor: - for m in six.moves.range(conf.metrics): + for m in range(conf.metrics): executor.submit(todo)
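
The hunks above all apply the same handful of mechanical six-to-Python 3 substitutions: six.iteritems(d) becomes d.items(), six.moves.range and six.moves.zip become the built-in range and zip, six.text_type becomes str, six.wraps becomes functools.wraps, and six.add_metaclass / six.with_metaclass become the metaclass keyword in the class statement. The sketch below is illustrative only; it uses invented names (SkipMeta, Example, add) rather than anything from the Gnocchi tree, and simply shows each Python 3 spelling in runnable form.

    # Hypothetical demo of the substitution patterns used throughout this patch.
    import functools


    class SkipMeta(type):
        """Stand-in metaclass (the role SkipNotImplementedMeta plays in the tests)."""


    # @six.add_metaclass(SkipMeta) and six.with_metaclass(SkipMeta, dict)
    # are both written with the metaclass keyword on Python 3:
    class Example(dict, metaclass=SkipMeta):
        pass


    def add(a, b):
        """Add two numbers."""
        return a + b


    @functools.wraps(add)  # previously six.wraps(add)
    def logged_add(*args, **kwargs):
        return add(*args, **kwargs)


    data = {"a": 1, "b": 2}
    pairs = list(data.items())          # previously six.iteritems(data)
    indexes = list(range(3))            # previously six.moves.range(3)
    zipped = list(zip(pairs, indexes))  # previously six.moves.zip(pairs, indexes)
    text = str(ValueError("boom"))      # previously six.text_type(exc)
    print(Example(), logged_add(1, 2), pairs, indexes, zipped, text)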