diff --git a/tests/testlib/crawler.py b/tests/testlib/crawler.py
index 9d3f14a1d28..2c8f1e2c687 100644
--- a/tests/testlib/crawler.py
+++ b/tests/testlib/crawler.py
@@ -211,13 +211,11 @@ async def crawl(self, max_tasks: int, max_url_batch_size: int = 100) -> None:
         """Crawl through URLs using simultaneously using tasks / coroutines.
 
         A group of tasks / coroutines is added every `rate_create_crawl_task` seconds.
-        Crawling stop when URLs are not found for last `memory_size_urls_exist` iterations
-        (`rate_create_crawl_task` x `memory_size_urls_exist` seconds).
+        Crawling stops when URLs are not found for the last `memory_size_urls_exist` iterations.
 
         debug-mode: Crawling URLs stop when at least `--max-urls` number of URLs have been crawled.
         """
-        rate_create_crawl_task = 0.1  # seconds
-        memory_size_urls_exist = 100  # iterations
+        memory_size_urls_exist = 25  # iterations
         search_limited_urls: bool = self._max_urls > 0
         # special-case
         if search_limited_urls and self._max_urls < max_tasks:
@@ -256,9 +254,6 @@ async def crawl(self, max_tasks: int, max_url_batch_size: int = 100) -> None:
                 logger.info("No more URLs to crawl. Stopping ...")
         # ----
 
-        # ensure rate of URL collection in `self._todos` > rate of new tasks added
-        time.sleep(rate_create_crawl_task)
-
    async def setup_checkmk_context(
        self, browser: playwright.async_api.Browser
    ) -> playwright.async_api.BrowserContext:
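Two things are worth noting about this change. First, the removed `time.sleep(rate_create_crawl_task)` was a blocking call inside an `async def` method: `time.sleep` stalls the whole event loop, not just the current coroutine, so every other crawl task was paused for the duration. Second, the stop-condition window shrinks from 100 to 25 empty iterations. Below is a minimal sketch of the stopping pattern the docstring describes; `crawl_until_exhausted`, `fetch_new_urls`, and the deque-based window are hypothetical stand-ins for illustration, not the crawler's actual internals:

```python
import asyncio
from collections import deque
from typing import Awaitable, Callable


async def crawl_until_exhausted(
    fetch_new_urls: Callable[[], Awaitable[list[str]]],  # hypothetical collection step
    memory_size_urls_exist: int = 25,
) -> None:
    """Stop once the last `memory_size_urls_exist` iterations found no URLs."""
    # Sliding window remembering whether each recent iteration found URLs.
    found_recently: deque[bool] = deque(maxlen=memory_size_urls_exist)
    while True:
        batch = await fetch_new_urls()
        found_recently.append(bool(batch))
        # Stop only after a *full* window of consecutive empty iterations.
        if len(found_recently) == memory_size_urls_exist and not any(found_recently):
            break
        # `asyncio.sleep` yields to other tasks; `time.sleep` here would
        # block the entire event loop, which is what the patch removes.
        await asyncio.sleep(0.01)


async def main() -> None:
    # Toy fetcher: two non-empty batches, then nothing but empty iterations.
    pending = [["/a"], [], ["/b"]]

    async def fake_fetch() -> list[str]:
        return pending.pop(0) if pending else []

    await crawl_until_exhausted(fake_fetch, memory_size_urls_exist=25)


asyncio.run(main())
```

Under these assumptions, a transient empty batch (as in the toy fetcher above) does not end the crawl; only a full run of empty iterations does, which is why shrinking the window from 100 to 25 mainly trades shutdown latency against tolerance for slow URL discovery.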