Rotate URLs in tests script and increase timeout

The tests sometimes fail with a timeout when fetching the Wikipedia URL.
Try to reduce the chance of this happening by increasing the timeout and
rotating between several URLs.
Author: lwthiker
Date:   2022-07-05 08:20:47 +03:00
Parent: 9d9e393d0e
Commit: d417eb5c5c
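
In short, the change adds a pytest fixture that returns TEST_URLS in a random
order, and the tests now fetch only the first couple of entries of that
shuffled list, so a single slow or unreachable host no longer fails every run.
A minimal standalone sketch of the idea (the shuffled_urls helper below is
illustrative only, not part of the test suite):

import random

TEST_URLS = [
    "https://www.wikimedia.org",
    "https://www.wikipedia.org",
    "https://www.archive.org",
    "https://www.mozilla.org/en-US",
    "https://www.apache.org",
    "https://www.kernel.org",
]

def shuffled_urls(urls):
    # random.sample with k=len(urls) returns a shuffled copy
    # and leaves the original list untouched.
    return random.sample(urls, k=len(urls))

# Fetch only the first two URLs of the shuffled list on each run,
# mirroring the test_urls[0:2] slice used in the test below.
urls_for_this_run = shuffled_urls(TEST_URLS)[0:2]
print(urls_for_this_run)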


@@ -2,6 +2,7 @@ import os
 import io
 import re
 import sys
+import random
 import asyncio
 import logging
 import pathlib
@@ -131,7 +132,12 @@ class TestImpersonation:
 
     TEST_URLS = [
         "https://www.wikimedia.org",
-        "https://www.wikipedia.org"
+        "https://www.wikipedia.org",
+        "https://www.archive.org",
+        "https://www.mozilla.org/en-US",
+        "https://www.apache.org",
+        "https://www.kernel.org",
+        "https://git-scm.com"
     ]
 
     # List of binaries and their expected signatures
@@ -251,6 +257,11 @@ class TestImpersonation:
         )
     ]
 
+    @pytest.fixture
+    def test_urls(self):
+        # Shuffle TEST_URLS randomly
+        return random.sample(self.TEST_URLS, k=len(self.TEST_URLS))
+
     @pytest.fixture
     def tcpdump(self, pytestconfig):
         """Initialize a sniffer to capture curl's traffic."""
@@ -368,7 +379,7 @@ class TestImpersonation:
         args.extend(urls)
 
         curl = subprocess.Popen(args, env=env)
-        return curl.wait(timeout=10)
+        return curl.wait(timeout=15)
 
     def _extract_client_hello(self, pcap: bytes) -> List[bytes]:
         """Find and return the Client Hello TLS record from a pcap.
@@ -448,7 +459,8 @@ class TestImpersonation:
                              env_vars,
                              ld_preload,
                              browser_signatures,
-                             expected_signature):
+                             expected_signature,
+                             test_urls):
         """
         Check that curl's TLS signature is identical to that of a
         real browser.
@@ -471,10 +483,11 @@ class TestImpersonation:
                 pytestconfig.getoption("install_dir"), "lib", ld_preload
             ))
 
+        test_urls = test_urls[0:2]
         ret = self._run_curl(curl_binary,
                              env_vars=env_vars,
                              extra_args=None,
-                             urls=self.TEST_URLS)
+                             urls=test_urls)
         assert ret == 0
 
         try:
@@ -494,7 +507,7 @@ class TestImpersonation:
         client_hellos = self._extract_client_hello(pcap)
 
         # A client hello message for each URL
-        assert len(client_hellos) == len(self.TEST_URLS)
+        assert len(client_hellos) == len(test_urls)
 
         logging.debug(f"Found {len(client_hellos)} Client Hello messages, "
                       f"comparing to signature '{expected_signature}'")
@@ -572,7 +585,8 @@ class TestImpersonation:
                             curl_binary,
                             env_vars,
                             ld_preload,
-                            expected_signature):
+                            expected_signature,
+                            test_urls):
         """
         Ensure the output of curl-impersonate is correct, i.e. that compressed
         responses are decoded correctly.
@@ -595,9 +609,14 @@ class TestImpersonation:
         ret = self._run_curl(curl_binary,
                              env_vars=env_vars,
                              extra_args=None,
-                             urls=[self.TEST_URLS[0]],
+                             urls=[test_urls[0]],
                              output=output)
         assert ret == 0
 
         with open(output, "r") as f:
-            assert "<!DOCTYPE html>" in f.read()
+            body = f.read()
+            assert (
+                "<!DOCTYPE html>" in body or
+                "<html>" in body or
+                "<!doctype html>" in body
+            )