about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMax Nanis2026-03-09 06:27:14 -0400
committerMax Nanis2026-03-09 06:27:14 -0400
commit2f92429a68ec7209059d2d18fe67964c8dd57cf2 (patch)
tree641358598982860f6452d27a74cae809b0d2d430
parentce291a165fab6b6dc9f053c7b75a699d0fdf389f (diff)
downloadgeneralresearch-2f92429a68ec7209059d2d18fe67964c8dd57cf2.tar.gz
generalresearch-2f92429a68ec7209059d2d18fe67964c8dd57cf2.zip
Simple typing changes, Ruff import formatter. p3
-rw-r--r--test_utils/grliq/conftest.py8
-rw-r--r--test_utils/incite/collections/conftest.py49
-rw-r--r--test_utils/incite/conftest.py6
-rw-r--r--test_utils/incite/mergers/conftest.py32
-rw-r--r--test_utils/managers/cashout_methods.py9
-rw-r--r--test_utils/managers/conftest.py33
-rw-r--r--test_utils/managers/contest/conftest.py15
-rw-r--r--test_utils/managers/ledger/conftest.py45
-rw-r--r--test_utils/models/conftest.py252
-rw-r--r--tests/grliq/managers/test_forensic_data.py50
-rw-r--r--tests/grliq/managers/test_forensic_results.py15
-rw-r--r--tests/grliq/models/test_forensic_data.py34
-rw-r--r--tests/grliq/test_utils.py5
-rw-r--r--tests/incite/collections/test_df_collection_base.py16
-rw-r--r--tests/incite/collections/test_df_collection_item_base.py18
-rw-r--r--tests/incite/collections/test_df_collection_item_thl_web.py241
-rw-r--r--tests/incite/collections/test_df_collection_thl_marketplaces.py9
-rw-r--r--tests/incite/collections/test_df_collection_thl_web.py62
-rw-r--r--tests/incite/mergers/foundations/test_enriched_session.py14
19 files changed, 508 insertions, 405 deletions
diff --git a/test_utils/grliq/conftest.py b/test_utils/grliq/conftest.py
index 1818794..edd777e 100644
--- a/test_utils/grliq/conftest.py
+++ b/test_utils/grliq/conftest.py
@@ -1,23 +1,23 @@
from datetime import datetime, timedelta, timezone
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from uuid import uuid4
import pytest
-
if TYPE_CHECKING:
+ from generalresearch.config import GRLBaseSettings
from generalresearch.grliq.models.forensic_data import GrlIqData
@pytest.fixture(scope="function")
-def mnt_grliq_archive_dir(settings):
+def mnt_grliq_archive_dir(settings: "GRLBaseSettings") -> Optional[str]:
return settings.mnt_grliq_archive_dir
@pytest.fixture(scope="function")
def grliq_data() -> "GrlIqData":
- from generalresearch.grliq.models.forensic_data import GrlIqData
from generalresearch.grliq.managers import DUMMY_GRLIQ_DATA
+ from generalresearch.grliq.models.forensic_data import GrlIqData
g: GrlIqData = DUMMY_GRLIQ_DATA[1]["data"]
diff --git a/test_utils/incite/collections/conftest.py b/test_utils/incite/collections/conftest.py
index cf4f0ed..74e4081 100644
--- a/test_utils/incite/collections/conftest.py
+++ b/test_utils/incite/collections/conftest.py
@@ -1,26 +1,25 @@
-from datetime import timedelta, datetime
-from typing import TYPE_CHECKING, Optional, Callable
-from generalresearch.pg_helper import PostgresConfig
+from datetime import datetime, timedelta
+from typing import TYPE_CHECKING, Callable, Optional
import pytest
-from test_utils.incite.conftest import mnt_filepath
+from generalresearch.pg_helper import PostgresConfig
from test_utils.conftest import clear_directory
if TYPE_CHECKING:
+ from generalresearch.incite.base import DFCollectionType, GRLDatasets
from generalresearch.incite.collections import DFCollection
- from generalresearch.incite.base import GRLDatasets, DFCollectionType
- from generalresearch.incite.collections.thl_web import LedgerDFCollection
from generalresearch.incite.collections.thl_web import (
- WallDFCollection,
+ AuditLogDFCollection,
+ LedgerDFCollection,
SessionDFCollection,
TaskAdjustmentDFCollection,
UserDFCollection,
- AuditLogDFCollection,
+ WallDFCollection,
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def user_collection(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -29,8 +28,8 @@ def user_collection(
thl_web_rr: PostgresConfig,
) -> "UserDFCollection":
from generalresearch.incite.collections.thl_web import (
- UserDFCollection,
DFCollectionType,
+ UserDFCollection,
)
return UserDFCollection(
@@ -42,7 +41,7 @@ def user_collection(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def wall_collection(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -51,8 +50,8 @@ def wall_collection(
thl_web_rr: PostgresConfig,
) -> "WallDFCollection":
from generalresearch.incite.collections.thl_web import (
- WallDFCollection,
DFCollectionType,
+ WallDFCollection,
)
return WallDFCollection(
@@ -64,7 +63,7 @@ def wall_collection(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def session_collection(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -73,8 +72,8 @@ def session_collection(
thl_web_rr: PostgresConfig,
) -> "SessionDFCollection":
from generalresearch.incite.collections.thl_web import (
- SessionDFCollection,
DFCollectionType,
+ SessionDFCollection,
)
return SessionDFCollection(
@@ -102,17 +101,17 @@ def session_collection(
# )
-@pytest.fixture(scope="function")
+@pytest.fixture
def task_adj_collection(
mnt_filepath: "GRLDatasets",
offset: str,
duration: Optional[timedelta],
start: datetime,
- thl_web_rr,
+ thl_web_rr: PostgresConfig,
) -> "TaskAdjustmentDFCollection":
from generalresearch.incite.collections.thl_web import (
- TaskAdjustmentDFCollection,
DFCollectionType,
+ TaskAdjustmentDFCollection,
)
return TaskAdjustmentDFCollection(
@@ -126,13 +125,13 @@ def task_adj_collection(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def auditlog_collection(
mnt_filepath: "GRLDatasets",
offset: str,
duration: timedelta,
start: datetime,
- thl_web_rr,
+ thl_web_rr: PostgresConfig,
) -> "AuditLogDFCollection":
from generalresearch.incite.collections.thl_web import (
AuditLogDFCollection,
@@ -148,7 +147,7 @@ def auditlog_collection(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def ledger_collection(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -157,8 +156,8 @@ def ledger_collection(
thl_web_rr: PostgresConfig,
) -> "LedgerDFCollection":
from generalresearch.incite.collections.thl_web import (
- LedgerDFCollection,
DFCollectionType,
+ LedgerDFCollection,
)
return LedgerDFCollection(
@@ -170,8 +169,10 @@ def ledger_collection(
)
-@pytest.fixture(scope="function")
-def rm_ledger_collection(ledger_collection: "LedgerDFCollection") -> Callable:
+@pytest.fixture
+def rm_ledger_collection(
+ ledger_collection: "LedgerDFCollection",
+) -> Callable[..., None]:
def _inner():
clear_directory(ledger_collection.archive_path)
@@ -184,7 +185,7 @@ def rm_ledger_collection(ledger_collection: "LedgerDFCollection") -> Callable:
# --------------------------
-@pytest.fixture(scope="function")
+@pytest.fixture
def df_collection(
mnt_filepath: "GRLDatasets",
df_collection_data_type: "DFCollectionType",
diff --git a/test_utils/incite/conftest.py b/test_utils/incite/conftest.py
index 9632ee5..058093e 100644
--- a/test_utils/incite/conftest.py
+++ b/test_utils/incite/conftest.py
@@ -14,17 +14,15 @@ from faker import Faker
# from test_utils.models.conftest import session_factory
if TYPE_CHECKING:
- from generalresearch.config import GRLSettings
+ from generalresearch.config import GRLBaseSettings
from generalresearch.incite.base import GRLDatasets
from generalresearch.incite.collections import (
- DFCollection,
DFCollectionItem,
DFCollectionType,
)
from generalresearch.incite.mergers import MergeType
from generalresearch.models.admin.request import (
ReportRequest,
- ReportType,
)
from generalresearch.models.thl.product import Product
from generalresearch.models.thl.session import Session
@@ -35,7 +33,7 @@ fake = Faker()
@pytest.fixture
-def mnt_gr_api_dir(request: SubRequest, settings: "GRLSettings") -> Path:
+def mnt_gr_api_dir(request: SubRequest, settings: "GRLBaseSettings") -> Path:
p = Path(settings.mnt_gr_api_dir)
p.mkdir(parents=True, exist_ok=True)
diff --git a/test_utils/incite/mergers/conftest.py b/test_utils/incite/mergers/conftest.py
index 010c44f..d094b84 100644
--- a/test_utils/incite/mergers/conftest.py
+++ b/test_utils/incite/mergers/conftest.py
@@ -1,10 +1,9 @@
from datetime import datetime, timedelta
-from typing import TYPE_CHECKING, Callable, Optional
+from typing import TYPE_CHECKING, Callable
import pytest
from test_utils.conftest import clear_directory
-from test_utils.incite.conftest import mnt_filepath
if TYPE_CHECKING:
from generalresearch.incite.base import GRLDatasets
@@ -21,7 +20,10 @@ if TYPE_CHECKING:
from generalresearch.incite.mergers.foundations.user_id_product import (
UserIdProductMerge,
)
- from generalresearch.incite.mergers.pop_ledger import PopLedgerMerge
+ from generalresearch.incite.mergers.pop_ledger import (
+ PopLedgerMerge,
+ PopLedgerMergeItem,
+ )
from generalresearch.incite.mergers.ym_survey_wall import (
YMSurveyWallMerge,
YMSurveyWallMergeCollectionItem,
@@ -37,8 +39,8 @@ if TYPE_CHECKING:
# --------------------------
-@pytest.fixture(scope="function")
-def rm_pop_ledger_merge(pop_ledger_merge) -> Callable[..., None]:
+@pytest.fixture
+def rm_pop_ledger_merge(pop_ledger_merge: "PopLedgerMerge") -> Callable[..., None]:
def _inner():
clear_directory(pop_ledger_merge.archive_path)
@@ -46,7 +48,7 @@ def rm_pop_ledger_merge(pop_ledger_merge) -> Callable[..., None]:
return _inner
-@pytest.fixture(scope="function")
+@pytest.fixture
def pop_ledger_merge(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -65,7 +67,7 @@ def pop_ledger_merge(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def pop_ledger_merge_item(
start: datetime,
pop_ledger_merge: "PopLedgerMerge",
@@ -79,7 +81,7 @@ def pop_ledger_merge_item(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def ym_survey_wall_merge(
mnt_filepath: "GRLDatasets",
start: datetime,
@@ -94,7 +96,7 @@ def ym_survey_wall_merge(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def ym_survey_wall_merge_item(
start: datetime, ym_survey_wall_merge: "YMSurveyWallMerge"
) -> "YMSurveyWallMergeCollectionItem":
@@ -108,7 +110,7 @@ def ym_survey_wall_merge_item(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def ym_wall_summary_merge(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -144,7 +146,7 @@ def ym_wall_summary_merge_item(
# --------------------------
-@pytest.fixture(scope="function")
+@pytest.fixture
def enriched_session_merge(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -164,7 +166,7 @@ def enriched_session_merge(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def enriched_task_adjust_merge(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -186,7 +188,7 @@ def enriched_task_adjust_merge(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def enriched_wall_merge(
mnt_filepath: "GRLDatasets",
offset: str,
@@ -206,7 +208,7 @@ def enriched_wall_merge(
)
-@pytest.fixture(scope="function")
+@pytest.fixture
def user_id_product_merge(
mnt_filepath: "GRLDatasets",
duration: timedelta,
@@ -231,7 +233,7 @@ def user_id_product_merge(
# --------------------------
-@pytest.fixture(scope="function")
+@pytest.fixture
def merge_collection(
mnt_filepath: "GRLDatasets",
merge_type: "MergeType",
diff --git a/test_utils/managers/cashout_methods.py b/test_utils/managers/cashout_methods.py
index 96d33cf..b201e8c 100644
--- a/test_utils/managers/cashout_methods.py
+++ b/test_utils/managers/cashout_methods.py
@@ -1,12 +1,11 @@
-from generalresearch.models.thl.wallet import PayoutType, Currency
+import random
+from uuid import uuid4
+
+from generalresearch.models.thl.wallet import Currency, PayoutType
from generalresearch.models.thl.wallet.cashout_method import (
CashoutMethod,
TangoCashoutMethodData,
- AmtCashoutMethodData,
)
-import random
-
-from uuid import uuid4
def random_ext_id(base: str = "U02"):
diff --git a/test_utils/managers/conftest.py b/test_utils/managers/conftest.py
index 94dabae..10ee8ea 100644
--- a/test_utils/managers/conftest.py
+++ b/test_utils/managers/conftest.py
@@ -35,6 +35,7 @@ if TYPE_CHECKING:
MembershipManager,
TeamManager,
)
+ from generalresearch.managers.thl.buyer import BuyerManager
from generalresearch.managers.thl.category import CategoryManager
from generalresearch.managers.thl.contest_manager import ContestManager
from generalresearch.managers.thl.ipinfo import (
@@ -73,8 +74,6 @@ if TYPE_CHECKING:
)
from generalresearch.managers.thl.userhealth import (
AuditLogManager,
- IPGeonameManager,
- IPInformationManager,
IPRecordManager,
UserIpHistoryManager,
)
@@ -82,6 +81,7 @@ if TYPE_CHECKING:
WallCacheManager,
WallManager,
)
+ from generalresearch.models.thl.user import User
# === THL ===
@@ -98,7 +98,7 @@ def ltxm(
)
return LedgerTransactionManager(
- sql_helper=thl_web_rw,
+ pg_config=thl_web_rw,
permissions=[Permission.CREATE, Permission.READ],
testing=True,
redis_config=thl_redis_config,
@@ -335,7 +335,7 @@ def category_manager(thl_web_rw: PostgresConfig) -> "CategoryManager":
@pytest.fixture(scope="session")
-def buyer_manager(thl_web_rw: PostgresConfig):
+def buyer_manager(thl_web_rw: PostgresConfig) -> "BuyerManager":
# assert "/unittest-" in thl_web_rw.dsn.path
from generalresearch.managers.thl.buyer import BuyerManager
@@ -359,7 +359,7 @@ def surveystat_manager(thl_web_rw: PostgresConfig):
@pytest.fixture(scope="session")
-def surveypenalty_manager(thl_redis_config):
+def surveypenalty_manager(thl_redis_config: RedisConfig):
from generalresearch.managers.thl.survey_penalty import SurveyPenaltyManager
return SurveyPenaltyManager(redis_config=thl_redis_config)
@@ -404,7 +404,7 @@ def uqa_manager(thl_web_rw: PostgresConfig, thl_redis_config: RedisConfig):
@pytest.fixture(scope="function")
-def uqa_manager_clear_cache(uqa_manager, user):
+def uqa_manager_clear_cache(uqa_manager, user: "User"):
# On successive py-test/jenkins runs, the cache may contain
# the previous run's info (keyed under the same user_id)
uqa_manager.clear_cache(user)
@@ -484,23 +484,34 @@ def geoipinfo_manager(
@pytest.fixture(scope="session")
-def maxmind_basic_manager() -> "MaxmindBasicManager":
+def maxmind_basic_manager(settings: "GRLBaseSettings") -> "MaxmindBasicManager":
from generalresearch.managers.thl.maxmind.basic import (
MaxmindBasicManager,
)
- return MaxmindBasicManager(data_dir="/tmp/")
+ return MaxmindBasicManager(
+ data_dir="/tmp/",
+ maxmind_account_id=settings.maxmind_account_id,
+ maxmind_license_key=settings.maxmind_license_key,
+ )
@pytest.fixture(scope="session")
def maxmind_manager(
- thl_web_rw: PostgresConfig, thl_redis_config: RedisConfig
+ settings: "GRLBaseSettings",
+ thl_web_rw: PostgresConfig,
+ thl_redis_config: RedisConfig,
) -> "MaxmindManager":
assert "/unittest-" in thl_web_rw.dsn.path
from generalresearch.managers.thl.maxmind import MaxmindManager
- return MaxmindManager(pg_config=thl_web_rw, redis_config=thl_redis_config)
+ return MaxmindManager(
+ pg_config=thl_web_rw,
+ redis_config=thl_redis_config,
+ maxmind_account_id=settings.maxmind_account_id,
+ maxmind_license_key=settings.maxmind_license_key,
+ )
@pytest.fixture(scope="session")
@@ -700,7 +711,7 @@ def grliq_crr(grliq_db: PostgresConfig) -> "GrlIqCategoryResultsReader":
@pytest.fixture(scope="session")
-def delete_buyers_surveys(thl_web_rw: PostgresConfig, buyer_manager):
+def delete_buyers_surveys(thl_web_rw: PostgresConfig, buyer_manager: "BuyerManager"):
# assert "/unittest-" in thl_web_rw.dsn.path
thl_web_rw.execute_write(
"""
diff --git a/test_utils/managers/contest/conftest.py b/test_utils/managers/contest/conftest.py
index 0a25185..fb0b44b 100644
--- a/test_utils/managers/contest/conftest.py
+++ b/test_utils/managers/contest/conftest.py
@@ -9,26 +9,15 @@ from generalresearch.currency import USDCent
if TYPE_CHECKING:
from generalresearch.managers.thl.contest_manager import ContestManager
- from generalresearch.models.thl.contest import (
- ContestEndCondition,
- ContestPrize,
- )
+ from generalresearch.managers.thl.ledger_manager.thl_ledger import ThlLedgerManager
from generalresearch.models.thl.contest.contest import Contest
- from generalresearch.models.thl.contest.definitions import (
- ContestPrizeKind,
- ContestType,
- )
- from generalresearch.models.thl.contest.io import contest_create_to_contest
from generalresearch.models.thl.contest.leaderboard import (
LeaderboardContestCreate,
)
from generalresearch.models.thl.contest.milestone import (
- ContestEntryTrigger,
MilestoneContestCreate,
- MilestoneContestEndCondition,
)
from generalresearch.models.thl.contest.raffle import (
- ContestEntryType,
RaffleContestCreate,
)
from generalresearch.models.thl.product import Product
@@ -273,7 +262,7 @@ def user_with_money(
request,
user_factory: Callable[..., "User"],
product_user_wallet_yes: "Product",
- thl_lm,
+ thl_lm: "ThlLedgerManager",
) -> "User":
from generalresearch.models.thl.user import User
diff --git a/test_utils/managers/ledger/conftest.py b/test_utils/managers/ledger/conftest.py
index aafdaf7..c66ee4e 100644
--- a/test_utils/managers/ledger/conftest.py
+++ b/test_utils/managers/ledger/conftest.py
@@ -10,14 +10,11 @@ from generalresearch.currency import USDCent
from generalresearch.managers.base import PostgresManager
from test_utils.models.conftest import (
payout_config,
- product,
product_amt_true,
- product_factory,
product_user_wallet_no,
product_user_wallet_yes,
session,
session_factory,
- user,
user_factory,
wall,
wall_factory,
@@ -36,23 +33,24 @@ _ = (
)
if TYPE_CHECKING:
- from datetime import datetime, timedelta
from generalresearch.currency import LedgerCurrency
from generalresearch.managers.thl.ledger_manager.ledger import LedgerManager
from generalresearch.managers.thl.ledger_manager.thl_ledger import (
ThlLedgerManager,
)
+ from generalresearch.managers.thl.payout import (
+ BrokerageProductPayoutEventManager,
+ BusinessPayoutEventManager,
+ )
+ from generalresearch.managers.thl.session import SessionManager
+ from generalresearch.managers.thl.wall import WallManager
from generalresearch.models.thl.ledger import (
- AccountType,
- Direction,
LedgerAccount,
- LedgerEntry,
LedgerTransaction,
)
from generalresearch.models.thl.payout import (
BrokerageProductPayoutEvent,
- UserPayoutEvent,
)
from generalresearch.models.thl.product import Product
from generalresearch.models.thl.session import Session
@@ -185,10 +183,10 @@ def usd_cent(request) -> USDCent:
@pytest.fixture
def bp_payout_event(
product: "Product",
- usd_cent,
- business_payout_event_manager,
+ usd_cent: "USDCent",
+ business_payout_event_manager: "BusinessPayoutEventManager",
thl_lm: "ThlLedgerManager",
-) -> "UserPayoutEvent":
+) -> "BrokerageProductPayoutEvent":
return business_payout_event_manager.create_bp_payout_event(
thl_ledger_manager=thl_lm,
@@ -227,6 +225,7 @@ def bp_payout_event_factory(
@pytest.fixture
def currency(lm: "LedgerManager") -> "LedgerCurrency":
# return request.param if hasattr(request, "currency") else LedgerCurrency.TEST
+ assert lm.currency, "LedgerManager must have a currency specified for these tests"
return lm.currency
@@ -242,11 +241,11 @@ def tx_metadata(request) -> Optional[Dict[str, str]]:
@pytest.fixture
def ledger_tx(
request,
- ledger_account_credit,
- ledger_account_debit,
- tag,
+ ledger_account_credit: "LedgerAccount",
+ ledger_account_debit: "LedgerAccount",
+ tag: str,
currency: "LedgerCurrency",
- tx_metadata,
+ tx_metadata: Optional[Dict[str, str]],
lm: "LedgerManager",
) -> "LedgerTransaction":
from generalresearch.models.thl.ledger import Direction, LedgerEntry
@@ -455,7 +454,7 @@ def account_expense_tango(
@pytest.fixture
def user_account_user_wallet(
- lm: "LedgerManager", user, currency: "LedgerCurrency"
+ lm: "LedgerManager", user: "User", currency: "LedgerCurrency"
) -> "LedgerAccount":
from generalresearch.models.thl.ledger import (
AccountType,
@@ -580,8 +579,8 @@ def session_with_tx_factory(
user_factory: Callable[..., "User"],
product: "Product",
session_factory: Callable[..., "Session"],
- session_manager,
- wall_manager,
+ session_manager: "SessionManager",
+ wall_manager: "WallManager",
utc_hour_ago: datetime,
thl_lm: "ThlLedgerManager",
) -> Callable[..., "Session"]:
@@ -616,7 +615,7 @@ def session_with_tx_factory(
)
status, status_code_1 = s.determine_session_status()
- thl_net, commission_amount, bp_pay, user_pay = s.determine_payments()
+ _, _, bp_pay, user_pay = s.determine_payments()
session_manager.finish_with_status(
session=s,
finished=last_wall.finished,
@@ -642,7 +641,9 @@ def session_with_tx_factory(
@pytest.fixture
def adj_to_fail_with_tx_factory(
- session_manager, wall_manager, thl_lm: "ThlLedgerManager"
+ session_manager: "SessionManager",
+ wall_manager: "WallManager",
+ thl_lm: "ThlLedgerManager",
) -> Callable[..., None]:
from datetime import datetime, timedelta
@@ -698,7 +699,9 @@ def adj_to_fail_with_tx_factory(
@pytest.fixture
def adj_to_complete_with_tx_factory(
- session_manager, wall_manager, thl_lm: "ThlLedgerManager"
+ session_manager: "SessionManager",
+ wall_manager: "WallManager",
+ thl_lm: "ThlLedgerManager",
) -> Callable[..., None]:
from datetime import timedelta
diff --git a/test_utils/models/conftest.py b/test_utils/models/conftest.py
index 81dc11e..468bea2 100644
--- a/test_utils/models/conftest.py
+++ b/test_utils/models/conftest.py
@@ -5,6 +5,7 @@ from random import randint
from typing import TYPE_CHECKING, Callable, Dict, List, Optional
from uuid import uuid4
+import pytest
from pydantic import AwareDatetime, PositiveInt
from generalresearch.models import Source
@@ -12,23 +13,33 @@ from generalresearch.models.thl.definitions import (
WALL_ALLOWED_STATUS_STATUS_CODE,
Status,
)
-from generalresearch.models.thl.survey.model import Buyer, Survey
from generalresearch.pg_helper import PostgresConfig
-from test_utils.managers.conftest import (
- business_address_manager,
- business_manager,
- gr_um,
- membership_manager,
- product_manager,
- session_manager,
- team_manager,
- user_manager,
- wall_manager,
-)
+from generalresearch.redis_helper import RedisConfig
if TYPE_CHECKING:
from generalresearch.currency import USDCent
+ from generalresearch.managers.gr.authentication import GRTokenManager, GRUserManager
+ from generalresearch.managers.gr.business import (
+ BusinessAddressManager,
+ BusinessBankAccountManager,
+ BusinessManager,
+ )
+ from generalresearch.managers.gr.team import MembershipManager, TeamManager
+ from generalresearch.managers.thl.buyer import BuyerManager
+ from generalresearch.managers.thl.ipinfo import (
+ IPGeonameManager,
+ IPInformationManager,
+ )
+ from generalresearch.managers.thl.ledger_manager.thl_ledger import ThlLedgerManager
+ from generalresearch.managers.thl.payout import (
+ BusinessPayoutEventManager,
+ )
+ from generalresearch.managers.thl.product import ProductManager
from generalresearch.managers.thl.session import SessionManager
+ from generalresearch.managers.thl.survey import SurveyManager
+ from generalresearch.managers.thl.user_manager.user_manager import UserManager
+ from generalresearch.managers.thl.userhealth import AuditLogManager, IPRecordManager
+ from generalresearch.managers.thl.wall import WallManager
from generalresearch.models.gr.authentication import GRToken, GRUser
from generalresearch.models.gr.business import (
Business,
@@ -37,24 +48,29 @@ if TYPE_CHECKING:
)
from generalresearch.models.gr.team import Membership, Team
from generalresearch.models.thl.ipinfo import IPGeoname, IPInformation
- from generalresearch.models.thl.payout import UserPayoutEvent
+ from generalresearch.models.thl.payout import (
+ BrokerageProductPayoutEvent,
+ )
from generalresearch.models.thl.product import (
PayoutConfig,
- PayoutTransformation,
- PayoutTransformationPercentArgs,
Product,
)
from generalresearch.models.thl.session import Session, Wall
+ from generalresearch.models.thl.survey.model import Buyer, Survey
from generalresearch.models.thl.user import User
from generalresearch.models.thl.user_iphistory import IPRecord
from generalresearch.models.thl.userhealth import AuditLog, AuditLogLevel
-
# === THL ===
-@pytest.fixture(scope="function")
-def user(request, product_manager, user_manager, thl_web_rr: PostgresConfig) -> "User":
+@pytest.fixture
+def user(
+ request,
+ product_manager: "ProductManager",
+ user_manager: "UserManager",
+ thl_web_rr: PostgresConfig,
+) -> "User":
product = getattr(request, "product", None)
if product is None:
@@ -68,7 +84,7 @@ def user(request, product_manager, user_manager, thl_web_rr: PostgresConfig) ->
@pytest.fixture
def user_with_wallet(
- request, user_factory, product_user_wallet_yes: "Product"
+ request, user_factory: Callable[..., "User"], product_user_wallet_yes: "Product"
) -> "User":
# A user on a product with user wallet enabled, but they have no money
return user_factory(product=product_user_wallet_yes)
@@ -83,9 +99,11 @@ def user_with_wallet_amt(
@pytest.fixture(scope="function")
-def user_factory(user_manager, thl_web_rr: PostgresConfig) -> Callable[..., "User"]:
+def user_factory(
+ user_manager: "UserManager", thl_web_rr: PostgresConfig
+) -> Callable[..., "User"]:
- def _inner(product: "Product", created: Optional[datetime] = None):
+ def _inner(product: "Product", created: Optional[datetime] = None) -> "User":
u = user_manager.create_dummy(product=product, created=created)
u.prefetch_product(pg_config=thl_web_rr)
@@ -94,12 +112,12 @@ def user_factory(user_manager, thl_web_rr: PostgresConfig) -> Callable[..., "Use
return _inner
-@pytest.fixture(scope="function")
-def wall_factory(wall_manager) -> Callable[..., "Wall"]:
+@pytest.fixture
+def wall_factory(wall_manager: "WallManager") -> Callable[..., "Wall"]:
def _inner(
session: "Session", wall_status: "Status", req_cpi: Optional[Decimal] = None
- ):
+ ) -> "Wall":
assert session.started <= datetime.now(
tz=timezone.utc
@@ -135,7 +153,9 @@ def wall_factory(wall_manager) -> Callable[..., "Wall"]:
@pytest.fixture
-def wall(session, user, wall_manager) -> Optional["Wall"]:
+def wall(
+ session: "Session", user: "User", wall_manager: "WallManager"
+) -> Optional["Wall"]:
from generalresearch.models.thl.task_status import StatusCode1
wall = wall_manager.create_dummy(session_id=session.id, user_id=user.user_id)
@@ -150,9 +170,9 @@ def wall(session, user, wall_manager) -> Optional["Wall"]:
@pytest.fixture
def session_factory(
- wall_factory,
+ wall_factory: Callable[..., "Wall"],
session_manager: "SessionManager",
- wall_manager,
+ wall_manager: "WallManager",
utc_hour_ago: datetime,
) -> Callable[..., "Session"]:
from generalresearch.models.thl.session import Source
@@ -245,7 +265,7 @@ def finished_session_factory(
started=started,
)
status, status_code_1 = s.determine_session_status()
- thl_net, commission_amount, bp_pay, user_pay = s.determine_payments()
+ _, _, bp_pay, user_pay = s.determine_payments()
session_manager.finish_with_status(
s,
finished=s.wall_events[-1].finished,
@@ -259,8 +279,10 @@ def finished_session_factory(
return _inner
-@pytest.fixture(scope="function")
-def session(user, session_manager: "SessionManager", wall_manager) -> "Session":
+@pytest.fixture
+def session(
+ user: "User", session_manager: "SessionManager", wall_manager: "WallManager"
+) -> "Session":
from generalresearch.models.thl.session import Session, Wall
session: Session = session_manager.create_dummy(user=user, country_iso="us")
@@ -275,13 +297,11 @@ def session(user, session_manager: "SessionManager", wall_manager) -> "Session":
@pytest.fixture
-def product(request, product_manager) -> "Product":
- from generalresearch.managers.thl.product import ProductManager
+def product(request, product_manager: "ProductManager") -> "Product":
team = getattr(request, "team", None)
business = getattr(request, "business", None)
- product_manager: ProductManager
return product_manager.create_dummy(
team_id=team.uuid if team else None,
business_id=business.uuid if business else None,
@@ -289,19 +309,20 @@ def product(request, product_manager) -> "Product":
@pytest.fixture
-def product_factory(product_manager) -> Callable:
- def _create_product(
+def product_factory(product_manager: "ProductManager") -> Callable[..., "Product"]:
+
+ def _inner(
team: Optional["Team"] = None,
business: Optional["Business"] = None,
commission_pct: Decimal = Decimal("0.05"),
- ):
+ ) -> "Product":
return product_manager.create_dummy(
team_id=team.uuid if team else None,
business_id=business.uuid if business else None,
commission_pct=commission_pct,
)
- return _create_product
+ return _inner
@pytest.fixture
@@ -326,29 +347,29 @@ def payout_config(request) -> "PayoutConfig":
@pytest.fixture
-def product_user_wallet_yes(payout_config, product_manager) -> "Product":
- from generalresearch.managers.thl.product import ProductManager
+def product_user_wallet_yes(
+ payout_config: "PayoutConfig", product_manager: "ProductManager"
+) -> "Product":
from generalresearch.models.thl.product import UserWalletConfig
- product_manager: ProductManager
return product_manager.create_dummy(
payout_config=payout_config, user_wallet_config=UserWalletConfig(enabled=True)
)
@pytest.fixture
-def product_user_wallet_no(product_manager) -> "Product":
- from generalresearch.managers.thl.product import ProductManager
+def product_user_wallet_no(product_manager: "ProductManager") -> "Product":
from generalresearch.models.thl.product import UserWalletConfig
- product_manager: ProductManager
return product_manager.create_dummy(
user_wallet_config=UserWalletConfig(enabled=False)
)
@pytest.fixture
-def product_amt_true(product_manager, payout_config) -> "Product":
+def product_amt_true(
+ product_manager: "ProductManager", payout_config: "PayoutConfig"
+) -> "Product":
from generalresearch.models.thl.product import UserWalletConfig
return product_manager.create_dummy(
@@ -359,16 +380,19 @@ def product_amt_true(product_manager, payout_config) -> "Product":
@pytest.fixture
def bp_payout_factory(
- thl_lm, product_manager, business_payout_event_manager
-) -> Callable:
- def _create_bp_payout(
+ thl_lm: "ThlLedgerManager",
+ product_manager: "ProductManager",
+ business_payout_event_manager: "BusinessPayoutEventManager",
+) -> Callable[..., "BrokerageProductPayoutEvent"]:
+
+ def _inner(
product: Optional["Product"] = None,
amount: Optional["USDCent"] = None,
ext_ref_id: Optional[str] = None,
created: Optional[AwareDatetime] = None,
skip_wallet_balance_check: bool = False,
skip_one_per_day_check: bool = False,
- ) -> "UserPayoutEvent":
+ ) -> "BrokerageProductPayoutEvent":
from generalresearch.currency import USDCent
product = product or product_manager.create_dummy()
@@ -384,56 +408,50 @@ def bp_payout_factory(
skip_one_per_day_check=skip_one_per_day_check,
)
- return _create_bp_payout
+ return _inner
# === GR ===
@pytest.fixture
-def business(request, business_manager) -> "Business":
- from generalresearch.managers.gr.business import BusinessManager
-
- business_manager: BusinessManager
+def business(request, business_manager: "BusinessManager") -> "Business":
return business_manager.create_dummy()
@pytest.fixture
-def business_address(request, business, business_address_manager) -> "BusinessAddress":
- from generalresearch.managers.gr.business import BusinessAddressManager
-
- business_address_manager: BusinessAddressManager
+def business_address(
+ request, business: "Business", business_address_manager: "BusinessAddressManager"
+) -> "BusinessAddress":
return business_address_manager.create_dummy(business_id=business.id)
@pytest.fixture
def business_bank_account(
- request, business, business_bank_account_manager
+ request,
+ business: "Business",
+ business_bank_account_manager: "BusinessBankAccountManager",
) -> "BusinessBankAccount":
- from generalresearch.managers.gr.business import BusinessBankAccountManager
-
- business_bank_account_manager: BusinessBankAccountManager
return business_bank_account_manager.create_dummy(business_id=business.id)
@pytest.fixture
-def team(request, team_manager) -> "Team":
- from generalresearch.managers.gr.team import TeamManager
-
- team_manager: TeamManager
+def team(request, team_manager: "TeamManager") -> "Team":
return team_manager.create_dummy()
@pytest.fixture
-def gr_user(gr_um) -> "GRUser":
- from generalresearch.managers.gr.authentication import GRUserManager
-
- gr_um: GRUserManager
+def gr_user(gr_um: "GRUserManager") -> "GRUser":
return gr_um.create_dummy()
@pytest.fixture
-def gr_user_cache(gr_user, gr_db, thl_web_rr, gr_redis_config):
+def gr_user_cache(
+ gr_user: "GRUser",
+ gr_db: PostgresConfig,
+ thl_web_rr: PostgresConfig,
+ gr_redis_config: RedisConfig,
+) -> "GRUser":
gr_user.set_cache(
pg_config=gr_db, thl_web_rr=thl_web_rr, redis_config=gr_redis_config
)
@@ -441,7 +459,8 @@ def gr_user_cache(gr_user, gr_db, thl_web_rr, gr_redis_config):
@pytest.fixture
-def gr_user_factory(gr_um) -> Callable[..., "GRUser"]:
+def gr_user_factory(gr_um: "GRUserManager") -> Callable[..., "GRUser"]:
+
def _inner():
return gr_um.create_dummy()
@@ -449,11 +468,15 @@ def gr_user_factory(gr_um) -> Callable[..., "GRUser"]:
@pytest.fixture()
-def gr_user_token(gr_user, gr_tm, gr_db) -> "GRToken":
+def gr_user_token(
+ gr_user: "GRUser", gr_tm: "GRTokenManager", gr_db: PostgresConfig
+) -> "GRToken":
gr_tm.create(user_id=gr_user.id)
gr_user.prefetch_token(pg_config=gr_db)
- return gr_user.token
+ res = gr_user.token
+ assert res is not None, "GRToken should exist after creation and prefetching"
+ return res
@pytest.fixture()
@@ -462,7 +485,9 @@ def gr_user_token_header(gr_user_token: "GRToken") -> Dict[str, str]:
@pytest.fixture(scope="function")
-def membership(request, team, gr_user, team_manager) -> "Membership":
+def membership(
+ request, team: "Team", gr_user: "GRUser", team_manager: "TeamManager"
+) -> "Membership":
assert team.id, "Team must be saved"
assert gr_user.id, "GRUser must be saved"
return team_manager.add_user(team=team, gr_user=gr_user)
@@ -470,11 +495,12 @@ def membership(request, team, gr_user, team_manager) -> "Membership":
@pytest.fixture(scope="function")
def membership_factory(
- team: "Team", gr_user: "GRUser", membership_manager, team_manager, gr_um
+ team: "Team",
+ gr_user: "GRUser",
+ membership_manager: "MembershipManager",
+ team_manager: "TeamManager",
+ gr_um: "GRUserManager",
) -> Callable[..., "Membership"]:
- from generalresearch.managers.gr.team import MembershipManager
-
- membership_manager: MembershipManager
def _inner(**kwargs) -> "Membership":
_team = kwargs.get("team", team_manager.create_dummy())
@@ -486,18 +512,15 @@ def membership_factory(
@pytest.fixture
-def audit_log(audit_log_manager, user: "User") -> "AuditLog":
- from generalresearch.managers.thl.userhealth import AuditLogManager
+def audit_log(audit_log_manager: "AuditLogManager", user: "User") -> "AuditLog":
- audit_log_manager: AuditLogManager
return audit_log_manager.create_dummy(user_id=user.user_id)
@pytest.fixture
-def audit_log_factory(audit_log_manager) -> Callable[..., "AuditLog"]:
- from generalresearch.managers.thl.userhealth import AuditLogManager
-
- audit_log_manager: AuditLogManager
+def audit_log_factory(
+ audit_log_manager: "AuditLogManager",
+) -> Callable[..., "AuditLog"]:
def _inner(
user_id: PositiveInt,
@@ -518,30 +541,25 @@ def audit_log_factory(audit_log_manager) -> Callable[..., "AuditLog"]:
@pytest.fixture
-def ip_geoname(ip_geoname_manager) -> "IPGeoname":
- from generalresearch.managers.thl.ipinfo import IPGeonameManager
-
- ip_geoname_manager: IPGeonameManager
+def ip_geoname(ip_geoname_manager: "IPGeonameManager") -> "IPGeoname":
return ip_geoname_manager.create_dummy()
@pytest.fixture
-def ip_information(ip_information_manager, ip_geoname) -> "IPInformation":
- from generalresearch.managers.thl.ipinfo import IPInformationManager
-
- ip_information_manager: IPInformationManager
+def ip_information(
+ ip_information_manager: "IPInformationManager", ip_geoname: "IPGeoname"
+) -> "IPInformation":
return ip_information_manager.create_dummy(
geoname_id=ip_geoname.geoname_id, country_iso=ip_geoname.country_iso
)
@pytest.fixture
-def ip_information_factory(ip_information_manager) -> Callable:
- from generalresearch.managers.thl.ipinfo import IPInformationManager
+def ip_information_factory(
+ ip_information_manager: "IPInformationManager",
+) -> Callable[..., "IPInformation"]:
- ip_information_manager: IPInformationManager
-
- def _create_ip_info(ip: str, geoname: "IPGeoname", **kwargs):
+ def _inner(ip: str, geoname: "IPGeoname", **kwargs) -> "IPInformation":
return ip_information_manager.create_dummy(
ip=ip,
geoname_id=geoname.geoname_id,
@@ -549,32 +567,30 @@ def ip_information_factory(ip_information_manager) -> Callable:
**kwargs,
)
- return _create_ip_info
+ return _inner
@pytest.fixture
-def ip_record(ip_record_manager, ip_geoname, user: "User") -> "IPRecord":
- from generalresearch.managers.thl.userhealth import IPRecordManager
-
- ip_record_manager: IPRecordManager
+def ip_record(
+ ip_record_manager: "IPRecordManager", ip_geoname: "IPGeoname", user: "User"
+) -> "IPRecord":
return ip_record_manager.create_dummy(user_id=user.user_id)
@pytest.fixture
-def ip_record_factory(ip_record_manager, user: "User") -> Callable:
- from generalresearch.managers.thl.userhealth import IPRecordManager
-
- ip_record_manager: IPRecordManager
+def ip_record_factory(
+ ip_record_manager: "IPRecordManager", user: "User"
+) -> Callable[..., "IPRecord"]:
- def _create_ip_record(user_id: PositiveInt, ip: Optional[str] = None):
+ def _inner(user_id: PositiveInt, ip: Optional[str] = None) -> "IPRecord":
return ip_record_manager.create_dummy(user_id=user_id, ip=ip)
- return _create_ip_record
+ return _inner
@pytest.fixture(scope="session")
-def buyer(buyer_manager) -> Buyer:
+def buyer(buyer_manager: "BuyerManager") -> Buyer:
buyer_code = uuid4().hex
buyer_manager.bulk_get_or_create(source=Source.TESTING, codes=[buyer_code])
b = Buyer(
@@ -585,27 +601,29 @@ def buyer(buyer_manager) -> Buyer:
@pytest.fixture(scope="session")
-def buyer_factory(buyer_manager) -> Callable:
+def buyer_factory(buyer_manager: "BuyerManager") -> Callable[..., Buyer]:
- def inner():
+ def _inner() -> Buyer:
return buyer_manager.bulk_get_or_create(
source=Source.TESTING, codes=[uuid4().hex]
)[0]
- return inner
+ return _inner
@pytest.fixture(scope="session")
-def survey(survey_manager, buyer) -> Survey:
+def survey(survey_manager: "SurveyManager", buyer: "Buyer") -> Survey:
s = Survey(source=Source.TESTING, survey_id=uuid4().hex, buyer_code=buyer.code)
survey_manager.create_bulk([s])
return s
@pytest.fixture(scope="session")
-def survey_factory(survey_manager, buyer_factory) -> Callable:
+def survey_factory(
+ survey_manager: "SurveyManager", buyer_factory: Callable[..., "Buyer"]
+) -> Callable[..., "Survey"]:
- def inner(buyer: Optional[Buyer] = None) -> Survey:
+ def _inner(buyer: Optional[Buyer] = None) -> "Survey":
buyer = buyer or buyer_factory()
s = Survey(
source=Source.TESTING,
@@ -616,4 +634,4 @@ def survey_factory(survey_manager, buyer_factory) -> Callable:
survey_manager.create_bulk([s])
return s
- return inner
+ return _inner
diff --git a/tests/grliq/managers/test_forensic_data.py b/tests/grliq/managers/test_forensic_data.py
index ed4da80..ac2792a 100644
--- a/tests/grliq/managers/test_forensic_data.py
+++ b/tests/grliq/managers/test_forensic_data.py
@@ -1,14 +1,21 @@
from datetime import timedelta
+from typing import TYPE_CHECKING
from uuid import uuid4
import pytest
-from generalresearch.grliq.models.events import TimingData, MouseEvent
-from generalresearch.grliq.models.forensic_data import GrlIqData
-from generalresearch.grliq.models.forensic_result import (
- GrlIqCheckerResults,
- GrlIqForensicCategoryResult,
-)
+if TYPE_CHECKING:
+ from generalresearch.grliq.managers.forensic_data import (
+ GrlIqDataManager,
+ GrlIqEventManager,
+ )
+ from generalresearch.grliq.models.events import MouseEvent, TimingData
+ from generalresearch.grliq.models.forensic_data import GrlIqData
+ from generalresearch.grliq.models.forensic_result import (
+ GrlIqCheckerResults,
+ GrlIqForensicCategoryResult,
+ )
+ from generalresearch.models.thl.product import Product
try:
from psycopg.errors import UniqueViolation
@@ -18,18 +25,16 @@ except ImportError:
class TestGrlIqDataManager:
- def test_create_dummy(self, grliq_dm):
- from generalresearch.grliq.managers.forensic_data import GrlIqDataManager
+ def test_create_dummy(self, grliq_dm: "GrlIqDataManager"):
from generalresearch.grliq.models.forensic_data import GrlIqData
- grliq_dm: GrlIqDataManager
gd1: GrlIqData = grliq_dm.create_dummy(is_attempt_allowed=True)
assert isinstance(gd1, GrlIqData)
assert isinstance(gd1.results, GrlIqCheckerResults)
assert isinstance(gd1.category_result, GrlIqForensicCategoryResult)
- def test_create(self, grliq_data, grliq_dm):
+ def test_create(self, grliq_data: "GrlIqData", grliq_dm: "GrlIqDataManager"):
grliq_dm.create(grliq_data)
assert grliq_data.id is not None
@@ -45,20 +50,16 @@ class TestGrlIqDataManager:
pass
@pytest.mark.skip(reason="todo")
- def test_update_fingerprint(self):
- pass
-
- @pytest.mark.skip(reason="todo")
def test_update_data(self):
pass
- def test_get_id(self, grliq_data, grliq_dm):
+ def test_get_id(self, grliq_data: "GrlIqData", grliq_dm: "GrlIqDataManager"):
grliq_dm.create(grliq_data)
res = grliq_dm.get_data(forensic_id=grliq_data.id)
assert res == grliq_data
- def test_get_uuid(self, grliq_data, grliq_dm):
+ def test_get_uuid(self, grliq_data: "GrlIqData", grliq_dm: "GrlIqDataManager"):
grliq_dm.create(grliq_data)
res = grliq_dm.get_data(forensic_uuid=grliq_data.uuid)
@@ -72,7 +73,7 @@ class TestGrlIqDataManager:
def test_get_unique_user_count_by_fingerprint(self):
pass
- def test_filter_data(self, grliq_data, grliq_dm):
+ def test_filter_data(self, grliq_data: "GrlIqData", grliq_dm: "GrlIqDataManager"):
grliq_dm.create(grliq_data)
res = grliq_dm.filter_data(uuids=[grliq_data.uuid])[0]
assert res == grliq_data
@@ -99,7 +100,7 @@ class TestGrlIqDataManager:
def test_make_filter_str(self):
pass
- def test_filter_count(self, grliq_dm, product):
+ def test_filter_count(self, grliq_dm: "GrlIqDataManager", product: "Product"):
res = grliq_dm.filter_count(product_id=product.uuid)
assert isinstance(res, int)
@@ -115,7 +116,7 @@ class TestGrlIqDataManager:
class TestForensicDataGetAndFilter:
- def test_events(self, grliq_dm, grliq_em):
+ def test_events(self, grliq_dm: "GrlIqDataManager"):
"""If load_events=True, the events and mouse_events attributes should
be an array no matter what. An empty array means that the events were
loaded, but there were no events available.
@@ -140,7 +141,7 @@ class TestForensicDataGetAndFilter:
assert len(instance.events) == 0
assert len(instance.mouse_events) == 0
- def test_timing(self, grliq_dm, grliq_em):
+ def test_timing(self, grliq_dm: "GrlIqDataManager", grliq_em: "GrlIqEventManager"):
forensic_uuid = uuid4().hex
grliq_dm.create_dummy(is_attempt_allowed=True, uuid=forensic_uuid)
@@ -152,13 +153,16 @@ class TestForensicDataGetAndFilter:
client_rtts=[100, 200, 150], server_rtts=[150, 120, 120]
),
)
+
instance = grliq_dm.get_data(forensic_uuid=forensic_uuid, load_events=True)
assert isinstance(instance, GrlIqData)
assert isinstance(instance.events, list)
assert isinstance(instance.mouse_events, list)
assert isinstance(instance.timing_data, TimingData)
- def test_events_events(self, grliq_dm, grliq_em):
+ def test_events_events(
+ self, grliq_dm: "GrlIqDataManager", grliq_em: "GrlIqEventManager"
+ ):
forensic_uuid = uuid4().hex
grliq_dm.create_dummy(is_attempt_allowed=True, uuid=forensic_uuid)
@@ -181,7 +185,9 @@ class TestForensicDataGetAndFilter:
assert len(instance.pointer_move_events) == 0
assert len(instance.keyboard_events) == 0
- def test_events_click(self, grliq_dm, grliq_em):
+ def test_events_click(
+ self, grliq_dm: "GrlIqDataManager", grliq_em: "GrlIqEventManager"
+ ):
forensic_uuid = uuid4().hex
grliq_dm.create_dummy(is_attempt_allowed=True, uuid=forensic_uuid)
instance = grliq_dm.get_data(forensic_uuid=forensic_uuid, load_events=True)
diff --git a/tests/grliq/managers/test_forensic_results.py b/tests/grliq/managers/test_forensic_results.py
index a837a64..68db732 100644
--- a/tests/grliq/managers/test_forensic_results.py
+++ b/tests/grliq/managers/test_forensic_results.py
@@ -1,9 +1,20 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from generalresearch.grliq.managers.forensic_data import GrlIqDataManager
+ from generalresearch.grliq.managers.forensic_results import (
+ GrlIqCategoryResultsReader,
+ )
+
+
class TestGrlIqCategoryResultsReader:
- def test_filter_category_results(self, grliq_dm, grliq_crr):
+ def test_filter_category_results(
+ self, grliq_dm: "GrlIqDataManager", grliq_crr: "GrlIqCategoryResultsReader"
+ ):
from generalresearch.grliq.models.forensic_result import (
- Phase,
GrlIqForensicCategoryResult,
+ Phase,
)
# this is just testing that it doesn't fail
diff --git a/tests/grliq/models/test_forensic_data.py b/tests/grliq/models/test_forensic_data.py
index 653f9a9..4fbf962 100644
--- a/tests/grliq/models/test_forensic_data.py
+++ b/tests/grliq/models/test_forensic_data.py
@@ -1,42 +1,50 @@
+from typing import TYPE_CHECKING
+
import pytest
from pydantic import ValidationError
-from generalresearch.grliq.models.forensic_data import GrlIqData, Platform
+if TYPE_CHECKING:
+ from generalresearch.grliq.models.forensic_data import GrlIqData
class TestGrlIqData:
- def test_supported_fonts(self, grliq_data):
+ def test_supported_fonts(self, grliq_data: "GrlIqData"):
s = grliq_data.supported_fonts_binary
assert len(s) == 1043
assert "Ubuntu" in grliq_data.supported_fonts
- def test_battery(self, grliq_data):
+ def test_battery(self, grliq_data: "GrlIqData"):
assert not grliq_data.battery_charging
assert grliq_data.battery_level == 0.41
- def test_base(self, grliq_data):
- g: GrlIqData = grliq_data
- assert g.timezone == "America/Los_Angeles"
- assert g.platform == Platform.LINUX_X86_64
- assert g.webgl_extensions
+ def test_base(self, grliq_data: "GrlIqData"):
+ from generalresearch.grliq.models.forensic_data import Platform
+
+ assert grliq_data.timezone == "America/Los_Angeles"
+ assert grliq_data.platform == Platform.LINUX_X86_64
+ assert grliq_data.webgl_extensions
# ... more
- assert g.results is None
- assert g.category_result is None
+ assert grliq_data.results is None
+ assert grliq_data.category_result is None
+
+ s = grliq_data.model_dump_json()
+ from generalresearch.grliq.models.forensic_data import GrlIqData, Platform
- s = g.model_dump_json()
g2: GrlIqData = GrlIqData.model_validate_json(s)
assert g2.results is None
assert g2.category_result is None
- assert g == g2
+ assert grliq_data == g2
# Testing things that will cause a validation error, should only be
# because something is "corrupt", not b/c the user is a baddie
- def test_corrupt(self, grliq_data):
+ def test_corrupt(self, grliq_data: "GrlIqData"):
"""Test for timestamp and timezone offset mismatch validation."""
+ from generalresearch.grliq.models.forensic_data import GrlIqData
+
d = grliq_data.model_dump(mode="json")
d.update(
{
diff --git a/tests/grliq/test_utils.py b/tests/grliq/test_utils.py
index d9034d5..7f794e8 100644
--- a/tests/grliq/test_utils.py
+++ b/tests/grliq/test_utils.py
@@ -1,10 +1,13 @@
+from datetime import datetime
from pathlib import Path
from uuid import uuid4
class TestUtils:
- def test_get_screenshot_fp(self, mnt_grliq_archive_dir, utc_hour_ago):
+ def test_get_screenshot_fp(
+ self, mnt_grliq_archive_dir: str, utc_hour_ago: datetime
+ ):
from generalresearch.grliq.utils import get_screenshot_fp
fp1 = get_screenshot_fp(
diff --git a/tests/incite/collections/test_df_collection_base.py b/tests/incite/collections/test_df_collection_base.py
index 5aaa729..10f025b 100644
--- a/tests/incite/collections/test_df_collection_base.py
+++ b/tests/incite/collections/test_df_collection_base.py
@@ -1,15 +1,19 @@
from datetime import datetime, timezone
+from typing import TYPE_CHECKING
import pandas as pd
import pytest
from pandera import DataFrameSchema
from generalresearch.incite.collections import (
- DFCollectionType,
DFCollection,
+ DFCollectionType,
)
from test_utils.incite.conftest import mnt_filepath
+if TYPE_CHECKING:
+ from generalresearch.incite.base import GRLDatasets
+
df_collection_types = [e for e in DFCollectionType if e is not DFCollectionType.TEST]
@@ -20,7 +24,7 @@ class TestDFCollectionBase:
"""
- def test_init(self, mnt_filepath, df_coll_type):
+ def test_init(self, mnt_filepath: "GRLDatasets", df_coll_type: DFCollectionType):
"""Try to initialize the DFCollection with various invalid parameters"""
with pytest.raises(expected_exception=ValueError) as cm:
DFCollection(archive_path=mnt_filepath.data_src)
@@ -42,7 +46,7 @@ class TestDFCollectionBase:
class TestDFCollectionBaseProperties:
@pytest.mark.skip
- def test_df_collection_items(self, mnt_filepath, df_coll_type):
+ def test_df_collection_items(self, mnt_filepath: "GRLDatasets", df_coll_type):
instance = DFCollection(
data_type=df_coll_type,
start=datetime(year=1800, month=1, day=1, tzinfo=timezone.utc),
@@ -54,7 +58,7 @@ class TestDFCollectionBaseProperties:
assert len(instance.interval_range) == len(instance.items)
assert len(instance.items) == 366
- def test_df_collection_progress(self, mnt_filepath, df_coll_type):
+ def test_df_collection_progress(self, mnt_filepath: "GRLDatasets", df_coll_type):
instance = DFCollection(
data_type=df_coll_type,
start=datetime(year=1800, month=1, day=1, tzinfo=timezone.utc),
@@ -67,7 +71,7 @@ class TestDFCollectionBaseProperties:
assert isinstance(instance.progress, pd.DataFrame)
assert instance.progress.shape == (366, 6)
- def test_df_collection_schema(self, mnt_filepath, df_coll_type):
+ def test_df_collection_schema(self, mnt_filepath: "GRLDatasets", df_coll_type):
instance1 = DFCollection(
data_type=DFCollectionType.WALL, archive_path=mnt_filepath.data_src
)
@@ -84,7 +88,7 @@ class TestDFCollectionBaseProperties:
class TestDFCollectionBaseMethods:
@pytest.mark.skip
- def test_initial_load(self, mnt_filepath, thl_web_rr):
+ def test_initial_load(self, mnt_filepath: "GRLDatasets", thl_web_rr):
instance = DFCollection(
pg_config=thl_web_rr,
data_type=DFCollectionType.USER,
diff --git a/tests/incite/collections/test_df_collection_item_base.py b/tests/incite/collections/test_df_collection_item_base.py
index a0c0b0b..136d234 100644
--- a/tests/incite/collections/test_df_collection_item_base.py
+++ b/tests/incite/collections/test_df_collection_item_base.py
@@ -1,13 +1,17 @@
from datetime import datetime, timezone
+from typing import TYPE_CHECKING
import pytest
from generalresearch.incite.collections import (
- DFCollectionType,
- DFCollectionItem,
DFCollection,
+ DFCollectionItem,
+ DFCollectionType,
)
-from test_utils.incite.conftest import mnt_filepath
+from generalresearch.pg_helper import PostgresConfig
+
+if TYPE_CHECKING:
+ from generalresearch.incite.base import GRLDatasets
df_collection_types = [e for e in DFCollectionType if e is not DFCollectionType.TEST]
@@ -15,7 +19,7 @@ df_collection_types = [e for e in DFCollectionType if e is not DFCollectionType.
@pytest.mark.parametrize("df_coll_type", df_collection_types)
class TestDFCollectionItemBase:
- def test_init(self, mnt_filepath, df_coll_type):
+ def test_init(self, mnt_filepath: "GRLDatasets", df_coll_type):
collection = DFCollection(
data_type=df_coll_type,
offset="100d",
@@ -41,7 +45,7 @@ class TestDFCollectionItemProperties:
@pytest.mark.parametrize("df_coll_type", df_collection_types)
class TestDFCollectionItemMethods:
- def test_has_mysql_false(self, mnt_filepath, df_coll_type):
+ def test_has_mysql_false(self, mnt_filepath: "GRLDatasets", df_coll_type):
collection = DFCollection(
data_type=df_coll_type,
offset="100d",
@@ -53,7 +57,9 @@ class TestDFCollectionItemMethods:
instance1: DFCollectionItem = collection.items[0]
assert not instance1.has_mysql()
- def test_has_mysql_true(self, thl_web_rr, mnt_filepath, df_coll_type):
+ def test_has_mysql_true(
+ self, thl_web_rr: PostgresConfig, mnt_filepath: "GRLDatasets", df_coll_type
+ ):
collection = DFCollection(
data_type=df_coll_type,
offset="100d",
diff --git a/tests/incite/collections/test_df_collection_item_thl_web.py b/tests/incite/collections/test_df_collection_item_thl_web.py
index 9c3d67a..29f3677 100644
--- a/tests/incite/collections/test_df_collection_item_thl_web.py
+++ b/tests/incite/collections/test_df_collection_item_thl_web.py
@@ -1,7 +1,8 @@
-from datetime import datetime, timezone, timedelta
+from datetime import datetime, timedelta, timezone
from itertools import product as iter_product
from os.path import join as pjoin
-from pathlib import PurePath, Path
+from pathlib import Path, PurePath
+from typing import TYPE_CHECKING, Callable
from uuid import uuid4
import dask.dataframe as dd
@@ -11,13 +12,13 @@ from distributed import Client, Scheduler, Worker
# noinspection PyUnresolvedReferences
from distributed.utils_test import (
- gen_cluster,
+ cleanup,
+ client,
client_no_amm,
+ cluster_fixture,
+ gen_cluster,
loop,
loop_in_thread,
- cleanup,
- cluster_fixture,
- client,
)
from faker import Faker
from pandera import DataFrameSchema
@@ -29,10 +30,14 @@ from generalresearch.incite.collections import (
DFCollectionType,
)
from generalresearch.incite.schemas import ARCHIVE_AFTER
+from generalresearch.models.thl.product import Product
from generalresearch.models.thl.user import User
from generalresearch.pg_helper import PostgresConfig
from generalresearch.sql_helper import PostgresDsn
-from test_utils.incite.conftest import mnt_filepath, incite_item_factory
+from test_utils.incite.conftest import incite_item_factory, mnt_filepath
+
+if TYPE_CHECKING:
+ from generalresearch.incite.base import GRLDatasets
fake = Faker()
@@ -71,7 +76,7 @@ class TestDFCollectionItemBase:
)
class TestDFCollectionItemProperties:
- def test_filename(self, df_collection_data_type, df_collection, offset):
+ def test_filename(self, df_collection_data_type, df_collection, offset: str):
for i in df_collection.items:
assert isinstance(i.filename, str)
@@ -88,35 +93,37 @@ class TestDFCollectionItemProperties:
)
class TestDFCollectionItemPropertiesBase:
- def test_name(self, df_collection_data_type, offset, df_collection):
+ def test_name(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.name, str)
- def test_finish(self, df_collection_data_type, offset, df_collection):
+ def test_finish(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.finish, datetime)
- def test_interval(self, df_collection_data_type, offset, df_collection):
+ def test_interval(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.interval, pd.Interval)
- def test_partial_filename(self, df_collection_data_type, offset, df_collection):
+ def test_partial_filename(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
for i in df_collection.items:
assert isinstance(i.partial_filename, str)
- def test_empty_filename(self, df_collection_data_type, offset, df_collection):
+ def test_empty_filename(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.empty_filename, str)
- def test_path(self, df_collection_data_type, offset, df_collection):
+ def test_path(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.path, FilePath)
- def test_partial_path(self, df_collection_data_type, offset, df_collection):
+ def test_partial_path(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.partial_path, FilePath)
- def test_empty_path(self, df_collection_data_type, offset, df_collection):
+ def test_empty_path(self, df_collection_data_type, offset: str, df_collection):
for i in df_collection.items:
assert isinstance(i.empty_path, FilePath)
@@ -136,9 +143,9 @@ class TestDFCollectionItemMethod:
def test_has_mysql(
self,
df_collection,
- thl_web_rr,
- offset,
- duration,
+ thl_web_rr: PostgresConfig,
+ offset: str,
+ duration: timedelta,
df_collection_data_type,
delete_df_collection,
):
@@ -166,9 +173,9 @@ class TestDFCollectionItemMethod:
def test_update_partial_archive(
self,
df_collection,
- offset,
- duration,
- thl_web_rw,
+ offset: str,
+ duration: timedelta,
+ thl_web_rw: PostgresConfig,
df_collection_data_type,
delete_df_collection,
):
@@ -181,26 +188,26 @@ class TestDFCollectionItemMethod:
def test_create_partial_archive(
self,
df_collection,
- offset,
- duration,
+ offset: str,
+ duration: str,
create_main_accounts,
- thl_web_rw,
+ thl_web_rw: PostgresConfig,
thl_lm,
df_collection_data_type,
- user_factory,
- product,
+ user_factory: Callable[..., User],
+ product: Product,
client_no_amm,
incite_item_factory,
delete_df_collection,
- mnt_filepath,
+ mnt_filepath: "GRLDatasets",
):
assert 1 + 1 == 2
def test_dict(
self,
df_collection_data_type,
- offset,
- duration,
+ offset: str,
+ duration: timedelta,
df_collection,
delete_df_collection,
):
@@ -224,12 +231,12 @@ class TestDFCollectionItemMethod:
self,
df_collection_data_type,
df_collection,
- offset,
- duration,
+ offset: str,
+ duration: timedelta,
create_main_accounts,
- thl_web_rw,
- user_factory,
- product,
+ thl_web_rw: PostgresConfig,
+ user_factory: Callable[..., User],
+ product: Product,
incite_item_factory,
delete_df_collection,
):
@@ -270,10 +277,10 @@ class TestDFCollectionItemMethod:
self,
df_collection_data_type,
df_collection,
- offset,
- duration,
- user_factory,
- product,
+ offset: str,
+ duration: timedelta,
+ user_factory: Callable[..., User],
+ product: Product,
incite_item_factory,
delete_df_collection,
):
@@ -316,15 +323,15 @@ class TestDFCollectionItemMethod:
def test_from_mysql_ledger(
self,
df_collection,
- user,
+ user: User,
create_main_accounts,
- offset,
- duration,
- thl_web_rw,
+ offset: str,
+ duration: timedelta,
+ thl_web_rw: PostgresConfig,
thl_lm,
df_collection_data_type,
- user_factory,
- product,
+ user_factory: Callable[..., User],
+ product: Product,
client_no_amm,
incite_item_factory,
delete_df_collection,
@@ -371,12 +378,12 @@ class TestDFCollectionItemMethod:
def test_to_archive(
self,
df_collection,
- user,
- offset,
- duration,
+ user: User,
+ offset: str,
+ duration: timedelta,
df_collection_data_type,
- user_factory,
- product,
+ user_factory: Callable[..., User],
+ product: Product,
client_no_amm,
incite_item_factory,
delete_df_collection,
@@ -410,12 +417,12 @@ class TestDFCollectionItemMethod:
self,
df_collection_data_type,
df_collection,
- user_factory,
- product,
- offset,
- duration,
+ user_factory: Callable[..., User],
+ product: Product,
+ offset: str,
+ duration: timedelta,
client_no_amm,
- user,
+ user: User,
incite_item_factory,
delete_df_collection,
mnt_filepath,
@@ -481,19 +488,19 @@ class TestDFCollectionItemMethod:
@pytest.mark.skip
def test_to_archive_numbered_partial(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
def test_initial_load(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
def test_clear_corrupt_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@@ -505,28 +512,36 @@ class TestDFCollectionItemMethod:
class TestDFCollectionItemMethodBase:
@pytest.mark.skip
- def test_path_exists(self, df_collection_data_type, offset, duration):
+ def test_path_exists(
+ self, df_collection_data_type, offset: str, duration: timedelta
+ ):
pass
@pytest.mark.skip
- def test_next_numbered_path(self, df_collection_data_type, offset, duration):
+ def test_next_numbered_path(
+ self, df_collection_data_type, offset: str, duration: timedelta
+ ):
pass
@pytest.mark.skip
def test_search_highest_numbered_path(
- self, df_collection_data_type, offset, duration
+ self, df_collection_data_type, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
- def test_tmp_filename(self, df_collection_data_type, offset, duration):
+ def test_tmp_filename(
+ self, df_collection_data_type, offset: str, duration: timedelta
+ ):
pass
@pytest.mark.skip
- def test_tmp_path(self, df_collection_data_type, offset, duration):
+ def test_tmp_path(self, df_collection_data_type, offset: str, duration: timedelta):
pass
- def test_is_empty(self, df_collection_data_type, df_collection, offset, duration):
+ def test_is_empty(
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
+ ):
"""
test_has_empty was merged into this because item.has_empty is
an alias for is_empty.. or vis-versa
@@ -542,7 +557,7 @@ class TestDFCollectionItemMethodBase:
assert item.has_empty()
def test_has_partial_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
for item in df_collection.items:
assert not item.has_partial_archive()
@@ -550,7 +565,7 @@ class TestDFCollectionItemMethodBase:
assert item.has_partial_archive()
def test_has_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
for item in df_collection.items:
# (1) Originally, nothing exists... so let's just make a file and
@@ -587,7 +602,7 @@ class TestDFCollectionItemMethodBase:
assert item.has_archive(include_empty=True)
def test_delete_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
for item in df_collection.items:
item: DFCollectionItem
@@ -610,7 +625,7 @@ class TestDFCollectionItemMethodBase:
assert not item.partial_path.exists()
def test_should_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
schema: DataFrameSchema = df_collection._schema
aa = schema.metadata[ARCHIVE_AFTER]
@@ -627,11 +642,13 @@ class TestDFCollectionItemMethodBase:
assert not item.should_archive()
@pytest.mark.skip
- def test_set_empty(self, df_collection_data_type, df_collection, offset, duration):
+ def test_set_empty(
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
+ ):
pass
def test_valid_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
# Originally, nothing has been saved or anything.. so confirm it
# always comes back as None
@@ -655,17 +672,19 @@ class TestDFCollectionItemMethodBase:
@pytest.mark.skip
def test_validate_df(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
def test_from_archive(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
- def test__to_dict(self, df_collection_data_type, df_collection, offset, duration):
+ def test__to_dict(
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
+ ):
for item in df_collection.items:
res = item._to_dict()
@@ -683,19 +702,19 @@ class TestDFCollectionItemMethodBase:
@pytest.mark.skip
def test_delete_partial(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
def test_cleanup_partials(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@pytest.mark.skip
def test_delete_dangling_partials(
- self, df_collection_data_type, df_collection, offset, duration
+ self, df_collection_data_type, df_collection, offset: str, duration: timedelta
):
pass
@@ -715,7 +734,7 @@ async def test_client(client, s, worker):
)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
@pytest.mark.anyio
-async def test_client_parametrize(c, s, w, df_collection_data_type, offset):
+async def test_client_parametrize(c, s, w, df_collection_data_type, offset: str):
"""c,s,a are all required - the secondary Worker (b) is not required"""
assert isinstance(c, Client), f"c is not Client, it's {type(c)}"
@@ -740,16 +759,16 @@ class TestDFCollectionItemFunctionalTest:
def test_to_archive_and_ddf(
self,
df_collection_data_type,
- offset,
- duration,
+ offset: str,
+ duration: timedelta,
client_no_amm,
df_collection,
- user,
- user_factory,
- product,
+ user: User,
+ user_factory: Callable[..., User],
+ product: Product,
incite_item_factory,
delete_df_collection,
- mnt_filepath,
+ mnt_filepath: "GRLDatasets",
):
from generalresearch.models.thl.user import User
@@ -790,16 +809,16 @@ class TestDFCollectionItemFunctionalTest:
def test_filesize_estimate(
self,
df_collection,
- user,
- offset,
- duration,
+ user: User,
+ offset: str,
+ duration: timedelta,
client_no_amm,
- user_factory,
- product,
+ user_factory: Callable[..., User],
+ product: Product,
df_collection_data_type,
incite_item_factory,
delete_df_collection,
- mnt_filepath,
+ mnt_filepath: "GRLDatasets",
):
"""A functional test to write some Parquet files for the
DFCollection and then confirm that the files get written
@@ -809,9 +828,11 @@ class TestDFCollectionItemFunctionalTest:
(1) Validating their passing the pandera schema
(2) The file or dir has an expected size on disk
"""
+ import os
+
import pyarrow.parquet as pq
+
from generalresearch.models.thl.user import User
- import os
if df_collection.data_type in unsupported_mock_types:
return
@@ -838,14 +859,14 @@ class TestDFCollectionItemFunctionalTest:
self,
client_no_amm,
df_collection,
- user_factory,
- product,
- offset,
- duration,
+ user_factory: Callable[..., User],
+ product: Product,
+ offset: str,
+ duration: timedelta,
df_collection_data_type,
incite_item_factory,
delete_df_collection,
- mnt_filepath,
+ mnt_filepath: "GRLDatasets",
):
from generalresearch.models.thl.user import User
@@ -875,7 +896,9 @@ class TestDFCollectionItemFunctionalTest:
assert item.has_archive(include_empty=True)
@pytest.mark.skip
- def test_get_items(self, df_collection, product, offset, duration):
+ def test_get_items(
+ self, df_collection, product: Product, offset: str, duration: timedelta
+ ):
with pytest.warns(expected_warning=ResourceWarning) as cm:
df_collection.get_items_last365()
assert "DFCollectionItem has missing archives" in str(
@@ -892,11 +915,11 @@ class TestDFCollectionItemFunctionalTest:
df_collection,
incite_item_factory,
delete_df_collection,
- user_factory,
- product,
- offset,
- duration,
- mnt_filepath,
+ user_factory: Callable[..., User],
+ product: Product,
+ offset: str,
+ duration: timedelta,
+ mnt_filepath: "GRLDatasets",
):
"""Don't allow creating an archive for data that will likely be
overwritten or updated
@@ -934,10 +957,10 @@ class TestDFCollectionItemFunctionalTest:
df_collection,
incite_item_factory,
delete_df_collection,
- user,
- offset,
- duration,
- mnt_filepath,
+ user: User,
+ offset: str,
+ duration: timedelta,
+ mnt_filepath: "GRLDatasets",
):
delete_df_collection(coll=df_collection)
@@ -962,10 +985,10 @@ class TestDFCollectionItemFunctionalTest:
df_collection,
incite_item_factory,
delete_df_collection,
- user_factory,
- product,
- offset,
- duration,
+ user_factory: Callable[..., User],
+ product: Product,
+ offset: str,
+ duration: timedelta,
mnt_filepath,
):
from generalresearch.models.thl.user import User
diff --git a/tests/incite/collections/test_df_collection_thl_marketplaces.py b/tests/incite/collections/test_df_collection_thl_marketplaces.py
index 0a77938..54c27f7 100644
--- a/tests/incite/collections/test_df_collection_thl_marketplaces.py
+++ b/tests/incite/collections/test_df_collection_thl_marketplaces.py
@@ -1,11 +1,11 @@
from datetime import datetime, timezone
from itertools import product
+from typing import TYPE_CHECKING
import pytest
-from pandera import Column, Index, DataFrameSchema
+from pandera import Column, DataFrameSchema, Index
-from generalresearch.incite.collections import DFCollection
-from generalresearch.incite.collections import DFCollectionType
+from generalresearch.incite.collections import DFCollection, DFCollectionType
from generalresearch.incite.collections.thl_marketplaces import (
InnovateSurveyHistoryCollection,
MorningSurveyTimeseriesCollection,
@@ -14,6 +14,9 @@ from generalresearch.incite.collections.thl_marketplaces import (
)
from test_utils.incite.conftest import mnt_filepath
+if TYPE_CHECKING:
+ from generalresearch.incite.base import GRLDatasets
+
def combo_object():
for x in product(
diff --git a/tests/incite/collections/test_df_collection_thl_web.py b/tests/incite/collections/test_df_collection_thl_web.py
index e6f464b..51ca128 100644
--- a/tests/incite/collections/test_df_collection_thl_web.py
+++ b/tests/incite/collections/test_df_collection_thl_web.py
@@ -1,5 +1,6 @@
from datetime import datetime
from itertools import product
+from typing import TYPE_CHECKING
import dask.dataframe as dd
import pandas as pd
@@ -8,6 +9,9 @@ from pandera import DataFrameSchema
from generalresearch.incite.collections import DFCollection, DFCollectionType
+if TYPE_CHECKING:
+ from generalresearch.incite.base import GRLDatasets
+
def combo_object():
for x in product(
@@ -29,7 +33,7 @@ def combo_object():
)
class TestDFCollection_thl_web:
- def test_init(self, df_collection_data_type, offset, df_collection):
+ def test_init(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection_data_type, DFCollectionType)
assert isinstance(df_collection, DFCollection)
@@ -39,12 +43,12 @@ class TestDFCollection_thl_web:
)
class TestDFCollection_thl_web_Properties:
- def test_items(self, df_collection_data_type, offset, df_collection):
+ def test_items(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection.items, list)
for i in df_collection.items:
assert i._collection == df_collection
- def test__schema(self, df_collection_data_type, offset, df_collection):
+ def test__schema(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection._schema, DataFrameSchema)
@@ -54,16 +58,16 @@ class TestDFCollection_thl_web_Properties:
class TestDFCollection_thl_web_BaseProperties:
@pytest.mark.skip
- def test__interval_range(self, df_collection_data_type, offset, df_collection):
+ def test__interval_range(self, df_collection_data_type, offset: str, df_collection):
pass
- def test_interval_start(self, df_collection_data_type, offset, df_collection):
+ def test_interval_start(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection.interval_start, datetime)
- def test_interval_range(self, df_collection_data_type, offset, df_collection):
+ def test_interval_range(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection.interval_range, list)
- def test_progress(self, df_collection_data_type, offset, df_collection):
+ def test_progress(self, df_collection_data_type, offset: str, df_collection):
assert isinstance(df_collection.progress, pd.DataFrame)
@@ -78,7 +82,7 @@ class TestDFCollection_thl_web_Methods:
@pytest.mark.skip
def test_fetch_force_rr_latest(
- self, df_collection_data_type, df_collection, offset
+ self, df_collection_data_type, df_collection, offset: str
):
pass
@@ -92,55 +96,63 @@ class TestDFCollection_thl_web_Methods:
)
class TestDFCollection_thl_web_BaseMethods:
- def test_fetch_all_paths(self, df_collection_data_type, offset, df_collection):
+ def test_fetch_all_paths(self, df_collection_data_type, offset: str, df_collection):
res = df_collection.fetch_all_paths(
items=None, force_rr_latest=False, include_partial=False
)
assert isinstance(res, list)
@pytest.mark.skip
- def test_ddf(self, df_collection_data_type, offset, df_collection):
+ def test_ddf(self, df_collection_data_type, offset: str, df_collection):
res = df_collection.ddf()
assert isinstance(res, dd.DataFrame)
# -- cleanup --
@pytest.mark.skip
- def test_schedule_cleanup(self, df_collection_data_type, offset, df_collection):
+ def test_schedule_cleanup(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
@pytest.mark.skip
- def test_cleanup(self, df_collection_data_type, offset, df_collection):
+ def test_cleanup(self, df_collection_data_type, offset: str, df_collection):
pass
@pytest.mark.skip
- def test_cleanup_partials(self, df_collection_data_type, offset, df_collection):
+ def test_cleanup_partials(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
@pytest.mark.skip
- def test_clear_tmp_archives(self, df_collection_data_type, offset, df_collection):
+ def test_clear_tmp_archives(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
@pytest.mark.skip
def test_clear_corrupt_archives(
- self, df_collection_data_type, offset, df_collection
+ self, df_collection_data_type, offset: str, df_collection
):
pass
@pytest.mark.skip
- def test_rebuild_symlinks(self, df_collection_data_type, offset, df_collection):
+ def test_rebuild_symlinks(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
# -- Source timing --
@pytest.mark.skip
- def test_get_item(self, df_collection_data_type, offset, df_collection):
+ def test_get_item(self, df_collection_data_type, offset: str, df_collection):
pass
@pytest.mark.skip
- def test_get_item_start(self, df_collection_data_type, offset, df_collection):
+ def test_get_item_start(self, df_collection_data_type, offset: str, df_collection):
pass
@pytest.mark.skip
- def test_get_items(self, df_collection_data_type, offset, df_collection):
+ def test_get_items(self, df_collection_data_type, offset: str, df_collection):
# If we get all the items from the start of the collection, it
# should include all the items!
res1 = df_collection.items
@@ -148,13 +160,19 @@ class TestDFCollection_thl_web_BaseMethods:
assert len(res1) == len(res2)
@pytest.mark.skip
- def test_get_items_from_year(self, df_collection_data_type, offset, df_collection):
+ def test_get_items_from_year(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
@pytest.mark.skip
- def test_get_items_last90(self, df_collection_data_type, offset, df_collection):
+ def test_get_items_last90(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
@pytest.mark.skip
- def test_get_items_last365(self, df_collection_data_type, offset, df_collection):
+ def test_get_items_last365(
+ self, df_collection_data_type, offset: str, df_collection
+ ):
pass
diff --git a/tests/incite/mergers/foundations/test_enriched_session.py b/tests/incite/mergers/foundations/test_enriched_session.py
index ec15d38..47f243e 100644
--- a/tests/incite/mergers/foundations/test_enriched_session.py
+++ b/tests/incite/mergers/foundations/test_enriched_session.py
@@ -1,16 +1,16 @@
-from datetime import timedelta, timezone, datetime
+from datetime import datetime, timedelta, timezone
from decimal import Decimal
from itertools import product
from typing import Optional
-from generalresearch.incite.schemas.admin_responses import (
- AdminPOPSessionSchema,
-)
-
import dask.dataframe as dd
import pandas as pd
import pytest
+from generalresearch.incite.schemas.admin_responses import (
+ AdminPOPSessionSchema,
+)
+from generalresearch.pg_helper import PostgresConfig
from test_utils.incite.collections.conftest import (
session_collection,
wall_collection,
@@ -36,7 +36,7 @@ class TestEnrichedSession:
wall_collection,
session_collection,
enriched_session_merge,
- thl_web_rr,
+ thl_web_rr: PostgresConfig,
delete_df_collection,
incite_item_factory,
):
@@ -95,7 +95,7 @@ class TestEnrichedSessionAdmin:
client_no_amm,
wall_collection,
session_collection,
- thl_web_rr,
+ thl_web_rr: PostgresConfig,
session_report_request,
user_factory,
start,