import asyncio

from crawlee.browsers import BrowserPool
from crawlee.crawlers import PlaywrightCrawler, PlaywrightCrawlingContext, PlaywrightPreNavCrawlingContext
from crawlee.sessions import CookieParam, SessionPool

# Cookies are expected in the Netscape cookie file format; see
# https://docs.cyotek.com/cyowcopy/1.10/netscapecookieformat.html for details.
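# Each non-comment line holds seven tab-separated fields, in this order:
#   domain    include-subdomains    path    secure    expires    name    value
# e.g. (hypothetical values):
#   .example.com    TRUE    /    TRUE    1767225600    session_id    abc123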
with open('cookieFile.txt') as f:
    cookies: list[CookieParam] = []
    for line in f.read().splitlines():
        # Lines starting with '#' are comments, except for the '#HttpOnly_'
        # prefix, which marks an HttpOnly cookie on an otherwise normal line.
        http_only = line.startswith('#HttpOnly_')
        if http_only:
            line = line.removeprefix('#HttpOnly_')
        elif line.startswith('#') or not line.strip():
            continue
        parts = line.split('\t')
        if len(parts) >= 7:
            cookie = CookieParam(
                name=parts[5],
                value=parts[6],
                domain=parts[0],
                path=parts[2],
                secure=parts[3].lower() == 'true',
                http_only=http_only,
            )
            # Session cookies have an empty expiry field; only set it when present.
            if parts[4]:
                cookie['expires'] = int(parts[4])
            cookies.append(cookie)
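# A minimal alternative sketch: the standard library's http.cookiejar.MozillaCookieJar
# parses Netscape-format files directly, so the manual split above could be replaced with:
#
#   from http.cookiejar import MozillaCookieJar
#
#   jar = MozillaCookieJar('cookieFile.txt')
#   jar.load(ignore_discard=True, ignore_expires=True)
#   cookies = [
#       CookieParam(name=c.name, value=c.value, domain=c.domain, path=c.path)
#       for c in jar
#   ]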


async def main() -> None:
    crawler = PlaywrightCrawler(
        use_session_pool=True,
        # A single non-rotating session, so the imported cookies stay attached
        # to every request.
        max_session_rotations=0,
        browser_pool=BrowserPool(),
        session_pool=SessionPool(max_pool_size=1),
    )

    @crawler.pre_navigation_hook
    async def pre_navigation_handler(context: PlaywrightPreNavCrawlingContext) -> None:
        if context.session:
            # Store the parsed cookies on the session, then copy them into the
            # Playwright browser context before navigation.
            context.session.cookies.set_cookies(cookies)
            for cookie in context.session.cookies.get_cookies_as_dicts():
                try:
                    await context.page.context.add_cookies([cookie])  # type: ignore[arg-type]
                except Exception as e:
                    context.log.error(f'Failed to add cookie {cookie}: {e}')


    @crawler.router.default_handler
    async def request_handler(context: PlaywrightCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')
        title = await context.page.title()
        context.log.info(f'Page title: {title}')

    await crawler.run(['Your URL'])


if __name__ == '__main__':
    asyncio.run(main())