"""
Polymarket Profile Scraper
Scrapes user positions (bets) from Polymarket profile pages
"""

import time
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.chrome.options import Options

# Configuration
base_url = "https://polymarket.com/profile/{username}?tab=positions"
output_file = "polymarket_positions.csv"

def setup_driver():
    """Setup Chrome driver with options"""
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
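    # The next two flags mask common automation signals (the AutomationControlled
    # blink feature and the default headless user-agent string); best-effort only.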
    chrome_options.add_argument('--disable-blink-features=AutomationControlled')
    chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')
    
    driver = webdriver.Chrome(options=chrome_options)
    driver.set_page_load_timeout(30)
    return driver

def scroll_to_load_all(driver, max_scrolls=50):
    """Scroll to the bottom repeatedly until no new content loads."""
    last_height = driver.execute_script("return document.body.scrollHeight")
    scrolls = 0

    while scrolls < max_scrolls:
        # Scroll down and give lazily loaded positions time to render
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(2)
        scrolls += 1

        # If the page height is unchanged, no more content was loaded
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break

        last_height = new_height
        print(f"Scroll {scrolls}: loaded more content...")

    print(f"Finished scrolling after {scrolls} scrolls")

def extract_positions(driver):
    """Extract all position data from the current page"""
    positions = []
    
    try:
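        # NOTE: the 'c-dhzjXW' / 'c-PJLV' selectors target framework-generated
        # class fragments copied from the live page; they are tied to the
        # current build and will likely break when the site is redeployed.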
        # Wait for positions to load
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "div[class*='c-dhzjXW']"))
        )
        
        # Find all position containers
        position_elements = driver.find_elements(By.CSS_SELECTOR, "div[class*='c-dhzjXW']")
        print(f"Found {len(position_elements)} position elements")
        
        for idx, element in enumerate(position_elements):
            try:
                position_data = {}
                
                # Extract status (Won/Lost)
                try:
                    status_elem = element.find_element(By.CSS_SELECTOR, "div[class*='c-PJLV'] p")
                    position_data['status'] = status_elem.text.strip()
                except NoSuchElementException:
                    position_data['status'] = 'Unknown'
                
                # Extract market question/title
                try:
                    title_elem = element.find_element(By.CSS_SELECTOR, "p[class*='font-semibold']")
                    position_data['market_title'] = title_elem.text.strip()
                except NoSuchElementException:
                    position_data['market_title'] = ''
                
                # Extract volume/shares info
                try:
                    volume_elem = element.find_element(By.CSS_SELECTOR, "p[class*='text-xs']:not([class*='font-semibold'])")
                    position_data['volume_info'] = volume_elem.text.strip()
                except NoSuchElementException:
                    position_data['volume_info'] = ''
                
                # Extract invested amount and current value (the left and right
                # 'font-medium' cells). find_elements returns an empty list
                # rather than raising, so no try/except is needed here.
                amount_elems = element.find_elements(By.CSS_SELECTOR, "div[class*='c-PJLV'] p[class*='font-medium']")
                if len(amount_elems) >= 2:
                    position_data['invested'] = amount_elems[0].text.strip()
                    position_data['current_value'] = amount_elems[1].text.strip()
                else:
                    position_data['invested'] = ''
                    position_data['current_value'] = ''
                
                # Extract profit/loss (green text for gains, red for losses)
                try:
                    profit_elem = element.find_element(By.CSS_SELECTOR, "p[class*='text-green'], p[class*='text-red']")
                    position_data['profit_loss'] = profit_elem.text.strip()
                except NoSuchElementException:
                    position_data['profit_loss'] = ''
                
                # Only add if we have at least a title
                if position_data['market_title']:
                    positions.append(position_data)
                    
            except Exception as e:
                print(f"Error extracting position {idx}: {e}")
                continue
        
    except TimeoutException:
        print("Timeout waiting for positions to load")
    except Exception as e:
        print(f"Error extracting positions: {e}")
    
    return positions

def scrape_profile(username):
    """Main scraping function"""
    driver = setup_driver()
    all_positions = []
    
    try:
        # Navigate to profile
        url = base_url.format(username=username)
        print(f"Navigating to {url}")
        driver.get(url)
        time.sleep(3)  # crude settle time; extract_positions also waits explicitly
        
        # Scroll to load all positions
        print("Scrolling to load all positions...")
        scroll_to_load_all(driver)
        
        # Extract positions
        print("Extracting position data...")
        positions = extract_positions(driver)
        all_positions.extend(positions)
        
        print(f"Extracted {len(positions)} positions")
        
    except Exception as e:
        print(f"Error during scraping: {e}")
    finally:
        driver.quit()
    
    return all_positions

def save_to_csv(positions, filename):
    """Save positions to CSV file"""
    if not positions:
        print("No positions to save")
        return
    
    fieldnames = ['status', 'market_title', 'volume_info', 'invested', 'current_value', 'profit_loss']
    
    with open(filename, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(positions)
    
    print(f"Saved {len(positions)} positions to {filename}")

if __name__ == "__main__":
    # Example: scrape a specific user profile
    username = "@ZXWP"
    
    print(f"Starting scrape for user: {username}")
    positions = scrape_profile(username)
    
    if positions:
        save_to_csv(positions, output_file)
        print(f"\nScraping complete! Found {len(positions)} positions")
    else:
        print("No positions found")
