import json, re, time, urllib.parse, urllib.request, urllib.error
from collections import Counter, defaultdict

# Descriptive User-Agent header: reddit rate-limits/blocks the default urllib UA,
# so every request sends this instead.
UA = {'User-Agent':'HermesResearch/1.0 by Miguel'}
# All request paths below are relative to this host.
BASE = 'https://www.reddit.com'

def get_json(url, retries=4):
    """Fetch *url* with the script's User-Agent and return the decoded JSON body.

    Transient HTTP errors (429 and common 5xx codes) and other failures are
    retried up to *retries* times with a linear backoff.  Once every attempt
    is exhausted the last error is raised — the original version silently
    returned None after repeated 429/5xx responses, which made callers fail
    later with a confusing TypeError.

    Raises:
        urllib.error.HTTPError: non-retryable status, or retries exhausted.
        Exception: whatever the final attempt raised.
    """
    last_err = None
    for attempt in range(retries):
        try:
            req = urllib.request.Request(url, headers=UA)
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode('utf-8'))
        except urllib.error.HTTPError as e:
            if e.code in (429, 500, 502, 503, 504):
                last_err = e
                time.sleep(2 + attempt * 3)  # linear backoff: 2, 5, 8, ...
                continue
            raise  # non-transient HTTP error: fail immediately
        except Exception as e:
            if attempt == retries - 1:
                raise
            last_err = e
            time.sleep(1 + attempt)
    # All retries consumed on the 429/5xx path: surface the failure instead
    # of implicitly returning None.
    if last_err is not None:
        raise last_err
    raise RuntimeError(f'get_json exhausted retries for {url}')

def add_query(path, qs):
    """Build an absolute reddit URL: BASE + *path* with *qs* url-encoded and appended.

    Uses '&' when *path* already carries a query string, '?' otherwise.
    """
    joiner = '?' if '?' not in path else '&'
    return f'{BASE}{path}{joiner}{urllib.parse.urlencode(qs)}'

def listing(path, limit_total=250):
    """Page through a reddit listing endpoint and collect link ('t3') items.

    Follows the 'after' cursor until *limit_total* items are gathered, the
    server returns an empty page, or the cursor runs out.  Sleeps briefly
    between pages to stay polite.
    """
    results = []
    cursor = None
    while len(results) < limit_total:
        params = {'limit': min(100, limit_total - len(results)), 'raw_json': 1}
        if cursor:
            params['after'] = cursor
        page = get_json(add_query(path, params))
        kids = page['data'].get('children', [])
        if not kids:
            break
        results.extend(k['data'] for k in kids if k.get('kind') == 't3')
        cursor = page['data'].get('after')
        if not cursor:
            break
        time.sleep(.6)
    return results

# Pull several views of each subreddit; failures on one endpoint are logged
# and skipped so the rest of the crawl continues.
subreddits = ['EDC', 'everydaycarry']
posts = []
for sub in subreddits:
    dump_q = urllib.parse.quote("listed pocket dump")
    endpoints = [
        (f'/r/{sub}/top.json?t=year', 100),
        (f'/r/{sub}/top.json?t=month', 75),
        (f'/r/{sub}/top.json?t=all', 100),
        (f'/r/{sub}/hot.json', 50),
        (f'/r/{sub}/search.json?q={dump_q}&restrict_sr=on&sort=relevance&t=year', 50),
    ]
    for path, cap in endpoints:
        try:
            fetched = listing(path, cap)
            # Tag each post with the query that produced it, for provenance.
            for post in fetched:
                post['_source_query'] = path
            posts.extend(fetched)
        except Exception as e:
            print('ERR listing', path, type(e).__name__, e)

# Drop duplicates: the same post can arrive from multiple endpoints
# (top/hot/search); key on (subreddit, id), keeping first occurrence.
seen = set()
uniq = []
for post in posts:
    key = (post.get('subreddit'), post.get('id'))
    if key in seen:
        continue
    seen.add(key)
    uniq.append(post)
posts = uniq

comments_by_post = {}


def _collect_comments(children, acc):
    # Depth-first, pre-order walk of a reddit comment tree, appending the
    # bodies of non-deleted 't1' comments to *acc*.
    for child in children:
        if child.get('kind') != 't1':
            continue
        node = child['data']
        body = node.get('body', '')
        if body and body not in ('[deleted]', '[removed]'):
            acc.append(body)
        replies = node.get('replies')
        # reddit uses '' (empty string) instead of a dict when there are
        # no replies, hence the isinstance check.
        if isinstance(replies, dict):
            _collect_comments(replies['data'].get('children', []), acc)


# Fetch comment threads for the 25 most "interesting" posts, ranked by a
# blend of score, comment count, and selftext length (capped at 1000 chars).
sample = sorted(
    posts,
    key=lambda x: (x.get('score', 0)
                   + x.get('num_comments', 0) * 8
                   + min(len(x.get('selftext') or ''), 1000)),
    reverse=True,
)[:25]
for p in sample:
    try:
        url = BASE + p['permalink'].rstrip('/') + '.json?limit=60&sort=top&raw_json=1'
        payload = get_json(url)
        collected = []
        # payload[0] is the link itself; payload[1] holds the comment forest.
        if len(payload) > 1:
            _collect_comments(payload[1]['data'].get('children', []), collected)
        comments_by_post[p['subreddit'] + '_' + p['id']] = collected[:60]
        time.sleep(.45)
    except Exception:
        # Best effort: a post with an unreadable thread just gets no comments.
        comments_by_post[p['subreddit'] + '_' + p['id']] = []

# Category name -> case-insensitive regex matched against each post's
# title + selftext + top comments.  Word boundaries (\b) keep most brand
# names from matching inside longer words; some alternatives are still
# broad by design (e.g. 'cards?', 'bottle', 'cable').
patterns = {
 'phone': r'\b(phone|iphone|pixel|android)\b',
 'wallet/cards/cash': r'\b(wallet|card holder|cardholder|cards?|cash|money clip|ridge|chums)\b',
 'keys/key organizer': r'\b(keys?|keybar|keysmart|key organizer|keychain|key ring|orbitkey)\b',
 'watch': r'\b(watch|g[- ]?shock|casio|seiko|garmin|apple watch|timex|citizen)\b',
 'knife': r'\b(knife|knives|blade|spyderco|benchmade|civivi|kershaw|microtech|crkt|opinel|sak|swiss army|para ?3|bugout|rat ?2|delica)\b',
 'multitool': r'\b(multitool|multi-tool|leatherman|gerber dime|victorinox|skeletool|wave\+?|micra|squirt|bit kit|swiss tool)\b',
 'flashlight': r'\b(flashlight|torch|olight|streamlight|rovyvon|wurkkos|zebra\s?light|emisar|lumintop|sofirn|aaa light|arkfeld)\b',
 'pen/marker': r'\b(pen|fisher space pen|rite in the rain|sharpie|marker|pencil|zebra f-701|tactile turn)\b',
 'notebook': r'\b(notebook|notepad|field notes|rite in the rain|memo book|journal)\b',
 'lighter/fire': r'\b(lighter|zippo|bic|matches|firestarter|ferro rod)\b',
 'earbuds/headphones': r'\b(earbuds|airpods|headphones|iem|buds|earphones)\b',
 'power bank/charger/cable': r'\b(power bank|battery bank|portable charger|charger|charging cable|usb[- ]?c|lightning cable|anker|cable|cord)\b',
 'water bottle': r'\b(water bottle|bottle|hydro flask|nalgene|yeti|owala|canteen|stanley)\b',
 'bag/sling/pouch/organizer': r'\b(backpack|bag|sling|pouch|organizer|maxpedition|alpaka|garage built gear|gbg|tale of knives|vanquest|peak design|bellroy)\b',
 'medical/first aid/meds': r'\b(first aid|band-?aid|bandage|meds|medicine|ibuprofen|advil|tylenol|allergy|narcan|tourniquet|tq|ifak|boo boo|trauma kit)\b',
 'handkerchief/bandana/cloth': r'\b(hank|handkerchief|bandana|microfiber cloth|cleaning cloth)\b',
 'coins/fidget/talisman': r'\b(coin|challenge coin|fidget|worry stone|spinner|bead|worry coin|begleri)\b',
 'pry bar': r'\b(pry bar|prybar|pry tool|lynch|countycomm)\b',
 'pepper spray/self defense': r'\b(pepper spray|oc spray|mace|self defense|taser|pocket stick)\b',
 'firearm/ccw': r'\b(glock|sig sauer|p365|p320|ccw|concealed carry|firearm|pistol|holster|magazine|mag\b|ruger|s&w|smith & wesson)\b',
 'sunglasses/glasses': r'\b(sunglasses|glasses|ray-ban|oakley|shades|eyewear)\b',
 'personal care': r'\b(lip balm|chapstick|sanitizer|wipes|tissues|gum|mints|deodorant|comb|toothpick|floss|lotion)\b',
 'work badge/transit': r'\b(badge|access card|id card|transit card|metro card|bus pass)\b',
 'gloves/mask/weather': r'\b(gloves|mask|umbrella|rain jacket|jacket|hat|beanie|sunscreen)\b',
}

cat_counts = Counter()          # category -> number of posts matching it
evidence = defaultdict(list)    # category -> up to 8 example posts
mentions = defaultdict(Counter)  # brand term -> per-subreddit mention counts
brand_terms = ['leatherman','victorinox','spyderco','benchmade','civivi','kershaw','olight','streamlight','rovyvon','casio','g-shock','seiko','garmin','ridge','field notes','fisher space pen','sharpie','zippo','bic','maxpedition','alpaka','bellroy','anker','airpods','glock','sig','p365']

# Compile everything once, outside the post loop.  Brand terms get word
# boundaries: the old bare-substring test ("term in text") false-positived
# badly — 'sig' matched "design", 'ridge' matched "cartridge", 'bic'
# matched "bicycle".
_cat_res = {cat: re.compile(pat, flags=re.I) for cat, pat in patterns.items()}
_brand_res = {term: re.compile(r'\b' + re.escape(term) + r'\b') for term in brand_terms}

for p in posts:
    key = p['subreddit'] + '_' + p['id']
    # Searchable text: title + selftext + up to 40 fetched comments, lowercased.
    text = ' '.join([p.get('title',''), p.get('selftext','') or ''] + comments_by_post.get(key, [])[:40]).lower()
    for cat, cat_re in _cat_res.items():
        if cat_re.search(text):
            cat_counts[cat] += 1
            if len(evidence[cat]) < 8:
                evidence[cat].append({'subreddit':p['subreddit'],'title':p.get('title'),'score':p.get('score'),'comments':p.get('num_comments'),'url':'https://reddit.com'+p.get('permalink',''),'selftext':(p.get('selftext') or '')[:700]})
    for term, term_re in _brand_res.items():
        if term_re.search(text):
            mentions[term][p['subreddit']] += 1

# Collect posts whose selftext looks like an itemized gear list: keep
# short-to-medium lines with bullet/dash decoration stripped.
list_lines = []
for post in posts:
    body = post.get('selftext') or ''
    if len(body) <= 10:
        continue
    kept = [raw.strip(' -*•\t') for raw in body.splitlines() if 2 < len(raw.strip()) < 140]
    if kept:
        list_lines.append({
            'subreddit': post['subreddit'],
            'title': post['title'],
            'score': post['score'],
            'url': 'https://reddit.com' + post['permalink'],
            'lines': kept[:40],
        })

# Assemble the final report and persist it as JSON.
report={
    'subreddits': subreddits,
    'post_count': len(posts),
    'comment_posts_sampled': len(comments_by_post),
    'category_counts': cat_counts.most_common(),
    # Brands sorted by total mentions across all subreddits, descending.
    'brand_mentions': {k:sum(v.values()) for k,v in sorted(mentions.items(), key=lambda kv:sum(kv[1].values()), reverse=True)},
    'brand_mentions_by_sub': {k:dict(v) for k,v in mentions.items()},
    'evidence': evidence,
    'top_posts': [{'subreddit':p['subreddit'],'title':p['title'],'score':p['score'],'comments':p['num_comments'],'url':'https://reddit.com'+p['permalink'],'selftext':(p.get('selftext') or '')[:900]} for p in sorted(posts, key=lambda x:x.get('score',0), reverse=True)[:35]],
    'explicit_lists': list_lines[:80]
}
path='/Users/cynthia/.hermes/hermes-agent/edc_reddit_report.json'
# Use a context manager (the original leaked the file handle via
# open(...).write(...)) and pin the encoding instead of trusting the
# locale default.
with open(path, 'w', encoding='utf-8') as fh:
    fh.write(json.dumps(report, indent=2))
print(path)
print('posts', len(posts), 'comment sets', len(comments_by_post))
print('top categories')
for k,v in cat_counts.most_common(35): print(f'{k}: {v}')
print('brands', report['brand_mentions'])