That's the first draft. IBKR has a hard limit of 50 stocks (scanner). GPT told me to scrap Yahoo for top 200 active tickers Then use IBKR for historical data and compute Sharpe ratio. Here is the python code for yahoo+ibkr: Code: import time import threading import numpy as np import requests from ibapi.client import EClient from ibapi.wrapper import EWrapper from ibapi.contract import Contract from ibapi.common import * from ibapi.utils import iswrapper # ----------------------------- # Step 1: Get tickers from Yahoo # ----------------------------- def get_top_200_yahoo(): print("Fetching top 200 most active tickers from Yahoo Finance (JSON API)...") url = "https://query1.finance.yahoo.com/v1/finance/screener/predefined/saved" params = { "scrIds": "most_actives", "count": 200, "start": 0 } headers = { "User-Agent": "Mozilla/5.0" } try: response = requests.get(url, params=params, headers=headers) response.raise_for_status() data = response.json() quotes = data["finance"]["result"][0]["quotes"] tickers = [q["symbol"] for q in quotes if q.get("symbol")] print(f"✅ Retrieved {len(tickers)} tickers from Yahoo.\n") return tickers except Exception as e: print(f"❌ Failed to retrieve tickers: {e}") return [] # ----------------------------- # Step 2: IBKR App # ----------------------------- class IBKRApp(EClient, EWrapper): def __init__(self): EClient.__init__(self, self) self.hist_data = [] self.data_received = False self.sharpe_results = [] @iswrapper def historicalData(self, reqId: int, bar): self.hist_data.append(bar) @iswrapper def historicalDataEnd(self, reqId: int, start: str, end: str): self.data_received = True def get_historical_closes(self, symbol): contract = Contract() contract.symbol = symbol contract.secType = "STK" contract.exchange = "SMART" contract.currency = "USD" self.hist_data = [] self.data_received = False self.reqHistoricalData( reqId=9001, contract=contract, endDateTime='', durationStr='50 D', barSizeSetting='1 day', whatToShow='TRADES', useRTH=1, 
formatDate=1, keepUpToDate=False, chartOptions=[] ) timeout = 4 # seconds for _ in range(timeout * 10): if self.data_received: break time.sleep(0.1) closes = [bar.close for bar in self.hist_data] return closes def compute_sharpe(self, closes): if len(closes) < 2: return None prices = np.array(closes) log_returns = np.log(prices[1:] / prices[:-1]) if np.std(log_returns) == 0: return None sharpe = np.mean(log_returns) / np.std(log_returns) return sharpe def process(self, tickers): print("Processing tickers for Sharpe ratio...\n") for i, ticker in enumerate(tickers): print(f"[{i+1:>3}/200] {ticker:<6} ", end="") try: closes = self.get_historical_closes(ticker) if len(closes) < 2: print("⛔ Not enough data") continue sharpe = self.compute_sharpe(closes) if sharpe is not None: self.sharpe_results.append((ticker, sharpe, len(closes))) print(f"✅ Sharpe: {sharpe:.4f} ({len(closes)} closes)") else: print("⚠️ Invalid Sharpe") except Exception as e: print(f"❌ Error: {e}") time.sleep(1) # obey IB rate limit # ----------------------------- # Step 3: Run App # ----------------------------- def run_loop(app): app.run() def main(): # Step 1: Get tickers tickers = get_top_200_yahoo() # Step 2: Start IBKR app app = IBKRApp() app.connect("127.0.0.1", 7496, clientId=1) thread = threading.Thread(target=run_loop, args=(app,)) thread.start() time.sleep(1) # Step 3: Process app.process(tickers) # Step 4: Show results sorted_results = sorted(app.sharpe_results, key=lambda x: x[1], reverse=True) print("\n Top Stocks by Sharpe Ratio (50-day log returns)") print("Ticker | Sharpe(50) | Num Closes") print("-" * 35) for ticker, sharpe, count in sorted_results: print(f"{ticker:6} | {sharpe:.4f} | {count}") app.disconnect() if __name__ == "__main__": main() 50D Sharpes are different than on my TradingView.
Alright last spam post about Sharpes & Co. Here is a version that fetches nasdaqtraders for ~5k tickers, Then use yfinance to get the last price, volume and compute vol*price, sharpe(50) & sharpe(200). Code: import requests import yfinance as yf import pandas as pd import numpy as np import time from io import StringIO def get_active_us_tickers(): print("Fetching active US tickers from NasdaqTrader...") valid_suffixes = ("Common Stock", "Common Shares", "Ordinary Shares", "Depositary Shares") # NASDAQ-listed nasdaq_url = "https://www.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt" nasdaq = pd.read_csv(StringIO(requests.get(nasdaq_url).text), sep="|")[:-1] nasdaq["Security Name"] = nasdaq["Security Name"].str.strip() nasdaq_filtered = nasdaq[nasdaq["Security Name"].str.endswith(valid_suffixes, na=False)] nasdaq_tickers = nasdaq_filtered["Symbol"].tolist() # NYSE/AMEX-listed other_url = "https://www.nasdaqtrader.com/dynamic/SymDir/otherlisted.txt" other = pd.read_csv(StringIO(requests.get(other_url).text), sep="|")[:-1] other["Security Name"] = other["Security Name"].str.strip() other_filtered = other[other["Security Name"].str.endswith(valid_suffixes, na=False)] other_tickers = other_filtered["ACT Symbol"].tolist() all_tickers = nasdaq_tickers + other_tickers print(f"✅ Retrieved {len(all_tickers)} active tickers matching suffix criteria.\n") return all_tickers def calculate_sharpe(returns): if len(returns) == 0: return None mean = returns.mean() std = returns.std() if std == 0 or np.isnan(std): return None return mean / std def fetch_ticker_data(ticker): try: dat = yf.Ticker(ticker) df = dat.history(period="200d", auto_adjust=True) if df.empty or 'Close' not in df.columns or 'Volume' not in df.columns: return None df['daily_return'] = np.log(df['Close'] / df['Close'].shift(1)) df.dropna(inplace=True) # Sharpe Ratios sharpe_50 = calculate_sharpe(df['daily_return'].tail(50)) sharpe_200 = calculate_sharpe(df['daily_return'].tail(200)) latest = df.iloc[-1] price = 
latest['Close'] volume = latest['Volume'] size = len(df) return { 'Ticker': ticker, 'Size': size, 'Price': price, 'Volume': volume, 'Volume*Price': volume * price, 'Sharpe(50)': sharpe_50, 'Sharpe(200)': sharpe_200 } except Exception as e: print(f"⚠️ Error with {ticker}: {e}") return None def main(): tickers = get_active_us_tickers() results = [] for i, ticker in enumerate(tickers): print(f"[{i+1}/{len(tickers)}] Processing {ticker}...") data = fetch_ticker_data(ticker) if data: results.append(data) time.sleep(0.1) # Be kind to the API df = pd.DataFrame(results) df.sort_values(by='Sharpe(50)', ascending=False, inplace=True) print("\n Top 10 by Sharpe(50):") print(df.head(10)) df.to_csv("nasdaqtrader_sharpes.csv", index=False) if __name__ == "__main__": main() Takes about 20min to get the linked CSV. It's free. Just need python + yfinance, numpy & pandas. We can filter and sort the CSV using a spreadsheet.
I was not complaining — following the Demon Lord’s principle of selling into the crowd’s fury works. EBAY, and your TDUP idea — TDUP worked great!