Scraper Tutorials
Extract data from X/Twitter without paying for API access
1. Scrape Followers List
What This Does
Extracts a list of all followers for any public account. Exports usernames, names, and bios to JSON or CSV format.
Quick Start
- Go to x.com/USERNAME/followers
- Open DevTools (F12) → Console
- Paste the code below and press Enter
// Scrape Followers List - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  // Pause helper used to pace scrolling while the page lazy-loads rows.
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));
  // Username comes from the current URL: x.com/<user>/followers
  const targetUser = window.location.pathname.split('/')[1];
  // Keyed by username so re-scanning the same DOM never adds duplicates.
  const followers = new Map();

  // Scroll the list, harvesting visible user cells, until the follower
  // count stops growing for `maxRetries` consecutive passes (end of list).
  const scrape = async () => {
    console.log(`๐ Scraping followers for @${targetUser}...`);
    let retries = 0;
    const maxRetries = 10;
    while (retries < maxRetries) {
      const prevSize = followers.size;
      // Get all user cells currently rendered in the (virtualized) list.
      document.querySelectorAll('[data-testid="UserCell"]').forEach(cell => {
        const link = cell.querySelector('a[href^="/"]');
        const nameEl = cell.querySelector('[dir="ltr"] > span');
        const bioEl = cell.querySelector('[data-testid="UserDescription"]');
        const avatarEl = cell.querySelector('img[src*="profile_images"]');
        const verifiedEl = cell.querySelector('svg[aria-label*="Verified"]');
        if (link) {
          const username = link.getAttribute('href').replace('/', '').split('/')[0];
          if (username && !followers.has(username)) {
            followers.set(username, {
              username,
              name: nameEl?.textContent || '',
              bio: bioEl?.textContent || '',
              avatar: avatarEl?.src || '',
              verified: !!verifiedEl
            });
          }
        }
      });
      console.log(` Found ${followers.size} followers...`);
      if (followers.size === prevSize) {
        retries++;
      } else {
        retries = 0;
      }
      window.scrollTo(0, document.body.scrollHeight);
      await sleep(1500);
    }
    return Array.from(followers.values());
  };

  // Serialize `data` as pretty-printed JSON and trigger a browser download.
  const downloadJSON = (data, filename) => {
    const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    a.click();
    URL.revokeObjectURL(url); // fix: release the blob URL to avoid leaking memory
  };

  // Emit an RFC 4180-style CSV (embedded quotes doubled) and download it.
  const downloadCSV = (data, filename) => {
    const headers = ['username', 'name', 'bio', 'verified'];
    const csv = [
      headers.join(','),
      ...data.map(row => headers.map(h => `"${(row[h] || '').toString().replace(/"/g, '""')}"`).join(','))
    ].join('\n');
    const blob = new Blob([csv], { type: 'text/csv' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    a.click();
    URL.revokeObjectURL(url); // fix: release the blob URL
  };

  const run = async () => {
    // fix: fail fast when not on a followers page (the following-list
    // scraper already does this; this script silently scraped nothing).
    if (!window.location.pathname.includes('/followers')) {
      console.error('❌ Go to x.com/USERNAME/followers first!');
      return;
    }
    const data = await scrape();
    console.log(`\nโ
DONE! Scraped ${data.length} followers\n`);
    // Store in window for access
    window.scrapedFollowers = data;
    console.log('๐ Data available at: window.scrapedFollowers');
    // fix: one shared timestamp so the JSON and CSV filenames match
    // (Date.now() was previously called twice).
    const stamp = Date.now();
    downloadJSON(data, `${targetUser}-followers-${stamp}.json`);
    downloadCSV(data, `${targetUser}-followers-${stamp}.csv`);
    console.log('๐ฅ Downloaded JSON and CSV files!');
  };
  run();
})();
2. Scrape Following List
What This Does
Extracts who a user is following. Also detects which accounts follow them back (mutual follows).
// Scrape Following List - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  // Pause helper used to pace scrolling while the page lazy-loads rows.
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));
  // Username comes from the current URL: x.com/<user>/following
  const targetUser = window.location.pathname.split('/')[1];
  // Keyed by username so re-scanning the same DOM never adds duplicates.
  const following = new Map();

  // Scroll the list, harvesting visible user cells, until ten consecutive
  // passes add nothing new (end of list).
  const scrape = async () => {
    console.log(`๐ Scraping following for @${targetUser}...`);
    let retries = 0;
    while (retries < 10) {
      const prevSize = following.size;
      document.querySelectorAll('[data-testid="UserCell"]').forEach(cell => {
        const link = cell.querySelector('a[href^="/"]');
        const nameEl = cell.querySelector('[dir="ltr"] > span');
        const bioEl = cell.querySelector('[data-testid="UserDescription"]');
        // The "follows you" badge marks a mutual follow.
        const followsBack = cell.querySelector('[data-testid="userFollowIndicator"]');
        if (link) {
          const username = link.getAttribute('href').replace('/', '').split('/')[0];
          if (username && !following.has(username)) {
            following.set(username, {
              username,
              name: nameEl?.textContent || '',
              bio: bioEl?.textContent || '',
              followsBack: !!followsBack
            });
          }
        }
      });
      console.log(` Found ${following.size} accounts...`);
      if (following.size === prevSize) retries++;
      else retries = 0;
      window.scrollTo(0, document.body.scrollHeight);
      await sleep(1500);
    }
    return Array.from(following.values());
  };

  const run = async () => {
    // Guard: this script only makes sense on a /following page.
    if (!window.location.pathname.includes('/following')) {
      console.error('โ Go to x.com/USERNAME/following first!');
      return;
    }
    const data = await scrape();
    const mutuals = data.filter(u => u.followsBack);
    const nonFollowers = data.filter(u => !u.followsBack);
    console.log(`\nโ
DONE!`);
    console.log(`๐ Total following: ${data.length}`);
    console.log(`๐ค Mutual follows: ${mutuals.length}`);
    console.log(`๐ป Non-followers: ${nonFollowers.length}`);
    window.scrapedFollowing = data;
    window.nonFollowers = nonFollowers;
    // Download
    const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `${targetUser}-following-${Date.now()}.json`;
    a.click();
    URL.revokeObjectURL(url); // fix: release the blob URL to avoid leaking memory
    console.log('\n๐ฅ Downloaded! Also check window.nonFollowers for accounts that don\'t follow back.');
  };
  run();
})();
3. Scrape Tweet Likes
What This Does
Get a list of everyone who liked a specific tweet. Great for finding engaged users in your niche.
Quick Start
- Go to any tweet: x.com/user/status/123456
- Click the likes count to open the likes list
- Open DevTools and paste the code
// Scrape Tweet Likes - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));
  // Bail out unless we are on a tweet's /likes page.
  if (!window.location.pathname.includes('/likes')) {
    console.error('โ Click on the likes count of a tweet first!');
    console.log('๐ You should be on a URL like: x.com/user/status/123/likes');
    return;
  }
  // username -> { username, name, bio }; Map keys keep the list deduped.
  const likers = new Map();

  // Record every user cell currently rendered into the `likers` map.
  const harvestVisibleCells = () => {
    for (const cell of document.querySelectorAll('[data-testid="UserCell"]')) {
      const link = cell.querySelector('a[href^="/"]');
      if (!link) continue;
      const username = link.getAttribute('href').replace('/', '').split('/')[0];
      if (!username || likers.has(username)) continue;
      likers.set(username, {
        username,
        name: cell.querySelector('[dir="ltr"] > span')?.textContent || '',
        bio: cell.querySelector('[data-testid="UserDescription"]')?.textContent || ''
      });
    }
  };

  // Scroll and harvest until ten consecutive passes add no new users.
  const scrape = async () => {
    console.log('๐ Scraping users who liked this tweet...');
    let stalls = 0;
    while (stalls < 10) {
      const before = likers.size;
      harvestVisibleCells();
      console.log(` Found ${likers.size} likers...`);
      stalls = likers.size === before ? stalls + 1 : 0;
      window.scrollTo(0, document.body.scrollHeight);
      await sleep(1500);
    }
    return [...likers.values()];
  };

  const run = async () => {
    const data = await scrape();
    console.log(`\nโ
DONE! Found ${data.length} users who liked this tweet\n`);
    // Print usernames
    console.log('๐ Usernames:');
    for (const u of data) console.log(` @${u.username}`);
    window.tweetLikers = data;
    // Trigger a JSON download of the collected users.
    const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
    const anchor = document.createElement('a');
    anchor.href = URL.createObjectURL(blob);
    anchor.download = `tweet-likers-${Date.now()}.json`;
    anchor.click();
    console.log('\n๐ฅ Downloaded! Data also at window.tweetLikers');
  };
  run();
})();
4. Scrape Hashtag Results
What This Does
Scrape tweets from hashtag search results. Get tweet text, author, engagement metrics, and URLs.
// Scrape Hashtag/Search Results - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));
  // Keyed by tweet id so repeated DOM passes never store duplicates.
  const tweets = new Map();

  // Pull id, text, author, timestamp, permalink and engagement counts out
  // of one tweet <article>; returns null when the markup doesn't match.
  const extractTweet = (article) => {
    try {
      const textEl = article.querySelector('[data-testid="tweetText"]');
      const timeEl = article.querySelector('time');
      const authorEl = article.querySelector('[data-testid="User-Name"] a');
      const linkEl = article.querySelector('a[href*="/status/"]');
      const likesEl = article.querySelector('[data-testid="like"] span');
      const retweetsEl = article.querySelector('[data-testid="retweet"] span');
      const url = linkEl?.href || '';
      const id = url.match(/status\/(\d+)/)?.[1];
      return {
        id,
        text: textEl?.textContent || '',
        // href is absolute (https://x.com/<user>/...), so segment 3 is the handle.
        author: authorEl?.href?.split('/')[3] || '',
        timestamp: timeEl?.getAttribute('datetime') || '',
        url,
        // Counts are the displayed strings and may be abbreviated (e.g. "1.2K").
        likes: likesEl?.textContent || '0',
        retweets: retweetsEl?.textContent || '0'
      };
    } catch (e) {
      return null;
    }
  };

  // Scroll the results page, harvesting tweets, until `maxTweets` are
  // collected or ten consecutive passes add nothing new.
  const scrape = async (maxTweets = 100) => {
    console.log('๐ Scraping search results...');
    let retries = 0;
    while (retries < 10 && tweets.size < maxTweets) {
      const prevSize = tweets.size;
      document.querySelectorAll('article[data-testid="tweet"]').forEach(article => {
        const tweet = extractTweet(article);
        if (tweet?.id && !tweets.has(tweet.id)) {
          tweets.set(tweet.id, tweet);
        }
      });
      console.log(` Found ${tweets.size} tweets...`);
      if (tweets.size === prevSize) retries++;
      else retries = 0;
      window.scrollTo(0, document.body.scrollHeight);
      await sleep(1500);
    }
    // fix: a single DOM pass can collect past the cap, so the map may hold
    // more than maxTweets entries — trim the returned array to the limit.
    return Array.from(tweets.values()).slice(0, maxTweets);
  };

  const run = async () => {
    const data = await scrape(200);
    console.log(`\nโ
DONE! Scraped ${data.length} tweets\n`);
    window.scrapedTweets = data;
    // Download
    const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `search-results-${Date.now()}.json`;
    a.click();
    URL.revokeObjectURL(url); // release the blob URL
    console.log('๐ฅ Downloaded! Data at window.scrapedTweets');
  };
  run();
})();
First search for your hashtag on X, then run this script on the search results page.
5. Video Downloader
What This Does
Download any video from X/Twitter posts. Finds all available qualities and lets you download the best one.
// X Video Downloader - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  const CONFIG = {
    QUALITY: 'highest',
    AUTO_DOWNLOAD: true,
    SHOW_ALL_QUALITIES: true
  };

  // Tweet id from the /status/<id> part of the URL, or null when absent.
  const getTweetId = () => {
    const match = window.location.href.match(/status\/(\d+)/);
    return match ? match[1] : null;
  };

  // Author handle from the URL.
  // fix: also match the legacy twitter.com domain, not just x.com.
  const getAuthor = () => {
    const match = window.location.href.match(/(?:x|twitter)\.com\/(\w+)/);
    return match ? match[1] : 'unknown';
  };

  // Find video URLs in page data: scan the page HTML for twimg mp4/m3u8
  // URLs, unescape them, and keep the mp4s with their parsed quality.
  const findVideoUrls = () => {
    const videos = [];
    const pageContent = document.documentElement.innerHTML;
    const patterns = [
      /https:\/\/video\.twimg\.com\/[^"'\s]+\.mp4[^"'\s]*/g,
      /https:\/\/video\.twimg\.com\/[^"'\s]+\.m3u8[^"'\s]*/g,
      /https:\/\/[^"'\s]*\/amplify_video[^"'\s]*\.mp4[^"'\s]*/g,
      /https:\/\/[^"'\s]*\/ext_tw_video[^"'\s]*\.mp4[^"'\s]*/g,
    ];
    patterns.forEach(pattern => {
      const matches = pageContent.match(pattern) || [];
      matches.forEach(url => {
        // URLs are often JSON-escaped inside the page source; undo that.
        let cleanUrl = url.replace(/\\u002F/g, '/').replace(/\\/g, '');
        cleanUrl = cleanUrl.split('"')[0].split("'")[0].split(' ')[0];
        if (cleanUrl.includes('.mp4')) {
          const qualityMatch = cleanUrl.match(/\/(\d+x\d+)\//);
          const quality = qualityMatch ? qualityMatch[1] : 'unknown';
          videos.push({ url: cleanUrl, quality, type: 'mp4' });
        }
      });
    });
    // Deduplicate by URL without its query string.
    const unique = [];
    const seen = new Set();
    videos.forEach(v => {
      const key = v.url.split('?')[0];
      if (!seen.has(key)) {
        seen.add(key);
        unique.push(v);
      }
    });
    return unique;
  };

  // Sort by quality (highest first), comparing total pixel counts.
  const sortByQuality = (videos) => {
    return videos.sort((a, b) => {
      const getRes = (q) => {
        const match = q.quality?.match(/(\d+)x(\d+)/);
        return match ? parseInt(match[1]) * parseInt(match[2]) : 0;
      };
      return getRes(b) - getRes(a);
    });
  };

  // Download the video via fetch + <a download>; on any failure (CORS,
  // network error, or non-2xx response) fall back to opening a new tab.
  const downloadVideo = async (url, filename) => {
    console.log('โฌ๏ธ Downloading...');
    try {
      const response = await fetch(url, { mode: 'cors' });
      // fix: treat non-2xx responses as failures so the new-tab fallback
      // below runs instead of silently returning false with no download.
      if (!response.ok) throw new Error(`HTTP ${response.status}`);
      const blob = await response.blob();
      const blobUrl = URL.createObjectURL(blob);
      const a = document.createElement('a');
      a.href = blobUrl;
      a.download = filename;
      document.body.appendChild(a);
      a.click();
      document.body.removeChild(a);
      URL.revokeObjectURL(blobUrl);
      // fix: was a single-quoted string literal broken across two lines,
      // which is a JavaScript syntax error.
      console.log('✅ Downloaded!');
      return true;
    } catch (e) {
      // Fallback: open in new tab
      console.log('๐ Opening in new tab (right-click to save)...');
      window.open(url, '_blank');
      return true;
    }
  };

  // Main
  const run = async () => {
    const tweetId = getTweetId();
    const author = getAuthor();
    if (!tweetId) {
      console.error('โ Please navigate to a tweet with a video!');
      return;
    }
    console.log(`๐ฌ XActions Video Downloader`);
    console.log(`๐ Tweet: ${tweetId} by @${author}\n`);
    const videos = sortByQuality(findVideoUrls());
    if (videos.length === 0) {
      console.error('โ No video found on this page.');
      console.log('๐ก Make sure the video has loaded (play it first).');
      return;
    }
    console.log(`โ
Found ${videos.length} video(s):\n`);
    videos.forEach((v, i) => {
      console.log(`${i + 1}. Quality: ${v.quality}`);
      console.log(` URL: ${v.url.slice(0, 80)}...`);
      console.log('');
    });
    // Auto-download best quality
    if (CONFIG.AUTO_DOWNLOAD && videos.length > 0) {
      const best = videos[0];
      const filename = `${author}-${tweetId}-${best.quality}.mp4`;
      console.log(`\nโฌ๏ธ Downloading best quality (${best.quality})...`);
      await downloadVideo(best.url, filename);
    }
    // Store for manual access
    window.videoUrls = videos;
    console.log('\n๐ All URLs available at: window.videoUrls');
  };
  run();
})();
Make sure to play the video first so it loads in the browser. The script finds video URLs from the page content.
6. Thread Unroller
What This Does
Save any X thread as clean text or markdown. Perfect for saving valuable threads for later reading or sharing.
// Thread Unroller - XActions
// by nichxbt - https://github.com/nirholas/XActions
(() => {
  const CONFIG = {
    FORMAT: 'markdown', // 'text', 'markdown', 'json'
    INCLUDE_MEDIA: true,
    MAX_TWEETS: 50,
    SCROLL_DELAY: 1000,
  };
  const sleep = (ms) => new Promise(r => setTimeout(r, ms));

  // Thread author's handle, taken from the /<user>/status/<id> URL path.
  const getAuthor = () => {
    const match = window.location.pathname.match(/\/(\w+)\/status/);
    return match ? match[1] : null;
  };

  // Extract text, timestamp, permalink, images and author handle from one
  // tweet <article>; returns null when the markup doesn't match.
  const extractTweet = (article) => {
    try {
      const text = article.querySelector('[data-testid="tweetText"]')?.textContent || '';
      const time = article.querySelector('time')?.getAttribute('datetime') || '';
      const timeLink = article.querySelector('time')?.closest('a');
      const url = timeLink?.href || '';
      const images = Array.from(article.querySelectorAll('img[src*="media"]'))
        .map(img => img.src)
        .filter(src => !src.includes('profile'));
      const userName = article.querySelector('[data-testid="User-Name"]')?.textContent || '';
      const handle = userName.match(/@(\w+)/)?.[1] || '';
      return { text, time, url, images, handle };
    } catch (e) {
      return null;
    }
  };

  const run = async () => {
    const author = getAuthor();
    if (!author) {
      console.error('โ Please navigate to a tweet first!');
      return;
    }
    console.log(`๐งต Unrolling thread by @${author}...`);
    const tweets = [];
    let lastCount = 0;
    let attempts = 0;
    // Scroll and collect the author's own tweets until ten passes in a row
    // find nothing new, or MAX_TWEETS is reached.
    while (attempts < 10) {
      const articles = document.querySelectorAll('article[data-testid="tweet"]');
      articles.forEach(article => {
        const tweet = extractTweet(article);
        if (tweet && tweet.handle.toLowerCase() === author.toLowerCase()) {
          const exists = tweets.find(t => t.url === tweet.url);
          if (!exists && tweet.text) {
            tweets.push(tweet);
            console.log(`๐ Tweet ${tweets.length}: "${tweet.text.slice(0, 50)}..."`);
          }
        }
      });
      if (tweets.length === lastCount) attempts++;
      else attempts = 0;
      lastCount = tweets.length;
      if (tweets.length >= CONFIG.MAX_TWEETS) break;
      window.scrollBy(0, 500);
      await sleep(CONFIG.SCROLL_DELAY);
    }
    // fix: bail out before formatting when nothing was captured; previously
    // this produced an "Invalid Date" header and an empty download.
    if (tweets.length === 0) {
      console.error('❌ No tweets captured - make sure the thread has loaded.');
      return;
    }
    // Sort by time (oldest first)
    tweets.sort((a, b) => new Date(a.time) - new Date(b.time));
    // Format output
    let output = '';
    if (CONFIG.FORMAT === 'markdown') {
      output = `# Thread by @${author}\n\n`;
      output += `> ${tweets.length} tweets | ${new Date(tweets[0]?.time).toLocaleDateString()}\n\n`;
      output += `---\n\n`;
      tweets.forEach((t, i) => {
        output += `**${i + 1}/${tweets.length}**\n\n`;
        output += `${t.text}\n\n`;
        if (CONFIG.INCLUDE_MEDIA) {
          t.images.forEach(img => {
            // fix: emit markdown image syntax — the loop previously
            // appended only blank lines, silently dropping all media.
            output += `![](${img})\n\n`;
          });
        }
        output += `---\n\n`;
      });
      output += `\n[Original Thread](${tweets[0]?.url})\n`;
    } else if (CONFIG.FORMAT === 'json') {
      output = JSON.stringify({ author, tweets }, null, 2);
    } else {
      output = `Thread by @${author}\n`;
      output += `${'='.repeat(40)}\n\n`;
      tweets.forEach((t, i) => {
        output += `[${i + 1}/${tweets.length}]\n`;
        output += `${t.text}\n\n`;
      });
    }
    console.log('\n' + '='.repeat(60));
    console.log(`๐งต THREAD UNROLLED: ${tweets.length} tweets`);
    console.log('='.repeat(60) + '\n');
    console.log(output);
    // Copy to clipboard
    try {
      await navigator.clipboard.writeText(output);
      console.log('๐ Thread copied to clipboard!');
    } catch (e) {
      console.log('๐ Copy from window.unrolledThread');
    }
    window.unrolledThread = output;
    window.threadData = { author, tweets };
    // Download.
    // fix: plain-text format now gets a .txt extension instead of .md.
    const ext = CONFIG.FORMAT === 'json' ? 'json' : CONFIG.FORMAT === 'markdown' ? 'md' : 'txt';
    const blob = new Blob([output], { type: 'text/plain' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `thread-${author}-${Date.now()}.${ext}`;
    a.click();
    URL.revokeObjectURL(url); // release the blob URL
    console.log('๐ฅ Thread downloaded!');
  };
  run();
})();
Node.js / CLI Method
For more powerful scraping with Puppeteer:
# Install XActions
npm install xactions
# Scrape followers (requires auth cookie)
npx xactions scrape followers @username --limit 1000 --output followers.json
# Scrape following
npx xactions scrape following @username --limit 500
# Scrape tweets from a user
npx xactions scrape tweets @username --limit 100
# Search tweets
npx xactions search "web3 developer" --limit 50