diff --git a/scripts/gettweets-copy.mjs b/scripts/gettweets-copy.mjs
deleted file mode 100644
index 3c6dd9a8e1..0000000000
--- a/scripts/gettweets-copy.mjs
+++ /dev/null
@@ -1,76 +0,0 @@
-import { Scraper } from "agent-twitter-client";
-import dotenv from "dotenv";
-import fs from "fs";
-
-dotenv.config();
-
-const TWEETS_FILE = "tweets.json";
-
-(async () => {
-    try {
-        // Create a new instance of the Scraper
-        const scraper = new Scraper();
-
-        // Log in to Twitter using the configured environment variables
-        await scraper.login(
-            process.env.TWITTER_USERNAME,
-            process.env.TWITTER_PASSWORD
-        );
-
-        // Check if login was successful
-        if (await scraper.isLoggedIn()) {
-            console.log("Logged in successfully!");
-
-            // Fetch all tweets for the user "@realdonaldtrump"
-            const tweets = scraper.getTweets("pmarca", 2000);
-
-            // Initialize an empty array to store the fetched tweets
-            let fetchedTweets = [];
-
-            // Load existing tweets from the JSON file if it exists
-            if (fs.existsSync(TWEETS_FILE)) {
-                const fileContent = fs.readFileSync(TWEETS_FILE, "utf-8");
-                fetchedTweets = JSON.parse(fileContent);
-            }
-
-            // skip first 200
-
-            let count = 0;
-
-            // Fetch and process tweets
-            for await (const tweet of tweets) {
-                if (count < 1000) {
-                    count++;
-                    continue;
-                }
-
-                console.log("--------------------");
-                console.log("Tweet ID:", tweet.id);
-                console.log("Text:", tweet.text);
-                console.log("Created At:", tweet.createdAt);
-                console.log("Retweets:", tweet.retweetCount);
-                console.log("Likes:", tweet.likeCount);
-                console.log("--------------------");
-
-                // Add the new tweet to the fetched tweets array
-                fetchedTweets.push(tweet);
-
-                // Save the updated fetched tweets to the JSON file
-                fs.writeFileSync(
-                    TWEETS_FILE,
-                    JSON.stringify(fetchedTweets, null, 2)
-                );
-            }
-
-            console.log("All tweets fetched and saved to", TWEETS_FILE);
-
-            // Log out from Twitter
-            await scraper.logout();
-            console.log("Logged out successfully!");
-        } else {
-            console.log("Login failed. Please check your credentials.");
-        }
-    } catch (error) {
-        console.error("An error occurred:", error);
-    }
-})();
diff --git a/scripts/gettweets.mjs b/scripts/gettweets.mjs
index 5a8cc3bcd7..3c6dd9a8e1 100644
--- a/scripts/gettweets.mjs
+++ b/scripts/gettweets.mjs
@@ -1,37 +1,28 @@
 import { Scraper } from "agent-twitter-client";
+import dotenv from "dotenv";
 import fs from "fs";
-import path from "path";
-import { fileURLToPath } from 'url';
 
-// Get the directory name properly in ES modules
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = path.dirname(__filename);
+dotenv.config();
 
-const TWEETS_FILE = path.join(__dirname, "tweets.json");
-
-// Direct credentials
-const credentials = {
-    username: "evepredict",
-    password: "Roving4-Avoid0-Revival6-Snide3",
-    email: "ilessio.aimaster@gmail.com"
-};
+const TWEETS_FILE = "tweets.json";
 
 (async () => {
     try {
-        console.log(`Tweets will be saved to: ${TWEETS_FILE}`);
-
         // Create a new instance of the Scraper
         const scraper = new Scraper();
 
-        // Log in to Twitter using the configured credentials
-        await scraper.login(credentials.username, credentials.password);
+        // Log in to Twitter using the configured environment variables
+        await scraper.login(
+            process.env.TWITTER_USERNAME,
+            process.env.TWITTER_PASSWORD
+        );
 
         // Check if login was successful
         if (await scraper.isLoggedIn()) {
             console.log("Logged in successfully!");
 
-            // Fetch all tweets for the user "@aixbt_agent"
-            const tweets = scraper.getTweets("aixbt_agent", 2000);
+            // Fetch all tweets for the user "@realdonaldtrump"
+            const tweets = scraper.getTweets("pmarca", 2000);
 
             // Initialize an empty array to store the fetched tweets
             let fetchedTweets = [];
@@ -40,10 +31,10 @@ const credentials = {
             if (fs.existsSync(TWEETS_FILE)) {
                 const fileContent = fs.readFileSync(TWEETS_FILE, "utf-8");
                 fetchedTweets = JSON.parse(fileContent);
-                console.log(`Loaded ${fetchedTweets.length} existing tweets`);
             }
 
             // skip first 200
+
             let count = 0;
 
             // Fetch and process tweets
@@ -64,18 +55,11 @@ const credentials = {
                 // Add the new tweet to the fetched tweets array
                 fetchedTweets.push(tweet);
 
-                try {
-                    // Save the updated fetched tweets to the JSON file
-                    fs.writeFileSync(
-                        TWEETS_FILE,
-                        JSON.stringify(fetchedTweets, null, 2)
-                    );
-                    if (count % 50 === 0) {
-                        console.log(`Saved ${fetchedTweets.length} tweets to ${TWEETS_FILE}`);
-                    }
-                } catch (err) {
-                    console.error("Error saving file:", err);
-                }
+                // Save the updated fetched tweets to the JSON file
+                fs.writeFileSync(
+                    TWEETS_FILE,
+                    JSON.stringify(fetchedTweets, null, 2)
+                );
             }
 
             console.log("All tweets fetched and saved to", TWEETS_FILE);
diff --git a/scripts/tweetextractor.mjs b/scripts/tweetextractor.mjs
deleted file mode 100644
index 753da4c8bf..0000000000
--- a/scripts/tweetextractor.mjs
+++ /dev/null
@@ -1,131 +0,0 @@
-import { Scraper } from "agent-twitter-client";
-import fs from "fs";
-import path from "path";
-import { fileURLToPath } from 'url';
-
-// Get the directory name properly in ES modules
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = path.dirname(__filename);
-
-const OUTPUT_FILE = path.join(__dirname, "tweet_scraped.json");
-const TARGET_USERNAME = "aixbt_agent";
-const MAX_TWEETS = 3000;
-
-// Direct credentials
-const credentials = {
-    username: "evepredict",
-    password: "Roving4-Avoid0-Revival6-Snide3",
-    email: "ilessio.aimaster@gmail.com"
-};
-
-async function sleep(ms) {
-    return new Promise(resolve => setTimeout(resolve, ms));
-}
-
-async function scrapeTweets() {
-    try {
-        console.log(`Starting tweet extraction for @${TARGET_USERNAME}`);
-        console.log(`Tweets will be saved to: ${OUTPUT_FILE}`);
-
-        // Create a new instance of the Scraper
-        const scraper = new Scraper();
-
-        // Login to Twitter
-        console.log("Attempting to login...");
-        await scraper.login(credentials.username, credentials.password);
-
-        if (!(await scraper.isLoggedIn())) {
-            throw new Error("Login failed. Please check your credentials.");
-        }
-        console.log("Successfully logged in to Twitter");
-
-        // Initialize tweet storage
-        let allTweets = [];
-        if (fs.existsSync(OUTPUT_FILE)) {
-            const existingContent = fs.readFileSync(OUTPUT_FILE, "utf-8");
-            allTweets = JSON.parse(existingContent);
-            console.log(`Loaded ${allTweets.length} existing tweets`);
-        }
-
-        // Get tweets iterator
-        const tweets = scraper.getTweets(TARGET_USERNAME, MAX_TWEETS);
-        let count = 0;
-
-        // Fetch and process tweets
-        for await (const tweet of tweets) {
-            count++;
-
-            // Process tweet
-            const processedTweet = {
-                id: tweet.id,
-                text: tweet.text,
-                createdAt: tweet.createdAt,
-                metrics: {
-                    retweets: tweet.retweetCount,
-                    likes: tweet.likeCount,
-                    replies: tweet.replyCount,
-                    quotes: tweet.quoteCount
-                },
-                isRetweet: tweet.isRetweet,
-                isReply: tweet.isReply,
-                hasMedia: tweet.hasMedia
-            };
-
-            // Skip retweets and replies for cleaner content
-            if (!processedTweet.isRetweet && !processedTweet.isReply) {
-                allTweets.push(processedTweet);
-
-                // Log progress
-                console.log(`\n--- Tweet ${count} ---`);
-                console.log(`Text: ${processedTweet.text.substring(0, 100)}...`);
-                console.log(`Engagement: ${processedTweet.metrics.likes} likes, ${processedTweet.metrics.retweets} RTs`);
-
-                // Save periodically to avoid losing progress
-                if (count % 50 === 0) {
-                    try {
-                        fs.writeFileSync(OUTPUT_FILE, JSON.stringify(allTweets, null, 2));
-                        console.log(`\nSaved ${allTweets.length} tweets to ${OUTPUT_FILE}`);
-                    } catch (err) {
-                        console.error("Error saving file:", err);
-                    }
-
-                    // Add a small delay to avoid rate limiting
-                    await sleep(1000);
-                }
-            }
-
-            if (count >= MAX_TWEETS) {
-                break;
-            }
-        }
-
-        // Final save
-        try {
-            fs.writeFileSync(OUTPUT_FILE, JSON.stringify(allTweets, null, 2));
-            console.log(`\nCompleted! Total tweets saved: ${allTweets.length}`);
-        } catch (err) {
-            console.error("Error saving final file:", err);
-        }
-
-        // Create a cleaned version with just tweet texts
-        const cleanedTweets = allTweets.map(tweet => tweet.text);
-        const cleanFile = path.join(__dirname, 'tweet_scraped_clean.json');
-        try {
-            fs.writeFileSync(cleanFile, JSON.stringify(cleanedTweets, null, 2));
-            console.log("Created cleaned version in tweet_scraped_clean.json");
-        } catch (err) {
-            console.error("Error saving cleaned file:", err);
-        }
-
-        // Logout
-        await scraper.logout();
-        console.log("Successfully logged out from Twitter");
-
-    } catch (error) {
-        console.error("An error occurred:", error);
-        process.exit(1);
-    }
-}
-
-// Run the scraper
-scrapeTweets();
\ No newline at end of file
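After this change, scripts/gettweets.mjs reads its login through dotenv rather than hardcoded values. A minimal .env sketch, assuming the file lives where the script is run from — the variable names TWITTER_USERNAME and TWITTER_PASSWORD come straight from the diff; the values below are placeholders:

# .env — keep this file out of version control (e.g. list it in .gitignore)
TWITTER_USERNAME=your_twitter_handle
TWITTER_PASSWORD=your_twitter_password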