news-intelligence-agent
4
总安装量
4
周安装量
#53065
全站排名
安装命令
npx skills add https://github.com/psh355q-ui/szdi57465yt --skill news-intelligence-agent
Agent 安装分布
claude-code
3
windsurf
2
trae
2
opencode
2
codex
2
antigravity
2
Skill 文档
News Intelligence Agent – 뉴스 인텔리전스
Role
/news 페이지에서 배치로 여러 뉴스를 동시 분석하여 시장 전체 흐름을 파악합니다.
Core Capabilities
1. Batch News Processing
async def analyze_batch(
    news_articles: List[NewsArticle],
    batch_size: int = 50
) -> Dict:
    """Analyze many news articles concurrently, batch by batch.

    The input is walked in fixed-size windows of ``batch_size`` so each
    window's per-article calls run in parallel while the overall request
    rate stays bounded (avoids API rate limits).
    """
    all_results = []
    for start in range(0, len(news_articles), batch_size):
        window = news_articles[start:start + batch_size]
        # Every article in the window is analyzed concurrently.
        tasks = [analyze_single_article(item) for item in window]
        window_results = await asyncio.gather(*tasks)
        all_results.extend(window_results)
    return aggregate_batch_results(all_results)
2. Sentiment Analysis
Sentiment Scoring
def calculate_sentiment(text: str) -> float:
    """Score the sentiment of *text* on a -1.0 (negative) .. +1.0 (positive) scale.

    Simple keyword-counting heuristic: the score is the normalized
    difference between positive- and negative-keyword occurrences.
    Returns 0.0 when no keyword from either list appears.

    NOTE: ``str.count`` matches substrings, so e.g. "missed" also counts
    as "miss" — acceptable for this heuristic, but worth knowing.
    """
    # Positive keywords
    positive = ["surge", "beat", "record", "growth", "bullish", "upgrade"]
    # Negative keywords
    negative = ["plunge", "miss", "loss", "decline", "bearish", "downgrade"]

    # Lowercase once, not once per keyword as the original did.
    lowered = text.lower()
    pos_count = sum(lowered.count(word) for word in positive)
    neg_count = sum(lowered.count(word) for word in negative)

    # Normalize by total keyword hits.
    total = pos_count + neg_count
    if total == 0:
        return 0.0  # no signal either way
    sentiment = (pos_count - neg_count) / total
    # Already within [-1, 1] mathematically; clamp defensively.
    return max(-1.0, min(1.0, sentiment))
Sentiment Categories
# Sentiment bands used to label a score from calculate_sentiment(),
# as (low, high) bounds on the -1.0..+1.0 scale.
# NOTE(review): boundary values (e.g. exactly 0.3) appear in two adjacent
# bands; inclusivity is decided by the lookup code, not visible here — confirm.
SENTIMENT_LEVELS = {
"VERY_POSITIVE": (0.6, 1.0),
"POSITIVE": (0.3, 0.6),
"NEUTRAL": (-0.3, 0.3),
"NEGATIVE": (-0.6, -0.3),
"VERY_NEGATIVE": (-1.0, -0.6)
}
3. Keyword Extraction
from sklearn.feature_extraction.text import TfidfVectorizer
def extract_keywords(texts: List[str], top_n: int = 10) -> List[str]:
    """Extract the ``top_n`` most important keywords from *texts* via TF-IDF.

    Considers unigrams and bigrams, ignoring English stop words; keywords
    are ranked by TF-IDF score summed over all documents.

    Returns an empty list for an empty corpus (TfidfVectorizer would
    otherwise raise ValueError on empty input).
    """
    if not texts:
        return []
    vectorizer = TfidfVectorizer(
        max_features=top_n,
        stop_words='english',
        ngram_range=(1, 2)  # Unigrams and bigrams
    )
    tfidf_matrix = vectorizer.fit_transform(texts)
    feature_names = vectorizer.get_feature_names_out()
    # Aggregate each term's score across the corpus; .A1 flattens the
    # sparse row-sum matrix to a 1-D numpy array.
    scores = tfidf_matrix.sum(axis=0).A1
    top_indices = scores.argsort()[-top_n:][::-1]
    return [feature_names[i] for i in top_indices]
4. Theme Detection
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
def detect_themes(articles: List[str], n_themes: int = 5) -> List[Dict]:
    """Cluster *articles* into up to ``n_themes`` keyword-labelled themes.

    Each theme dict carries the cluster's five highest-weighted TF-IDF
    terms, its article count, and a generated display name. Themes are
    returned largest-first.
    """
    if not articles:
        return []
    # Vectorize
    vectorizer = TfidfVectorizer(max_features=100, stop_words='english')
    tfidf_matrix = vectorizer.fit_transform(articles)
    # Cluster. KMeans requires n_clusters <= number of samples, so clamp
    # for small corpora instead of raising.
    n_clusters = min(n_themes, len(articles))
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    kmeans.fit(tfidf_matrix)
    # Extract theme keywords
    themes = []
    feature_names = vectorizer.get_feature_names_out()
    for i, cluster_center in enumerate(kmeans.cluster_centers_):
        # Five highest-weighted terms describe the cluster.
        top_indices = cluster_center.argsort()[-5:][::-1]
        theme_keywords = [feature_names[idx] for idx in top_indices]
        # Cast numpy int to plain int so the result stays JSON-serializable.
        article_count = int((kmeans.labels_ == i).sum())
        themes.append({
            "theme_id": i,
            "keywords": theme_keywords,
            "article_count": article_count,
            "theme_name": generate_theme_name(theme_keywords)
        })
    return sorted(themes, key=lambda x: x['article_count'], reverse=True)
5. Ticker Buzz Score
def calculate_ticker_buzz(
    ticker: str,
    news_articles: List[NewsArticle],
    timeframe_hours: int = 24
) -> Dict:
    """Calculate how much a ticker is being discussed.

    Mentions inside the timeframe are weighted by exponential recency
    decay, then normalized onto a 0-100 buzz score.
    """
    # Articles whose ticker field or (uppercased) body mentions the ticker.
    ticker_articles = [
        a for a in news_articles
        if ticker in (a.ticker or '') or ticker in (a.content or '').upper()
    ]

    # Recency weight (more recent = higher weight).
    # NOTE(review): uses naive local time — confirm created_at is naive too.
    now = datetime.now()
    weighted_mentions = 0.0
    for article in ticker_articles:
        hours_ago = (now - article.created_at).total_seconds() / 3600
        # BUGFIX: original line `if hours_ago <= 0 timeframe_hours:` was a
        # syntax error; intent: count only articles inside the window.
        if 0 <= hours_ago <= timeframe_hours:
            # Exponential decay with half the window as the time constant.
            weight = math.exp(-hours_ago / (timeframe_hours / 2))
            weighted_mentions += weight

    # Normalize to 0-100 scale (10 fresh mentions saturate the score).
    buzz_score = min(100, weighted_mentions * 10)

    # Sentiment breakdown. `is not None` keeps legitimate 0.0 scores that
    # the original truthiness test silently dropped.
    sentiments = [
        a.sentiment_score for a in ticker_articles
        if a.sentiment_score is not None
    ]
    avg_sentiment = sum(sentiments) / len(sentiments) if sentiments else 0

    return {
        "ticker": ticker,
        "buzz_score": buzz_score,
        "mention_count": len(ticker_articles),
        "avg_sentiment": avg_sentiment,
        "timeframe_hours": timeframe_hours,
        "trending": "UP" if buzz_score > 50 else "NORMAL"
    }
Decision Framework
Step 1: Fetch News Articles
articles = db.query(NewsArticle).filter(
NewsArticle.created_at >= datetime.now() - timedelta(hours=24)
).all()
Step 2: Batch Sentiment Analysis
FOR each article in articles:
sentiment = calculate_sentiment(article.content)
article.sentiment_score = sentiment
article.sentiment_label = categorize_sentiment(sentiment)
Step 3: Extract Keywords
all_text = [a.content for a in articles]
keywords = extract_keywords(all_text, top_n=20)
Step 4: Detect Themes
themes = detect_themes([a.headline + ' ' + a.content for a in articles])
Step 5: Calculate Ticker Buzz
unique_tickers = set(a.ticker for a in articles if a.ticker)
buzz_scores = {}
FOR ticker in unique_tickers:
buzz_scores[ticker] = calculate_ticker_buzz(ticker, articles)
Step 6: Aggregate Results
return {
"total_articles": len(articles),
"sentiment_distribution": count_by_sentiment(articles),
"top_keywords": keywords,
"trending_themes": themes,
"ticker_buzz": buzz_scores,
"timestamp": datetime.now()
}
Output Format
{
"analysis_timestamp": "2025-12-21T13:00:00Z",
"timeframe": "last_24_hours",
"total_articles_analyzed": 237,
"sentiment_distribution": {
"VERY_POSITIVE": 45,
"POSITIVE": 89,
"NEUTRAL": 67,
"NEGATIVE": 28,
"VERY_NEGATIVE": 8
},
"market_sentiment_summary": {
"overall_score": 0.32,
"overall_label": "POSITIVE",
"confidence": 0.85,
"interpretation": "ìì¥ ì ë°ì ì¼ë¡ ê¸ì ì ë´ì¤ ì°ì¸"
},
"top_keywords": [
{
"keyword": "ai growth",
"frequency": 67,
"importance_score": 0.92
},
{
"keyword": "earnings beat",
"frequency": 54,
"importance_score": 0.88
},
{
"keyword": "fed rate",
"frequency": 48,
"importance_score": 0.85
},
{
"keyword": "semiconductor",
"frequency": 42,
"importance_score": 0.80
},
{
"keyword": "tech rally",
"frequency": 38,
"importance_score": 0.75
}
],
"trending_themes": [
{
"theme_id": 0,
"theme_name": "AI ë¶",
"keywords": ["ai", "chip", "nvidia", "demand", "growth"],
"article_count": 78,
"avg_sentiment": 0.68,
"interpretation": "AI ê´ë ¨ ê¸ì ì ë´ì¤ 주ë"
},
{
"theme_id": 1,
"theme_name": "Fed ê¸ë¦¬ ë
¼ì",
"keywords": ["fed", "rate", "inflation", "policy", "powell"],
"article_count": 56,
"avg_sentiment": 0.12,
"interpretation": "ê¸ë¦¬ ê´ë ¨ ì¤ë¦½ì ë
¼ì"
},
{
"theme_id": 2,
"theme_name": "ì¤ì ìì¦",
"keywords": ["earnings", "beat", "guidance", "revenue", "profit"],
"article_count": 43,
"avg_sentiment": 0.45,
"interpretation": "ì¤ì í¸ì¡° ë´ì¤ ë¤ì"
}
],
"ticker_buzz_rankings": [
{
"rank": 1,
"ticker": "NVDA",
"buzz_score": 92,
"mention_count": 45,
"avg_sentiment": 0.75,
"trending": "UP",
"summary": "AI ìì ê¸ì¦ ê´ë ¨ ìëì ì¸ê¸"
},
{
"rank": 2,
"ticker": "AAPL",
"buzz_score": 78,
"mention_count": 38,
"avg_sentiment": 0.58,
"trending": "UP",
"summary": "iPhone í매 í¸ì¡° ë´ì¤"
},
{
"rank": 3,
"ticker": "TSLA",
"buzz_score": 65,
"mention_count": 32,
"avg_sentiment": -0.25,
"trending": "UP",
"summary": "ê°ê²© ì¸í ê´ë ¨ ì°ë ¤ ìì¸ ë
¼ì"
}
],
"sector_sentiment": {
"Technology": {
"article_count": 128,
"avg_sentiment": 0.52,
"label": "POSITIVE",
"top_tickers": ["NVDA", "AAPL", "MSFT"]
},
"Finance": {
"article_count": 45,
"avg_sentiment": 0.18,
"label": "NEUTRAL",
"top_tickers": ["JPM", "BAC", "GS"]
},
"Healthcare": {
"article_count": 34,
"avg_sentiment": 0.35,
"label": "POSITIVE",
"top_tickers": ["JNJ", "PFE", "MRNA"]
}
},
"alerts": [
{
"type": "HIGH_BUZZ",
"ticker": "NVDA",
"message": "NVDA buzz score 92 (ë§¤ì° ëì)",
"severity": "INFO"
},
{
"type": "SENTIMENT_SPIKE",
"theme": "AI ë¶",
"message": "AI ê´ë ¨ ë´ì¤ sentiment +0.68 (ë§¤ì° ê¸ì )",
"severity": "INFO"
}
]
}
Examples
Example 1: Tech Rally Day
Input: 237 articles (last 24h)
Output:
- Overall Sentiment: +0.45 (POSITIVE)
- Top Theme: "AI Growth" (78 articles)
- Top Buzz: NVDA (92), AAPL (78), MSFT (65)
- Keywords: "ai growth", "earnings beat", "chip demand"
Example 2: Market Correction Day
Input: 189 articles
Output:
- Overall Sentiment: -0.38 (NEGATIVE)
- Top Theme: "Fed Rate Hike Fears" (92 articles)
- Top Buzz: SPY (88), VIX (76), TLT (54)
- Keywords: "rate hike", "inflation", "recession fears"
Guidelines
Do’s ✅
- 배치 처리: 효율성 극대화
- Ticker Buzz 추적: 시장 주목도 파악
- Theme Detection: 숨겨진 패턴 발견
- Sector Breakdown: 섹터별 sentiment
Don’ts ❌
- 단일 기사만 분석 금지 (Quick/Deep Reasoning 역할)
- Theme 너무 세분화 금지 (5개 이내)
- Buzz score 과신 금지 (quality over quantity)
- Historical context 무시 금지
Integration
Batch Processing Endpoint
@router.post("/api/news/batch-analyze")
async def batch_analyze_news(
timeframe_hours: int = 24,
db: Session = Depends(get_db)
):
"""Batch analyze recent news"""
# Fetch articles
cutoff = datetime.now() - timedelta(hours=timeframe_hours)
articles = db.query(NewsArticle).filter(
NewsArticle.created_at >= cutoff
).all()
# Run News Intelligence Agent
agent = NewsIntelligenceAgent()
result = await agent.execute({
'articles': articles,
'timeframe_hours': timeframe_hours
})
return result
Real-Time Updates (WebSocket)
from fastapi import WebSocket, WebSocketDisconnect

@router.websocket("/ws/news-intel")
async def news_intel_websocket(websocket: WebSocket):
    """Stream news intelligence updates to the client every 5 minutes.

    NOTE(review): ``batch_analyze_news`` is called directly here, so its
    ``db: Session = Depends(get_db)`` default is NOT resolved by FastAPI
    dependency injection — confirm how the DB session is supplied.
    """
    await websocket.accept()
    try:
        while True:
            # Re-run the 1-hour analysis and push the result.
            result = await batch_analyze_news(timeframe_hours=1)
            await websocket.send_json(result)
            await asyncio.sleep(300)  # 5 minutes
    except WebSocketDisconnect:
        # Client went away; end the stream quietly instead of raising
        # out of the endless loop.
        pass
Performance Metrics
- Batch Processing Speed: 목표 < 10초 for 100 articles
- Sentiment Accuracy: > 80%
- Theme Detection Quality: > 75% (사람 판단과 일치)
- Ticker Buzz Precision: > 85%
Comparison
| Agent | Scope | Speed | Use Case |
|---|---|---|---|
| News Intelligence | 배치 (100+ articles) | 10초 | 시장 전체 흐름 |
| Quick Analyzer | 단일 ticker | 5초 | 개별 종목 확인 |
| Deep Reasoning | 단일 news | 30초 | 중요한 뉴스 심층 분석 |
Version History
- v1.0 (2025-12-21): Initial release with batch processing and theme detection