Real-Time Web Intelligence Layer for Generative AI
TLD.lat is a lightweight web-scanning layer that gives GPT and Hugging Face transformer-based chatbots near real-time event awareness.
It fetches live web data, summarizes it using pretrained models, and injects contextual intelligence into chatbot prompts.
"""
TLD.lat Real-Time AI Web Scanner Layer
Adds near real-time event awareness to GPT / HuggingFace chatbots
"""
from datetime import datetime, timezone

import requests
from transformers import pipeline
# ---------------------------
# CONFIG
# ---------------------------
# DuckDuckGo Instant Answer API endpoint (JSON results, no API key required).
SEARCH_API = "https://api.duckduckgo.com/"
# Identify this client in outgoing requests; some services throttle blank user agents.
USER_AGENT = {"User-Agent": "TLD-Lat-Scanner/1.0"}
# Choose summarization model
# NOTE: loaded eagerly at import time — downloads/loads the BART weights on first run,
# so importing this module is slow and requires the `transformers` model cache.
summarizer = pipeline(
"summarization",
model="facebook/bart-large-cnn"
)
# ---------------------------
# WEB SEARCH FUNCTION
# ---------------------------
def search_web(query):
params = {
"q": query,
"format": "json"
}
response = requests.get(SEARCH_API, params=params, headers=USER_AGENT)
data = response.json()
results = []
if "RelatedTopics" in data:
for item in data["RelatedTopics"][:5]:
if "Text" in item:
results.append(item["Text"])
return results
# ---------------------------
# SUMMARIZE LIVE DATA
# ---------------------------
def summarize_results(results):
    """Condense raw search snippets into a short summary via the BART pipeline.

    Args:
        results: List of text snippets (as returned by ``search_web``).

    Returns:
        str: A model-generated summary, or a fixed fallback message when
        there is no usable text (empty list or whitespace-only snippets —
        the original truthiness check let whitespace-only input through
        to the model).
    """
    combined = " ".join(results).strip()
    if not combined:
        return "No recent information found."
    # BART has a limited input window; truncate, but cut at a word boundary
    # instead of mid-word so the model doesn't see a broken token.
    if len(combined) > 3000:
        combined = combined[:3000].rsplit(" ", 1)[0]
    summary = summarizer(
        combined,
        max_length=180,
        min_length=60,
        do_sample=False
    )
    return summary[0]["summary_text"]
# ---------------------------
# AUGMENT GPT PROMPT
# ---------------------------
def augment_prompt(user_query):
    """Build a GPT prompt enriched with live web context for *user_query*.

    Fetches search snippets, summarizes them, and embeds the summary plus
    the current UTC timestamp into a prompt template.

    Args:
        user_query: The user's question, inserted verbatim into the prompt.

    Returns:
        str: The augmented prompt text.
    """
    print("Fetching live data...")
    live_results = search_web(user_query)
    print("Summarizing...")
    live_summary = summarize_results(live_results)
    # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
    # datetime; now(timezone.utc) is the aware, supported equivalent and
    # formats identically here.
    timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')
    augmented_prompt = f"""
You are a real-time AI assistant.
Current date: {timestamp}
Live Web Context:
{live_summary}
User Question:
{user_query}
Answer using the live context above when relevant.
"""
    return augmented_prompt
# ---------------------------
# EXAMPLE GPT CALL
# ---------------------------
def send_to_gpt(augmented_prompt, openai_api_key):
    """Send the augmented prompt to OpenAI's chat API and return the reply text.

    Args:
        augmented_prompt: Prompt produced by ``augment_prompt``.
        openai_api_key: OpenAI API key used for this call.

    Returns:
        str: The assistant message content from the first choice.
    """
    # Local import keeps `openai` optional — only needed when the GPT call
    # is actually enabled from __main__.
    # NOTE(review): the legacy `openai.ChatCompletion.create` API used here
    # previously was removed in openai>=1.0; this uses the v1 client API.
    # If the project pins openai<1.0, revert to the legacy call.
    from openai import OpenAI
    client = OpenAI(api_key=openai_api_key)
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are a helpful AI."},
            {"role": "user", "content": augmented_prompt}
        ]
    )
    return response.choices[0].message.content
# ---------------------------
# MAIN EXECUTION
# ---------------------------
if __name__ == "__main__":
    # Demo entry point: build the augmented prompt and show it on stdout.
    user_question = input("Ask something about recent events: ")
    augmented = augment_prompt(user_question)
    print("\n--- Augmented Prompt ---\n")
    print(augmented)
    # Uncomment the lines below to forward the prompt to GPT:
    # api_key = "YOUR_OPENAI_KEY"
    # answer = send_to_gpt(augmented, api_key)
    # print("\n--- GPT Response ---\n")
    # print(answer)