My code is:
def summarization():
    """Optimized summarization using RunnableSequence in LangChain."""
    logging.info("Received request for summarization")

    # Check if request contains JSON data
    if not request.is_json:
        logging.error("Request does not contain JSON data")
        return jsonify({"error": "Request must be JSON"}), 400

    data = request.json
    logging.debug(f"Request summarization Data: {data}")

    # Retrieve input data
    text = data.get("text", "").strip()
    summarization_type = data.get("summarization_type", "abstractive").strip()
    max_length = int(data.get("max_length", 100))  # Default max length = 100 words
    logging.info(f"Processing text: {text[:50]}... | Max Length: {max_length}")

    if not text:
        logging.error("Text input is missing")
        return jsonify({"error": "Text input is required"}), 400

    try:
        # Define prompt templates
        prompts = {
            "Summarization": PromptTemplate(
                input_variables=["text", "max_length", "summarization_type"],
                template="Summarize the following text into bullet points "
                         "in at most {max_length} words based on {summarization_type}: {text}"
            ),
            "Insight Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text by highlighting the key insights: {text}"
            ),
            "Emotion Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text and detect the overall emotional tone (positive, negative, neutral): {text}"
            ),
            "Bullet Point Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text into clear bullet points: {text}"
            ),
            "Headline Summary": PromptTemplate(
                input_variables=["text"],
                template="Generate 5 engaging headlines based on the following text: {text}"
            ),
            "Fact-Check Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text and perform fact-checking on the claims made: {text}"
            ),
            "Keywords": PromptTemplate(
                input_variables=["text"],
                template="Extract the 5 most important keywords from the following text: {text}"
            )
        }

        # Validate LLM instance
        if llm is None:
            logging.error("LLM instance is not initialized")
            return jsonify({"error": "LLM is not available"}), 500

        # Replace LLMChain with RunnableSequence (using the pipe `|` operator)
        chains = {key: prompts[key] | llm for key in prompts}

        # Execute all chains in parallel
        parallel_chain = RunnableParallel(**chains)
        logging.info("Executing parallel summarization chains")
        results = parallel_chain.invoke({"text": text, "max_length": max_length})

        # Convert AIMessage objects to strings for JSON serialization
        result = []
        for key, value in results.items():
            # Extract AIMessage content or fall back to str()
            content = value.content if hasattr(value, "content") else str(value)
            result.append({key: content})

        logging.info("Summarization completed successfully")
        return jsonify({"results": result})
    except Exception as e:
        logging.error(f"Error in summarization: {str(e)}", exc_info=True)
        return jsonify({"error": str(e)}), 500
Solution:
Fix 1: Always Pass summarization_type When Required
Modify the invoke() call to always include summarization_type:
results = parallel_chain.invoke({
    "text": text,
    "max_length": max_length,
    "summarization_type": summarization_type  # 🔹 Ensure this is passed
})
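This works because RunnableParallel fans the same input dict out to every branch; the corrected call relies on each prompt formatting with just the variables it declares and tolerating the extra keys in the shared dict. If you would rather not depend on that, a defensive variant (a sketch, not part of the original post) hands each branch only the variables its prompt declares:

from operator import itemgetter

# Each branch is prefixed with a dict of itemgetter picks covering
# exactly the prompt's declared variables; LCEL coerces the plain
# dict into a parallel step that runs ahead of the prompt.
chains = {
    key: {var: itemgetter(var) for var in prompt.input_variables} | prompt | llm
    for key, prompt in prompts.items()
}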
The corrected code is:
# Imports this view needs (assuming Flask and langchain-core);
# `llm` is assumed to be initialized elsewhere, e.g. a chat model instance.
import logging

from flask import request, jsonify
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel

def summarization():
    """Optimized summarization using RunnableSequence in LangChain."""
    logging.info("Received request for summarization")

    # Check if request contains JSON data
    if not request.is_json:
        logging.error("Request does not contain JSON data")
        return jsonify({"error": "Request must be JSON"}), 400

    data = request.json
    logging.debug(f"Request summarization Data: {data}")

    # Retrieve input data
    text = data.get("text", "").strip()
    summarization_type = data.get("summarization_type", "abstractive").strip()
    max_length = int(data.get("max_length", 100))  # Default max length = 100 words
    logging.info(f"Processing text: {text[:50]}... | Max Length: {max_length}")

    if not text:
        logging.error("Text input is missing")
        return jsonify({"error": "Text input is required"}), 400

    try:
        # Define prompt templates
        prompts = {
            "Summarization": PromptTemplate(
                input_variables=["text", "max_length", "summarization_type"],
                template="Summarize the following text into bullet points "
                         "in at most {max_length} words based on {summarization_type}: {text}"
            ),
            "Insight Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text by highlighting the key insights: {text}"
            ),
            "Emotion Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text and detect the overall emotional tone (positive, negative, neutral): {text}"
            ),
            "Bullet Point Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text into clear bullet points: {text}"
            ),
            "Headline Summary": PromptTemplate(
                input_variables=["text"],
                template="Generate 5 engaging headlines based on the following text: {text}"
            ),
            "Fact-Check Summary": PromptTemplate(
                input_variables=["text"],
                template="Summarize the following text and perform fact-checking on the claims made: {text}"
            ),
            "Keywords": PromptTemplate(
                input_variables=["text"],
                template="Extract the 5 most important keywords from the following text: {text}"
            )
        }

        # Validate LLM instance
        if llm is None:
            logging.error("LLM instance is not initialized")
            return jsonify({"error": "LLM is not available"}), 500

        # Replace LLMChain with RunnableSequence (using the pipe `|` operator)
        chains = {key: prompts[key] | llm for key in prompts}

        # Execute all chains in parallel
        parallel_chain = RunnableParallel(**chains)
        logging.info("Executing parallel summarization chains")
        results = parallel_chain.invoke({
            "text": text,
            "max_length": max_length,
            "summarization_type": summarization_type  # 🔹 Ensure this is passed
        })

        # Convert AIMessage objects to strings for JSON serialization
        result = []
        for key, value in results.items():
            # Extract AIMessage content or fall back to str()
            content = value.content if hasattr(value, "content") else str(value)
            result.append({key: content})

        logging.info("Summarization completed successfully")
        return jsonify({"results": result})
    except Exception as e:
        logging.error(f"Error in summarization: {str(e)}", exc_info=True)
        return jsonify({"error": str(e)}), 500