Prompt Engineering
Prompt engineering is the art and science of crafting inputs that elicit desired behaviors from language models. Effective prompts can dramatically improve model performance without any parameter changes.
Basic Prompt Structure
Well-structured prompts provide clear context, instructions, and output format specifications.
class PromptTemplate:
    def __init__(self, template):
        self.template = template

    def format(self, **kwargs):
        return self.template.format(**kwargs)
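A quick usage sketch; the template text and placeholder names here are illustrative:

# Example usage
template = PromptTemplate("Summarize the following text in {n} sentences:\n{text}")
print(template.format(n=2, text="Large language models generate text one token at a time..."))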
class StructuredPrompt:
    def __init__(self):
        self.sections = []

    def add_system(self, content):
        self.sections.append(("system", content))
        return self

    def add_context(self, content):
        self.sections.append(("context", content))
        return self

    def add_instruction(self, content):
        self.sections.append(("instruction", content))
        return self

    def add_examples(self, examples):
        self.sections.append(("examples", examples))
        return self

    def add_output_format(self, format_spec):
        self.sections.append(("format", format_spec))
        return self

    def build(self):
        prompt = ""
        for section_type, content in self.sections:
            if section_type == "system":
                prompt += f"System: {content}\n\n"
            elif section_type == "context":
                prompt += f"Context:\n{content}\n\n"
            elif section_type == "instruction":
                prompt += f"Task: {content}\n\n"
            elif section_type == "examples":
                prompt += "Examples:\n"
                for inp, out in content:
                    prompt += f"Input: {inp}\nOutput: {out}\n\n"
            elif section_type == "format":
                prompt += f"Output format: {content}\n\n"
        return prompt
# Example usage
prompt = (
    StructuredPrompt()
    .add_system("You are a helpful assistant that extracts structured data.")
    .add_instruction("Extract the person's name, age, and occupation from the text.")
    .add_examples([
        ("John Smith is a 35-year-old engineer.", '{"name": "John Smith", "age": 35, "occupation": "engineer"}'),
        ("Dr. Jane Doe, 42, works as a surgeon.", '{"name": "Jane Doe", "age": 42, "occupation": "surgeon"}')
    ])
    .add_output_format("JSON object with keys: name, age, occupation")
    .build()
)
Few-Shot Prompting
Providing examples helps models understand the desired task format and style.
class FewShotPrompt:
    def __init__(self, task_description):
        self.task_description = task_description
        self.examples = []

    def add_example(self, input_text, output_text):
        self.examples.append((input_text, output_text))
        return self

    def build_prompt(self, query):
        prompt = f"{self.task_description}\n\n"
        for i, (inp, out) in enumerate(self.examples, 1):
            prompt += f"Example {i}:\n"
            prompt += f"Input: {inp}\n"
            prompt += f"Output: {out}\n\n"
        prompt += f"Now complete this:\nInput: {query}\nOutput:"
        return prompt
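A short usage sketch of FewShotPrompt; the sentiment-classification task and review texts below are invented for illustration:

# Example usage (hypothetical sentiment task)
fs = (
    FewShotPrompt("Classify the sentiment of each review as positive or negative.")
    .add_example("The food was amazing and the staff were friendly.", "positive")
    .add_example("Cold food, slow service, never again.", "negative")
)
print(fs.build_prompt("Decent prices, but the wait was far too long."))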
class DynamicFewShot:
    def __init__(self, example_bank, embedder, k=3):
        self.example_bank = example_bank
        self.embedder = embedder
        self.k = k

    def select_examples(self, query):
        query_embedding = self.embedder.encode(query)
        similarities = []
        # Note: a production system would cache example embeddings instead of
        # re-encoding the whole bank on every query.
        for example in self.example_bank:
            example_embedding = self.embedder.encode(example["input"])
            sim = self.cosine_similarity(query_embedding, example_embedding)
            similarities.append((sim, example))
        # Sort on the similarity score only; comparing the example dicts
        # directly would raise a TypeError on tied scores.
        similarities.sort(key=lambda pair: pair[0], reverse=True)
        return [ex for _, ex in similarities[:self.k]]

    def cosine_similarity(self, a, b):
        return sum(x * y for x, y in zip(a, b)) / (
            sum(x**2 for x in a)**0.5 * sum(y**2 for y in b)**0.5
        )

    def build_prompt(self, query):
        selected = self.select_examples(query)
        prompt = "Complete the task based on these examples:\n\n"
        for ex in selected:
            prompt += f"Input: {ex['input']}\nOutput: {ex['output']}\n\n"
        prompt += f"Input: {query}\nOutput:"
        return prompt
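A runnable sketch of dynamic example selection. The embedder below is a deliberately crude bag-of-characters stand-in; a real system would use a sentence-embedding model exposing the same encode() interface:

class ToyEmbedder:
    # Crude bag-of-characters vectors, for demonstration only
    def encode(self, text):
        vec = [0.0] * 26
        for ch in text.lower():
            if "a" <= ch <= "z":
                vec[ord(ch) - ord("a")] += 1.0
        return vec

example_bank = [
    {"input": "Translate 'hello' to French.", "output": "bonjour"},
    {"input": "Translate 'goodbye' to French.", "output": "au revoir"},
    {"input": "What is 2 + 2?", "output": "4"},
]
selector = DynamicFewShot(example_bank, embedder=ToyEmbedder(), k=2)
print(selector.build_prompt("Translate 'thank you' to French."))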
Chain-of-Thought Prompting
Encouraging step-by-step reasoning improves performance on complex tasks.
class ChainOfThoughtPrompt:
    COT_TRIGGER = "Let's think step by step."

    def __init__(self, task_type="reasoning"):
        self.task_type = task_type

    def build_zero_shot_cot(self, question):
        return f"{question}\n\n{self.COT_TRIGGER}"

    def build_few_shot_cot(self, question, examples):
        prompt = "Solve each problem by thinking step by step.\n\n"
        for q, reasoning, answer in examples:
            prompt += f"Question: {q}\n"
            prompt += f"Reasoning: {reasoning}\n"
            prompt += f"Answer: {answer}\n\n"
        prompt += f"Question: {question}\nReasoning:"
        return prompt
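Example usage; the arithmetic questions and the worked example are illustrative:

# Example usage
cot = ChainOfThoughtPrompt()
print(cot.build_zero_shot_cot("A train travels 60 km in 45 minutes. What is its speed in km/h?"))

examples = [(
    "A car travels 100 km in 2 hours. What is its speed?",
    "Speed is distance divided by time: 100 km / 2 h = 50 km/h.",
    "50 km/h",
)]
print(cot.build_few_shot_cot("A cyclist covers 36 km in 1.5 hours. What is their speed?", examples))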
from collections import Counter

class SelfConsistency:
    def __init__(self, model, num_samples=5, temperature=0.7):
        self.model = model
        self.num_samples = num_samples
        self.temperature = temperature

    def solve(self, prompt):
        answers = []
        for _ in range(self.num_samples):
            response = self.model.generate(prompt, temperature=self.temperature)
            answer = self.extract_answer(response)
            answers.append(answer)
        return self.majority_vote(answers)

    def extract_answer(self, response):
        lines = response.strip().split("\n")
        for line in reversed(lines):
            if line.startswith("Answer:"):
                return line.replace("Answer:", "").strip()
        return lines[-1]

    def majority_vote(self, answers):
        counts = Counter(answers)
        return counts.most_common(1)[0][0]
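Hypothetical usage: SelfConsistency only assumes the model object exposes a generate(prompt, temperature=...) method returning a string, so a stub can stand in for a real LLM client here:

class StubModel:
    # Stand-in for an LLM client; a real model would return varied samples
    def generate(self, prompt, temperature=0.0):
        return "Reasoning: 12 * 3 = 36\nAnswer: 36"

solver = SelfConsistency(StubModel(), num_samples=5)
print(solver.solve("Question: What is 12 * 3?\nLet's think step by step."))  # -> 36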
Structured Output Prompting
Guiding models to produce specific output formats improves reliability.
import json

class JSONPrompt:
    def __init__(self, schema):
        self.schema = schema

    def build_prompt(self, task, input_text):
        schema_str = json.dumps(self.schema, indent=2)
        return f"""{task}
Input: {input_text}
Respond with a JSON object matching this schema:
{schema_str}
JSON response:"""

    def parse_response(self, response):
        # Extract the outermost {...} span; anything malformed falls through to None
        try:
            start = response.find("{")
            end = response.rfind("}") + 1
            json_str = response[start:end]
            return json.loads(json_str)
        except (json.JSONDecodeError, ValueError):
            return None
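A usage sketch; the schema and the simulated model reply are illustrative:

# Example usage
extractor = JSONPrompt({"name": "string", "age": "integer", "occupation": "string"})
prompt = extractor.build_prompt(
    "Extract the person's details from the text.",
    "Maria Lopez is a 29-year-old architect."
)
# Simulate a model reply and parse it
reply = 'Here you go: {"name": "Maria Lopez", "age": 29, "occupation": "architect"}'
print(extractor.parse_response(reply))  # -> {'name': 'Maria Lopez', 'age': 29, 'occupation': 'architect'}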
import re

class XMLPrompt:
    def __init__(self, tags):
        self.tags = tags

    def build_prompt(self, task, input_text):
        tag_list = ", ".join(self.tags)
        return f"""{task}
Input: {input_text}
Respond using these XML tags: {tag_list}
Wrap each piece of information in the appropriate tag.
Response:"""

    def parse_response(self, response):
        result = {}
        for tag in self.tags:
            pattern = f"<{tag}>(.*?)</{tag}>"
            match = re.search(pattern, response, re.DOTALL)
            if match:
                result[tag] = match.group(1).strip()
        return result
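A usage sketch with a simulated tagged reply:

# Example usage
xml_extractor = XMLPrompt(["name", "age", "occupation"])
reply = "<name>Maria Lopez</name> <age>29</age> <occupation>architect</occupation>"
print(xml_extractor.parse_response(reply))
# -> {'name': 'Maria Lopez', 'age': '29', 'occupation': 'architect'}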
Role and Persona Prompting
Assigning roles shapes model behavior and expertise level.
class PersonaPrompt:
    PERSONAS = {
        "expert": "You are a world-renowned expert with decades of experience.",
        "teacher": "You are a patient teacher who explains concepts clearly.",
        "critic": "You are a thorough critic who identifies flaws and improvements.",
        "assistant": "You are a helpful assistant focused on practical solutions."
    }

    def __init__(self, persona, domain=None):
        self.persona = persona
        self.domain = domain

    def build_system_prompt(self):
        # Unknown keys fall through, so callers can pass a custom persona string
        base = self.PERSONAS.get(self.persona, self.persona)
        if self.domain:
            base += f" Your expertise is in {self.domain}."
        return base

    def build_full_prompt(self, task):
        system = self.build_system_prompt()
        return f"System: {system}\n\nUser: {task}\n\nAssistant:"
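A brief usage sketch; the domain and task are illustrative:

# Example usage
tutor = PersonaPrompt("teacher", domain="linear algebra")
print(tutor.build_full_prompt("Explain what an eigenvector is."))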
class MultiPersonaDebate:
    def __init__(self, personas, rounds=3):
        self.personas = personas
        self.rounds = rounds

    def build_debate_prompt(self, topic, previous_arguments=None):
        prompt = f"Topic: {topic}\n\n"
        if previous_arguments:
            prompt += "Previous arguments:\n"
            for persona, argument in previous_arguments:
                prompt += f"{persona}: {argument}\n\n"
        prompt += "Provide your perspective, addressing previous points if any:"
        return prompt
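A usage sketch; the personas, topic, and prior argument are invented for illustration:

# Example usage
debate = MultiPersonaDebate(["optimist", "skeptic"], rounds=2)
print(debate.build_debate_prompt(
    "Should production code rely on AI-generated tests?",
    previous_arguments=[("optimist", "Generated tests raise coverage quickly.")],
))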
Key Takeaways
Prompt engineering is essential for effective LLM use. Structured prompts with clear sections improve reliability. Few-shot examples guide model behavior without any training. Chain-of-thought prompting improves performance on reasoning tasks. Output format specifications make responses easier to parse programmatically. Role prompting shapes the model's apparent expertise and communication style.