Skip to content

Part V: AI Integration

In this part:


Jac’s AI integration goes beyond simple API calls. With “Meaning Typed Programming,” you write function signatures that describe what you want, and the LLM figures out how to do it. This inverts the traditional programming model: instead of writing algorithms, you declare intent and let AI provide the implementation. The `by llm` operator makes this seamless.

Prerequisites:

Required Plugin: pip install byllm

Meaning Typed Programming (MTP) is Jac’s core AI paradigm. Your function signature — the name, parameter names, and types — becomes the specification. The LLM reads this “meaning” and generates appropriate behavior. This works because well-named functions already describe their intent; MTP just makes that intent executable.

Meaning Typed Programming treats semantic intent as a first-class type. You declare what you want, and AI provides how:

# The function signature IS the specification
def classify_sentiment(text: str) -> str by llm;

# Usage - the LLM infers behavior from the name and types
with entry {
    result = classify_sentiment("I love this product!");
    # result = "positive"
}

Implicit — derived from function/parameter names:

# Implicit semantics: the LLM infers the task from the function
# and parameter names alone - no body, no prompt string needed.
def translate_to_spanish(text: str) -> str by llm;

Explicit — using sem for detailed descriptions:

# Explicit semantics: a `sem` declaration attaches a detailed prompt
# description that the LLM reads alongside the function signature.
sem classify = """
Analyze the emotional tone of the input text.
Return exactly one of: 'positive', 'negative', 'neutral'.
Consider context and sarcasm.
""";
def classify(text: str) -> str by llm;

When function names alone don’t provide enough context, use sem (semantic) declarations to add detailed descriptions. The LLM reads these descriptions as part of its prompt, giving you precise control over behavior. Think of sem as documentation that actually affects execution.

# Function-level semantic description: included in the LLM prompt.
sem classify_sentiment = """
Analyze the emotional tone of the text.
Return 'positive', 'negative', or 'neutral'.
Consider nuance, sarcasm, and context.
""";
def classify_sentiment(text: str) -> str by llm;

# Parameter- and return-level descriptions via dotted `sem` paths.
sem analyze_code.code = "The source code to analyze";
sem analyze_code.language = "Programming language (python, javascript, etc.)";
sem analyze_code.return = "A structured analysis with issues and suggestions";
def analyze_code(code: str, language: str) -> dict by llm;

# Structured output: the return type constrains the LLM to produce
# a value matching this object's fields.
obj CodeAnalysis {
    has issues: list[str];
    has suggestions: list[str];
    has complexity_score: int;
    has summary: str;
}

sem analyze.return = """
Return a CodeAnalysis object with:
- issues: List of problems found
- suggestions: Improvement recommendations
- complexity_score: 1-10 complexity rating
- summary: One paragraph overview
""";
def analyze(code: str) -> CodeAnalysis by llm;

# Function delegation
# Each signature delegates its implementation to the LLM; the name,
# parameter names, and types are the entire specification.
def translate(text: str, target_lang: str) -> str by llm;
def summarize(article: str) -> str by llm;
def extract_entities(text: str) -> list[str] by llm;
# Inline expression: apply `by llm` directly to a value.
with entry {
    response = "Explain quantum computing in simple terms" by llm;
}
# Pipeline: chain LLM transformations with the |> operator; each
# stage passes its string result to the next prompt.
with entry {
    text = "Some input text";
    result = text
        |> (lambda t: str -> str: t by llm("Correct grammar"))
        |> (lambda t: str -> str: t by llm("Simplify language"))
        |> (lambda t: str -> str: t by llm("Translate to Spanish"));
}
# Per-function model configuration passed to `by llm(...)`.
def summarize(text: str) -> str by llm(
    model_name="gpt-4",
    temperature=0.7,
    max_tokens=2000
);

# Higher temperature for open-ended creative generation.
def creative_story(prompt: str) -> str by llm(
    model_name="claude-3-opus-20240229",
    temperature=1.0
);

Configuration Parameters:

| Parameter | Type | Description |
| --- | --- | --- |
| `model_name` | `str` | LLM provider/model identifier |
| `temperature` | `float` | Creativity (0.0-2.0) |
| `max_tokens` | `int` | Maximum response tokens |
| `stream` | `bool` | Enable streaming output |
| `tools` | `list` | Functions for tool calling (enables ReAct loop) |
| `context` | `list[str]` | Additional system instructions |
# Stub implementations for API calls
def fetch_weather_api(city: str) -> str {
    return f"Weather data for {city}";
}

def web_search_api(query: str) -> list[str] {
    return [f"Result for {query}"];
}
# Regular (non-LLM) function usable as a tool by `answer_question`.
def get_weather(city: str) -> str {
    # Actual implementation
    return fetch_weather_api(city);
}
# Regular (non-LLM) function usable as a tool by `answer_question`.
def search_web(query: str) -> list[str] {
    # Actual implementation
    return web_search_api(query);
}
# Tool calling: passing functions via `tools=` lets the LLM decide
# when to invoke them (ReAct loop) while answering.
def answer_question(question: str) -> str by llm(
    tools=[get_weather, search_web]
);

# The LLM can now call these tools to answer questions
with entry {
    result = answer_question("What's the weather in Paris?");
}
# Streaming: with stream=True the call yields chunks as they arrive.
def stream_story(prompt: str) -> str by llm(stream=True);

# Returns generator
with entry {
    for chunk in stream_story("Tell me a story") {
        print(chunk, end="");
    }
}
# Multimodal inputs: byllm provides Image and Video wrapper types.
import from byllm { Image, Video }

def describe_image(image: Image) -> str by llm;
def analyze_video(video: Video) -> str by llm;

# Usage - Image accepts a local file path or a URL.
with entry {
    description = describe_image(Image("photo.jpg"));
    description = describe_image(Image("https://example.com/image.png"));
}
# Deterministic testing: mockllm returns values from the configured
# `outputs` list instead of calling a real model.
def classify(text: str) -> str by llm(
    model_name="mockllm",
    config={"outputs": ["positive", "negative", "neutral"]}
);

test "classification" {
    result = classify("Great product!");
    assert result in ["positive", "negative", "neutral"];
}
# Project-wide default model for every `by llm` call.
[plugins.byllm.model]
default = "gpt-4"
# Default call parameters applied unless overridden per-function.
[plugins.byllm.call_params]
temperature = 0.7
max_tokens = 1000
# Route requests through a LiteLLM-compatible endpoint (here, a local proxy).
[plugins.byllm.litellm]
api_base = "http://localhost:4000"

Use by in pure Python with decorators:

from byllm import by, Model


# Use `by` as a decorator in plain Python: the signature and docstring
# form the prompt specification, and the LLM supplies the behavior.
@by(Model("gpt-4"))
def summarize(text: str) -> str:
    """Summarize the given text."""
    pass


result = summarize("Long article text...")

walker AIAgent {
    has goal: str;
    has memory: list = [];

    can decide with Node entry {
        # Build the prompt from the agent's goal, current node, and history.
        context = f"Goal: {self.goal}\nCurrent: {here}\nMemory: {self.memory}";
        decision = context by llm("Decide next action");
        self.memory.append({"location": here, "decision": decision});
        # Act on decision
        visit [-->];
    }
}
walker ResearchAgent {
    has query: str;

    def search(query: str) -> list[str] {
        return web_search(query);
    }

    def read_page(url: str) -> str {
        return fetch_content(url);
    }

    # Delegate the entire ability to the LLM, exposing the walker's
    # own methods as callable tools.
    can research with Root entry by llm(
        tools=[self.search, self.read_page]
    );
}
walker Coordinator {
    can coordinate with Root entry {
        # Spawn specialized agents
        research = root spawn Researcher(topic="AI");
        writer = root spawn Writer(style="technical");
        reviewer = root spawn Reviewer();

        # Combine results
        report {
            "research": research.reports,
            "draft": writer.reports,
            "review": reviewer.reports
        };
    }
}

Tutorials:

Examples:

Related Reference: