diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -808,6 +808,1530 @@ def _():
)
+@app.cell(hide_code=True)
+def _(get_current_language, mo, translations):
+ def create_enhanced_interactive_playground(
+ technique_name,
+ bad_prompt,
+ good_prompt,
+ ):
+ """
+ Creates an interactive playground structure (without form values)
+ """
+ # Create the playground header
+ playground_header = mo.md(f"""
+
+
+Hey there!
+
+I know, I know, you already regret clicking this link. But you can't resist the temptation to procrastinate, and I'm giving you an excellent reason. Please be thankful.
+
+This is an ✨interactive✨ notebook, split into err... too many lessons. The goal is to dig in a bit more, so you can pretend (like me) that you really studied hard all those papers and understand the reason behind the format. The same level as our bar exam preparation course (10 years for me, ouch!).
+
+You will have a general rule, a use case, then an example of a bad and a good prompt. Then why the good one is good, or amazing, or less bad.
+
+Then, if you reeeeeally want to spend your time, we have two clickable accordions (the thingies below the tabs), one giving more information and the other providing theoretical or experience-based support.
+
+
+What You'll Learn Today:
+
+- Role-Based Prompts - Make the AI think it graduated top of its class at Harvard Law
+- Context-Rich Instructions - Because models can't read your mind (yet!)
+- Constraint-Based Commands - For when you want JUST the termination clause, not the entire 50-page agreement
+- Example-Driven Templates - Show, don't tell (just like that partner who never explains what they want)
+- Step-by-Step Legal Analysis - Force the AI to show its work like your 1L professor
+- Contract Extraction Wizardry - Getting the needle from the haystack without the haystack
+- MSA Clause Drafting - Making clauses that won't make your GC cry
+- Ambiguity Handling - Because legal language is confusing (on purpose!)
+- Comparative Law Techniques - For when one jurisdiction isn't complicated enough
+- Le Gran Finale - The SURPRISE technique! Placing critical instructions at the end ...
+
+
+
+Each technique comes with real-world examples of what works, what flops, and why. By the end, you'll be extracting genuinely useful legal analysis instead of whatever it is the AI usually gives you on the first try. Or maybe you will get even worse results; I can't guarantee anything.
+
+Also, to torture you well-tortured souls, after each section you will be able to test your ✨deepmind deepknowledge deepresearch✨ mind.
+
+In all seriousness, I'm confident this will be useful to you and I hope it actually is!
+
+
+Best,
+Arthur
+
+Ps. Hey! If you read this, follow me on LinkedIn, pretty please?
+
+Ps2. Yes, Claude corrected me on 'Le Gran Finale.' But just to prove that I wrote this myself (I mean, kind of), I decided to leave it as is.
+
+        """)
+        return playground_header
+
+    return
+
+
+@app.cell(hide_code=True)
+def _():
+ # Cell 7: Technique Data
+ technique1_slug = 'persona'
+ technique1_name = {'en': 'Role-Based Prompting (Persona Priming)', 'pt': 'Prompting Baseado em Papéis (Preparação de Persona)'}
+ technique1_description = {
+ 'en': 'Role-based prompting involves explicitly assigning the AI a specific role or persona relevant to the task. For example, you might begin a prompt with "You are an experienced contracts attorney…" or "Act as a judge writing an opinion…". This primes the model to adopt the perspective, knowledge, and tone of that role. It can also include defining the audience or viewpoint (e.g. "explain like I\'m a client" vs. "write as a legal scholar"). By setting a persona, the prompt guides the LLM to produce answers aligned with that expertise or viewpoint.',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_why_it_works = {
+ 'en': 'Specifying a role focuses the LLM on domain-specific knowledge and style, narrowing the scope of its response. Authoritative guidance suggests treating the LLM "as a brilliant but very new employee…who needs explicit instructions," which includes giving it a clear role. Research has shown that responses can improve in accuracy when the prompt asks for analysis in the voice of a particular expert; for instance, GPT-4 gave a correct answer when asked to analyze a case "as [Harvard Law Professor] Cass Sunstein might," whereas a generic prompt yielded a hallucination. In practice, a persona provides context that the model can implicitly use (such as legal terminology or methodology familiar to that role), resulting in more on-point and technically accurate answers.',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_example_question = {
+ 'en': "A client's supplier breached a contract. What should the client do?",
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_example_bad_prompt = {
+ 'en': 'What should a company do if a supplier breaks a contract?',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_example_good_prompt = {
+ 'en': "You are a seasoned contracts attorney advising a tech company. The company's supplier failed to deliver goods, breaching their contract. Explain the legal steps the company should take next (e.g. sending a breach notice, seeking damages under the contract or UCC), in plain language for a business executive.",
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_example_explanation = {
+ 'en': 'By assigning the AI the role of a "seasoned contracts attorney," the model focuses on providing legally sound advice about contract breaches. The prompt also specifies the audience (a business executive), which guides the AI to use "plain language" while still covering technical legal concepts like breach notices and UCC remedies. The resulting response is likely to include structured legal steps, appropriate citations to relevant law, and practical advice—all delivered in the professional tone of an attorney advising a client. Without this role priming, the response might lack legal specificity or fail to address formal remedies available under contract law.',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique1_resource_title = {'en': 'GenAI Prompting Tips for Lawyers', 'pt': '[Placeholder for Portuguese translation]'}
+ technique1_resource_url = 'https://cl.cobar.org/departments/genai-prompting-tips-for-lawyers/'
+ technique1_resource_description = {
+ 'en': 'Comprehensive guide on effective prompting techniques for legal professionals',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ return (
+ technique1_description,
+ technique1_example_bad_prompt,
+ technique1_example_explanation,
+ technique1_example_good_prompt,
+ technique1_example_question,
+ technique1_name,
+ technique1_resource_description,
+ technique1_resource_title,
+ technique1_resource_url,
+ technique1_slug,
+ technique1_why_it_works,
+ )
+
+
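+@app.cell
+def _():
+    # Illustrative sketch (not part of the app's original cells): one way the
+    # role-based technique above could be assembled programmatically. The
+    # helper name is hypothetical; the message format follows the common
+    # {"role": ..., "content": ...} convention used by chat-style LLM APIs.
+    def build_persona_prompt(role, audience, task):
+        system = f'You are {role}. Write for {audience}, in plain language.'
+        return [
+            {'role': 'system', 'content': system},
+            {'role': 'user', 'content': task},
+        ]
+
+    # Usage example mirroring the good prompt for Technique 1.
+    _persona_messages = build_persona_prompt(
+        'a seasoned contracts attorney advising a tech company',
+        'a business executive',
+        "The company's supplier failed to deliver goods, breaching their "
+        'contract. Explain the legal steps the company should take next.',
+    )
+    return
+
+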
+@app.cell(hide_code=True)
+def _():
+ # Cell: Technique 2 Definition
+ technique2_slug = 'context-rich'
+ technique2_name = {'en': 'Context-Rich Prompting (Including Details and Background)', 'pt': 'Placeholder for Portuguese translation'}
+ technique2_description = {
+ 'en': 'Context-rich prompting means supplying the LLM with all relevant background facts, documents, and parameters of the query. Rather than asking a question in isolation, you include essential details such as the jurisdiction, involved parties, key facts, and the specific legal issue at hand. For instance, instead of "Can I fire an employee for social media posts?", you would ask, "As an employer in California, can I lawfully fire an at-will employee who posted negative comments about our company on social media?". You might also provide the text of a law or contract clause if interpretation is needed. By giving this specific context, you reduce ambiguity and guide the AI to consider the correct factual and legal framework.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_why_it_works = {
+ 'en': "LLMs do not have the ability to ask clarifying questions; they rely entirely on the prompt to understand the situation. Providing detailed context closes knowledge gaps and prevents the AI from making incorrect assumptions. Ambiguous or overly broad prompts can lead to irrelevant or wrong answers, whereas detailed prompts yield more tailored and accurate results. In fact, experts note that the more elaborate and specific the prompt (goals, facts, desired output, tone), the better the results. Including context like the type of case, relevant dates, jurisdiction, or document excerpts focuses the model on the correct scenario and laws, much like how a lawyer frames an issue before analysis. This technique ensures the AI's answer is grounded in the scenario you care about, rather than a generic summary of the law.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_example_question = {
+ 'en': 'An HR manager wants legal advice on firing an employee over social media posts. (The legality can depend on context like jurisdiction and circumstances.)',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_example_bad_prompt = {
+ 'en': 'Can I fire an employee for what they said on social media?',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_example_good_prompt = {
+ 'en': 'You are an employment lawyer advising a manager in California. The company is considering firing an at-will employee who posted critical comments about the company on Facebook after work hours. Provide an analysis of whether this termination would be legal under California law, considering any free speech or labor law protections.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_example_explanation = {
+ 'en': "By identifying both the jurisdiction (California) and the employment context (at-will employment, social media post, off-duty conduct), the prompt guides the LLM to apply California-specific law and relevant statutes. It also specifies the angle of analysis (legal protections for the employee's speech vs. the employer's rights). This contextual information helps the AI deliver a more nuanced, legally accurate answer that references specific state laws rather than providing generic advice. The output would include references to California Labor Code § 96(k) and § 98.6 which protect employees' speech and activities, as well as considerations under the National Labor Relations Act if the posts could be considered concerted activity about workplace conditions. The response would be tailored to California's specific legal framework rather than giving generic advice.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique2_resource_title = {'en': 'Writing Effective Legal AI Prompts', 'pt': 'Placeholder for Portuguese translation'}
+ technique2_resource_url = 'https://legal.thomsonreuters.com/blog/writing-effective-legal-ai-prompts/'
+ technique2_resource_description = {
+ 'en': 'Thomson Reuters guide on crafting effective prompts for legal AI applications',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ return (
+ technique2_description,
+ technique2_example_bad_prompt,
+ technique2_example_explanation,
+ technique2_example_good_prompt,
+ technique2_example_question,
+ technique2_name,
+ technique2_resource_description,
+ technique2_resource_title,
+ technique2_resource_url,
+ technique2_slug,
+ technique2_why_it_works,
+ )
+
+
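+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper, not used elsewhere in the
+    # app): building a context-rich prompt from explicit fields so the model
+    # analyzes the intended jurisdiction and facts instead of guessing.
+    def build_context_rich_prompt(jurisdiction, facts, question):
+        return (
+            f'Jurisdiction: {jurisdiction}\n'
+            f'Facts: {facts}\n'
+            f'Question: {question}'
+        )
+
+    # Usage example mirroring the good prompt for Technique 2.
+    _context_prompt = build_context_rich_prompt(
+        'California',
+        'An at-will employee posted critical comments about the company '
+        'on Facebook after work hours.',
+        'Would terminating this employee be legal under California law, '
+        'considering free speech and labor law protections?',
+    )
+    return
+
+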
+@app.cell(hide_code=True)
+def _():
+ # Cell: Define Technique 3
+ technique3_slug = 'constraint-based'
+    technique3_name = {'en': 'Constraint-Based Prompting (Conditional and Focused Instructions)', 'pt': '[Placeholder for Portuguese translation]'}
+ technique3_description = {
+ 'en': 'Constraint-based prompting introduces explicit conditions or limits into your prompt to narrow the scope of the AI\'s response. This can take the form of conditional instructions (e.g., "If X is true, do Y; if not, say it\'s not applicable.") or other constraints like word limits, format requirements, or focusing on a specific subsection of content. The goal is to have the LLM only address a particular area or follow certain rules, rather than responding broadly. For example, when analyzing a lengthy contract, you might write: "If the contract contains a termination clause, summarize that clause. Ignore other provisions." Similarly, you can constrain output length ("in 100 words") or style ("list 3 key points"). By setting clear boundaries or prerequisites in the prompt, you guide the model to produce a more targeted answer.',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique3_why_it_works = {
+ 'en': 'Constraints help narrow down the AI\'s focus so it doesn\'t stray into irrelevant territory. Large language models will try to use everything in the prompt to generate an answer, so if you tell it exactly what not to do or what specific subset to concentrate on, you reduce noise and off-point results. Legal professionals often only need certain information (for instance, just the holding of a case, or just one contract clause) — constraints ensure the AI filters its output to those needs. Authoritative sources recommend setting conditions or scope in prompts to make the analysis "contextually appropriate and relevant to your needs," thereby cutting out unnecessary results. Additionally, adding constraints like length or format limits can improve clarity; it forces the model to be concise and stick to the requested structure. In essence, constraint-based prompting is about precision: it directs the LLM to comply with specific requirements, much like a lawyer telling a junior associate, "Give me only the relevant facts on X and nothing else."',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique3_example_question = {
+ 'en': 'Summarizing a specific part of a contract. (The user has a long employment contract but only cares about termination terms.)',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique3_example_bad_prompt = {'en': 'Summarize this employment contract.', 'pt': '[Placeholder for Portuguese translation]'}
+ technique3_example_good_prompt = {
+ 'en': 'If the following employment contract contains a Termination or Severance clause, summarize those provisions in detail, focusing only on termination conditions and any severance pay terms. If not, respond that the contract has no such provisions. Ignore other sections.',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique3_example_explanation = {
+ 'en': "The prompt explicitly sets a condition and scope: it tells the AI to look for termination or severance clauses and report on those and nothing else. It also provides a conditional fallback (\"if not, say there are none\") so the AI won't wander off-topic if the condition isn't met. This focused instruction ensures the AI's output will directly address the user's need (termination terms) without extraneous contract details. It also implicitly instructs the AI to read the contract text (provided in the prompt) with an eye only for a specific subject, which is akin to running a targeted search within the text.\n\nExample output: \"Termination Clause (Section 5): The contract allows either party to terminate with 30 days' written notice. However, if the employee is terminated for cause (defined as gross misconduct or violation of company policy), the employer can terminate immediately without notice. The clause specifies that termination must be communicated in writing and outlines a post-termination non-compete period of 6 months.\n\nSeverance Provision (Section 6): In cases of termination without cause, the employee is entitled to a severance payment equal to 3 months' salary. The severance is conditioned on the employee signing a release of claims. No severance is given if the termination is for cause or if the employee resigns.\"",
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ technique3_resource_title = {'en': 'Prompt Engineering and Priming in Law', 'pt': '[Placeholder for Portuguese translation]'}
+ technique3_resource_url = 'https://www.researchgate.net/publication/382878312_Prompt_Engineering_and_Priming_in_Law'
+ technique3_resource_description = {
+ 'en': 'Research on effective prompt engineering techniques for legal applications',
+ 'pt': '[Placeholder for Portuguese translation]',
+ }
+ return (
+ technique3_description,
+ technique3_example_bad_prompt,
+ technique3_example_explanation,
+ technique3_example_good_prompt,
+ technique3_example_question,
+ technique3_name,
+ technique3_resource_description,
+ technique3_resource_title,
+ technique3_resource_url,
+ technique3_slug,
+ technique3_why_it_works,
+ )
+
+
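+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper): a constraint-based prompt
+    # with an explicit condition, a fallback answer, and a scope limit, so
+    # the model reports only the requested provisions.
+    def build_constraint_prompt(document_text, target, fallback):
+        return (
+            f'If the following document contains {target}, summarize those '
+            f'provisions in detail. If not, respond: "{fallback}" '
+            'Ignore all other sections.\n\n'
+            f'{document_text}'
+        )
+
+    # Usage example mirroring the good prompt for Technique 3; the document
+    # text is a placeholder.
+    _constraint_prompt = build_constraint_prompt(
+        '[employment contract text here]',
+        'a Termination or Severance clause',
+        'The contract has no such provisions.',
+    )
+    return
+
+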
+@app.cell(hide_code=True)
+def _():
+ # Cell: Technique 4 Definition
+ technique4_slug = 'example-few-shot'
+ technique4_name = {
+ 'en': 'Example (Few-Shot) Prompting (Providing Exemplars or Templates)',
+ 'pt': 'Prompt por Exemplos (Few-Shot) (Fornecimento de Exemplares ou Modelos)',
+ }
+ technique4_description = {
+ 'en': "Example prompting, also known as few-shot prompting, involves including sample inputs/outputs or a template in your prompt to demonstrate the desired format, style, or level of detail. This can mean giving the AI one or more Q&A examples before your actual question, or providing a model answer structure. In the legal context, you might show an example of a well-written clause, then ask the AI to draft a similar clause for a new scenario. For instance: \"Example – Clause: 'In the event of breach, the non-breaching party shall… [legal language] …' Now draft a liability waiver clause for a service contract in a similar style.\" Another use is to provide a few sample legal questions with correct answers (few-shot Q&A) before posing a new question, which primes the model on the approach. By doing so, you leverage the AI's pattern recognition strength: it will mimic the structure or reasoning of the examples when generating the new answer.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_why_it_works = {
+ 'en': 'Large language models learn and operate by recognizing patterns. When you provide a sample of a good response, you essentially program the model with a mini example of the task at hand. The model will infer the style, tone, and logic from the example and apply it to the new prompt. This few-shot prompting technique is well-documented to improve performance, especially for niche tasks or formats that the model might not guess on its own. Instead of relying on the AI to deduce the desired output style, you show it explicitly. Authoritative guidelines for legal AI suggest offering a template or bullet-point structure to guide the AI\'s response. For example, telling the model, "Follow this structure: 1) Facts, 2) Issue, 3) Holding" can lead to an answer in that format. Similarly, providing a placeholder-filled template (e.g., using bracketed placeholders in an example contract clause) lets the AI know exactly how to format the answer. By demonstration, we reduce ambiguity — the AI doesn\'t have to "figure out" the format or level of detail, it just continues the pattern. This results in output that is closer to the user\'s expected answer in both form and substance.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_example_question = {
+ 'en': 'Drafting a contract clause with a specific style. (The user wants a liability waiver clause similar to an example they like.)',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_example_bad_prompt = {
+ 'en': '"Draft a liability waiver clause for a service contract."',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_example_good_prompt = {
+ 'en': '"Draft a liability waiver clause for a service contract. Use the following clause as a style guide and follow a similar structure and tone:\n\nExample Clause: "In no event shall [Party] be liable for any indirect, incidental, or consequential damages arising out of or related to this Agreement, except in cases of gross negligence or willful misconduct…"\n\nNow, write the liability waiver for our contract in a similar style, adjusting details for our context (a software service provider)."',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_example_explanation = {
+ 'en': 'The good prompt provides a concrete example clause that demonstrates the desired style (it\'s concise, includes a standard exclusion of indirect damages, and has an exception for gross negligence). By instructing the AI to use it as a guide, the model will mirror that phrasing and structure when drafting the new clause. The prompt also specifies the context (software service provider) so the AI can adjust any particulars (for instance, referencing "software or data" in the waiver if relevant). This approach reduces the guesswork for the AI – it knows exactly the kind of clause the user wants, resulting in a clause that likely aligns with industry standards or the user\'s preference as shown in the example.\n\nExample output: "Liability Waiver Clause: In no event shall either party be liable to the other for any indirect, special, incidental, or consequential damages (including lost profits or data loss) arising out of or related to this Agreement or the services provided, even if such party has been advised of the possibility of such damages. The foregoing limitation applies to all causes of action, whether arising in contract, tort, or otherwise, except that nothing in this Agreement shall limit or exclude liability for a party\'s gross negligence or willful misconduct."\n\nThe output clause closely follows the style of the example: it uses the "In no event shall…" phrasing, disclaims indirect damages, and includes an exception for gross negligence/willful misconduct. By contrast, a clause generated without the example might have been structured differently or missed including the exception. The example prompt ensured the result was aligned with the desired template.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique4_resource_title = {'en': 'Minnesota Law Review Article on LLMs in Legal Practice', 'pt': 'Placeholder for Portuguese translation'}
+ technique4_resource_url = 'https://minnesotalawreview.org/wp-content/uploads/2023/10/FL1-Choi-Schwarcz.pdf'
+ technique4_resource_description = {
+ 'en': 'Comprehensive exploration of few-shot prompting techniques in legal contexts',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ return (
+ technique4_description,
+ technique4_example_bad_prompt,
+ technique4_example_explanation,
+ technique4_example_good_prompt,
+ technique4_example_question,
+ technique4_name,
+ technique4_resource_description,
+ technique4_resource_title,
+ technique4_resource_url,
+ technique4_slug,
+ technique4_why_it_works,
+ )
+
+
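+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper): few-shot prompting as a
+    # chat message list. Each (prompt, answer) pair is sent as a completed
+    # demonstration turn before the real request, so the model continues
+    # the demonstrated pattern.
+    def build_few_shot_messages(examples, new_request):
+        messages = []
+        for prompt, answer in examples:
+            messages.append({'role': 'user', 'content': prompt})
+            messages.append({'role': 'assistant', 'content': answer})
+        messages.append({'role': 'user', 'content': new_request})
+        return messages
+
+    # Usage example in the spirit of Technique 4: one example clause as the
+    # demonstration, then the actual drafting request.
+    _few_shot_messages = build_few_shot_messages(
+        [(
+            'Draft a limitation of liability clause.',
+            'In no event shall [Party] be liable for any indirect, '
+            'incidental, or consequential damages arising out of or related '
+            'to this Agreement, except in cases of gross negligence or '
+            'willful misconduct.',
+        )],
+        'Now draft a liability waiver clause for a software service '
+        'provider in a similar style.',
+    )
+    return
+
+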
+@app.cell(hide_code=True)
+def _():
+ technique5_slug = 'step-by-step'
+ technique5_name = {'en': 'Step-by-Step Prompting (Chain-of-Thought Legal Reasoning)', 'pt': 'Placeholder for Portuguese translation'}
+ technique5_description = {
+ 'en': 'Step-by-step prompting involves asking the LLM to work through the problem in a logical sequence, rather than jumping straight to a conclusion. In legal tasks, this often means prompting the model to apply a structured analysis (for example, the IRAC method: Issue, Rule, Application, Conclusion, or breaking down elements of a legal test). You can achieve this by explicitly instructing the AI how to structure its reasoning. For instance: "Analyze this scenario step by step: first identify the legal issues, then state the relevant law for each issue, apply the facts, and finally give a conclusion." or simply "Let\'s think this through step-by-step.". Another variant is telling the model to enumerate its reasoning (e.g., "1, 2, 3…"). The idea is to mimic how a lawyer would deliberate on a problem methodically. This technique is especially useful for complex scenarios with multiple factors (such as determining if negligence is present, which requires analyzing duty, breach, causation, damages in turn).',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_why_it_works = {
+ 'en': 'Prompting an LLM to show its work leads to more transparent and often more accurate results. Recent findings highlight that users can significantly improve answer quality by asking the model to "reason step by step." This approach, known as chain-of-thought prompting, has been widely adopted because it helps the AI break down complex tasks instead of making a leap and possibly an error. By structuring the analysis (much like IRAC or element-by-element examination), you not only get a thorough answer but can also verify each step of the reasoning. If the model makes a mistake in a step, you can catch it and correct it, resulting in a more reliable final answer. In legal reasoning, where analytical rigor is key, this method ensures the AI considers all necessary components (for example, each element of a claim or each prong of a test). It effectively guides the model to "think like a lawyer," aligning its process with how a legal professional would logically approach the issue. Even if newer LLMs can sometimes do this internally, explicitly prompting for step-by-step reasoning is a safe way to enforce completeness and clarity in the output.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_example_question = {
+ 'en': 'Assessing legal liability with multiple elements. (A customer slipped and fell in a store – does the store have negligence liability?)',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_example_bad_prompt = {
+ 'en': 'The customer slipped on a wet floor in the store and was injured. Is the store liable for negligence?',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_example_good_prompt = {
+ 'en': 'You are a legal analyst. Analyze the following scenario step by step to determine if the store could be held liable for negligence: A customer in a grocery store slipped on a spilled liquid and broke their arm. First, identify the elements required to prove negligence in U.S. law (duty, breach, causation, damages). Then, discuss each element one by one with respect to the facts (e.g., did the store owe a duty, did it breach that duty by not cleaning the spill, etc.). Conclude whether the elements are satisfied and thus if the store is likely liable.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_example_explanation = {
+ 'en': "This prompt explicitly asks for a step-by-step IRAC-style analysis. It not only tells the AI to consider each negligence element in order but also reminds it of what those elements are. The instruction ensures the answer will be organized (perhaps with headings or paragraphs for Duty, Breach, Causation, Damages, Conclusion). The model is less likely to omit a part of the analysis, and the user will get a structured explanation rather than a one-liner. By guiding the reasoning process, the prompt helps the AI reach a well-founded conclusion (or identify if information is missing) just as a human lawyer would.\n\nExample output: \"*Duty: The store, as a business open to the public, owes its customers a legal duty of care to maintain the premises in a reasonably safe condition. Here, that duty means regularly inspecting for hazards like spills and either cleaning them promptly or warning customers (e.g., with a 'Wet Floor' sign).\n\nBreach: We examine if the store breached its duty by failing to address the spilled liquid. If the spill was present long enough that employees should have noticed it (say, it had been on the floor for 30 minutes without cleanup), not cleaning it would likely be a breach of duty. If the spill happened moments before the fall, the store might argue it had no reasonable opportunity to discover and fix it, which could negate breach.\n\nCausation: The customer's injury must be caused by the store's breach. Here, the causal link is direct: the unsafe condition (wet floor) led to the slip-and-fall accident. We'd also consider proximate cause – it's foreseeable that an unaddressed spill leads to this kind of injury, so causation is established.\n\nDamages: The customer suffered actual harm (a broken arm, medical bills, etc.), satisfying the damages element required for negligence.\n\nConclusion: If evidence shows the spill was on the floor long enough that the store should have known and cleaned it, the store likely breached its duty. In that case, all four elements of negligence are met, and the store would likely be held liable for the customer's injury. If the spill was truly sudden and unforeseeable, the store might avoid liability, but absent such proof, this scenario points toward negligence on the store's part.*\"",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique5_resource_title = {'en': "Deloitte's Guide to Legal Prompting", 'pt': 'Placeholder for Portuguese translation'}
+ technique5_resource_url = 'https://www2.deloitte.com/dl/en/pages/legal/articles/grundkurs-legal-prompting.html'
+ technique5_resource_description = {
+ 'en': 'Comprehensive guide on effective legal prompting techniques including step-by-step reasoning',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ return (
+ technique5_description,
+ technique5_example_bad_prompt,
+ technique5_example_explanation,
+ technique5_example_good_prompt,
+ technique5_example_question,
+ technique5_name,
+ technique5_resource_description,
+ technique5_resource_title,
+ technique5_resource_url,
+ technique5_slug,
+ technique5_why_it_works,
+ )
+
+
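+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper): a chain-of-thought prompt
+    # that enumerates the elements to analyze in order, IRAC-style, before
+    # asking for a conclusion.
+    def build_step_by_step_prompt(scenario, elements):
+        steps = '\n'.join(
+            f'{i}. Discuss {element} with respect to the facts.'
+            for i, element in enumerate(elements, start=1)
+        )
+        return (
+            'Analyze the following scenario step by step.\n'
+            f'Scenario: {scenario}\n\n'
+            f'{steps}\n'
+            f'{len(elements) + 1}. Conclude whether all elements are '
+            'satisfied.'
+        )
+
+    # Usage example mirroring the good prompt for Technique 5.
+    _negligence_prompt = build_step_by_step_prompt(
+        'A customer in a grocery store slipped on a spilled liquid and '
+        'broke their arm.',
+        ['duty', 'breach', 'causation', 'damages'],
+    )
+    return
+
+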
+@app.cell(hide_code=True)
+def _():
+ technique6_slug = 'contract-extraction'
+ technique6_name = {'en': 'Extracting Key Provisions and Data from Contracts', 'pt': 'Placeholder for Portuguese translation'}
+ technique6_description = {
+ 'en': 'This technique involves directing an LLM to locate and extract specific information from legal documents like contracts, rather than summarizing the entire document. By focusing the model on particular provisions, clauses, or data points, attorneys can quickly find relevant information such as dates, obligations, defined terms, or conditions. The approach is similar to using targeted questions with a colleague who has read a document - except the LLM does the quick read-through and extraction for you.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_why_it_works = {
+ 'en': "Legal documents are often lengthy and complex, with critical details buried within dense paragraphs. By prompting the LLM to focus on specific provisions or information types, you eliminate the noise and zero in on what matters. This technique works because LLMs have strong pattern recognition abilities that can identify the relevant clauses or data points when properly directed. Rather than processing the entire document (which might exceed the model's context window anyway), a targeted extraction prompt creates efficiency by pulling only the needed information. This is particularly valuable when reviewing multiple agreements or when specific contractual elements (like termination rights, payment terms, or warranty provisions) need quick assessment across documents.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_example_question = {
+ 'en': "Understanding a contract's liquidated damages provision. (You need to know how damages for breach are handled in a long agreement.)",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_example_bad_prompt = {
+ 'en': 'Tell me about this contract. [entire 50-page contract pasted]',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_example_good_prompt = {
+ 'en': "In the clause below, what do the parties agree regarding damages for breach?\n\n12.2 Liquidated Damages Not Penalty. Because of the unique nature of the economic damages that may be sustained by the Company in the event of a breach of certain provisions of this Agreement by Executive, it is acknowledged and agreed by the Parties that it would be impracticable and extremely difficult to ascertain with any degree of certainty the amount of damages which the Company would sustain as a result of such breach. Accordingly, if Executive breaches certain provisions of this Agreement, the Parties agree that any sums payable under this Agreement in such circumstances are in the nature of liquidated damages and not a penalty, and represent a reasonable estimate of the damages that the Company will suffer in the event of Executive's breach.\n\nSummarize the effect of this clause in bullet points, explaining what this means for both parties if there's a breach.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_example_explanation = {
+ 'en': 'The good prompt does several things right: it isolates just the relevant clause (12.2) rather than sending the entire contract, asks a specific question about breach damages, and requests a structured response format (bullet points). This leads the LLM to focus solely on interpreting the liquidated damages provision, which establishes that certain payments for breach: 1) are considered liquidated damages not penalties, 2) are justified because actual damages would be difficult to measure, and 3) represent what the parties agree is a reasonable estimate of potential harm. The model doesn\'t waste time analyzing unrelated sections of the agreement, and the attorney gets precisely the information needed: the nature and justification of the damages provision. This extraction approach is significantly more efficient than asking for a general contract summary and then hunting through it for damage provisions.\n\nExample output: "• Effect of Clause 12.2 - Liquidated Damages:\n• The clause establishes that certain breach payments are classified as liquidated damages, not penalties\n• Both parties acknowledge that actual economic damages from specific breaches would be difficult to calculate with certainty\n• The payments represent a reasonable pre-estimate of potential damages, not punishment\n• This classification matters legally because courts generally enforce liquidated damages provisions but may invalidate penalty clauses\n• For the executive: limits potential argument that the damages are excessive or punitive\n• For the company: provides more certainty that the damage amounts will be enforceable if challenged in court"',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique6_resource_title = {'en': 'Ethylene Sales Agreement', 'pt': 'Placeholder for Portuguese translation'}
+ technique6_resource_url = 'https://www.sec.gov/Archives/edgar/data/1604665/000119312514263367/d715499dex104.htm'
+ technique6_resource_description = {
+ 'en': 'A complex and long agreement, perfect for this example.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ return (
+ technique6_description,
+ technique6_example_bad_prompt,
+ technique6_example_explanation,
+ technique6_example_good_prompt,
+ technique6_example_question,
+ technique6_name,
+ technique6_resource_description,
+ technique6_resource_title,
+ technique6_resource_url,
+ technique6_slug,
+ technique6_why_it_works,
+ )
+
+
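+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper): a targeted extraction
+    # prompt that sends only the relevant clause (not the whole contract)
+    # together with a specific question and a requested output format.
+    def build_extraction_prompt(clause_text, question):
+        return (
+            f'{question}\n\n{clause_text}\n\n'
+            'Summarize the effect of this clause in bullet points, '
+            "explaining what it means for both parties if there's a breach."
+        )
+
+    # Usage example in the spirit of Technique 6; the clause text is a
+    # placeholder.
+    _extraction_prompt = build_extraction_prompt(
+        '[text of clause 12.2, Liquidated Damages Not Penalty]',
+        'In the clause below, what do the parties agree regarding damages '
+        'for breach?',
+    )
+    return
+
+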
+@app.cell(hide_code=True)
+def _():
+ technique7_slug = 'msa-clause-drafting'
+    technique7_name = {'en': 'Master Service Agreement Clause Drafting and Refinement', 'pt': 'Placeholder for Portuguese translation'}
+ technique7_description = {
+ 'en': 'This technique involves using LLMs to draft new clauses or refine existing language in Master Service Agreements (MSAs). By setting clear parameters about the purpose, required terms, governing law, and desired style, you can generate high-quality legal text that meets your specific needs. The model can either create clauses from scratch or suggest improvements to existing language, accelerating the drafting process while ensuring the output aligns with legal standards.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique7_why_it_works = {
+ 'en': "LLMs have been trained on vast repositories of legal documents, including agreements filed with the SEC. This training enables them to understand the structure, terminology, and conventional language of MSAs and other legal agreements. By providing specific context (jurisdiction, industry, desired complexity level) and parameters (required elements, formatting preferences), you narrow the model's focus to produce relevant, properly structured legal text. The model can quickly generate initial drafts that follow standard legal conventions or modernize outdated language, saving significant time compared to manual drafting. However, human review remains essential to ensure the output is legally sound and contextually appropriate for your specific transaction.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique7_example_question = {
+ 'en': 'Drafting or refining confidentiality clauses in a Master Service Agreement. (You need to create or improve language around confidential information protection.)',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique7_example_bad_prompt = {'en': 'Write a confidentiality clause for my MSA.', 'pt': 'Placeholder for Portuguese translation'}
+ technique7_example_good_prompt = {
+ 'en': 'You are a lawyer drafting a confidentiality clause for a Master Service Agreement between a technology vendor and healthcare client under California law. Reference the structure in the SEC filing at https://www.sec.gov/Archives/edgar/data/1042134/000119312505162630/dex1033.htm. The clause should cover: (1) definition of confidential information, (2) obligations to maintain secrecy, (3) standard exceptions (public information, independently developed information, etc.), (4) duration of obligations (3 years post-termination), and (5) return of confidential information upon termination. Write the clause in plain English while maintaining necessary legal protections. Format with numbered subsections for readability.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique7_example_explanation = {
+ 'en': "The good prompt provides extensive context and direction for creating an effective confidentiality clause. It specifies the type of agreement (MSA), the parties involved (tech vendor and healthcare client), the governing law (California), and references a specific SEC filing as a structural guide. The prompt also clearly outlines the five key elements that must be included in the clause and provides specific parameters (3-year post-termination duration). Furthermore, it guides the style ('plain English') and formatting ('numbered subsections'), ensuring the output will be both legally sound and reader-friendly.\n\nBy contrast, the bad prompt gives virtually no information about context, content requirements, style preferences, or formatting needs, which would likely result in a generic clause that might not address the specific needs of a technology-healthcare relationship or conform to California law. The specificity in the good prompt ensures the model produces a clause that closely matches what would appear in a professionally drafted MSA, with appropriate attention to healthcare data concerns and technology service specifics.\n\nThe resulting clause might begin with a definition section that carefully defines confidential information in the healthcare technology context, outline specific security measures required for protected health information, list standard exceptions to confidentiality obligations, specify the 3-year post-termination period, and detail the procedures for returning or destroying confidential information when the agreement ends.",
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ technique7_resource_title = {'en': 'Master Service Agreement', 'pt': 'Placeholder for Portuguese translation'}
+ technique7_resource_url = 'https://www.sec.gov/Archives/edgar/data/1042134/000119312505162630/dex1033.htm'
+ technique7_resource_description = {
+ 'en': 'A complex and long agreement, perfect for this example, but now techy.',
+ 'pt': 'Placeholder for Portuguese translation',
+ }
+ return (
+ technique7_description,
+ technique7_example_bad_prompt,
+ technique7_example_explanation,
+ technique7_example_good_prompt,
+ technique7_example_question,
+ technique7_name,
+ technique7_resource_description,
+ technique7_resource_title,
+ technique7_resource_url,
+ technique7_slug,
+ technique7_why_it_works,
+ )
+
+
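+@app.cell
+def _():
+    # Illustrative sketch (hypothetical helper): a drafting prompt that
+    # pins down the clause type, parties, governing law, and required
+    # elements before asking for plain-English output.
+    def build_drafting_prompt(clause_type, parties, governing_law, elements):
+        numbered = '; '.join(
+            f'({i}) {item}' for i, item in enumerate(elements, start=1)
+        )
+        return (
+            f'You are a lawyer drafting a {clause_type} clause for a Master '
+            f'Service Agreement between {parties}, governed by '
+            f'{governing_law} law. The clause should cover: {numbered}. '
+            'Write in plain English while maintaining necessary legal '
+            'protections, and format with numbered subsections.'
+        )
+
+    # Usage example mirroring the good prompt for Technique 7.
+    _msa_prompt = build_drafting_prompt(
+        'confidentiality',
+        'a technology vendor and a healthcare client',
+        'California',
+        [
+            'definition of confidential information',
+            'obligations to maintain secrecy',
+            'standard exceptions',
+            'duration of obligations (3 years post-termination)',
+            'return of confidential information upon termination',
+        ],
+    )
+    return
+
+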
+@app.cell
+def _(DirectLLMClient, mo):
+    def display_response(user_prompt=None):
+        """Create a Marimo UI element that displays the prompt and response, with a loading spinner."""
+        if user_prompt is None or not user_prompt.value:
+ return mo.md(
+ f"""
+