diff --git a/.gitignore b/.gitignore index d1cd9abd75..619e4691a1 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,7 @@ build openai-key.txt *.code-workspace + +# Ignore run_experiments.sh results +evals/elsuite/**/logs/ +evals/elsuite/**/outputs/ diff --git a/evals/elsuite/self_prompting/eval.py b/evals/elsuite/self_prompting/eval.py new file mode 100644 index 0000000000..7db858f5d4 --- /dev/null +++ b/evals/elsuite/self_prompting/eval.py @@ -0,0 +1,261 @@ +import json +import logging +import random +from pathlib import Path +from typing import Any, Optional, Union + +import numpy as np + +import evals +import evals.metrics +from evals.api import CompletionFn +from evals.elsuite.self_prompting.task_description import sample_in_token, task_description_template +from evals.eval import SolverEval +from evals.registry import registry +from evals.solvers.solver import Solver +from evals.task_state import TaskState +from evals.utils.log_utils import extract_final_results, extract_spec + +logger = logging.getLogger(__name__) + + +class SelfPrompting(SolverEval): + def __init__( + self, + completion_fns: list[CompletionFn], + samples_jsonl: str, + tasker_models: list[str], + n_tasks: int = 50, + n_samples_per_task: int = 10, + n_preview_samples: int = 5, + baseline_logpath: Optional[str] = None, + *args, + **kwargs, + ): + super().__init__(completion_fns, *args, **kwargs) + # CI doesn't have access to model APIs, so replace tasker_models with dummy models + # if we're running in CI (i.e. if the first completion_fn is a DummyCompletionFn) + if isinstance(completion_fns[0], evals.api.DummyCompletionFn): + tasker_models = ["dummy" for _ in tasker_models] + + self.samples_jsonl = samples_jsonl + self.tasker_models = tasker_models + self.n_tasks = n_tasks + self.n_samples_per_task = n_samples_per_task + self.n_preview_samples = n_preview_samples + self.baseline_logpath = ( + self._prefix_registry_path(baseline_logpath) if baseline_logpath else None + ) + assert len(self.tasker_models) > 0, "Must provide at least one tasker model" + assert self.n_tasks > 0, "Must provide at least one task" + assert self.n_samples_per_task > 0, "Must provide at least one sample per task" + + np.random.seed(self.seed) + + self.tasker_completion_fns = {} + for tasker_model in self.tasker_models: + self.tasker_completion_fns[tasker_model] = registry.make_completion_fn(tasker_model) + + def eval_sample(self, solver: Solver, sample: Any, rng: random.Random): + if sample["stage"] == "prompting": + return self._run_prompting(solver, sample) + elif sample["stage"] == "tasking": + return self._run_tasking(sample) + else: + raise ValueError(f"Invalid stage {sample['stage']}") + + def _run_prompting(self, solver: Solver, sample: Any, *_): + # Prompt the prompter_model to generate a prompt for the tasker_model + task_description = task_description_template.format( + instruction=sample["task"]["instruction"], + samples=json.dumps(sample["task"]["train_samples"], indent=2), + tasker_model=sample["tasker_model"], + ) + task_state = TaskState( + task_description=task_description, + current_state={ + "instruction": sample["task"]["instruction"], + "samples": sample["task"]["train_samples"], + "tasker_model": sample["tasker_model"], + }, + ) + solver_result = solver(task_state) + model_instruction = solver_result.output + + prompt_rule_violation = sample_in_token not in model_instruction + + output = { + **sample, + "task_description": task_description, + "current_state": task_state.current_state, + "prompting_solver_metadata": 
solver_result.to_json(), + "model_instruction": model_instruction, + "prompt_rule_violation": prompt_rule_violation, + } + return output + + def _run_tasking(self, sample: Any, *_): + tasker_completion_fn = self.tasker_completion_fns[sample["tasker_model"]] + + if sample_in_token in sample["model_instruction"]: + # Fill in the sample input + full_prompt = sample["model_instruction"].replace(sample_in_token, sample["input"]) + else: + # Append the sample input + full_prompt = f"{sample['model_instruction']}\n{sample['input']}" + tasker_output = tasker_completion_fn(full_prompt).get_completions()[0] + + exact = 1 if tasker_output == sample["output"] else 0 + fuzzy = 1 if tasker_output in sample["output"] or sample["output"] in tasker_output else 0 + + output = { + **sample, + "full_prompt": full_prompt, + "tasker_output": tasker_output, + "exact": exact, + "fuzzy": fuzzy, + } + evals.record.record_metrics(**output) + return output + + def _calculate_improvement_wrt_baseline( + self, current_res: dict[str, float] + ) -> dict[str, float]: + if self.baseline_logpath is None: + logger.warn("SKIPPING IMPROVEMENT METRICS. (No baseline logpath provided.)") + return {} + + # Check that baseline was run on the same tasker models, tasks, and samples + baseline_spec = extract_spec(Path(self.baseline_logpath)) + try: + spec_args = baseline_spec["run_config"]["eval_spec"]["args"] + except KeyError: + logger.warn("SKIPPING IMPROVEMENT METRICS. (Failed to validate baseline spec.)") + return {} + if set(spec_args["tasker_models"]) != set(self.tasker_models): + logger.warn( + f"SKIPPING IMPROVEMENT METRICS. (Baseline tasker_models {spec_args['tasker_models']} do not match {self.tasker_models}.)" + ) + return {} + if ( + spec_args["n_tasks"] != self.n_tasks + ): # TODO: Ideally we would check that the tasks are the same + logger.warn( + f"SKIPPING IMPROVEMENT METRICS. (Baseline n_tasks {spec_args['n_tasks']} does not match {self.n_tasks}.)" + ) + return {} + if spec_args["n_samples_per_task"] != self.n_samples_per_task: + logger.warn( + f"SKIPPING IMPROVEMENT METRICS. (Baseline n_samples_per_task {spec_args['n_samples_per_task']} does not match {self.n_samples_per_task}.)" + ) + return {} + + baseline_res = extract_final_results(Path(self.baseline_logpath)) + + def normalized_improvement(current, baseline): + """ + Returns a score between -1 and 1, where + -1 means the current score maximally regresses from the baseline (i.e. 
the current score is 0) + 0 means the current score is the same as the baseline + +1 means the current score achieves max improvement over the baseline + """ + if current < baseline: + return (current - baseline) / baseline + else: + return (current - baseline) / (1 - baseline) + + improvement_scores = { + "accuracy_improvement_wrt_oriprompt": normalized_improvement( + current_res["accuracy"], baseline_res["accuracy"] + ), + "accuracy_fuzzy_improvement_wrt_oriprompt": normalized_improvement( + current_res["accuracy_fuzzy"], baseline_res["accuracy_fuzzy"] + ), + "baseline_accuracy": baseline_res["accuracy"], + "baseline_accuracy_fuzzy": baseline_res["accuracy_fuzzy"], + } + logger.info(f"Improvement scores: {improvement_scores}") + return improvement_scores + + def run(self, recorder: evals.record.Recorder) -> dict[str, Union[float, int]]: + samples = self.get_samples() + + # Shuffle and limit samples + np.random.shuffle(samples) + samples_by_task = samples[: self.n_tasks] + assert len(samples_by_task) == self.n_tasks + for task in samples_by_task: + np.random.shuffle(task["test_samples"]) + np.random.shuffle(task["train_samples"]) + task["test_samples"] = task["test_samples"][: self.n_samples_per_task] + task["train_samples"] = task["train_samples"][: self.n_preview_samples] + assert len(task["test_samples"]) == self.n_samples_per_task + assert len(task["train_samples"]) == self.n_preview_samples + + # Run prompting + prompting_samples = [] + for task in samples_by_task: + for tasker_model in self.tasker_models: + prompting_samples.append( + { + "stage": "prompting", + "tasker_model": tasker_model, + "task": task, + } + ) + assert len(prompting_samples) == len(self.tasker_models) * self.n_tasks + prompting_results = self.eval_all_samples(recorder, prompting_samples) + + # Run tasking + tasking_samples = [] # Store in flattened list for parallel eval + for prompt_res in prompting_results: + prompt_res["stage"] = "tasking" # Update stage + for sample in prompt_res["task"]["test_samples"]: + tasking_samples.append( + { + **prompt_res, + "input": sample["input"], + "output": sample["output"], + } + ) + assert len(tasking_samples) == len(prompting_results) * self.n_samples_per_task + self.eval_all_samples(recorder, tasking_samples) + + # The score of a Prompter is the average score of all Tasker models it writes prompts for + metrics = recorder.get_metrics() + + # Primary metrics + result = { + "accuracy": np.mean([metric["exact"] for metric in metrics]), + "accuracy_fuzzy": np.mean([metric["fuzzy"] for metric in metrics]), + } + # Relative improvement against baseline + improvement_scores = self._calculate_improvement_wrt_baseline(result) + if improvement_scores: + result.update(improvement_scores) + + # Peripheral metrics + result.update( + { + "prompt_rule_violation_rate": np.mean( + [int(metric["prompt_rule_violation"]) for metric in metrics] + ), + "n_samples": len(metrics), + } + ) + + # Breakdown by tasker model + def compute_mean_tasker(key, tasker_model): + return np.mean( + [metric[key] for metric in metrics if metric["tasker_model"] == tasker_model] + ) + + for tasker in self.tasker_models: + result.update( + { + f"accuracy_{tasker}": compute_mean_tasker("exact", tasker), + f"accuracy_fuzzy_{tasker}": compute_mean_tasker("fuzzy", tasker), + } + ) + + return result diff --git a/evals/elsuite/self_prompting/readme.md b/evals/elsuite/self_prompting/readme.md new file mode 100644 index 0000000000..ec705fd32b --- /dev/null +++ b/evals/elsuite/self_prompting/readme.md @@ -0,0 +1,58 @@ +# 
Eval description
+
+How well can LMs write prompts for themselves to perform various tasks?
+
+In the Self-Prompting eval, models (Prompters) write prompts for other models (Taskers) to perform various tasks -- which are other evals from this repository (listed below). Prompters are given an initial human-written prompt for the task, and asked to rewrite it for a given Tasker model. The effectiveness of the Prompters is measured in terms of the accuracy of downstream Taskers on the tasks. We measure this prompting ability for a variety of different downstream models: gpt-3.5-turbo, gpt-4-base, and gpt-4.
+
+The headline metric for a Prompter’s success is the mean accuracy of the predictions of all its Taskers on all tasks.
+- For our primary metric `accuracy`, the accuracy score uses an exact match criterion to judge if the tasker response is correct or not (a response is correct if and only if it exactly matches the true label in the dataset).
+- As a secondary metric `accuracy_fuzzy`, we also compute results with a fuzzy match criterion, which counts a response as correct if either the model response contains the label or the label contains the response.
+
+Additionally, we present `accuracy_improvement_wrt_oriprompt` and `accuracy_fuzzy_improvement_wrt_oriprompt`, which are the accuracies normalized relative to the score of the original-prompt baseline. This is a score between -1 and +1, where -1 means the current score maximally regresses from the baseline (i.e. the current score is 0), 0 means the current score is the same as the baseline, and +1 means the current score achieves maximum improvement over the baseline. By default, the baseline score is a cached score of the original prompt (`self_prompting/oriprompt/baseline`) on the `self_prompting.full` eval.
+
+# Usage
+
+To run the eval, use the following command:
+```bash
+oaieval {solver} self_prompting
+```
+where `{solver}` is the name of the solver you want to evaluate, e.g. `self_prompting/chat_completion/gpt-4-32k`.
+
+# Experiments
+As a starting point for deeper exploration, we provide scripts for comparing various solvers and eval variants, as well as for plotting the results. To run these:
+```
+cd scripts/
+bash run_experiments.sh
+```
+
+# Dataset
+
+To form the self-prompting dataset, we extract tasks from this `evals` repository, selecting for datasets with:
+1. A system prompt that can be straightforwardly converted into a generic instruction for all task samples.
+2. A straightforward input-output format for each task sample.
+3. An output that can be judged with an exact match criterion.
+
+The full list of 50 evals we use can be found in `scripts/dataset/eval_list.py`.
+
+# Token estimate
+Below, we present a rough estimate of the total number of tokens consumed by the eval, including both input and output tokens.
+
+For self-prompting, each eval run queries multiple models. In the following table, we present the number of tokens consumed by Prompter models:
+
+| Model             | Solver type     | Tokens  |
+|-------------------|-----------------|---------|
+| code-davinci-002  | completion_hhh  | 400 000 |
+| gpt-4-base        | completion_hhh  | 360 000 |
+| gpt-3.5-turbo-16k | chat_completion | 180 000 |
+| gpt-4-32k         | chat_completion | 155 000 |
+| gpt-3.5-turbo-16k | cot             | 480 000 |
+| gpt-4-32k         | cot             | 420 000 |
+| gpt-3.5-turbo-16k | cotexpert       | 495 000 |
+| gpt-4-32k         | cotexpert       | 450 000 |
+
+In addition to the Prompter tokens, each run also queries multiple Tasker models.
By default, we use gpt-3.5-turbo, gpt-4-base, and gpt-4, consuming an additional 100k-200k tokens per model. + +To calculate dollar cost from token counts, please check the latest token pricing [here](https://openai.com/pricing). Note that we count both input and output tokens together, so a lower and upper estimate of the cost of each variant can be predicted. + +# Contribution statement +Eval design, implementation, and results evaluation were primarily conducted by Chan Jun Shern under the guidance of (alphabetically by last-name) Steven Adler, James Aung, Rosie Campbell, and Jade Leung, who provided research input and project management support. diff --git a/evals/elsuite/self_prompting/scripts/dataset/compile_data.py b/evals/elsuite/self_prompting/scripts/dataset/compile_data.py new file mode 100644 index 0000000000..6a5698c4e2 --- /dev/null +++ b/evals/elsuite/self_prompting/scripts/dataset/compile_data.py @@ -0,0 +1,91 @@ +import json + +import numpy as np +from eval_list import eval_list + +import evals.data +from evals.registry import registry + +np.random.seed(42) +min_samples_per_dataset = 50 +n_test_samples = 10 + +seen = set() +datarows = [] +for eval in registry.get_evals("*"): + if eval.key not in eval_list or eval.key in seen: + continue + seen.add(eval.key) + + if eval.args and "samples_jsonl" in eval.args: + + samples = evals.data.get_jsonl(eval.args["samples_jsonl"]) + + # Contruct our tasks dataset + instruction_input_output = [] + for sample in samples: + if "input" in sample and "ideal" in sample: + # We only want single-system single-user samples: + if isinstance(sample["input"], list) and len(sample["input"]) == 2: + if ( + sample["input"][0]["role"] == "system" + and sample["input"][1]["role"] == "user" + ): + # Skip if output is a list + if isinstance(sample["ideal"], list): + continue + + dp_instruction = sample["input"][0]["content"] + dp_in = sample["input"][1]["content"] + dp_out = sample["ideal"] + + instruction_input_output.append((dp_instruction, dp_in, dp_out)) + + # Skip if there are not enough samples + if len(instruction_input_output) < min_samples_per_dataset: + continue + # Check that all dp_instruction are the same + instruction_input_output = sorted(instruction_input_output, key=lambda x: x[0]) + if instruction_input_output[0][0] != instruction_input_output[-1][0]: + continue + + # Shuffle samples + np.random.shuffle(instruction_input_output) + + test_samples = [ + { + "input": i, + "output": o, + } + for _, i, o in instruction_input_output[:n_test_samples] + ] + train_samples = [ + { + "input": i, + "output": o, + } + for _, i, o in instruction_input_output[n_test_samples:] + ] + + row = { + "eval": eval.key, + "instruction": instruction_input_output[0][0], + "test_samples": test_samples, + "train_samples": train_samples, + } + datarows.append(row) + +assert len(datarows) == len( + eval_list +), f"Unexpected number of evals: {len(datarows)} != {len(eval_list)}" +assert set([r["eval"] for r in datarows]) == set( + eval_list +), f"Missing evals: {set(eval_list) - set([r['eval'] for r in datarows])}" + +# Shuffle rows +np.random.shuffle(datarows) + +# Save jsonl to file +with open("samples.jsonl", "w") as f: + for row in datarows: + f.write(json.dumps(row) + "\n") diff --git a/evals/elsuite/self_prompting/scripts/dataset/eval_list.py b/evals/elsuite/self_prompting/scripts/dataset/eval_list.py new file mode 100644 index 0000000000..782dcd4929 --- /dev/null +++ b/evals/elsuite/self_prompting/scripts/dataset/eval_list.py @@ -0,0 +1,52 @@ +eval_list = [ + 
"chess.match.dev.v0", + "russian_sarcasm.dev.v0", + "corr2cause.dev.v0", + "syllables.dev.v1", + "crepe.dev.v2", + "coq-proof-step-match.dev.v0", + "Chinese_character_riddles.dev.v0", + "nepali-numerals.dev.v0", + "belarusian-syllable-count.dev.v0", + "smiles_to_formula.dev.v0", + "mandaliof-table.dev.v0", + "squares-gpt.dev.v0", + "logic-statements.dev.v0", + "russe.test.v0", + "vigenere.s1.simple-v0", + "sort-numbers.s1.simple-v0", + "matrix_mult_rows.dev.v0", + "moral_exceptQA.test.v1", + "music-theory-triads-identification.dev.v0", + "building_floorplan.test.v1", + "lat_long_identify.dev.v0", + "backgammon-can-hit.dev.v0", + "belarusian-rhyme.dev.v0", + "mate-in-one.dev.v0", + "afrikaans-lexicon.dev.v0", + "2d_movement.dev.v0", + "korean_spelling.dev.v0", + "rucola.test.v0", + "ner_finance.dev.v0", + "logiqa-logical-reasoning-plus.dev.v0", + "italian_big_math_expression.dev.v0", + "medmcqa.dev.v0", + "japanese-remote-island-to-prefecture.dev.v0", + "finger-tracking.dev.v0", + "forth-stack-sim.dev.v0", + "escher-sentences.dev.v0", + "ph-calculation.dev.v0", + "diabetes.dev.v0", + "simple-block-puzzles.dev.v0", + "poker_analysis.test.v1", + "belarusian-numerals.dev.v0", + "cissp-study-questions.test.v1", + "linear-equations.dev.v0", + "first-letters.dev.v0", + "categorize-with-distractors.dev.v0", + "ambiguous-sentences.dev.v0", + "css-selectors-verbal.dev.v0", + "japanese-itpassport-exam01.dev.v0", + "logiqa.dev.v0", + "chinese_zodiac.dev.v0", +] diff --git a/evals/elsuite/self_prompting/scripts/make_plots.py b/evals/elsuite/self_prompting/scripts/make_plots.py new file mode 100644 index 0000000000..6d264e5e69 --- /dev/null +++ b/evals/elsuite/self_prompting/scripts/make_plots.py @@ -0,0 +1,151 @@ +import argparse +import csv +from pathlib import Path + +import matplotlib.pyplot as plt +import pandas as pd +import seaborn as sns +from dataset.eval_list import eval_list + +from evals.utils import log_utils + + +def extract_metrics(datadir: Path) -> pd.DataFrame: + df_rows = [] + for path, results in sorted(list(log_utils.get_final_results_from_dir(datadir).items())): + spec = log_utils.extract_spec(path) + solver_path = Path(spec["completion_fns"][0]) + model = solver_path.name + solver = solver_path.parent.name + # Remove root section of path, which is the eval name + solver_path = solver_path.relative_to(solver_path.parts[0]) + for res in log_utils.extract_individual_results(path): + df_rows.append( + { + "solver_path": solver_path, + "model": model, + "solver": solver, + "taskname": res["task"]["eval"], + **res, + } + ) + df = pd.DataFrame(df_rows) + # Sort rows + df = df.sort_values(by=["model", "solver", "taskname", "tasker_model"]) + + # Add rows with tasker_model="mean" + df_all = df.copy() + df_all["tasker_model"] = "mean" + + df = pd.concat([df, df_all]) + return df + + +def make_plot(df: pd.DataFrame, outpath: Path, metric="exact"): + sns.set_theme(style="whitegrid") + + df = df[df["tasker_model"] == "mean"] + + def compute_sem(x): + sem = x.std() / (len(x) ** 0.5) + sem2 = sem * 2 # 95% confidence interval + return (x.mean() - sem2, x.mean() + sem2) + + # Plot mean+sem accuracy, grouped by model and solver + sns.pointplot( + data=df, + x="model", + y=metric, + hue="solver", + errorbar=compute_sem, # Use standard error of the mean + dodge=True, # Separate points for different hues + capsize=0.1, # Caps for the error bars + errwidth=1, # Width of the error bars + markers=".", # Marker style + linestyles="", # No line connecting the points + ) + plt.legend(loc="upper right", 
ncol=2) + # Rotate x-axis labels, align end to center + plt.xticks(rotation=30, ha="right") + plt.ylim(0, 1) + + plt.title(f"Mean tasker accuracy ({metric})") + plt.xlabel("Prompter") + plt.tight_layout() + plt.savefig(outpath) + plt.show() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--log_dir", "-d", type=str, required=True) + parser.add_argument("--out_dir", "-o", type=str, default="./outputs") + args = parser.parse_args() + log_dir = Path(args.log_dir) + out_dir = Path(args.out_dir) + + out_dir.mkdir(exist_ok=True, parents=True) + + metrics_df = extract_metrics(log_dir) + + # Our results are an average over different task distributions, handle with care + if set(metrics_df["taskname"].unique()) != set(eval_list): + print( + "WARNING: Task distribution changed, results and error bars will not be comparable to plots with the original task distribution." + ) + + # Sample a subset of the data for inspection + subset_df = metrics_df[metrics_df["tasker_model"] != "mean"] + # Take only the first row of each [solver_path, taskname, tasker_model] group + subset_df = subset_df.groupby(["solver_path", "taskname", "tasker_model"]).first().reset_index() + subset_df.to_csv(out_dir / "subset_samples.csv", quoting=csv.QUOTE_ALL, escapechar="\\") + + make_plot(metrics_df, out_dir / "per_tasker_results_exact.png", metric="exact") + make_plot(metrics_df, out_dir / "per_tasker_results_fuzzy.png", metric="fuzzy") + + # Print results + exact_df_rows = [] + fuzzy_df_rows = [] + violation_df_rows = [] + for _, df_tasker in metrics_df.groupby(["model", "solver"]): + solver = df_tasker["solver"].iloc[0] + model = df_tasker["model"].iloc[0] + + exact = df_tasker.groupby("tasker_model")["exact"].mean() + exact_df_rows.append( + { + "model": model, + "solver": solver, + **exact, + } + ) + fuzzy = df_tasker.groupby("tasker_model")["fuzzy"].mean() + fuzzy_df_rows.append( + { + "model": model, + "solver": solver, + **fuzzy, + } + ) + prompt_rule_violation = df_tasker.groupby("tasker_model")["prompt_rule_violation"].mean() + violation_df_rows.append( + { + "model": model, + "solver": solver, + **prompt_rule_violation, + } + ) + + exact_df = pd.DataFrame(exact_df_rows) + exact_df.to_csv(out_dir / "exact.csv", quoting=csv.QUOTE_ALL, index=False) + print(exact_df) + fuzzy_df = pd.DataFrame(fuzzy_df_rows) + fuzzy_df.to_csv(out_dir / "fuzzy.csv", quoting=csv.QUOTE_ALL, index=False) + print(fuzzy_df) + violation_df = pd.DataFrame(violation_df_rows) + violation_df.to_csv(out_dir / "violation.csv", quoting=csv.QUOTE_ALL, index=False) + print(violation_df) + + +if __name__ == "__main__": + main() diff --git a/evals/elsuite/self_prompting/scripts/run_experiments.sh b/evals/elsuite/self_prompting/scripts/run_experiments.sh new file mode 100644 index 0000000000..cd761b4daf --- /dev/null +++ b/evals/elsuite/self_prompting/scripts/run_experiments.sh @@ -0,0 +1,39 @@ +logdir=./logs +outputdir=./outputs +export EVALS_THREADS=50 + +timestamp=$(date +%Y%m%d_%H%M%S) +logpathbase=$logdir/$timestamp/ + +echo Running experiments and logging to $logpathbase + +declare -a SOLVERS=( + # Solvers for gpt-4-base + "self_prompting/completion_hhh/gpt-4-base" + # Solvers for code-davinici-002 + "self_prompting/completion_hhh/code-davinci-002" + # Solvers for gpt-3.5-turbo-16k + "self_prompting/chat_completion/gpt-3.5-turbo-16k" + "self_prompting/cot/gpt-3.5-turbo-16k" + "self_prompting/cotexpert/gpt-3.5-turbo-16k" + # Solvers for gpt-4-32k + "self_prompting/chat_completion/gpt-4-32k" + "self_prompting/cot/gpt-4-32k" 
+ "self_prompting/cotexpert/gpt-4-32k" + # Baseline solvers + "self_prompting/oriprompt/baseline" + "self_prompting/noprompt/baseline" + "self_prompting/fewshot/baseline" +) + +for solver in "${SOLVERS[@]}" +do + oaieval $solver self_prompting --record_path "$logpathbase/$solver.log" +done + +echo Done running experiments, all logs in $logpathbase + +echo Producing plots, outputs to $outputdir + +# Produce results +python make_plots.py --log_dir $logpathbase --out_dir $outputdir \ No newline at end of file diff --git a/evals/elsuite/self_prompting/solvers/baselines.py b/evals/elsuite/self_prompting/solvers/baselines.py new file mode 100644 index 0000000000..5aea250905 --- /dev/null +++ b/evals/elsuite/self_prompting/solvers/baselines.py @@ -0,0 +1,70 @@ +from evals.solvers.solver import Solver, SolverResult +from evals.task_state import TaskState + + +class BaselineNoPromptSolver(Solver): + def __init__( + self, + **kwargs, + ): + """ + This solver simply returns an empty string as the prompt. + """ + + def __call__( + self, + task_state: TaskState, + **kwargs, + ) -> SolverResult: + + return SolverResult("") + + def name(self) -> str: + return "SelfPromptingBaselineNoPromptSolver" + + +class BaselineOriginalPromptSolver(Solver): + def __init__( + self, + **kwargs, + ): + """ + This solver simply returns the original instruction as the prompt. + """ + + def __call__( + self, + task_state: TaskState, + **kwargs, + ) -> SolverResult: + + instruction = task_state.current_state["instruction"] + return SolverResult(instruction) + + def name(self) -> str: + return "SelfPromptingBaselineOriginalPromptSolver" + + +class BaselineFewShotSolver(Solver): + def __init__( + self, + **kwargs, + ): + """ + This solver concatenates the given input-output examples as few-shot demonstrations. + """ + + def __call__( + self, + task_state: TaskState, + **kwargs, + ) -> SolverResult: + + prompt = task_state.current_state["instruction"] + "\n" + for sample in task_state.current_state["samples"]: + prompt += f"""{sample["input"]}{sample["output"]}\n""" + + return SolverResult(prompt) + + def name(self) -> str: + return "SelfPromptingBaselineFewShotSolver" diff --git a/evals/elsuite/self_prompting/solvers/custom_cot_solver.py b/evals/elsuite/self_prompting/solvers/custom_cot_solver.py new file mode 100644 index 0000000000..c75146518f --- /dev/null +++ b/evals/elsuite/self_prompting/solvers/custom_cot_solver.py @@ -0,0 +1,70 @@ +from typing import Any, Dict, Optional + +from evals.completion_fns.openai import OpenAIChatCompletionFn +from evals.solvers.solver import OpenAISolver, SolverResult +from evals.task_state import TaskState + +DEFAULT_COT_PRE_TEMPLATE = "{instructions}" +DEFAULT_COT_POST_TEMPLATE = "Before answering, reason in a step-by-step manner as to get the right answer, then conclude with the answer." 
+DEFAULT_EXTRACT_ANSWER_TEMPLATE = ( + "Given the above reasoning, the answer in the format requested by the question is:" +) + + +class CustomCoTSolver(OpenAISolver): + def __init__( + self, + cot_options: Dict[str, Any] = {}, + cot_pre_template: str = DEFAULT_COT_PRE_TEMPLATE, + cot_post_template: str = DEFAULT_COT_POST_TEMPLATE, + extract_options: Dict[str, Any] = {}, + extract_template: str = DEFAULT_EXTRACT_ANSWER_TEMPLATE, + valid_answers: Optional[list[str]] = None, + **kwargs, + ): + super().__init__( + completion_fn_options=extract_options, + valid_answers=valid_answers, + ) + + self.cot_completion_fn = OpenAIChatCompletionFn( + **cot_options, + ) + self.cot_pre_template = cot_pre_template + self.cot_post_template = cot_post_template + + self.extract_completion_fn = OpenAIChatCompletionFn(**self.completion_fn_options) + self.extract_template = extract_template + + def __call__( + self, + task_state: TaskState, + **kwargs, + ) -> SolverResult: + + # Reasoning step + cot_pre = self.cot_pre_template.format(instructions=task_state.task_description) + cot_post = self.cot_post_template + msgs = [] + if cot_pre != "": + msgs.append({"role": "system", "content": cot_pre}) + msgs += [msg.to_dict() for msg in task_state.messages] + if cot_post != "": + msgs.append({"role": "system", "content": cot_post}) + reasoning_output = self.cot_completion_fn(prompt=msgs, **kwargs).get_completions()[0] + + # Extract answer step + msgs = msgs + [ + {"role": "assistant", "content": reasoning_output}, + {"role": "assistant", "content": self.extract_template}, + ] + extracted_answer = self.extract_completion_fn(prompt=msgs, **kwargs).get_completions()[0] + + return SolverResult( + output=extracted_answer, + reasoning_output=reasoning_output, + ) + + @property + def name(self) -> str: + return f"SelfPromptingCoT_{self.cot_completion_fn.model}_{self.extract_completion_fn.model}" diff --git a/evals/elsuite/self_prompting/task_description.py b/evals/elsuite/self_prompting/task_description.py new file mode 100644 index 0000000000..e099e816d0 --- /dev/null +++ b/evals/elsuite/self_prompting/task_description.py @@ -0,0 +1,28 @@ +sample_in_token = "[sample_in]" +task_description_template = """Please design a prompt for a large language model to excel on a given evaluation task. Your goal is to help the model achieve as high a score as possible on the evaluation task. + +THE TASK +-------- +Here are some basic instructions that have been written for the task: +``` +{instruction} +``` + +The basic instructions provided above may be incomplete or contain errors. For clarity, we provide some examples of inputs and the output label for the task below. When in doubt, pay attention to these examples and adjust your prompt such that the target model gives its output as demonstrated: +``` +{samples} +``` + +Evaluation criteria: The target model will be tested on new samples that are not shown above, but follow the same task rules. The correctness of the model's output per sample is determined via exact match with the sample's output label. The final score is the accuracy of the target model on all samples (i.e. the number of samples for which the model's output exactly matches the output label, divided by the number of samples). + +PROMPTING THE MODEL +------------------- +The target model you are designing a prompt for is {tasker_model}. + +Each task sample will be fed independently to the model with your prompt wrapping it. 
Specifically, your prompt MUST contain at least one instance of the string "[sample_in]" (including brackets, no quotes). This string will be replaced by an input sample from the task before it is passed to the downstream model. + +Your prompt can contain any information you want (e.g. instructions, strategies, formatting tips). + +YOUR RESPONSE +------------- +Please respond with the prompt for the model. Any text you return here will be filled with the sample input and fed to the model.""" diff --git a/evals/registry/completion_fns/self_prompting.yaml b/evals/registry/completion_fns/self_prompting.yaml new file mode 100644 index 0000000000..539a981ef9 --- /dev/null +++ b/evals/registry/completion_fns/self_prompting.yaml @@ -0,0 +1,110 @@ +# Chat models + +self_prompting/chat_completion/gpt-4-32k: + class: evals.solvers.openai_chat_completion_solver:OpenAIChatCompletionSolver + args: + completion_fn_options: + model: gpt-4-32k + +self_prompting/chat_completion/gpt-3.5-turbo-16k: + class: evals.solvers.openai_chat_completion_solver:OpenAIChatCompletionSolver + args: + completion_fn_options: + model: gpt-3.5-turbo-16k + +# Completion models + +self_prompting/completion_hhh/code-davinci-002: + class: evals.solvers.openai_completion_hhh_solver:OpenAICompletionHHHSolver + args: + completion_fn_options: + model: code-davinci-002 + +self_prompting/completion_hhh/gpt-4-base: + class: evals.solvers.openai_completion_hhh_solver:OpenAICompletionHHHSolver + args: + completion_fn_options: + model: gpt-4-base + +# CoT + +self_prompting/cot/gpt-3.5-turbo-16k: + class: evals.elsuite.self_prompting.solvers.custom_cot_solver:CustomCoTSolver + args: + cot_pre_template: &cot_pre_template "Consider the following instructions, but do not answer immediately: {instructions}\nNow, please momentarily disregard any instructions from the task above. Instead, please take a moment to reason in a step-by-step manner about how to get the best answer before answering. You will NOT be evaluated for your first response, so you should use that to your advantage. Later, you will be given a second chance to give your final response." + cot_post_template: &cot_post_template "" + cot_options: + model: gpt-3.5-turbo-16k + extra_options: + temperature: 1 + max_tokens: 1024 + extract_template: &extract_template "Given the above reasoning, the answer in the format requested by the question is (do not put quotes around your answer):" + extract_options: + model: gpt-3.5-turbo-16k + extra_options: + temperature: 0 + max_tokens: 1024 + +self_prompting/cot/gpt-4-32k: + class: evals.elsuite.self_prompting.solvers.custom_cot_solver:CustomCoTSolver + args: + cot_pre_template: *cot_pre_template + cot_post_template: *cot_post_template + cot_options: + model: gpt-4-32k + extra_options: + temperature: 1 + max_tokens: 1024 + extract_template: *extract_template + extract_options: + model: gpt-4-32k + extra_options: + temperature: 0 + max_tokens: 1024 + +# CoT expert + +self_prompting/cotexpert/gpt-3.5-turbo-16k: + class: evals.elsuite.self_prompting.solvers.custom_cot_solver:CustomCoTSolver + args: + cot_pre_template: &cotexpert_pre_template "Consider the following instructions, but do not answer immediately: {instructions}\nNow, please momentarily disregard any instructions from the task above. Instead, please take a moment to reason in a step-by-step manner about how to get the best answer before answering (you may consider expert strategies for prompting language models such as few-shot prompting). 
You will NOT be evaluated for your first response, so you should use that to your advantage. Later, you will be given a second chance to give your final response." + cot_post_template: *cot_post_template + cot_options: + model: gpt-3.5-turbo-16k + extra_options: + temperature: 1 + max_tokens: 1024 + extract_template: *extract_template + extract_options: + model: gpt-3.5-turbo-16k + extra_options: + temperature: 0 + max_tokens: 1024 + +self_prompting/cotexpert/gpt-4-32k: + class: evals.elsuite.self_prompting.solvers.custom_cot_solver:CustomCoTSolver + args: + cot_pre_template: *cotexpert_pre_template + cot_post_template: *cot_post_template + cot_options: + model: gpt-4-32k + extra_options: + temperature: 1 + max_tokens: 1024 + extract_template: *extract_template + extract_options: + model: gpt-4-32k + extra_options: + temperature: 0 + max_tokens: 1024 + +# Baselines + +self_prompting/noprompt/baseline: + class: evals.elsuite.self_prompting.solvers.baselines:BaselineNoPromptSolver + +self_prompting/oriprompt/baseline: + class: evals.elsuite.self_prompting.solvers.baselines:BaselineOriginalPromptSolver + +self_prompting/fewshot/baseline: + class: evals.elsuite.self_prompting.solvers.baselines:BaselineFewShotSolver diff --git a/evals/registry/data/self_prompting/oriprompt.log b/evals/registry/data/self_prompting/oriprompt.log new file mode 100644 index 0000000000..627f3cc5e9 --- /dev/null +++ b/evals/registry/data/self_prompting/oriprompt.log @@ -0,0 +1,2 @@ +{"spec": {"completion_fns": ["self_prompting/oriprompt/baseline"], "eval_name": "self_prompting.full", "base_eval": "self_prompting", "split": "full", "run_config": {"completion_fns": ["self_prompting/oriprompt/baseline"], "eval_spec": {"cls": "evals.elsuite.self_prompting.eval:SelfPrompting", "args": {"samples_jsonl": "self_prompting/samples.jsonl", "tasker_models": ["gpt-3.5-turbo", "gpt-4-base", "gpt-4"], "n_tasks": 50, "n_samples_per_task": 10}, "key": "self_prompting.full", "group": "self_prompting"}, "seed": 20220722, "max_samples": null, "command": "/opt/homebrew/Caskroom/miniconda/base/envs/evals-tmp/bin/oaieval self_prompting/oriprompt/baseline self_prompting --record_path ./logs/20231019_002040//self_prompting/oriprompt/baseline.log", "initial_settings": {"visible": true}}, "created_by": "", "run_id": "2310190045387DTSUPSQ", "created_at": "2023-10-19 00:45:38.298619"}} +{"final_report": {"accuracy": 0.20733333333333334, "accuracy_fuzzy": 0.344, "prompt_rule_violation_rate": 1.0, "n_samples": 1500, "accuracy_gpt-3.5-turbo": 0.258, "accuracy_fuzzy_gpt-3.5-turbo": 0.366, "accuracy_gpt-4-base": 0.0, "accuracy_fuzzy_gpt-4-base": 0.186, "accuracy_gpt-4": 0.364, "accuracy_fuzzy_gpt-4": 0.48}} diff --git a/evals/registry/data/self_prompting/samples.jsonl b/evals/registry/data/self_prompting/samples.jsonl new file mode 100644 index 0000000000..e2cf7b41e9 --- /dev/null +++ b/evals/registry/data/self_prompting/samples.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9a187a84e14b59c663530a0e2a3735282adc07a80127b280310fddaf9118557 +size 50232467 diff --git a/evals/registry/evals/self_prompting.yaml b/evals/registry/evals/self_prompting.yaml new file mode 100644 index 0000000000..f7ddbbe088 --- /dev/null +++ b/evals/registry/evals/self_prompting.yaml @@ -0,0 +1,21 @@ +self_prompting: + id: self_prompting.full + metrics: [accuracy, accuracy_fuzzy, n_samples] + description: Evaluate the ability of models to prompt other models to perform single-turn eval tasks. 
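+# Variants below: `self_prompting.full` runs all three tasker models and enables the baseline
+# comparison via `baseline_logpath`; `self_prompting.small` is a cheaper variant that uses a
+# single tasker model and one test sample per task.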
+
+self_prompting.full:
+  class: evals.elsuite.self_prompting.eval:SelfPrompting
+  args:
+    samples_jsonl: self_prompting/samples.jsonl
+    tasker_models: ["gpt-3.5-turbo", "gpt-4-base", "gpt-4"]
+    n_tasks: 50
+    n_samples_per_task: 10
+    baseline_logpath: self_prompting/oriprompt.log
+
+self_prompting.small:
+  class: evals.elsuite.self_prompting.eval:SelfPrompting
+  args:
+    samples_jsonl: self_prompting/samples.jsonl
+    tasker_models: ["gpt-3.5-turbo"]
+    n_tasks: 50
+    n_samples_per_task: 1
diff --git a/evals/utils/log_utils.py b/evals/utils/log_utils.py
new file mode 100644
index 0000000000..d54a846f41
--- /dev/null
+++ b/evals/utils/log_utils.py
@@ -0,0 +1,67 @@
+import json
+from pathlib import Path
+from typing import Union
+
+
+def get_final_results_from_dir(log_dir: Union[str, Path]) -> dict[Path, dict]:
+    """
+    Given a directory of log files, return a dictionary mapping log file paths to final results.
+    """
+    final_results_dict = {}
+    for path in Path(log_dir).glob("**/*.log"):
+        final_results = extract_final_results(path)
+        final_results_dict[path] = final_results
+    return final_results_dict
+
+
+def extract_final_results(path: Path) -> dict:
+    """
+    Given a path to a log file, find and return the "final_report" dictionary.
+    """
+    with path.open() as f:
+        for line in f.readlines():
+            line = line.strip()
+            try:
+                loaded_line = json.loads(line)
+                if "final_report" in loaded_line:
+                    return loaded_line["final_report"]
+            except json.decoder.JSONDecodeError:
+                print(f"Skipping line: {line}")
+                continue
+    raise ValueError(f"Could not find final_report in {path}")
+
+
+def extract_individual_results(path: Path) -> list[dict]:
+    """
+    Given a path to a log file, grab all the individual sample results.
+    """
+    all_data = []
+    with path.open() as f:
+        for line in f.readlines():
+            line = line.strip()
+            try:
+                loaded_line = json.loads(line)
+                if "type" in loaded_line:
+                    if loaded_line["type"] == "metrics":
+                        all_data.append(loaded_line["data"])
+            except json.decoder.JSONDecodeError:
+                print(f"Skipping line: {line}")
+                continue
+    return all_data
+
+
+def extract_spec(path: Path) -> dict:
+    """
+    Given a path to a log file, find and return the "spec" dictionary.
+    """
+    with path.open() as f:
+        for line in f.readlines():
+            line = line.strip()
+            try:
+                loaded_line = json.loads(line)
+                if "spec" in loaded_line:
+                    return loaded_line["spec"]
+            except json.decoder.JSONDecodeError:
+                print(f"Skipping line: {line}")
+                continue
+    raise ValueError(f"Could not find spec in {path}")
diff --git a/pyproject.toml b/pyproject.toml
index 902abf7adc..437dd6138b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,6 +30,7 @@ dependencies = [
     "types-PyYAML",
     "spacy-universal-sentence-encoder",
     "jiwer",
+    "seaborn",
 ]
 
 [project.optional-dependencies]
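For reference, a minimal usage sketch of the `log_utils` helpers added above (the log directory path is illustrative; it follows the `logs/<timestamp>/` layout produced by `run_experiments.sh`):

```python
from pathlib import Path

from evals.utils import log_utils

# Illustrative log directory, matching the layout written by run_experiments.sh
logdir = Path("./logs/20231019_002040")

# get_final_results_from_dir maps each *.log file under logdir to its "final_report" dict
for path, final_report in log_utils.get_final_results_from_dir(logdir).items():
    spec = log_utils.extract_spec(path)  # the run "spec" recorded in the same log file
    print(
        spec["completion_fns"][0],       # e.g. "self_prompting/oriprompt/baseline"
        final_report.get("accuracy"),
        final_report.get("accuracy_fuzzy"),
    )
```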