Bases: Optimizer
Randomly select among the best examples to populate the LM's prompt, so that it
learns using few-shot learning.
Example:
import synalinks
import asyncio
async def main():
# ... your program definition
program.compile(
reward=synalinks.rewards.ExactMatch(),
optimizer=synalinks.optimizers.RandomFewShot(
k=3, # The number of examples to provide to the prompt
k_best=10, # The number of best examples to select from
),
)
history = await program.fit(...)
References:
- [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165)
Parameters:

| Name   | Type | Description                                                              | Default |
|--------|------|--------------------------------------------------------------------------|---------|
| k      | int  | The number of examples to select (default 3) among the best predictions. | 3       |
| k_best | int  | The max number of best predictions to select from (default 10).          | 10      |
Source code in synalinks/src/optimizers/random_few_shot.py
@synalinks_export("synalinks.optimizers.RandomFewShot")
class RandomFewShot(Optimizer):
    """Select randomly the best examples to populate the LM's prompt to make it
    learn using Few Shot Learning.

    Example:

    ```python
    import synalinks
    import asyncio

    async def main():
        # ... your program definition
        program.compile(
            reward=synalinks.rewards.ExactMatch(),
            optimizer=synalinks.optimizers.RandomFewShot(
                k=3,  # The number of examples to provide to the prompt
                k_best=10,  # The number of best examples to select from
            ),
        )
        history = await program.fit(...)
    ```

    References:
        - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165)

    Args:
        k (int): The number of examples to select (default 3) among the best
            predictions.
        k_best (int): The max number of best predictions to select from
            (default 10).
    """

    def __init__(
        self,
        k=3,
        k_best=10,
        name=None,
        description=None,
    ):
        super().__init__(
            name=name,
            description=description,
            data_model=FewShotOptimizedVariable,
        )
        # Number of examples injected into the prompt, and the size of the
        # top-reward candidate pool they are sampled from.
        self.k = k
        self.k_best = k_best

    def build(self, variables):
        # No per-variable state to allocate; just flag the optimizer as built.
        self.built = True

    async def optimize(self, trainable_variable, reward=None):
        """Perform a backprop/optimization on a single variable."""
        # Reward backpropagation: credit the current reward to every
        # prediction that has not received one yet.
        credited = []
        for prediction in trainable_variable.get("predictions"):
            if prediction["reward"] is None:
                prediction["reward"] = reward
            credited.append(prediction)
        trainable_variable.update({"predictions": credited})
        # Rank predictions by reward (missing rewards sink to the bottom)
        # and keep at most `k_best` candidates.
        ranked = sorted(
            credited,
            key=lambda entry: (
                entry["reward"] if entry["reward"] is not None else float("-inf")
            ),
            reverse=True,
        )
        candidates = ranked[: self.k_best]
        # Randomly draw `k` examples from the candidate pool, or take the
        # whole pool when it is small enough.
        if len(candidates) > self.k:
            chosen = random.sample(candidates, self.k)
        else:
            chosen = candidates
        trainable_variable.update({"examples": chosen})

    async def finalize(self, trainable_variable):
        """Finalize the optimization of a single variable (cleanup/scaling etc.)."""
        # Drop the accumulated predictions once optimization is complete.
        trainable_variable.update({"predictions": []})

    def get_config(self):
        # Serializable constructor arguments for program/optimizer reload.
        return {
            "k": self.k,
            "k_best": self.k_best,
            "name": self.name,
            "description": self.description,
        }
finalize(trainable_variable)
async
Finalize the optimization of a single variable (cleanup/scaling etc.).
Source code in synalinks/src/optimizers/random_few_shot.py
async def finalize(self, trainable_variable):
    """Finalize the optimization of a single variable (cleanup/scaling etc.).

    Resets the variable's stored predictions so that no stale history is
    carried into a subsequent training run.
    """
    trainable_variable.update({"predictions": []})
optimize(trainable_variable, reward=None)
async
Perform a backprop/optimization on a single variable.
Source code in synalinks/src/optimizers/random_few_shot.py
async def optimize(self, trainable_variable, reward=None):
    """Perform a backprop/optimization on a single variable.

    Credits the given reward to uncredited predictions, ranks the history
    by reward, and stores up to `self.k` examples sampled from the
    `self.k_best` highest-reward predictions.
    """
    # Reward backpropagation: fill in the reward for every prediction
    # that has not been credited yet.
    refreshed = []
    for prediction in trainable_variable.get("predictions"):
        if prediction["reward"] is None:
            prediction["reward"] = reward
        refreshed.append(prediction)
    trainable_variable.update({"predictions": refreshed})
    # Rank by reward, treating missing rewards as -inf, then keep the
    # `k_best` highest-scoring predictions as the candidate pool.
    pool = sorted(
        refreshed,
        key=lambda entry: (
            entry["reward"] if entry["reward"] is not None else float("-inf")
        ),
        reverse=True,
    )[: self.k_best]
    # Randomly pick `k` examples from the pool (take all when fewer).
    picked = random.sample(pool, self.k) if len(pool) > self.k else pool
    trainable_variable.update({"examples": picked})