LAMBADA

get_input_data_model()

Returns LAMBADA input data model.

Source code in synalinks/src/datasets/built_in/lambada.py
@synalinks_export("synalinks.datasets.lambada.get_input_data_model")
def get_input_data_model():
    """Return the data model class used for LAMBADA inputs."""
    input_model = LAMBADAQuestion
    return input_model

get_output_data_model()

Returns LAMBADA output data model.

Source code in synalinks/src/datasets/built_in/lambada.py
@synalinks_export("synalinks.datasets.lambada.get_output_data_model")
def get_output_data_model():
    """Return the data model class used for LAMBADA outputs."""
    output_model = LAMBADAAnswer
    return output_model

iterable_dataset(repeat=1, batch_size=1, limit=None, split='test')

Streaming dataset for RL-style training.

Returns:

`HuggingFaceDataset` — a streaming, iterable dataset.

Source code in synalinks/src/datasets/built_in/lambada.py
@synalinks_export("synalinks.datasets.lambada.iterable_dataset")
def iterable_dataset(repeat=1, batch_size=1, limit=None, split="test"):
    """
    Streaming dataset for RL-style training.

    All keyword arguments (`repeat`, `batch_size`, `limit`, `split`) are
    forwarded unchanged to the `HuggingFaceDataset` constructor, alongside
    the fixed LAMBADA (OpenAI variant) configuration.

    Returns:
        (HuggingFaceDataset): A streaming, iterable dataset.
    """
    # Fixed dataset configuration plus caller-supplied streaming options.
    dataset_config = dict(
        path="EleutherAI/lambada_openai",
        name="en",
        split=split,
        streaming=True,
        input_data_model=LAMBADAQuestion,
        input_template=_INPUT_TEMPLATE,
        output_data_model=LAMBADAAnswer,
        output_template=_OUTPUT_TEMPLATE,
        batch_size=batch_size,
        limit=limit,
        repeat=repeat,
    )
    return HuggingFaceDataset(**dataset_config)

load_data(validation_split=0.2)

Load LAMBADA (OpenAI variant).

HF ships only a test split (~5k passages), so we split it deterministically into train / test.

Parameters:

- `validation_split` (`float`, default `0.2`): fraction held out for evaluation.

Returns:

- `tuple`: `(x_train, y_train), (x_test, y_test)`.

Source code in synalinks/src/datasets/built_in/lambada.py
@synalinks_export("synalinks.datasets.lambada.load_data")
def load_data(validation_split=0.2):
    """
    Load LAMBADA (OpenAI variant).

    Only a ``test`` split (~5k passages) is shipped on HF, so that single
    split is partitioned deterministically into train / test here.

    Args:
        validation_split (float): Fraction held out for evaluation
            (default ``0.2``).

    Returns:
        (tuple): ``(x_train, y_train), (x_test, y_test)``.
    """
    # Pull the full "test" split, then carve out the evaluation fraction.
    inputs, outputs = load_split(
        path="EleutherAI/lambada_openai",
        name="en",
        split="test",
        input_data_model=LAMBADAQuestion,
        input_template=_INPUT_TEMPLATE,
        output_data_model=LAMBADAAnswer,
        output_template=_OUTPUT_TEMPLATE,
    )
    return split_train_test(inputs, outputs, validation_split=validation_split)