IFEval

get_input_data_model()

Returns IFEval input data model.

Source code in synalinks/src/datasets/built_in/ifeval.py
@synalinks_export("synalinks.datasets.ifeval.get_input_data_model")
def get_input_data_model():
    """Returns IFEval input data model."""
    return IFEvalQuestion

get_output_data_model()

Returns IFEval output data model.

Source code in synalinks/src/datasets/built_in/ifeval.py
@synalinks_export("synalinks.datasets.ifeval.get_output_data_model")
def get_output_data_model():
    """Returns IFEval output data model."""
    return IFEvalAnswer
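
Together, these two accessors expose the typed schema of the benchmark: IFEvalQuestion for inputs and IFEvalAnswer for outputs. A minimal usage sketch, assuming the module is importable under its exported path synalinks.datasets.ifeval:

from synalinks.datasets import ifeval

# Retrieve the data models that type the dataset's inputs and outputs.
InputModel = ifeval.get_input_data_model()    # IFEvalQuestion
OutputModel = ifeval.get_output_data_model()  # IFEvalAnswer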

iterable_dataset(repeat=1, batch_size=1, limit=None, split='train')

Streaming dataset for RL-style training.

Returns:

HuggingFaceDataset: A streaming, iterable dataset.

Source code in synalinks/src/datasets/built_in/ifeval.py
@synalinks_export("synalinks.datasets.ifeval.iterable_dataset")
def iterable_dataset(repeat=1, batch_size=1, limit=None, split="train"):
    """
    Streaming dataset for RL-style training.

    Returns:
        (HuggingFaceDataset): A streaming, iterable dataset.
    """
    return HuggingFaceDataset(
        path="google/IFEval",
        split=split,
        streaming=True,
        input_data_model=IFEvalQuestion,
        input_template=_INPUT_TEMPLATE,
        output_data_model=IFEvalAnswer,
        output_template=_OUTPUT_TEMPLATE,
        batch_size=batch_size,
        limit=limit,
        repeat=repeat,
    )
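
A minimal usage sketch, again assuming the synalinks.datasets.ifeval import path; since the returned HuggingFaceDataset is documented as iterable, a training loop can consume it batch by batch (the exact batch structure is an assumption here):

from synalinks.datasets import ifeval

# Stream a capped number of examples: limit=8 bounds the stream and
# batch_size=4 groups examples into batches, per the parameters above.
dataset = ifeval.iterable_dataset(batch_size=4, limit=8)
for batch in dataset:
    ...  # feed each batch to an RL-style training step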

load_data(validation_split=0.2)

Load IFEval (Instruction-Following Eval).

HF ships only a train split (~541 prompts), so we split it deterministically into train / test. The benchmark is rule-based; the gold response is the prompt itself, intended to be used with an LM-as-judge reward.

Parameters:

validation_split (float): Fraction held out for evaluation. Defaults to 0.2.

Returns:

tuple: (x_train, y_train), (x_test, y_test).

Source code in synalinks/src/datasets/built_in/ifeval.py
@synalinks_export("synalinks.datasets.ifeval.load_data")
def load_data(validation_split=0.2):
    """
    Load IFEval (Instruction-Following Eval).

    HF ships only a ``train`` split (~541 prompts), so we split it
    deterministically into train / test. The benchmark is rule-based;
    the gold ``response`` is the prompt itself, intended to be used
    with an LM-as-judge reward.

    Args:
        validation_split (float): Fraction held out for evaluation
            (default ``0.2``).

    Returns:
        (tuple): ``(x_train, y_train), (x_test, y_test)``.
    """
    x, y = load_split(
        path="google/IFEval",
        split="train",
        input_data_model=IFEvalQuestion,
        input_template=_INPUT_TEMPLATE,
        output_data_model=IFEvalAnswer,
        output_template=_OUTPUT_TEMPLATE,
    )
    return split_train_test(x, y, validation_split=validation_split)
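
A minimal usage sketch, assuming the same import path. The split is deterministic, so repeated calls with the same validation_split return the same partition; with ~541 prompts and the default split of 0.2, roughly 433 examples land in train and 108 in test (exact counts depend on rounding):

from synalinks.datasets import ifeval

# Load the deterministic train/test split of IFEval.
(x_train, y_train), (x_test, y_test) = ifeval.load_data(validation_split=0.2)
print(len(x_train), len(x_test))  # roughly 433 / 108 given ~541 prompts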