# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

| """TODO: Add a description here.""" | |
| from typing import List | |
| import evaluate | |
| import datasets | |
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Document retrieval metric: scores lists of retrieved document titles against
lists of relevant reference titles and reports an F-beta score (beta = 0.7)
built from the mean per-example precision and recall.
"""

_KWARGS_DESCRIPTION = """
Scores retrieved document titles against the relevant reference titles.
Args:
    predictions: list of lists of retrieved document titles, one inner
        list per example.
    references: list of lists of relevant (gold) document titles, one
        inner list per example. Examples with no relevant titles are skipped.
Returns:
    f1: F-beta score (beta = 0.7) combining the mean per-example precision
        and recall of the retrieved titles.
Examples:
    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(references=[["doc_a"]], predictions=[["doc_a"]])
    >>> print(results)
    {'f1': 1.0}
"""

def calculate_precision(
    predictions: List[List[str]],
    references: List[List[str]],
) -> float:
    """Mean per-example precision: the fraction of predicted titles that are relevant."""
    precision = 0.0
    count = 0
    for i, relevant in enumerate(references):
        # Examples with no relevant titles carry no signal; skip them.
        if len(relevant) == 0:
            continue
        predicted_titles = predictions[i]
        hits = sum(1 for title in predicted_titles if title in relevant)
        if len(predicted_titles) != 0:
            precision += hits / len(predicted_titles)
        count += 1
    # Guard against division by zero when every reference list is empty.
    return precision / count if count else 0.0

def calculate_recall(
    predictions: List[List[str]],
    references: List[List[str]],
) -> float:
    """Mean per-example recall: the fraction of relevant titles that were retrieved."""
    recall = 0.0
    count = 0
    for i, relevant in enumerate(references):
        if len(relevant) == 0:
            continue
        predicted_titles = predictions[i]
        hits = sum(1 for title in predicted_titles if title in relevant)
        recall += hits / len(relevant)
        count += 1
    # Guard against division by zero when every reference list is empty.
    return recall / count if count else 0.0

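# Worked example (hypothetical data): with predictions = [["a", "b"]] and
# references = [["a"]], there is one hit, so calculate_precision returns
# 1 / 2 = 0.5 and calculate_recall returns 1 / 1 = 1.0.
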
# Weight on recall in the F-beta score; beta < 1 favors precision over recall.
beta = 0.7

class DocRetrieveMetrics(evaluate.Metric):
    """Document retrieval metric reporting an F-beta score (beta = 0.7)."""

    def _info(self):
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference:
            # one list of title strings per example.
            features=datasets.Features({
                "predictions": datasets.Sequence(datasets.Value("string")),
                "references": datasets.Sequence(datasets.Value("string")),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        # No external resources are needed for this metric.
        pass

    def _compute(self, predictions, references):
        """Returns the F-beta score of the retrieved titles."""
        recall = calculate_recall(predictions, references)
        precision = calculate_precision(predictions, references)
        # F-beta = (1 + beta^2) * P * R / (beta^2 * P + R); guard the 0/0 case.
        denominator = beta * beta * precision + recall
        f_score = (1 + beta * beta) * precision * recall / denominator if denominator else 0.0
        return {"f1": float(f_score)}
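

# Minimal smoke test, a sketch assuming the script is executed directly; the
# example data below is illustrative and not part of the original module.
if __name__ == "__main__":
    metric = DocRetrieveMetrics()
    result = metric.compute(
        predictions=[["doc_a"], ["doc_b", "doc_c"]],
        references=[["doc_a"], ["doc_b"]],
    )
    print(result)  # {'f1': ...}: F-beta of mean precision 0.75 and recall 1.0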