Datasets:

Modalities: Text
Formats: json
Languages: Catalan
ArXiv: 2202.06871
Libraries: Datasets, pandas
License:
inigopm committed on
Commit c66feb3
1 Parent(s): d06e03b

Data visualization fixed

Files changed (4)
  1. OLD/casum.py +86 -0
  2. data/test.jsonl +3 -0
  3. data/train.jsonl +3 -0
  4. data/valid.jsonl +3 -0
OLD/casum.py ADDED
@@ -0,0 +1,86 @@
+ # Loading script for the CaSum dataset.
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """@misc{degibert2022sequencetosequence,
+     title={Sequence-to-Sequence Resources for Catalan},
+     author={Ona de Gibert and Ksenia Kharitonova and Blanca Calvo Figueras and Jordi Armengol-Estapé and Maite Melero},
+     year={2022},
+     eprint={2202.06871},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }"""
+
+ _DESCRIPTION = """CaSum is a summarization dataset extracted from a newswire corpus crawled from the Catalan News Agency. The corpus consists of 217,735 instances, each composed of a headline and a body.
+ """
+
+ _HOMEPAGE = "https://github.com/TeMU-BSC/seq-to-seq-catalan"
+
+ _URL = "https://huggingface.co/datasets/projecte-aina/casum/resolve/main/"
+ _TRAIN_FILE = "train.jsonl"
+ _VALID_FILE = "valid.jsonl"
+ _TEST_FILE = "test.jsonl"
+
+
+ class CaSumConfig(datasets.BuilderConfig):
+     """Builder config for the CaSum dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for CaSum.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(CaSumConfig, self).__init__(**kwargs)
+
+
+ class CaSum(datasets.GeneratorBasedBuilder):
+     """CaSum dataset: headline (summary) / body (text) pairs."""
+
+     BUILDER_CONFIGS = [
+         CaSumConfig(
+             name="CaSum",
+             version=datasets.Version("1.0.0"),
+             description="CaSum dataset",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "summary": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators for the train, validation and test splits."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAIN_FILE}",
+             "valid": f"{_URL}{_VALID_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples in their raw (text) form, one per JSONL line."""
+         logger.info("generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 article = json.loads(row)
+                 text = article["text"]
+                 summary = article["summary"]
+                 yield id_, {"summary": summary, "text": text}
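
Once the files above are in place, the splits can be pulled through the standard `datasets` API. A minimal sketch, assuming the dataset is published under the repo id `projecte-aina/casum` used in `_URL` above:

```python
from datasets import load_dataset

# Download and build all three splits via the loading script above.
dataset = load_dataset("projecte-aina/casum")

# Each example carries the two string features declared in _info().
example = dataset["train"][0]
print(example["summary"])
print(example["text"][:200])
```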
data/test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a89ab6ba4a414c5eeb3bb12984930a8c1572fb9a45a3180635215503f96732fe
+ size 22414860
data/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2f41efa8ea790d57e3aa4dbaf786aba5589435e8fc8491d7511df975c5ca01e
+ size 442268535
data/valid.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb210735172c0d09e6321c6fa5cab9dc12fd5ffa2b254c5916c70b287b8c9e03
+ size 22387837
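
The three data files are checked in as Git LFS pointers rather than raw JSONL; the Hub resolves each pointer to the actual file when the `resolve/main/` URLs in the script are downloaded. For illustration, a minimal sketch of reading such a pointer file into its key/value fields (the `parse_lfs_pointer` helper is hypothetical, not part of this repo):

```python
# Hypothetical helper: parse a Git LFS pointer file, for illustration only.
def parse_lfs_pointer(path):
    """Return the version/oid/size fields of a Git LFS pointer as a dict."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            # Each pointer line is "<key> <value>", e.g. "size 22414860".
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("data/test.jsonl")
print(pointer["oid"])   # e.g. sha256:a89ab6ba...
print(pointer["size"])  # size in bytes of the real JSONL file
```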