<?xml version="1.0" encoding="UTF-8"?>
<!--
  DataCite kernel-4 metadata record (Dataverse export) for the Harvard
  Dataverse dataset doi:10.7910/DVN/BZ1RWS,
  "word2vec model trained on universities corpus".
-->
<resource xmlns="http://datacite.org/schema/kernel-4" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.5/metadata.xsd">
  <identifier identifierType="DOI">10.7910/DVN/BZ1RWS</identifier>
  <creators>
    <creator>
      <creatorName nameType="Personal">Rozado, David</creatorName>
      <givenName>David</givenName>
      <familyName>Rozado</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="https://orcid.org">https://orcid.org/0000-0001-6849-4746</nameIdentifier>
      <affiliation>CSIRO</affiliation>
    </creator>
  </creators>
  <titles>
    <title>word2vec model trained on universities corpus</title>
  </titles>
  <publisher>Harvard Dataverse</publisher>
  <publicationYear>2019</publicationYear>
  <subjects>
    <subject>Computer and Information Science</subject>
    <subject>Social Sciences</subject>
    <!-- One term per <subject>: the original record packed both keywords
         into a single comma-separated value ("word2vec Model, word
         embeddings"); DataCite's subject property is repeatable, so each
         keyword gets its own element. -->
    <subject>word2vec Model</subject>
    <subject>word embeddings</subject>
  </subjects>
  <contributors>
    <contributor contributorType="ContactPerson">
      <contributorName nameType="Personal">Rozado, David</contributorName>
      <givenName>David</givenName>
      <familyName>Rozado</familyName>
      <!-- NOTE(review): this is the same person as the creator above, but the
           creator affiliation says "CSIRO" while this one says "Otago
           Polytechnic" — confirm with the depositor which is intended for
           this record; both are kept unchanged here. -->
      <affiliation>Otago Polytechnic</affiliation>
    </contributor>
  </contributors>
  <dates>
    <date dateType="Submitted">2019-06-10</date>
    <date dateType="Available">2019-06-10</date>
  </dates>
  <!-- The required resourceTypeGeneral attribute is present; the optional
       free-text value is empty in the Dataverse export. -->
  <resourceType resourceTypeGeneral="Dataset"/>
  <relatedIdentifiers>
    <relatedIdentifier relationType="HasPart" relatedIdentifierType="DOI">10.7910/DVN/BZ1RWS/FDSELM</relatedIdentifier>
    <relatedIdentifier relationType="HasPart" relatedIdentifierType="DOI">10.7910/DVN/BZ1RWS/ZOAHOI</relatedIdentifier>
  </relatedIdentifiers>
  <!-- NOTE(review): the size values look like file sizes in bytes for the
       two parts listed above, but no unit is recorded — confirm against the
       Dataverse file metadata before relying on them. -->
  <sizes>
    <size>2147483648</size>
    <size>1145831825</size>
  </sizes>
  <formats>
    <format>application/x-rar-compressed</format>
    <format>application/x-rar-compressed</format>
  </formats>
  <version>1.0</version>
  <rightsList>
    <rights rightsURI="info:eu-repo/semantics/openAccess"/>
    <rights rightsURI="http://creativecommons.org/publicdomain/zero/1.0" rightsIdentifier="CC0-1.0" rightsIdentifierScheme="SPDX" schemeURI="https://spdx.org/licenses/" xml:lang="en">Creative Commons CC0 1.0 Universal Public Domain Dedication.</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">word2vec model trained on the concatenation of all the individual universities corpora. To generate the word embeddings of the corpus, the gensim implementation of word2vec (CBOW) was used. For training the word embeddings model, the following parameters were used: vector dimensions=300, window size=10, negative sampling=10, down sampling frequent words = 0.00008 (downsamples 612 most-common words), number of iterations (epochs) through the corpus=10, maximum final vocabulary= 3 million. The maximum final vocabulary resulted in an effective minimum frequency count of 20. That is, only terms that appear more than 20 times in the corpus were included into the word embedding model vocabulary. The exponent used to shape the negative sampling distribution was 0.5.</description>
  </descriptions>
</resource>
