<?xml version='1.0' encoding='UTF-8'?><codeBook xmlns="ddi:codebook:2_5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="ddi:codebook:2_5 https://ddialliance.org/Specification/DDI-Codebook/2.5/XMLSchema/codebook.xsd" version="2.5"><docDscr><citation><titlStmt><titl>word2vec model trained on universities corpus</titl><IDNo agency="DOI">doi:10.7910/DVN/BZ1RWS</IDNo></titlStmt><distStmt><distrbtr source="archive">Harvard Dataverse</distrbtr><distDate>2019-06-10</distDate></distStmt><verStmt source="archive"><version date="2019-06-10" type="RELEASED">1</version></verStmt><biblCit>Rozado, David, 2019, "word2vec model trained on universities corpus", https://doi.org/10.7910/DVN/BZ1RWS, Harvard Dataverse, V1</biblCit></citation></docDscr><stdyDscr><citation><titlStmt><titl>word2vec model trained on universities corpus</titl><IDNo agency="DOI">doi:10.7910/DVN/BZ1RWS</IDNo></titlStmt><rspStmt><AuthEnty affiliation="CSIRO">Rozado, David</AuthEnty></rspStmt><prodStmt/><distStmt><distrbtr source="archive">Harvard Dataverse</distrbtr><contact affiliation="Otago Polytechnic" email="drozado@gmail.com">Rozado, David</contact><depositr>Rozado, David</depositr><depDate>2019-06-10</depDate></distStmt><holdings URI="https://doi.org/10.7910/DVN/BZ1RWS"/></citation><stdyInfo><subject><keyword xml:lang="en">Computer and Information Science</keyword><keyword xml:lang="en">Social Sciences</keyword><keyword>word2vec Model, word embeddings</keyword></subject><abstract date="2019">word2vec model trained on the concatenation of all the individual universities corpora. To generate the word embeddings of the corpus, the gensim implementation of word2vec (CBOW) was used. For training the word embeddings model, the following parameters were used: vector dimensions=300, window size=10, negative sampling=10, down sampling frequent words = 0.00008 (downsamples 612 most-common words), number of iterations (epochs) through the corpus=10, maximum final vocabulary= 3 million. 
The maximum final vocabulary resulted in an effective minimum frequency count of 20. That is, only terms that appear at least 20 times in the corpus were included in the word embedding model vocabulary. The exponent used to shape the negative sampling distribution was 0.5.</abstract><sumDscr/></stdyInfo><method><dataColl><sources/></dataColl><anlyInfo/></method><dataAccs><setAvail/><useStmt/><notes type="DVN:TOU" level="dv">&lt;a href="http://creativecommons.org/publicdomain/zero/1.0">CC0 1.0&lt;/a></notes></dataAccs><othrStdyMat/></stdyDscr><otherMat ID="f3446248" URI="https://dataverse.harvard.edu/api/access/datafile/3446248" level="datafile"><labl>word2vecModelsTrigrams.part1.rar</labl><notes level="file" type="DATAVERSE:CONTENTTYPE" subject="Content/MIME Type">application/x-rar-compressed</notes></otherMat><otherMat ID="f3446249" URI="https://dataverse.harvard.edu/api/access/datafile/3446249" level="datafile"><labl>word2vecModelsTrigrams.part2.rar</labl><notes level="file" type="DATAVERSE:CONTENTTYPE" subject="Content/MIME Type">application/x-rar-compressed</notes></otherMat></codeBook>