[dev.boringcrypto] all: merge commit 57c115e1 into dev.boringcrypto

Change-Id: I9e2b83c8356372034e4e3bfc6539b813e73611c9
Roland Shoemaker 2021-08-16 09:14:21 -07:00
commit c7e7ce5ec1
1528 changed files with 57252 additions and 33473 deletions

@ -41,7 +41,7 @@ Aeneas Rekkas (arekkas) <aeneas@ory.am>
Afanasev Stanislav <phpprogger@gmail.com>
Agis Anastasopoulos <agis.anast@gmail.com>
Agniva De Sarker <agnivade@yahoo.co.in>
Ahmed Wahed <oneofone@gmail.com>
Ahmed W. Mones <oneofone@gmail.com>
Ahmet Soormally <ahmet@mangomm.co.uk>
Ahmy Yulrizka <yulrizka@gmail.com>
Aiden Scandella <ai@uber.com>

@ -33,6 +33,7 @@ Aaron Jacobs <jacobsa@google.com>
Aaron Jensen <jensen.aaro@gmail.com>
Aaron Kemp <kemp.aaron@gmail.com>
Aaron Patterson <tenderlove@ruby-lang.org>
Aaron Sheah <aaronsheah@gmail.com>
Aaron Stein <aaronstein12@gmail.com>
Aaron Torres <tcboox@gmail.com>
Aaron Zinman <aaron@azinman.com>
@ -47,6 +48,7 @@ Adam Harvey <aharvey@php.net>
Adam Kisala <adam.kisala@gmail.com>
Adam Langley <agl@golang.org>
Adam Medzinski <adam.medzinski@gmail.com>
Adam Mitha <adam.mitha@gmail.com>
Adam Shannon <adamkshannon@gmail.com>
Adam Shelton <aashelt90@gmail.com>
Adam Sindelar <adamsh@google.com>
@ -54,6 +56,8 @@ Adam Thomason <athomason@gmail.com>
Adam Williams <pwnfactory@gmail.com>
Adam Woodbeck <adam@woodbeck.net>
Adarsh Ravichandran <adarshravichandran91@gmail.com>
Adel Rodríguez <adel.rodriguez@leftfieldlabs.com>
Adin Scannell <ascannell@google.com>
Aditya Harindar <aditya.harindar@gmail.com>
Aditya Mukerjee <dev@chimeracoder.net>
Adrian Hesketh <adrianhesketh@hushmail.com>
@ -67,7 +71,8 @@ Aeneas Rekkas (arekkas) <aeneas@ory.am>
Afanasev Stanislav <phpprogger@gmail.com>
Agis Anastasopoulos <agis.anast@gmail.com>
Agniva De Sarker <agnivade@yahoo.co.in>
Ahmed Wahed <oneofone@gmail.com>
Ahmed W. Mones <oneofone@gmail.com>
Ahmet Aktürk <aakturk000@gmail.com>
Ahmet Alp Balkan <ahmetb@google.com>
Ahmet Soormally <ahmet@mangomm.co.uk>
Ahmy Yulrizka <yulrizka@gmail.com>
@ -92,11 +97,13 @@ Alberto Bertogli <albertito@blitiri.com.ar>
Alberto Donizetti <alb.donizetti@gmail.com>
Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com>
Alec Benzer <alec.benzer@gmail.com>
Alejandro García Montoro <alejandro.garciamontoro@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Aleksandar Dezelin <dezelin@gmail.com>
Aleksandr Lukinykh <a.lukinykh@xsolla.com>
Aleksandr Razumov <ar@cydev.ru>
Alekseev Artem <a.artem060@gmail.com>
Aleksei Tirman <aleksei.tirman@jetbrains.com>
Alessandro Arzilli <alessandro.arzilli@gmail.com>
Alessandro Baffa <alessandro.baffa@gmail.com>
Alex A Skinner <alex@lx.lc>
@ -165,6 +172,7 @@ Ali Rizvi-Santiago <arizvisa@gmail.com>
Aliaksandr Valialkin <valyala@gmail.com>
Alice Merrick <amerrick@google.com>
Alif Rachmawadi <subosito@gmail.com>
Allan Guwatudde <guwats10@gmail.com>
Allan Simon <allan.simon@supinfo.com>
Allen Li <ayatane@google.com>
Alok Menghrajani <alok.menghrajani@gmail.com>
@ -172,6 +180,7 @@ Alwin Doss <alwindoss84@gmail.com>
Aman Gupta <aman@tmm1.net>
Amarjeet Anand <amarjeetanandsingh@gmail.com>
Amir Mohammad Saied <amir@gluegadget.com>
Amit Kumar <mittalmailbox@gmail.com>
Amr Mohammed <merodiro@gmail.com>
Amrut Joshi <amrut.joshi@gmail.com>
An Long <aisk1988@gmail.com>
@ -185,6 +194,7 @@ André Carvalho <asantostc@gmail.com>
André Martins <aanm90@gmail.com>
Andre Nathan <andrenth@gmail.com>
Andrea Nodari <andrea.nodari91@gmail.com>
Andrea Simonini <andrea.simonini@gmail.com>
Andrea Spadaccini <spadaccio@google.com>
Andreas Auernhammer <aead@mail.de>
Andreas Jellinghaus <andreas@ionisiert.de> <anj@google.com>
@ -244,6 +254,7 @@ Andy Pan <panjf2000@gmail.com> <panjf2000@golangcn.org> <i@andypan.me>
Andy Walker <walkeraj@gmail.com>
Andy Wang <cbeuw.andy@gmail.com>
Andy Williams <andy@andy.xyz>
Andy Zhao <andyzhao@google.com>
Andzej Maciusovic <andzej.maciusovic@gmail.com>
Anfernee Yongkun Gui <anfernee.gui@gmail.com>
Angelo Bulfone <mbulfone@gmail.com>
@ -269,6 +280,7 @@ Anton Kuklin <anton.a.kuklin@gmail.com>
Antonin Amand <antonin.amand@gmail.com>
Antonio Antelo <aantelov87@gmail.com>
Antonio Bibiano <antbbn@gmail.com>
Antonio Garcia <garcia.olais@gmail.com>
Antonio Huete Jimenez <tuxillo@quantumachine.net>
Antonio Murdaca <runcom@redhat.com>
Antonio Troina <thoeni@gmail.com>
@ -292,8 +304,10 @@ Artem Khvastunov <artem.khvastunov@jetbrains.com>
Artem Kolin <artemkaxboy@gmail.com>
Arthur Fabre <arthur@arthurfabre.com>
Arthur Khashaev <arthur@khashaev.ru>
Artur M. Wolff <artur.m.wolff@gmail.com>
Artyom Pervukhin <artyom.pervukhin@gmail.com>
Arvindh Rajesh Tamilmani <art@a-30.net>
Ashish Bhate <ab.listsubs@gmail.com>
Ashish Gandhi <ag@ashishgandhi.org>
Asim Shankar <asimshankar@gmail.com>
Assel Meher <asselmeher@gmail.com>
@ -325,6 +339,7 @@ Baokun Lee <nototon@gmail.com> <bk@golangcn.org>
Barnaby Keene <accounts@southcla.ws>
Bartosz Grzybowski <melkorm@gmail.com>
Bartosz Oler <brtsz@google.com>
Bassam Ojeil <bojeil@google.com>
Bastian Ike <bastian.ike@gmail.com>
Ben Burkert <ben@benburkert.com>
Ben Cartwright-Cox <Ben@Benjojo.co.uk>
@ -332,6 +347,7 @@ Ben Eitzen <eitzenb@golang.org>
Ben Fried <ben.fried@gmail.com>
Ben Haines <bhainesva@gmail.com>
Ben Hoyt <benhoyt@gmail.com>
Ben Hutchings <ben.hutchings@essensium.com>
Ben Kraft <benkraft@khanacademy.org>
Ben Laurie <ben@links.org> <benl@google.com>
Ben Lubar <ben.lubar@gmail.com>
@ -430,6 +446,7 @@ Carl Henrik Lunde <chlunde@ifi.uio.no>
Carl Jackson <carl@stripe.com>
Carl Johnson <me@carlmjohnson.net>
Carl Mastrangelo <notcarl@google.com>
Carl Menezes <carleeto@gmail.com>
Carl Shapiro <cshapiro@google.com> <cshapiro@golang.org>
Carlisia Campos <carlisia@grokkingtech.io>
Carlo Alberto Ferraris <cafxx@strayorange.com>
@ -443,6 +460,7 @@ Carlos Iriarte <ciriarte@gmail.com>
Carlos Souza <carloshrsouza@gmail.com>
Carolyn Van Slyck <me@carolynvanslyck.com>
Carrie Bynon <cbynon@gmail.com>
Carson Hoffman <c@rsonhoffman.com>
Cary Hull <chull@google.com>
Case Nelson <case.nelson@gmail.com>
Casey Callendrello <squeed@gmail.com>
@ -462,6 +480,7 @@ Charles Kenney <charlesc.kenney@gmail.com>
Charles L. Dorian <cldorian@gmail.com>
Charles Lee <zombie.fml@gmail.com>
Charles Weill <weill@google.com>
Charlie Moog <moogcharlie@gmail.com>
Charlotte Brandhorst-Satzkorn <catzkorn@gmail.com>
Chauncy Cullitan <chauncyc@google.com>
Chen Zhidong <njutczd@gmail.com>
@ -516,6 +535,7 @@ Christopher Nelson <nadiasvertex@gmail.com>
Christopher Nielsen <m4dh4tt3r@gmail.com>
Christopher Redden <christopher.redden@gmail.com>
Christopher Swenson <cswenson@google.com>
Christopher Thomas <53317512+chrisssthomas@users.noreply.github.com>
Christopher Wedgwood <cw@f00f.org>
Christos Zoulas <christos@zoulas.com> <zoulasc@gmail.com>
Christy Perez <christy@linux.vnet.ibm.com>
@ -541,6 +561,8 @@ Cosmos Nicolaou <cnicolaou@google.com>
Costin Chirvasuta <ctin@google.com>
Craig Citro <craigcitro@google.com>
Cristian Staretu <unclejacksons@gmail.com>
Cristo García <cgg.code@gmail.com>
cui fliter <imcusg@gmail.com>
Cuihtlauac ALVARADO <cuihtlauac.alvarado@orange.com>
Cuong Manh Le <cuong@orijtech.com>
Curtis La Graff <curtis@lagraff.me>
@ -560,6 +582,7 @@ Dan Callahan <dan.callahan@gmail.com>
Dan Harrington <harringtond@google.com>
Dan Jacques <dnj@google.com>
Dan Johnson <computerdruid@google.com>
Dan McArdle <dmcardle@google.com>
Dan Peterson <dpiddy@gmail.com>
Dan Pupius <dan@medium.com>
Dan Scales <danscales@google.com>
@ -611,6 +634,7 @@ Dave Russell <forfuncsake@gmail.com>
David Anderson <danderson@google.com>
David Barnett <dbarnett@google.com>
David Benjamin <davidben@google.com>
David Black <dblack@atlassian.com>
David Bond <davidsbond93@gmail.com>
David Brophy <dave@brophy.uk>
David Bürgin <676c7473@gmail.com>
@ -654,6 +678,7 @@ Davor Kapsa <davor.kapsa@gmail.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com>
Dean Prichard <dean.prichard@gmail.com>
Deepak Jois <deepak.jois@gmail.com>
Deepak S <deepakspavoodath@gmail.com>
Denis Bernard <db047h@gmail.com>
Denis Brandolini <denis.brandolini@gmail.com>
Denis Isaev <idenx@yandex.com>
@ -676,8 +701,10 @@ Dhiru Kholia <dhiru.kholia@gmail.com>
Dhruvdutt Jadhav <dhruvdutt.jadhav@gmail.com>
Di Xiao <dixiao@google.com>
Didier Spezia <didier.06@gmail.com>
Diego Medina <fmpwizard@gmail.com>
Diego Siqueira <diego9889@gmail.com>
Dieter Plaetinck <dieter@raintank.io>
Dilyn Corner <dilyn.corner@gmail.com>
Dimitri Sokolyuk <sokolyuk@gmail.com>
Dimitri Tcaciuc <dtcaciuc@gmail.com>
Dina Garmash <dgrmsh@gmail.com>
@ -714,6 +741,7 @@ Doug Fawley <dfawley@google.com>
Douglas Danger Manley <doug.manley@gmail.com>
Drew Flower <drewvanstone@gmail.com>
Drew Hintz <adhintz@google.com>
Drew Richardson <drewrichardson@gmail.com>
Duco van Amstel <duco.vanamstel@gmail.com>
Duncan Holm <mail@frou.org>
Dustin Carlino <dcarlino@google.com>
@ -735,6 +763,7 @@ Egon Elbre <egonelbre@gmail.com>
Ehren Kret <ehren.kret@gmail.com>
Eitan Adler <lists@eitanadler.com>
Eivind Uggedal <eivind@uggedal.com>
El Mostafa Idrassi <el.mostafa.idrassi@gmail.com>
Elbert Fliek <efliek@gmail.com>
Eldar Rakhimberdin <ibeono@gmail.com>
Elena Grahovac <elena@grahovac.me>
@ -742,6 +771,7 @@ Eli Bendersky <eliben@google.com>
Elias Naur <mail@eliasnaur.com> <elias.naur@gmail.com>
Elliot Morrison-Reed <elliotmr@gmail.com>
Ellison Leão <ellisonleao@gmail.com>
Elvina Yakubova <elvinayakubova@gmail.com>
Emerson Lin <linyintor@gmail.com>
Emil Bektimirov <lefelys@gmail.com>
Emil Hessman <emil@hessman.se>
@ -767,6 +797,7 @@ Eric Rescorla <ekr@rtfm.com>
Eric Roshan-Eisner <eric.d.eisner@gmail.com>
Eric Rutherford <erutherford@gmail.com>
Eric Rykwalder <e.rykwalder@gmail.com>
Eric Wang <wangchaogo1990@gmail.com>
Erick Tryzelaar <etryzelaar@google.com>
Erik Aigner <aigner.erik@gmail.com>
Erik Dubbelboer <erik@dubbelboer.com>
@ -778,6 +809,7 @@ Ernest Chiang <ernest_chiang@htc.com>
Erwin Oegema <blablaechthema@hotmail.com>
Esko Luontola <esko.luontola@gmail.com>
Ethan Burns <eaburns@google.com>
Ethan Hur <ethan0311@gmail.com>
Ethan Miller <eamiller@us.ibm.com>
Euan Kemp <euank@euank.com>
Eugene Formanenko <mo4islona@gmail.com>
@ -818,6 +850,7 @@ Felix Cornelius <9767036+fcornelius@users.noreply.github.com>
Felix Geisendörfer <haimuiba@gmail.com>
Felix Kollmann <fk@konsorten.de>
Ferenc Szabo <frncmx@gmail.com>
Fernandez Ludovic <lfernandez.dev@gmail.com>
Filip Gruszczyński <gruszczy@gmail.com>
Filip Haglund <drathier@users.noreply.github.com>
Filip Stanis <fstanis@google.com>
@ -858,6 +891,7 @@ Gabriel Nelle <tehsphinx@web.de>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gabriel Rosenhouse <rosenhouse@gmail.com>
Gabriel Russell <gabriel.russell@gmail.com>
Gabriel Vasile <gabriel.vasile0793@gmail.com>
Gareth Paul Jones <gpj@foursquare.com>
Garret Kelly <gdk@google.com>
Garrick Evans <garrick@google.com>
@ -891,6 +925,8 @@ Gianguido Sora` <g.sora4@gmail.com>
Gideon Jan-Wessel Redelinghuys <gjredelinghuys@gmail.com>
Giles Lean <giles.lean@pobox.com>
Giovanni Bajo <rasky@develer.com>
GitHub User @180909 (70465953) <734461790@qq.com>
GitHub User @6543 (24977596) <6543@obermui.de>
GitHub User @aca (50316549) <acadx0@gmail.com>
GitHub User @ajnirp (1688456) <ajnirp@users.noreply.github.com>
GitHub User @ajz01 (4744634) <ajzdenek@gmail.com>
@ -904,10 +940,12 @@ GitHub User @bontequero (2674999) <bontequero@gmail.com>
GitHub User @cch123 (384546) <buaa.cch@gmail.com>
GitHub User @chainhelen (7046329) <chainhelen@gmail.com>
GitHub User @chanxuehong (3416908) <chanxuehong@gmail.com>
GitHub User @Cluas (10056928) <Cluas@live.cn>
GitHub User @cncal (23520240) <flycalvin@qq.com>
GitHub User @DQNEO (188741) <dqneoo@gmail.com>
GitHub User @Dreamacro (8615343) <chuainian@gmail.com>
GitHub User @dupoxy (1143957) <dupoxy@users.noreply.github.com>
GitHub User @EndlessCheng (7086966) <loli.con@qq.com>
GitHub User @erifan (31343225) <eric.fang@arm.com>
GitHub User @esell (9735165) <eujon.sellers@gmail.com>
GitHub User @fatedier (7346661) <fatedier@gmail.com>
@ -916,12 +954,15 @@ GitHub User @geedchin (11672310) <geedchin@gmail.com>
GitHub User @GrigoriyMikhalkin (3637857) <grigoriymikhalkin@gmail.com>
GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com>
GitHub User @hitzhangjie (3725760) <hit.zhangjie@gmail.com>
GitHub User @hqpko (13887251) <whaibin01@hotmail.com>
GitHub User @itchyny (375258) <itchyny@hatena.ne.jp>
GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com>
GitHub User @jopbrown (6345470) <msshane2008@gmail.com>
GitHub User @kazyshr (30496953) <kazyshr0301@gmail.com>
GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com>
GitHub User @komisan19 (18901496) <komiyama6219@gmail.com>
GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com>
GitHub User @lhl2617 (33488131) <l.h.lee2617@gmail.com>
GitHub User @linguohua (3434367) <lghchinaidea@gmail.com>
GitHub User @LotusFenn (13775899) <fenn.lotus@gmail.com>
GitHub User @ly303550688 (11519839) <yang.liu636@gmail.com>
@ -936,10 +977,14 @@ GitHub User @OlgaVlPetrova (44112727) <OVPpetrova@gmail.com>
GitHub User @pityonline (438222) <pityonline@gmail.com>
GitHub User @po3rin (29445112) <abctail30@gmail.com>
GitHub User @pokutuna (57545) <popopopopokutuna@gmail.com>
GitHub User @povsister (11040951) <pov@mahou-shoujo.moe>
GitHub User @pytimer (17105586) <lixin20101023@gmail.com>
GitHub User @qcrao (7698088) <qcrao91@gmail.com>
GitHub User @ramenjuniti (32011829) <ramenjuniti@gmail.com>
GitHub User @saitarunreddy (21041941) <saitarunreddypalla@gmail.com>
GitHub User @SataQiu (9354727) <shidaqiu2018@gmail.com>
GitHub User @shogo-ma (9860598) <Choroma194@gmail.com>
GitHub User @sivchari (55221074) <shibuuuu5@gmail.com>
GitHub User @skanehira (7888591) <sho19921005@gmail.com>
GitHub User @soolaugust (10558124) <soolaugust@gmail.com>
GitHub User @surechen (7249331) <surechen17@gmail.com>
@ -947,9 +992,12 @@ GitHub User @tatsumack (4510569) <tatsu.mack@gmail.com>
GitHub User @tell-k (26263) <ffk2005@gmail.com>
GitHub User @tennashi (10219626) <tennashio@gmail.com>
GitHub User @uhei (2116845) <uhei@users.noreply.github.com>
GitHub User @uji (49834542) <ujiprog@gmail.com>
GitHub User @unbyte (5772358) <i@shangyes.net>
GitHub User @uropek (39370426) <uropek@gmail.com>
GitHub User @utkarsh-extc (53217283) <utkarsh.extc@gmail.com>
GitHub User @witchard (4994659) <witchard@hotmail.co.uk>
GitHub User @wolf1996 (5901874) <ksgiv37@gmail.com>
GitHub User @yah01 (12216890) <kagaminehuan@gmail.com>
GitHub User @yuanhh (1298735) <yuan415030@gmail.com>
GitHub User @zikaeroh (48577114) <zikaeroh@gmail.com>
@ -962,6 +1010,7 @@ Glenn Brown <glennb@google.com>
Glenn Lewis <gmlewis@google.com>
Gordon Klaus <gordon.klaus@gmail.com>
Gordon Tyler <gordon@doxxx.net>
Grace Han <hgrace503@gmail.com>
Graham King <graham4king@gmail.com>
Graham Miller <graham.miller@gmail.com>
Grant Griffiths <ggp493@gmail.com>
@ -977,10 +1026,12 @@ Guilherme Caruso <gui.martinscaruso@gmail.com>
Guilherme Garnier <guilherme.garnier@gmail.com>
Guilherme Goncalves <guilhermeaugustosg@gmail.com>
Guilherme Rezende <guilhermebr@gmail.com>
Guilherme Souza <32180229+gqgs@users.noreply.github.com>
Guillaume J. Charmes <guillaume@charmes.net>
Guillaume Sottas <guillaumesottas@gmail.com>
Günther Noack <gnoack@google.com>
Guobiao Mei <meiguobiao@gmail.com>
Guodong Li <guodongli@google.com>
Guoliang Wang <iamwgliang@gmail.com>
Gustav Paul <gustav.paul@gmail.com>
Gustav Westling <gustav@westling.xyz>
@ -995,6 +1046,7 @@ HAMANO Tsukasa <hamano@osstech.co.jp>
Han-Wen Nienhuys <hanwen@google.com>
Hang Qian <hangqian90@gmail.com>
Hanjun Kim <hallazzang@gmail.com>
Hanlin He <hanling.he@gmail.com>
Hanlin Shi <shihanlin9@gmail.com>
Haoran Luo <haoran.luo@chaitin.com>
Haosdent Huang <haosdent@gmail.com>
@ -1026,18 +1078,19 @@ Herbie Ong <herbie@google.com>
Heschi Kreinick <heschi@google.com>
Hidetatsu Yaginuma <ygnmhdtt@gmail.com>
Hilko Bengen <bengen@hilluzination.de>
Himanshu Kishna Srivastava <28himanshu@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com>
Hiromichi Ema <ema.hiro@gmail.com>
Hironao OTSUBO <motemen@gmail.com>
Hiroshi Ioka <hirochachacha@gmail.com>
Hitoshi Mitake <mitake.hitoshi@gmail.com>
Holden Huang <ttyh061@gmail.com>
Songlin Jiang <hollowman@hollowman.ml>
Hong Ruiqi <hongruiqi@gmail.com>
Hongfei Tan <feilengcui008@gmail.com>
Horacio Duran <horacio.duran@gmail.com>
Horst Rutter <hhrutter@gmail.com>
Hossein Sheikh Attar <hattar@google.com>
Hossein Zolfi <hossein.zolfi@gmail.com>
Howard Zhang <howard.zhang@arm.com>
Hsin Tsao <tsao@google.com>
Hsin-Ho Yeh <yhh92u@gmail.com>
@ -1054,11 +1107,14 @@ Ian Haken <ihaken@netflix.com>
Ian Kent <iankent85@gmail.com>
Ian Lance Taylor <iant@golang.org>
Ian Leue <ian@appboy.com>
Ian Mckay <iann0036@gmail.com>
Ian Tay <iantay@google.com>
Ian Woolf <btw515wolf2@gmail.com>
Ian Zapolsky <ianzapolsky@gmail.com>
Ibrahim AshShohail <ibra.sho@gmail.com>
Icarus Sparry <golang@icarus.freeuk.com>
Iccha Sethi <icchasethi@gmail.com>
Ichinose Shogo <shogo82148@gmail.com>
Idora Shinatose <idora.shinatose@gmail.com>
Ignacio Hagopian <jsign.uy@gmail.com>
Igor Bernstein <igorbernstein@google.com>
@ -1068,6 +1124,7 @@ Igor Vashyst <ivashyst@gmail.com>
Igor Zhilianin <igor.zhilianin@gmail.com>
Ikko Ashimine <eltociear@gmail.com>
Illya Yalovyy <yalovoy@gmail.com>
Ilya Chukov <56119080+Elias506@users.noreply.github.com>
Ilya Sinelnikov <sidhmangh@gmail.com>
Ilya Tocar <ilya.tocar@intel.com>
INADA Naoki <songofacandy@gmail.com>
@ -1122,6 +1179,7 @@ James Cowgill <James.Cowgill@imgtec.com>
James Craig Burley <james-github@burleyarch.com>
James David Chalfant <james.chalfant@gmail.com>
James Eady <jmeady@google.com>
James Fennell <jpfennell@google.com>
James Fysh <james.fysh@gmail.com>
James Gray <james@james4k.com>
James Hartig <fastest963@gmail.com>
@ -1178,6 +1236,7 @@ Jason Wangsadinata <jwangsadinata@gmail.com>
Javier Kohen <jkohen@google.com>
Javier Revillas <jrevillas@massivedynamic.io>
Javier Segura <javism@gmail.com>
Jay Chen <chenjie@chenjie.info>
Jay Conrod <jayconrod@google.com>
Jay Lee <BusyJayLee@gmail.com>
Jay Taylor <outtatime@gmail.com>
@ -1200,6 +1259,7 @@ Jeff Johnson <jrjohnson@google.com>
Jeff R. Allen <jra@nella.org> <jeff.allen@gmail.com>
Jeff Sickel <jas@corpus-callosum.com>
Jeff Wendling <jeff@spacemonkey.com>
Jeff Widman <jeff@jeffwidman.com>
Jeffrey H <jeffreyh192@gmail.com>
Jelte Fennema <github-tech@jeltef.nl>
Jens Frederich <jfrederich@gmail.com>
@ -1210,6 +1270,7 @@ Jeremy Faller <jeremy@golang.org>
Jeremy Jackins <jeremyjackins@gmail.com>
Jeremy Jay <jeremy@pbnjay.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jero Bado <tokidokitalkyou@gmail.com>
Jeroen Bobbeldijk <jerbob92@gmail.com>
Jeroen Simonetti <jeroen@simonetti.nl>
Jérôme Doucet <jerdct@gmail.com>
@ -1251,6 +1312,8 @@ Joe Richey <joerichey@google.com>
Joe Shaw <joe@joeshaw.org>
Joe Sylve <joe.sylve@gmail.com>
Joe Tsai <joetsai@digital-static.net>
Joel Courtney <euphemize@gmail.com>
Joel Ferrier <joelferrier@google.com>
Joel Sing <joel@sing.id.au> <jsing@google.com>
Joël Stemmer <jstemmer@google.com>
Joel Stemmer <stemmertech@gmail.com>
@ -1260,7 +1323,9 @@ Johan Euphrosine <proppy@google.com>
Johan Jansson <johan.jansson@iki.fi>
Johan Knutzen <johan@senri.se>
Johan Sageryd <j@1616.se>
Johannes Huning <johannes.huning@gmail.com>
John Asmuth <jasmuth@gmail.com>
John Bampton <jbampton@gmail.com>
John Beisley <huin@google.com>
John C Barstow <jbowtie@amathaine.com>
John DeNero <denero@google.com>
@ -1269,6 +1334,7 @@ John Gibb <johngibb@gmail.com>
John Gilik <john@jgilik.com>
John Graham-Cumming <jgc@jgc.org> <jgrahamc@gmail.com>
John Howard Palevich <jack.palevich@gmail.com>
John Jago <johnjago@protonmail.com>
John Jeffery <jjeffery@sp.com.au>
John Jenkins <twodopeshaggy@gmail.com>
John Leidegren <john.leidegren@gmail.com>
@ -1320,6 +1386,7 @@ Josa Gesell <josa@gesell.me>
Jose Luis Vázquez González <josvazg@gmail.com>
Joseph Bonneau <jcb@google.com>
Joseph Holsten <joseph@josephholsten.com>
Joseph Morag <sefim96@gmail.com>
Josh Baum <joshbaum@google.com>
Josh Bleecher Snyder <josharian@gmail.com>
Josh Chorlton <jchorlton@gmail.com>
@ -1327,12 +1394,14 @@ Josh Deprez <josh.deprez@gmail.com>
Josh Goebel <dreamer3@gmail.com>
Josh Hoak <jhoak@google.com>
Josh Holland <jrh@joshh.co.uk>
Josh Rickmar <jrick@companyzero.com>
Josh Roppo <joshroppo@gmail.com>
Josh Varga <josh.varga@gmail.com>
Joshua Bezaleel Abednego <joshua.bezaleel@gmail.com>
Joshua Boelter <joshua.boelter@intel.com>
Joshua Chase <jcjoshuachase@gmail.com>
Joshua Crowgey <jcrowgey@uw.edu>
Joshua Harshman <joshgreyhat@gmail.com>
Joshua M. Clulow <josh.clulow@joyent.com>
Joshua Rubin <joshua@rubixconsulting.com>
Josselin Costanzi <josselin@costanzi.fr>
@ -1353,6 +1422,7 @@ Julie Qiu <julie@golang.org>
Julien Kauffmann <julien.kauffmann@freelan.org>
Julien Salleyron <julien.salleyron@gmail.com>
Julien Schmidt <google@julienschmidt.com>
Julien Tant <julien@craftyx.fr>
Julio Montes <julio.montes@intel.com>
Jun Zhang <jim.zoumo@gmail.com>
Junchen Li <junchen.li@arm.com>
@ -1419,10 +1489,12 @@ Kenta Mori <zoncoen@gmail.com>
Kerollos Magdy <kerolloz@yahoo.com>
Ketan Parmar <ketanbparmar@gmail.com>
Kevan Swanberg <kevswanberg@gmail.com>
Kevin Albertson <kevin.albertson@mongodb.com>
Kevin Ballard <kevin@sb.org>
Kevin Burke <kev@inburke.com>
Kévin Dunglas <dunglas@gmail.com>
Kevin Gillette <extemporalgenome@gmail.com>
Kevin Herro <kevin109104@gmail.com>
Kevin Kirsche <kev.kirsche@gmail.com>
Kevin Klues <klueska@gmail.com> <klueska@google.com>
Kevin Malachowski <chowski@google.com>
@ -1457,6 +1529,7 @@ Koya IWAMURA <kiwamura0314@gmail.com>
Kris Kwiatkowski <kris@cloudflare.com>
Kris Nova <kris@nivenly.com>
Kris Rousey <krousey@google.com>
Krishna Birla <krishnabirla16@gmail.com>
Kristopher Watts <traetox@gmail.com>
Krzysztof Dąbrowski <krzysdabro@live.com>
Kshitij Saraogi <kshitijsaraogi@gmail.com>
@ -1480,6 +1553,7 @@ Lajos Papp <lalyos@yahoo.com>
Lakshay Garg <lakshay.garg.1996@gmail.com>
Lann Martin <lannm@google.com>
Lanre Adelowo <yo@lanre.wtf>
Lapo Luchini <lapo@lapo.it>
Larry Clapp <larry@theclapp.org>
Larry Hosken <lahosken@golang.org>
Lars Jeppesen <jeppesen.lars@gmail.com>
@ -1496,6 +1570,7 @@ Leigh McCulloch <leighmcc@gmail.com>
Leo Antunes <leo@costela.net>
Leo Rudberg <ljr@google.com>
Leon Klingele <git@leonklingele.de>
Leonard Wang <wangdeyu0907@gmail.com>
Leonardo Comelli <leonardo.comelli@gmail.com>
Leonel Quinteros <leonel.quinteros@gmail.com>
Lev Shamardin <shamardin@gmail.com>
@ -1506,7 +1581,9 @@ Lily Chung <lilithkchung@gmail.com>
Lingchao Xin <douglarek@gmail.com>
Lion Yang <lion@aosc.xyz>
Liz Rice <liz@lizrice.com>
Lize Cai <lizzzcai1@gmail.com>
Lloyd Dewolf <foolswisdom@gmail.com>
Lluís Batlle i Rossell <viric@viric.name>
Lorenz Bauer <lmb@cloudflare.com>
Lorenz Brun <lorenz@brun.one>
Lorenz Nickel <mail@lorenznickel.de>
@ -1531,6 +1608,7 @@ Lukasz Milewski <lmmilewski@gmail.com>
Luke Champine <luke.champine@gmail.com>
Luke Curley <qpingu@gmail.com>
Luke Granger-Brown <git@lukegb.com>
Luke Shumaker <lukeshu@datawire.io>
Luke Young <bored-engineer@users.noreply.github.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Luuk van Dijk <lvd@golang.org> <lvd@google.com>
@ -1550,6 +1628,7 @@ Mal Curtis <mal@mal.co.nz>
Manfred Touron <m@42.am>
Manigandan Dharmalingam <manigandan.jeff@gmail.com>
Manish Goregaokar <manishsmail@gmail.com>
Manlio Perillo <manlio.perillo@gmail.com>
Manoj Dayaram <platform-dev@moovweb.com> <manoj.dayaram@moovweb.com>
Mansour Rahimi <rahimi.mnr@gmail.com>
Manu Garg <manugarg@google.com>
@ -1646,6 +1725,8 @@ Matt Joiner <anacrolix@gmail.com>
Matt Jones <mrjones@google.com>
Matt Juran <thepciet@gmail.com>
Matt Layher <mdlayher@gmail.com>
Matt Masurka <masurka@google.com>
Matt Pearring <broskies@google.com>
Matt Reiferson <mreiferson@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matt Strong <mstrong1341@gmail.com>
@ -1659,9 +1740,12 @@ Matthew Denton <mdenton@skyportsystems.com>
Matthew Holt <Matthew.Holt+git@gmail.com>
Matthew Horsnell <matthew.horsnell@gmail.com>
Matthew Waters <mwwaters@gmail.com>
Matthias Frei <matthias.frei@inf.ethz.ch>
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
Matthieu Olivier <olivier.matthieu@gmail.com>
Matthijs Kooijman <matthijs@stdin.nl>
Mattias Appelgren <mattias@ppelgren.se>
Mauricio Alvarado <mauricio.alvarado@leftfieldlabs.com>
Max Drosdo.www <g1ran1q@gmail.com>
Max Riveiro <kavu13@gmail.com>
Max Schmitt <max@schmitt.mx>
@ -1677,9 +1761,11 @@ Máximo Cuadros Ortiz <mcuadros@gmail.com>
Maxwell Krohn <themax@gmail.com>
Maya Rashish <maya@NetBSD.org>
Mayank Kumar <krmayankk@gmail.com>
Mehrad Sadeghi <2012.linkinpark@gmail.com>
Meir Fischer <meirfischer@gmail.com>
Meng Zhuo <mengzhuo1203@gmail.com> <mzh@golangcn.org>
Mhd Sulhan <m.shulhan@gmail.com>
Mia Zhu <CrystalZhu1025getu@gmail.com>
Micah Stetson <micah.stetson@gmail.com>
Michael Anthony Knyszek <mknyszek@google.com>
Michael Brandenburg <mbrandenburg@bolste.com>
@ -1730,8 +1816,10 @@ Michal Franc <lam.michal.franc@gmail.com>
Michał Łowicki <mlowicki@gmail.com>
Michal Pristas <michal.pristas@gmail.com>
Michal Rostecki <mrostecki@suse.de>
Michal Stokluska <mstoklus@redhat.com>
Michalis Kargakis <michaliskargakis@gmail.com>
Michel Lespinasse <walken@google.com>
Michel Levieux <mlevieux42@gmail.com>
Michele Di Pede <michele.di.pede@gmail.com>
Mickael Kerjean <mickael.kerjean@gmail.com>
Mickey Reiss <mickeyreiss@gmail.com>
@ -1790,7 +1878,9 @@ Muir Manders <muir@mnd.rs>
Mukesh Sharma <sharma.mukesh439@gmail.com>
Mura Li <mura_li@castech.com.tw>
Mykhailo Lesyk <mikhail@lesyk.org>
Nahum Shalman <nahamu@gmail.com>
Naman Aggarwal <aggarwal.nam@gmail.com>
Naman Gera <namangera15@gmail.com>
Nan Deng <monnand@gmail.com>
Nao Yonashiro <owan.orisano@gmail.com>
Naoki Kanatani <k12naoki@gmail.com>
@ -1818,6 +1908,7 @@ Neven Sajko <nsajko@gmail.com>
Nevins Bartolomeo <nevins.bartolomeo@gmail.com>
Niall Sheridan <nsheridan@gmail.com>
Nic Day <nic.day@me.com>
Nicholas Asimov <nicholas@asimov.me>
Nicholas Katsaros <nick@nickkatsaros.com>
Nicholas Maniscalco <nicholas@maniscalco.com>
Nicholas Ng <nickng@nickng.io>
@ -1847,6 +1938,7 @@ Nik Nyby <nnyby@columbia.edu>
Nikhil Benesch <nikhil.benesch@gmail.com>
Nikita Gillmann <nikita@n0.is> <ng0@n0.is>
Nikita Kryuchkov <nkryuchkov10@gmail.com>
Nikita Melekhin <nimelehin@gmail.com>
Nikita Vanyasin <nikita.vanyasin@gmail.com>
Niklas Schnelle <niklas.schnelle@gmail.com>
Niko Dziemba <niko@dziemba.com>
@ -1858,6 +1950,7 @@ Niranjan Godbole <niranjan8192@gmail.com>
Nishanth Shanmugham <nishanth.gerrard@gmail.com>
Noah Campbell <noahcampbell@gmail.com>
Noah Goldman <noahg34@gmail.com>
Noah Santschi-Cooney <noah@santschi-cooney.ch>
Noble Johnson <noblepoly@gmail.com>
Nodir Turakulov <nodir@google.com>
Noel Georgi <git@frezbo.com>
@ -1894,6 +1987,7 @@ Pablo Rozas Larraondo <pablo.larraondo@anu.edu.au>
Pablo Santiago Blum de Aguiar <scorphus@gmail.com>
Padraig Kitterick <padraigkitterick@gmail.com>
Pallat Anchaleechamaikorn <yod.pallat@gmail.com>
Pan Chenglong <1004907659@qq.com>
Panos Georgiadis <pgeorgiadis@suse.de>
Pantelis Sampaziotis <psampaz@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
@ -1947,6 +2041,7 @@ Paulo Casaretto <pcasaretto@gmail.com>
Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
Paulo Gomes <paulo.gomes.uk@gmail.com>
Pavel Paulau <pavel.paulau@gmail.com>
Pavel Watson <watsonpavel@gmail.com>
Pavel Zinovkin <pavel.zinovkin@gmail.com>
Pavlo Sumkin <ymkins@gmail.com>
Pawel Knap <pawelknap88@gmail.com>
@ -1954,6 +2049,8 @@ Pawel Szczur <filemon@google.com>
Paweł Szulik <pawel.szulik@intel.com>
Pei Xian Chee <luciolas1991@gmail.com>
Pei-Ming Wu <p408865@gmail.com>
Pen Tree <appletree2479@outlook.com>
Peng Gao <peng.gao.dut@gmail.com>
Percy Wegmann <ox.to.a.cart@gmail.com>
Perry Abbott <perry.j.abbott@gmail.com>
Petar Dambovaliev <petar.atanasov.1987@gmail.com>
@ -1992,6 +2089,7 @@ Philip Brown <phil@bolthole.com>
Philip Hofer <phofer@umich.edu>
Philip K. Warren <pkwarren@gmail.com>
Philip Nelson <me@pnelson.ca>
Philipp Sauter <sauterp@protonmail.com>
Philipp Stephani <phst@google.com>
Phillip Campbell <15082+phillc@users.noreply.github.com>
Pierre Carru <pierre.carru@eshard.com>
@ -2007,6 +2105,7 @@ Poh Zi How <poh.zihow@gmail.com>
Polina Osadcha <polliosa@google.com>
Pontus Leitzler <leitzler@gmail.com>
Povilas Versockas <p.versockas@gmail.com>
Prajwal Koirala <16564273+Prajwal-Koirala@users.noreply.github.com>
Prasanga Siripala <pj@pjebs.com.au>
Prasanna Swaminathan <prasanna@mediamath.com>
Prashant Agrawal <prashant.a.vjti@gmail.com>
@ -2027,11 +2126,13 @@ Quim Muntal <quimmuntal@gmail.com>
Quinn Slack <sqs@sourcegraph.com>
Quinten Yearsley <qyearsley@chromium.org>
Quoc-Viet Nguyen <afelion@gmail.com>
Rabin Gaire <rabingaire20@gmail.com>
Radek Simko <radek.simko@gmail.com>
Radek Sohlich <sohlich@gmail.com>
Radu Berinde <radu@cockroachlabs.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Raghavendra Nagaraj <jamdagni86@gmail.com>
Rahul Bajaj <rahulrb0509@gmail.com>
Rahul Chaudhry <rahulchaudhry@chromium.org>
Rahul Wadhwani <rahulwadhwani21@gmail.com>
Raif S. Naffah <go@naffah-raif.name>
@ -2041,12 +2142,14 @@ Rajender Reddy Kompally <rajenderreddykompally@gmail.com>
Ralph Corderoy <ralph@inputplus.co.uk>
Ramazan AYYILDIZ <rayyildiz@gmail.com>
Ramesh Dharan <dharan@google.com>
Randy Reddig <randy@alta.software>
Raph Levien <raph@google.com>
Raphael Geronimi <raphael.geronimi@gmail.com>
Raul Silvera <rsilvera@google.com>
Ravil Bikbulatov <weeellz12@gmail.com>
RaviTeja Pothana <ravi.tezu@gmail.com>
Ray Tung <rtung@thoughtworks.com>
Ray Wu <ray@liftoff.io>
Raymond Kazlauskas <raima220@gmail.com>
Rebecca Stambler <rstambler@golang.org>
Reilly Watson <reillywatson@gmail.com>
@ -2066,6 +2169,7 @@ Richard Eric Gavaletz <gavaletz@gmail.com>
Richard Gibson <richard.gibson@gmail.com>
Richard Miller <miller.research@gmail.com>
Richard Musiol <mail@richard-musiol.de> <neelance@gmail.com>
Richard Pickering <richard.pickering@hotmail.co.uk>
Richard Ulmer <codesoap@mailbox.org>
Richard Wilkes <wilkes@me.com>
Rick Arnold <rickarnoldjr@gmail.com>
@ -2124,6 +2228,7 @@ Rowan Worth <sqweek@gmail.com>
Rudi Kramer <rudi.kramer@gmail.com>
Rui Ueyama <ruiu@google.com>
Ruixin Bao <ruixin.bao@ibm.com>
Ruslan Andreev <ruslan.andreev@huawei.com>
Ruslan Nigmatullin <elessar@dropbox.com>
Russ Cox <rsc@golang.org>
Russell Haering <russellhaering@gmail.com>
@ -2141,6 +2246,7 @@ Ryan Seys <ryan@ryanseys.com>
Ryan Slade <ryanslade@gmail.com>
Ryan Zhang <ryan.zhang@docker.com>
Ryoichi KATO <ryo1kato@gmail.com>
Ryoya Sekino <ryoyasekino1993@gmail.com>
Ryuji Iwata <qt.luigi@gmail.com>
Ryuma Yoshida <ryuma.y1117@gmail.com>
Ryuzo Yamamoto <ryuzo.yamamoto@gmail.com>
@ -2176,8 +2282,10 @@ Sardorbek Pulatov <sardorbek.pulatov@outlook.com>
Sascha Brawer <sascha@brawer.ch>
Sasha Lionheart <lionhearts@google.com>
Sasha Sobol <sasha@scaledinference.com>
Satoru Kitaguchi <rule.the.fate.myfirststory@gmail.com>
Scott Barron <scott.barron@github.com>
Scott Bell <scott@sctsm.com>
Scott Cotton <scott@mindowl.com>
Scott Crunkleton <crunk1@gmail.com>
Scott Ferguson <scottwferg@gmail.com>
Scott Lawrence <bytbox@gmail.com>
@ -2191,6 +2299,7 @@ Sean Chittenden <seanc@joyent.com>
Sean Christopherson <sean.j.christopherson@intel.com>
Sean Dolphin <Sean.Dolphin@kpcompass.com>
Sean Harger <sharger@google.com>
Sean Harrington <sean.harrington@leftfieldlabs.com>
Sean Hildebrand <seanwhildebrand@gmail.com>
Sean Liao <seankhliao@gmail.com>
Sean Rees <sean@erifax.org>
@ -2212,6 +2321,7 @@ Sergey Dobrodey <sergey.dobrodey@synesis.ru>
Sergey Frolov <sfrolov@google.com>
Sergey Glushchenko <gsserge@gmail.com>
Sergey Ivanov <ser1325@gmail.com>
Sergey Kacheev <S.Kacheev@gmail.com>
Sergey Lukjanov <me@slukjanov.name>
Sergey Mishin <sergeymishine@gmail.com>
Sergey Mudrik <sergey.mudrik@gmail.com>
@ -2223,6 +2333,7 @@ Serhat Giydiren <serhatgiydiren@gmail.com>
Serhii Aheienko <serhii.aheienko@gmail.com>
Seth Hoenig <seth.a.hoenig@gmail.com>
Seth Vargo <sethvargo@gmail.com>
Shaba Abhiram <shabarivas.abhiram@gmail.com>
Shahar Kohanim <skohanim@gmail.com>
Shailesh Suryawanshi <ss.shailesh28@gmail.com>
Shamil Garatuev <garatuev@gmail.com>
@ -2250,9 +2361,13 @@ Shivakumar GN <shivakumar.gn@gmail.com>
Shivani Singhal <shivani.singhal2804@gmail.com>
Shivansh Rai <shivansh@freebsd.org>
Shivashis Padhi <shivashispadhi@gmail.com>
Shoshin Nikita <shoshin_nikita@fastmail.com>
Shota Sugiura <s.shota.710.3506@gmail.com>
Shubham Sharma <shubham.sha12@gmail.com>
Shuhei Takahashi <nya@chromium.org>
Shun Fan <sfan@google.com>
Silvan Jegen <s.jegen@gmail.com>
Simão Gomes Viana <simaogmv@gmail.com>
Simarpreet Singh <simar@linux.com>
Simon Drake <simondrake1990@gmail.com>
Simon Ferquel <simon.ferquel@docker.com>
@ -2267,13 +2382,16 @@ Sina Siadat <siadat@gmail.com>
Sjoerd Siebinga <sjoerd.siebinga@gmail.com>
Sokolov Yura <funny.falcon@gmail.com>
Song Gao <song@gao.io>
Song Lim <songlim327@gmail.com>
Songjiayang <songjiayang1@gmail.com>
Songlin Jiang <hollowman@hollowman.ml>
Soojin Nam <jsunam@gmail.com>
Søren L. Hansen <soren@linux2go.dk>
Sparrow Li <liyuancylx@gmail.com>
Spencer Kocot <spencerkocot@gmail.com>
Spencer Nelson <s@spenczar.com>
Spencer Tung <spencertung@google.com>
Spenser Black <spenserblack01@gmail.com>
Spring Mc <heresy.mc@gmail.com>
Srdjan Petrovic <spetrovic@google.com>
Sridhar Venkatakrishnan <sridhar@laddoo.net>
@ -2324,6 +2442,7 @@ Suyash <dextrous93@gmail.com>
Suzy Mueller <suzmue@golang.org>
Sven Almgren <sven@tras.se>
Sven Blumenstein <svbl@google.com>
Sven Lee <lee1300394324@gmail.com>
Sven Taute <sven.taute@gmail.com>
Sylvain Zimmer <sylvain@sylvainzimmer.com>
Syohei YOSHIDA <syohex@gmail.com>
@ -2406,12 +2525,14 @@ Tiwei Bie <tiwei.btw@antgroup.com>
Tobias Assarsson <tobias.assarsson@gmail.com>
Tobias Columbus <tobias.columbus@gmail.com> <tobias.columbus@googlemail.com>
Tobias Klauser <tklauser@distanz.ch>
Tobias Kohlbau <tobias@kohlbau.de>
Toby Burress <kurin@google.com>
Todd Kulesza <tkulesza@google.com>
Todd Neal <todd@tneal.org>
Todd Wang <toddwang@gmail.com>
Tom Anthony <git@tomanthony.co.uk>
Tom Bergan <tombergan@google.com>
Tom Freudenberg <tom.freudenberg@4commerce.de>
Tom Heng <zhm20070928@gmail.com>
Tom Lanyon <tomlanyon@google.com>
Tom Levy <tomlevy93@gmail.com>
@ -2440,6 +2561,7 @@ Toshiki Shima <hayabusa1419@gmail.com>
Totoro W <tw19881113@gmail.com>
Travis Bischel <travis.bischel@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trevor Dixon <trevordixon@gmail.com>
Trevor Strohman <trevor.strohman@gmail.com>
Trey Lawrence <lawrence.trey@gmail.com>
Trey Roessig <trey.roessig@gmail.com>
@ -2463,6 +2585,7 @@ Tzach Shabtay <tzachshabtay@gmail.com>
Tzu-Chiao Yeh <su3g4284zo6y7@gmail.com>
Tzu-Jung Lee <roylee17@currant.com>
Udalov Max <re.udalov@gmail.com>
Uddeshya Singh <singhuddeshyaofficial@gmail.com>
Ugorji Nwoke <ugorji@gmail.com>
Ulf Holm Nielsen <doktor@dyregod.dk>
Ulrich Kunitz <uli.kunitz@gmail.com>
@ -2475,6 +2598,7 @@ Vadim Grek <vadimprog@gmail.com>
Vadim Vygonets <unixdj@gmail.com>
Val Polouchkine <vpolouch@justin.tv>
Valentin Vidic <vvidic@valentin-vidic.from.hr>
Vaughn Iverson <vsivsi@yahoo.com>
Vee Zhang <veezhang@126.com> <vveezhang@gmail.com>
Vega Garcia Luis Alfonso <vegacom@gmail.com>
Venil Noronha <veniln@vmware.com>
@ -2491,6 +2615,7 @@ Vincent Batts <vbatts@hashbangbash.com> <vbatts@gmail.com>
Vincent Vanackere <vincent.vanackere@gmail.com>
Vinu Rajashekhar <vinutheraj@gmail.com>
Vish Subramanian <vish@google.com>
Vishal Dalwadi <dalwadivishal26@gmail.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Visweswara R <r.visweswara@gmail.com>
Vitaly Zdanevich <zdanevich.vitaly@ya.ru>
@ -2542,6 +2667,7 @@ Willem van der Schyff <willemvds@gmail.com>
William Chan <willchan@chromium.org>
William Chang <mr.williamchang@gmail.com>
William Josephson <wjosephson@gmail.com>
William Langford <wlangfor@gmail.com>
William Orr <will@worrbase.com> <ay1244@gmail.com>
William Poussier <william.poussier@gmail.com>
Wisdom Omuya <deafgoat@gmail.com>
@ -2550,6 +2676,7 @@ Xi Ruoyao <xry23333@gmail.com>
Xia Bin <snyh@snyh.org>
Xiangdong Ji <xiangdong.ji@arm.com>
Xiaodong Liu <teaofmoli@gmail.com>
Xing Gao <18340825824@163.com>
Xing Xing <mikespook@gmail.com>
Xingqang Bai <bxq2011hust@qq.com>
Xu Fei <badgangkiller@gmail.com>
@ -2571,6 +2698,7 @@ Yasha Bubnov <girokompass@gmail.com>
Yasser Abdolmaleki <yasser@yasser.ca>
Yasuharu Goto <matope.ono@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yasutaka Shinzaki <shinzaki@yasu26.tech>
Yasuyuki Oka <yasuyk@gmail.com>
Yazen Shunnar <yazen.shunnar@gmail.com>
Yestin Sun <ylh@pdx.edu>
@ -2583,14 +2711,18 @@ Yorman Arias <cixtords@gmail.com>
Yoshiyuki Kanno <nekotaroh@gmail.com> <yoshiyuki.kanno@stoic.co.jp>
Yoshiyuki Mineo <yoshiyuki.mineo@gmail.com>
Yosuke Akatsuka <yosuke.akatsuka@gmail.com>
Youfu Zhang <zhangyoufu@gmail.com>
Yu Heng Zhang <annita.zhang@cn.ibm.com>
Yu Xuan Zhang <zyxsh@cn.ibm.com>
Yu, Li-Yu <afg984@gmail.com>
Yuichi Kishimoto <yk2220s@gmail.com>
Yuichi Nishiwaki <yuichi.nishiwaki@gmail.com>
Yuji Yaginuma <yuuji.yaginuma@gmail.com>
Yuki Ito <mrno110y@gmail.com>
Yuki OKUSHI <huyuumi.dev@gmail.com>
Yuki Yugui Sonoda <yugui@google.com>
Yukihiro Nishinaka <6elpinal@gmail.com>
YunQiang Su <syq@debian.org>
Yury Smolsky <yury@smolsky.by>
Yusuke Kagiwada <block.rxckin.beats@gmail.com>
Yuusei Kuwana <kuwana@kumama.org>
@ -2599,6 +2731,7 @@ Yves Junqueira <yvesj@google.com> <yves.junqueira@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>
Zach Bintliff <zbintliff@gmail.com>
Zach Gershman <zachgersh@gmail.com>
Zach Hoffman <zrhoffman@apache.org>
Zach Jones <zachj1@gmail.com>
Zachary Amsden <zach@thundertoken.com>
Zachary Gershman <zgershman@pivotal.io>
@ -2617,6 +2750,7 @@ Zhou Peng <p@ctriple.cn>
Ziad Hatahet <hatahet@gmail.com>
Ziheng Liu <lzhfromustc@gmail.com>
Zorion Arrizabalaga <zorionk@gmail.com>
Zvonimir Pavlinovic <zpavlinovic@google.com>
Zyad A. Ali <zyad.ali.me@gmail.com>
Максадбек Ахмедов <a.maksadbek@gmail.com>
Максим Федосеев <max.faceless.frei@gmail.com>

api/go1.17.txt (new file, 195 lines)

@ -0,0 +1,195 @@
pkg archive/zip, method (*File) OpenRaw() (io.Reader, error)
pkg archive/zip, method (*Writer) Copy(*File) error
pkg archive/zip, method (*Writer) CreateRaw(*FileHeader) (io.Writer, error)
pkg compress/lzw, method (*Reader) Close() error
pkg compress/lzw, method (*Reader) Read([]uint8) (int, error)
pkg compress/lzw, method (*Reader) Reset(io.Reader, Order, int)
pkg compress/lzw, method (*Writer) Close() error
pkg compress/lzw, method (*Writer) Reset(io.Writer, Order, int)
pkg compress/lzw, method (*Writer) Write([]uint8) (int, error)
pkg compress/lzw, type Reader struct
pkg compress/lzw, type Writer struct
pkg crypto/tls, method (*CertificateRequestInfo) Context() context.Context
pkg crypto/tls, method (*ClientHelloInfo) Context() context.Context
pkg crypto/tls, method (*Conn) HandshakeContext(context.Context) error
pkg database/sql, method (*NullByte) Scan(interface{}) error
pkg database/sql, method (*NullInt16) Scan(interface{}) error
pkg database/sql, method (NullByte) Value() (driver.Value, error)
pkg database/sql, method (NullInt16) Value() (driver.Value, error)
pkg database/sql, type NullByte struct
pkg database/sql, type NullByte struct, Byte uint8
pkg database/sql, type NullByte struct, Valid bool
pkg database/sql, type NullInt16 struct
pkg database/sql, type NullInt16 struct, Int16 int16
pkg database/sql, type NullInt16 struct, Valid bool
pkg debug/elf, const SHT_MIPS_ABIFLAGS = 1879048234
pkg debug/elf, const SHT_MIPS_ABIFLAGS SectionType
pkg encoding/csv, method (*Reader) FieldPos(int) (int, int)
pkg go/build, type Context struct, ToolTags []string
pkg go/parser, const SkipObjectResolution = 64
pkg go/parser, const SkipObjectResolution Mode
pkg image, method (*Alpha) RGBA64At(int, int) color.RGBA64
pkg image, method (*Alpha) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*Alpha16) RGBA64At(int, int) color.RGBA64
pkg image, method (*Alpha16) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*CMYK) RGBA64At(int, int) color.RGBA64
pkg image, method (*CMYK) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*Gray) RGBA64At(int, int) color.RGBA64
pkg image, method (*Gray) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*Gray16) RGBA64At(int, int) color.RGBA64
pkg image, method (*Gray16) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*NRGBA) RGBA64At(int, int) color.RGBA64
pkg image, method (*NRGBA) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*NRGBA64) RGBA64At(int, int) color.RGBA64
pkg image, method (*NRGBA64) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*NYCbCrA) RGBA64At(int, int) color.RGBA64
pkg image, method (*Paletted) RGBA64At(int, int) color.RGBA64
pkg image, method (*Paletted) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*RGBA) RGBA64At(int, int) color.RGBA64
pkg image, method (*RGBA) SetRGBA64(int, int, color.RGBA64)
pkg image, method (*Uniform) RGBA64At(int, int) color.RGBA64
pkg image, method (*YCbCr) RGBA64At(int, int) color.RGBA64
pkg image, method (Rectangle) RGBA64At(int, int) color.RGBA64
pkg image, type RGBA64Image interface { At, Bounds, ColorModel, RGBA64At }
pkg image, type RGBA64Image interface, At(int, int) color.Color
pkg image, type RGBA64Image interface, Bounds() Rectangle
pkg image, type RGBA64Image interface, ColorModel() color.Model
pkg image, type RGBA64Image interface, RGBA64At(int, int) color.RGBA64
pkg image/draw, type RGBA64Image interface { At, Bounds, ColorModel, RGBA64At, Set, SetRGBA64 }
pkg image/draw, type RGBA64Image interface, At(int, int) color.Color
pkg image/draw, type RGBA64Image interface, Bounds() image.Rectangle
pkg image/draw, type RGBA64Image interface, ColorModel() color.Model
pkg image/draw, type RGBA64Image interface, RGBA64At(int, int) color.RGBA64
pkg image/draw, type RGBA64Image interface, Set(int, int, color.Color)
pkg image/draw, type RGBA64Image interface, SetRGBA64(int, int, color.RGBA64)
pkg io/fs, func FileInfoToDirEntry(FileInfo) DirEntry
pkg math, const MaxFloat64 = 1.79769e+308 // 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368
pkg math, const MaxInt = 9223372036854775807
pkg math, const MaxInt ideal-int
pkg math, const MaxUint = 18446744073709551615
pkg math, const MaxUint ideal-int
pkg math, const MinInt = -9223372036854775808
pkg math, const MinInt ideal-int
pkg math, const SmallestNonzeroFloat32 = 1.4013e-45 // 1/713623846352979940529142984724747568191373312
pkg math, const SmallestNonzeroFloat64 = 4.94066e-324 // 1/202402253307310618352495346718917307049556649764142118356901358027430339567995346891960383701437124495187077864316811911389808737385793476867013399940738509921517424276566361364466907742093216341239767678472745068562007483424692698618103355649159556340810056512358769552333414615230502532186327508646006263307707741093494784
pkg net, method (*ParseError) Temporary() bool
pkg net, method (*ParseError) Timeout() bool
pkg net, method (IP) IsPrivate() bool
pkg net/http, func AllowQuerySemicolons(Handler) Handler
pkg net/url, method (Values) Has(string) bool
pkg reflect, func VisibleFields(Type) []StructField
pkg reflect, method (Method) IsExported() bool
pkg reflect, method (StructField) IsExported() bool
pkg reflect, method (Value) CanConvert(Type) bool
pkg runtime/cgo (darwin-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (darwin-amd64-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-386-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-amd64-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-arm-cgo), type Handle uintptr
pkg runtime/cgo (linux-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-386-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-386-cgo), type Handle uintptr
pkg runtime/cgo (linux-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-amd64-cgo), type Handle uintptr
pkg runtime/cgo (linux-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-arm-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-386-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-amd64-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-arm-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-arm64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-arm64-cgo), type Handle uintptr
pkg runtime/cgo (openbsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (openbsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (openbsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (openbsd-386-cgo), type Handle uintptr
pkg runtime/cgo (openbsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (openbsd-amd64-cgo), type Handle uintptr
pkg strconv, func QuotedPrefix(string) (string, error)
pkg sync/atomic, method (*Value) CompareAndSwap(interface{}, interface{}) bool
pkg sync/atomic, method (*Value) Swap(interface{}) interface{}
pkg syscall (netbsd-386), const SYS_WAIT6 = 481
pkg syscall (netbsd-386), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-386), const WEXITED = 32
pkg syscall (netbsd-386), const WEXITED ideal-int
pkg syscall (netbsd-386-cgo), const SYS_WAIT6 = 481
pkg syscall (netbsd-386-cgo), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-386-cgo), const WEXITED = 32
pkg syscall (netbsd-386-cgo), const WEXITED ideal-int
pkg syscall (netbsd-amd64), const SYS_WAIT6 = 481
pkg syscall (netbsd-amd64), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-amd64), const WEXITED = 32
pkg syscall (netbsd-amd64), const WEXITED ideal-int
pkg syscall (netbsd-amd64-cgo), const SYS_WAIT6 = 481
pkg syscall (netbsd-amd64-cgo), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-amd64-cgo), const WEXITED = 32
pkg syscall (netbsd-amd64-cgo), const WEXITED ideal-int
pkg syscall (netbsd-arm), const SYS_WAIT6 = 481
pkg syscall (netbsd-arm), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-arm), const WEXITED = 32
pkg syscall (netbsd-arm), const WEXITED ideal-int
pkg syscall (netbsd-arm-cgo), const SYS_WAIT6 = 481
pkg syscall (netbsd-arm-cgo), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-arm-cgo), const WEXITED = 32
pkg syscall (netbsd-arm-cgo), const WEXITED ideal-int
pkg syscall (netbsd-arm64), const SYS_WAIT6 = 481
pkg syscall (netbsd-arm64), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-arm64), const WEXITED = 32
pkg syscall (netbsd-arm64), const WEXITED ideal-int
pkg syscall (netbsd-arm64-cgo), const SYS_WAIT6 = 481
pkg syscall (netbsd-arm64-cgo), const SYS_WAIT6 ideal-int
pkg syscall (netbsd-arm64-cgo), const WEXITED = 32
pkg syscall (netbsd-arm64-cgo), const WEXITED ideal-int
pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (windows-386), type SysProcAttr struct, AdditionalInheritedHandles []Handle
pkg syscall (windows-386), type SysProcAttr struct, ParentProcess Handle
pkg syscall (windows-amd64), type SysProcAttr struct, AdditionalInheritedHandles []Handle
pkg syscall (windows-amd64), type SysProcAttr struct, ParentProcess Handle
pkg testing, method (*B) Setenv(string, string)
pkg testing, method (*T) Setenv(string, string)
pkg testing, type TB interface, Setenv(string, string)
pkg text/template/parse, const SkipFuncCheck = 2
pkg text/template/parse, const SkipFuncCheck Mode
pkg time, const Layout = "01/02 03:04:05PM '06 -0700"
pkg time, const Layout ideal-string
pkg time, func UnixMicro(int64) Time
pkg time, func UnixMilli(int64) Time
pkg time, method (Time) GoString() string
pkg time, method (Time) IsDST() bool
pkg time, method (Time) UnixMicro() int64
pkg time, method (Time) UnixMilli() int64
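
As an illustration only (not part of the commit), here is a minimal program exercising a few of the Go 1.17 additions recorded above: the math.MaxInt constant, time.UnixMilli, the (Time).IsDST method, and (url.Values).Has.

```go
package main

import (
	"fmt"
	"math"
	"net/url"
	"time"
)

func main() {
	// math.MaxInt is one of the new untyped constants in Go 1.17.
	fmt.Println(math.MaxInt)

	// time.UnixMilli builds a Time from Unix milliseconds; UnixMilli
	// and IsDST are new Time methods in this release.
	t := time.UnixMilli(1629108861000)
	fmt.Println(t.UnixMilli(), t.IsDST())

	// (url.Values).Has reports whether a query parameter is present.
	v := url.Values{"q": {"golang"}}
	fmt.Println(v.Has("q"), v.Has("page")) // true false
}
```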

@ -1,99 +0,0 @@
pkg compress/lzw, method (*Reader) Close() error
pkg compress/lzw, method (*Reader) Read([]uint8) (int, error)
pkg compress/lzw, method (*Reader) Reset(io.Reader, Order, int)
pkg compress/lzw, method (*Writer) Close() error
pkg compress/lzw, method (*Writer) Reset(io.Writer, Order, int)
pkg compress/lzw, method (*Writer) Write([]uint8) (int, error)
pkg compress/lzw, type Reader struct
pkg compress/lzw, type Writer struct
pkg crypto/tls, method (*CertificateRequestInfo) Context() context.Context
pkg crypto/tls, method (*ClientHelloInfo) Context() context.Context
pkg crypto/tls, method (*Conn) HandshakeContext(context.Context) error
pkg debug/elf, const SHT_MIPS_ABIFLAGS = 1879048234
pkg debug/elf, const SHT_MIPS_ABIFLAGS SectionType
pkg encoding/csv, method (*Reader) FieldPos(int) (int, int)
pkg go/ast, method (*FuncDecl) IsMethod() bool
pkg go/build, type Context struct, ToolTags []string
pkg go/parser, const SkipObjectResolution = 64
pkg go/parser, const SkipObjectResolution Mode
pkg go/types, type Config struct, GoVersion string
pkg io/fs, func FileInfoToDirEntry(FileInfo) DirEntry
pkg net, method (*ParseError) Temporary() bool
pkg net, method (*ParseError) Timeout() bool
pkg net, method (IP) IsPrivate() bool
pkg reflect, func VisibleFields(Type) []StructField
pkg reflect, method (Method) IsExported() bool
pkg reflect, method (StructField) IsExported() bool
pkg runtime/cgo (darwin-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (darwin-amd64-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-386-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-amd64-cgo), type Handle uintptr
pkg runtime/cgo (freebsd-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (freebsd-arm-cgo), type Handle uintptr
pkg runtime/cgo (linux-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-386-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-386-cgo), type Handle uintptr
pkg runtime/cgo (linux-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-amd64-cgo), type Handle uintptr
pkg runtime/cgo (linux-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (linux-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (linux-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (linux-arm-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-386-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-amd64-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-arm-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-arm-cgo), type Handle uintptr
pkg runtime/cgo (netbsd-arm64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Delete()
pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (netbsd-arm64-cgo), type Handle uintptr
pkg runtime/cgo (openbsd-386-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (openbsd-386-cgo), method (Handle) Delete()
pkg runtime/cgo (openbsd-386-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (openbsd-386-cgo), type Handle uintptr
pkg runtime/cgo (openbsd-amd64-cgo), func NewHandle(interface{}) Handle
pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Delete()
pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Value() interface{}
pkg runtime/cgo (openbsd-amd64-cgo), type Handle uintptr
pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC = 2048
pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC ideal-int
pkg syscall (windows-386), type SysProcAttr struct, AdditionalInheritedHandles []Handle
pkg syscall (windows-386), type SysProcAttr struct, ParentProcess Handle
pkg syscall (windows-amd64), type SysProcAttr struct, AdditionalInheritedHandles []Handle
pkg syscall (windows-amd64), type SysProcAttr struct, ParentProcess Handle
pkg testing, method (*B) Setenv(string, string)
pkg testing, method (*T) Setenv(string, string)
pkg text/template/parse, const SkipFuncCheck = 2
pkg text/template/parse, const SkipFuncCheck Mode
pkg time, func UnixMicro(int64) Time
pkg time, func UnixMilli(int64) Time
pkg time, method (*Time) IsDST() bool
pkg time, method (Time) UnixMicro() int64
pkg time, method (Time) UnixMilli() int64

@ -166,7 +166,7 @@ jumps and branches.
</li>
<li>
<code>SP</code>: Stack pointer: top of stack.
<code>SP</code>: Stack pointer: the highest address within the local stack frame.
</li>
</ul>
@ -216,7 +216,7 @@ If a Go prototype does not name its result, the expected assembly name is <code>
The <code>SP</code> pseudo-register is a virtual stack pointer
used to refer to frame-local variables and the arguments being
prepared for function calls.
It points to the top of the local stack frame, so references should use negative offsets
It points to the highest address within the local stack frame, so references should use negative offsets
in the range [-framesize, 0):
<code>x-8(SP)</code>, <code>y-4(SP)</code>, and so on.
</p>
@ -409,7 +409,7 @@ The linker will choose one of the duplicates to use.
(For <code>TEXT</code> items.)
Don't insert the preamble to check if the stack must be split.
The frame for the routine, plus anything it calls, must fit in the
spare space at the top of the stack segment.
spare space remaining in the current stack segment.
Used to protect routines such as the stack splitting code itself.
</li>
<li>
@ -460,7 +460,7 @@ Only valid on functions that declare a frame size of 0.
<code>TOPFRAME</code> = 2048
<br>
(For <code>TEXT</code> items.)
Function is the top of the call stack. Traceback should stop at this function.
Function is the outermost frame of the call stack. Traceback should stop at this function.
</li>
</ul>
@ -827,10 +827,6 @@ The other codes are <code>-&gt;</code> (arithmetic right shift),
<h3 id="arm64">ARM64</h3>
<p>
The ARM64 port is in an experimental state.
</p>
<p>
<code>R18</code> is the "platform register", reserved on the Apple platform.
To prevent accidental misuse, the register is named <code>R18_PLATFORM</code>.

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Apr 28, 2021",
"Subtitle": "Version of Jul 26, 2021",
"Path": "/ref/spec"
}-->
@ -490,8 +490,8 @@ After a backslash, certain single-character escapes represent special values:
\n U+000A line feed or newline
\r U+000D carriage return
\t U+0009 horizontal tab
\v U+000b vertical tab
\\ U+005c backslash
\v U+000B vertical tab
\\ U+005C backslash
\' U+0027 single quote (valid escape only within rune literals)
\" U+0022 double quote (valid escape only within string literals)
</pre>
@ -4329,12 +4329,16 @@ a <a href="#Run_time_panics">run-time panic</a> occurs.
<pre>
s := make([]byte, 2, 4)
s0 := (*[0]byte)(s) // s0 != nil
s1 := (*[1]byte)(s[1:]) // &amp;s1[0] == &amp;s[1]
s2 := (*[2]byte)(s) // &amp;s2[0] == &amp;s[0]
s4 := (*[4]byte)(s) // panics: len([4]byte) > len(s)
var t []string
t0 := (*[0]string)(t) // t0 == nil
t1 := (*[1]string)(t) // panics: len([1]string) > len(s)
t1 := (*[1]string)(t) // panics: len([1]string) > len(t)
u := make([]byte, 0)
u0 := (*[0]byte)(u) // u0 != nil
</pre>
<h3 id="Constant_expressions">Constant expressions</h3>
@ -4670,7 +4674,7 @@ The following built-in functions are not permitted in statement context:
<pre>
append cap complex imag len make new real
unsafe.Alignof unsafe.Offsetof unsafe.Sizeof
unsafe.Add unsafe.Alignof unsafe.Offsetof unsafe.Sizeof unsafe.Slice
</pre>
<pre>
@ -4909,7 +4913,7 @@ if x := f(); x &lt; y {
<p>
"Switch" statements provide multi-way execution.
An expression or type specifier is compared to the "cases"
An expression or type is compared to the "cases"
inside the "switch" to determine which branch
to execute.
</p>
@ -5020,7 +5024,7 @@ floating point, or string constants in case expressions.
A type switch compares types rather than values. It is otherwise similar
to an expression switch. It is marked by a special switch expression that
has the form of a <a href="#Type_assertions">type assertion</a>
using the reserved word <code>type</code> rather than an actual type:
using the keyword <code>type</code> rather than an actual type:
</p>
<pre>
@ -6782,18 +6786,26 @@ The rules for <a href="/pkg/unsafe#Pointer">valid uses</a> of <code>Pointer</cod
<p>
The function <code>Slice</code> returns a slice whose underlying array starts at <code>ptr</code>
and whose length and capacity are <code>len</code>:
and whose length and capacity are <code>len</code>.
<code>Slice(ptr, len)</code> is equivalent to
</p>
<pre>
(*[len]ArbitraryType)(unsafe.Pointer(ptr))[:]
</pre>
<p>
except that, as a special case, if <code>ptr</code>
is <code>nil</code> and <code>len</code> is zero,
<code>Slice</code> returns <code>nil</code>.
</p>
<p>
The <code>len</code> argument must be of integer type or an untyped <a href="#Constants">constant</a>.
A constant <code>len</code> argument must be non-negative and <a href="#Representability">representable</a> by a value of type <code>int</code>;
if it is an untyped constant it is given type <code>int</code>.
If <code>ptr</code> is <code>nil</code> or <code>len</code> is negative at run time,
At run time, if <code>len</code> is negative,
or if <code>ptr</code> is <code>nil</code> and <code>len</code> is not zero,
a <a href="#Run_time_panics">run-time panic</a> occurs.
</p>
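A short, self-contained illustration of the Slice behavior described above, including the nil-pointer special case (variable names are arbitrary):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	arr := [4]int{1, 2, 3, 4}

	// A slice over the array's storage, built from a pointer and a length.
	s := unsafe.Slice(&arr[0], len(arr))
	fmt.Println(s, len(s), cap(s)) // [1 2 3 4] 4 4

	// Special case: a nil pointer with zero length yields a nil slice.
	var p *int
	fmt.Println(unsafe.Slice(p, 0) == nil) // true
}
```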

View file

@ -4,7 +4,7 @@ The IANA asserts that the database is in the public domain.
For more information, see
https://www.iana.org/time-zones
ftp://ftp.iana.org/tz/code/tz-link.htm
http://tools.ietf.org/html/rfc6557
ftp://ftp.iana.org/tz/code/tz-link.html
https://datatracker.ietf.org/doc/html/rfc6557
To rebuild the archive, read and run update.bash.

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// This program can be used as go_android_GOARCH_exec by the Go tool.

View file

@ -40,7 +40,8 @@ func check(t *testing.T, file string) {
if len(frags) == 1 {
continue
}
re, err := regexp.Compile(string(frags[1]))
frag := fmt.Sprintf(":%d:.*%s", i+1, frags[1])
re, err := regexp.Compile(frag)
if err != nil {
t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frags[1])
continue

View file

@ -40,15 +40,15 @@ func main() {
C.foop = x // ERROR HERE
// issue 13129: used to output error about C.unsignedshort with CC=clang
var x C.ushort
x = int(0) // ERROR HERE: C\.ushort
var x1 C.ushort
x1 = int(0) // ERROR HERE: C\.ushort
// issue 13423
_ = C.fopen() // ERROR HERE
// issue 13467
var x rune = '✈'
var _ rune = C.transform(x) // ERROR HERE: C\.int
var x2 rune = '✈'
var _ rune = C.transform(x2) // ERROR HERE: C\.int
// issue 13635: used to output error about C.unsignedchar.
// This test tests all such types.
@ -91,10 +91,18 @@ func main() {
// issue 26745
_ = func(i int) int {
return C.i + 1 // ERROR HERE: :13
// typecheck reports at column 14 ('+'), but types2 reports at
// column 10 ('C').
// TODO(mdempsky): Investigate why, and see if types2 can be
// updated to match typecheck behavior.
return C.i + 1 // ERROR HERE: \b(10|14)\b
}
_ = func(i int) {
C.fi(i) // ERROR HERE: :6
// typecheck reports at column 7 ('('), but types2 reports at
// column 8 ('i'). The types2 position is more correct, but
// updating typecheck here is fundamentally challenging because of
// IR limitations.
C.fi(i) // ERROR HERE: \b(7|8)\b
}
C.fi = C.fi // ERROR HERE

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// Compute Fibonacci numbers with two goroutines

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
package main

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && freebsd && openbsd
// +build linux,freebsd,openbsd
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && cgo
// +build linux,cgo
package cgotest
@ -9,6 +10,7 @@ package cgotest
import (
"fmt"
"os"
"sort"
"strings"
"syscall"
"testing"
@ -105,12 +107,24 @@ func compareStatus(filter, expect string) error {
// "Pid:\t".
}
if strings.HasPrefix(line, filter) {
if line != expected {
return fmt.Errorf("%q got:%q want:%q (bad) [pid=%d file:'%s' %v]\n", tf, line, expected, pid, string(d), expectedProc)
}
if line == expected {
foundAThread = true
break
}
if filter == "Groups:" && strings.HasPrefix(line, "Groups:\t") {
// https://github.com/golang/go/issues/46145
// Containers don't reliably output this line in sorted order so manually sort and compare that.
a := strings.Split(line[8:], " ")
sort.Strings(a)
got := strings.Join(a, " ")
if got == expected[8:] {
foundAThread = true
break
}
}
return fmt.Errorf("%q got:%q want:%q (bad) [pid=%d file:'%s' %v]\n", tf, line, expected, pid, string(d), expectedProc)
}
}
}
if !foundAThread {
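The sort-before-compare normalization used above can be shown in isolation; this sketch uses made-up group IDs rather than real /proc status output:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalizeGroups sorts the IDs in a "Groups:" status line so two lines can
// be compared regardless of the order a container runtime printed them in.
func normalizeGroups(line string) string {
	ids := strings.Split(strings.TrimPrefix(line, "Groups:\t"), " ")
	sort.Strings(ids)
	return strings.Join(ids, " ")
}

func main() {
	got := "Groups:\t20 1000 4"
	want := "Groups:\t1000 20 4"
	fmt.Println(normalizeGroups(got) == normalizeGroups(want)) // true
}
```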

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
// Issue 18146: pthread_create failure during syscall.Exec.

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && cgo && !internal
// +build darwin,cgo,!internal
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !darwin || !cgo || internal
// +build !darwin !cgo internal
package cgotest

View file

@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows,!static
//go:build !windows && !static && (!darwin || (!internal_pie && !arm64))
// +build !windows
// +build !static
// +build !darwin !internal_pie,!arm64
// Excluded in darwin internal linking PIE mode, as dynamic export is not

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows || static || (darwin && internal_pie) || (darwin && arm64)
// +build windows static darwin,internal_pie darwin,arm64
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !android
// +build !android
// Test that pthread_cancel works as expected

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !android
// +build !android
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows && !android
// +build !windows,!android
// Test that the Go runtime still works if C code changes the signal stack.

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package cgotest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package cgotest

View file

@ -292,11 +292,60 @@ func createHeaders() error {
"-installsuffix", "testcshared",
"-o", libgoname,
filepath.Join(".", "libgo", "libgo.go")}
if GOOS == "windows" && strings.HasSuffix(args[6], ".a") {
args[6] = strings.TrimSuffix(args[6], ".a") + ".dll"
}
cmd = exec.Command(args[0], args[1:]...)
out, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out)
}
if GOOS == "windows" {
// We can't simply pass -Wl,--out-implib, because this relies on having imports from multiple packages,
// which results in the linker's output implib getting overwritten at each step. So instead build the
// import library the traditional way, using a def file.
err = os.WriteFile("libgo.def",
[]byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n\t_cgo_dummy_export\n"),
0644)
if err != nil {
return fmt.Errorf("unable to write def file: %v", err)
}
out, err = exec.Command(cc[0], append(cc[1:], "-print-prog-name=dlltool")...).CombinedOutput()
if err != nil {
return fmt.Errorf("unable to find dlltool path: %v\n%s\n", err, out)
}
args := []string{strings.TrimSpace(string(out)), "-D", args[6], "-l", libgoname, "-d", "libgo.def"}
// This is an unfortunate workaround for https://github.com/mstorsjo/llvm-mingw/issues/205 in which
// we basically reimplement the contents of the dlltool.sh wrapper: https://git.io/JZFlU
dlltoolContents, err := os.ReadFile(args[0])
if err != nil {
return fmt.Errorf("unable to read dlltool: %v\n", err)
}
if bytes.HasPrefix(dlltoolContents, []byte("#!/bin/sh")) && bytes.Contains(dlltoolContents, []byte("llvm-dlltool")) {
base, name := filepath.Split(args[0])
args[0] = filepath.Join(base, "llvm-dlltool")
var machine string
switch strings.SplitN(name, "-", 2)[0] {
case "i686":
machine = "i386"
case "x86_64":
machine = "i386:x86-64"
case "armv7":
machine = "arm"
case "aarch64":
machine = "arm64"
}
if len(machine) > 0 {
args = append(args, "-m", machine)
}
}
out, err = exec.Command(args[0], args[1:]...).CombinedOutput()
if err != nil {
return fmt.Errorf("unable to run dlltool to create import library: %v\n%s\n", err, out)
}
}
if runtime.GOOS != GOOS && GOOS == "android" {
args = append(adbCmd(), "push", libgoname, fmt.Sprintf("%s/%s", androiddir, libgoname))
@ -400,7 +449,7 @@ func main() {
defer f.Close()
section := f.Section(".edata")
if section == nil {
t.Fatalf(".edata section is not present")
t.Skip(".edata section is not present")
}
// TODO: deduplicate this struct from cmd/link/internal/ld/pe.go
@ -749,7 +798,12 @@ func TestGo2C2Go(t *testing.T) {
defer os.RemoveAll(tmpdir)
lib := filepath.Join(tmpdir, "libtestgo2c2go."+libSuffix)
run(t, nil, "go", "build", "-buildmode=c-shared", "-o", lib, "./go2c2go/go")
var env []string
if GOOS == "windows" && strings.HasSuffix(lib, ".a") {
env = append(env, "CGO_LDFLAGS=-Wl,--out-implib,"+lib, "CGO_LDFLAGS_ALLOW=.*")
lib = strings.TrimSuffix(lib, ".a") + ".dll"
}
run(t, env, "go", "build", "-buildmode=c-shared", "-o", lib, "./go2c2go/go")
cgoCflags := os.Getenv("CGO_CFLAGS")
if cgoCflags != "" {

View file

@ -263,6 +263,17 @@ func TestIssue25756(t *testing.T) {
}
}
// Test with main using -buildmode=pie with plugin for issue #43228
func TestIssue25756pie(t *testing.T) {
if os.Getenv("GO_BUILDER_NAME") == "darwin-arm64-11_0-toothrot" {
t.Skip("broken on darwin/arm64 builder in sharded mode; see issue 46239")
}
goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin")
goCmd(t, "build", "-buildmode=pie", "-o", "issue25756pie.exe", "./issue25756/main.go")
run(t, "./issue25756pie.exe")
}
func TestMethod(t *testing.T) {
// Exported symbol's method must be live.
goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./method/plugin.go")

View file

@ -42,6 +42,7 @@ func TestMSAN(t *testing.T) {
{src: "msan5.go"},
{src: "msan6.go"},
{src: "msan7.go"},
{src: "msan8.go"},
{src: "msan_fail.go", wantErr: true},
}
for _, tc := range cases {

View file

@ -0,0 +1,109 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <sanitizer/msan_interface.h>
// cgoTracebackArg is the type of the argument passed to msanGoTraceback.
struct cgoTracebackArg {
uintptr_t context;
uintptr_t sigContext;
uintptr_t* buf;
uintptr_t max;
};
// msanGoTraceback is registered as the cgo traceback function.
// This will be called when a signal occurs.
void msanGoTraceback(void* parg) {
struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg);
arg->buf[0] = 0;
}
// msanGoWait will be called with all registers undefined as far as
// msan is concerned. It just waits for a signal.
// Because the registers are msan-undefined, the signal handler will
// be invoked with all registers msan-undefined.
__attribute__((noinline))
void msanGoWait(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long a6) {
sigset_t mask;
sigemptyset(&mask);
sigsuspend(&mask);
}
// msanGoSignalThread is the thread ID of the msanGoLoop thread.
static pthread_t msanGoSignalThread;
// msanGoSignalThreadSet is used to record that msanGoSignalThread
// has been initialized. This is accessed atomically.
static int32_t msanGoSignalThreadSet;
// uninit is explicitly poisoned, so that we can make all registers
// undefined by calling msanGoWait.
static unsigned long uninit;
// msanGoLoop loops calling msanGoWait, with the arguments passed
// such that msan thinks that they are undefined. msan permits
// undefined values to be used as long as they are not used
// for conditionals or for memory access.
void msanGoLoop() {
int i;
msanGoSignalThread = pthread_self();
__atomic_store_n(&msanGoSignalThreadSet, 1, __ATOMIC_SEQ_CST);
// Force uninit to be undefined for msan.
__msan_poison(&uninit, sizeof uninit);
for (i = 0; i < 100; i++) {
msanGoWait(uninit, uninit, uninit, uninit, uninit, uninit);
}
}
// msanGoReady returns whether msanGoSignalThread is set.
int msanGoReady() {
return __atomic_load_n(&msanGoSignalThreadSet, __ATOMIC_SEQ_CST) != 0;
}
// msanGoSendSignal sends a signal to the msanGoLoop thread.
void msanGoSendSignal() {
pthread_kill(msanGoSignalThread, SIGWINCH);
}
*/
import "C"
import (
"runtime"
"time"
)
func main() {
runtime.SetCgoTraceback(0, C.msanGoTraceback, nil, nil)
c := make(chan bool)
go func() {
defer func() { c <- true }()
C.msanGoLoop()
}()
for C.msanGoReady() == 0 {
time.Sleep(time.Microsecond)
}
loop:
for {
select {
case <-c:
break loop
default:
C.msanGoSendSignal()
time.Sleep(time.Microsecond)
}
}
}

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !cgo
// +build !cgo
package so_test

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package so_test

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !cgo
// +build !cgo
package so_test

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo
// +build cgo
package so_test

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package cgotlstest

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// detect attempts to autodetect the correct

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build explicit
// +build explicit
// Package experiment_toolid_test verifies that GOEXPERIMENT settings built

View file

@ -401,6 +401,7 @@
storeValue(sp + 56, result);
this.mem.setUint8(sp + 64, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, err);
this.mem.setUint8(sp + 64, 0);
}
@ -417,6 +418,7 @@
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}
@ -433,6 +435,7 @@
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}

View file

@ -96,7 +96,15 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
return err
}
z.r = r
// Since the number of directory records is not validated, it is not
// safe to preallocate z.File without first checking that the specified
// number of files is reasonable, since a malformed archive may
// indicate it contains up to 1 << 128 - 1 files. Since each file has a
// header which will be _at least_ 30 bytes we can safely preallocate
// if (data size / 30) >= end.directoryRecords.
if (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
z.File = make([]*File, 0, end.directoryRecords)
}
z.Comment = end.comment
rs := io.NewSectionReader(r, 0, size)
if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
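To make the bound explicit, here is the guard's arithmetic factored out on its own; the function name and the sample numbers are assumptions for illustration, not part of archive/zip:

```go
package main

import "fmt"

// canPrealloc reports whether the directory record count claimed by the
// end-of-central-directory record can possibly fit in the input: each
// central directory header takes at least 30 bytes.
func canPrealloc(size int64, directorySize, directoryRecords uint64) bool {
	return (uint64(size)-directorySize)/30 >= directoryRecords
}

func main() {
	// A 1 KiB archive claiming an absurd number of entries: do not preallocate.
	fmt.Println(canPrealloc(1024, 100, 1<<60)) // false
	// The same archive claiming 5 entries: preallocation is safe.
	fmt.Println(canPrealloc(1024, 100, 5)) // true
}
```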

View file

@ -1325,3 +1325,62 @@ func TestReadDataDescriptor(t *testing.T) {
})
}
}
func TestCVE202133196(t *testing.T) {
// Archive that indicates it has 1 << 128 -1 files,
// this would previously cause a panic due to attempting
// to allocate a slice with 1 << 128 -1 elements.
data := []byte{
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x02,
0x03, 0x62, 0x61, 0x65, 0x03, 0x04, 0x00, 0x00,
0xff, 0xff, 0x50, 0x4b, 0x07, 0x08, 0xbe, 0x20,
0x5c, 0x6c, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00,
0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0xbe, 0x20, 0x5c, 0x6c, 0x09, 0x00,
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x02, 0x03, 0x50, 0x4b, 0x06, 0x06, 0x2c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d,
0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x31, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00,
0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x00, 0x00,
}
_, err := NewReader(bytes.NewReader(data), int64(len(data)))
if err != ErrFormat {
t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
}
// Also check that an archive containing a handful of empty
// files doesn't cause an issue
b := bytes.NewBuffer(nil)
w := NewWriter(b)
for i := 0; i < 5; i++ {
_, err := w.Create("")
if err != nil {
t.Fatalf("Writer.Create failed: %s", err)
}
}
if err := w.Close(); err != nil {
t.Fatalf("Writer.Close failed: %s", err)
}
r, err := NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
if err != nil {
t.Fatalf("NewReader failed: %s", err)
}
if len(r.File) != 5 {
t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
}
}

View file

@ -1003,7 +1003,8 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) {
p.errorf("unimplemented two-register form")
}
a.Index = r1
if scale != 0 && p.arch.Family == sys.ARM64 {
if scale != 0 && scale != 1 && p.arch.Family == sys.ARM64 {
// Support (R1)(R2) (no scaling) and (R1)(R2*1).
p.errorf("arm64 doesn't support scaled register format")
} else {
a.Scale = int16(scale)

View file

@ -89,7 +89,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
CMP R1<<33, R2
CMP R22.SXTX, RSP // ffe336eb
CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff433b6b
CCMN MI, ZR, R1, $4 // e44341ba
// MADD Rn,Rm,Ra,Rd
MADD R1, R2, R3, R4 // 6408019b
@ -377,6 +377,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $0x1000100010001000, RSP // MOVD $1152939097061330944, RSP // ff8304b2
MOVW $0x10001000, RSP // MOVW $268439552, RSP // ff830432
ADDW $0x10001000, R1 // ADDW $268439552, R1 // fb83043221001b0b
ADDW $0x22220000, RSP, R3 // ADDW $572653568, RSP, R3 // 5b44a452e3433b0b
// move a large constant to a Vd.
VMOVS $0x80402010, V11 // VMOVS $2151686160, V11
@ -547,6 +548,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
// shifted or extended register offset.
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // 656866f8
MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // 446866f8
MOVWU (R19)(R20<<2), R20 // 747a74b8
MOVD (R2)(R6<<3), R4 // 447866f8
@ -579,6 +581,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVB R4, (R2)(R6.SXTX) // 44e82638
MOVB R8, (R3)(R9.UXTW) // 68482938
MOVB R10, (R5)(R8) // aa682838
MOVB R10, (R5)(R8*1) // aa682838
MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778
MOVH R5, (R1)(R2<<1) // 25782278
MOVH R7, (R2)(R5.SXTX<<1) // 47f82578

View file

@ -52,6 +52,16 @@ TEXT errors(SB),$0
NEGSW R7@>2, R5 // ERROR "unsupported shift operator"
CINC CS, R2, R3, R4 // ERROR "illegal combination"
CSEL LT, R1, R2 // ERROR "illegal combination"
CINC AL, R2, R3 // ERROR "invalid condition"
CINC NV, R2, R3 // ERROR "invalid condition"
CINVW AL, R2, R3 // ERROR "invalid condition"
CINV NV, R2, R3 // ERROR "invalid condition"
CNEG AL, R2, R3 // ERROR "invalid condition"
CNEGW NV, R2, R3 // ERROR "invalid condition"
CSET AL, R2 // ERROR "invalid condition"
CSET NV, R2 // ERROR "invalid condition"
CSETMW AL, R2 // ERROR "invalid condition"
CSETM NV, R2 // ERROR "invalid condition"
LDP.P 8(R2), (R2, R3) // ERROR "constrained unpredictable behavior"
LDP.W 8(R3), (R2, R3) // ERROR "constrained unpredictable behavior"
LDP (R1), (R2, R2) // ERROR "constrained unpredictable behavior"

View file

@ -41,8 +41,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVDBR (R3)(R4), R5 // 7ca41c28
MOVWBR (R3)(R4), R5 // 7ca41c2c
MOVHBR (R3)(R4), R5 // 7ca41e2c
MOVD $foo+4009806848(FP), R5 // 3fe1ef0138bfcc20
MOVD $foo(SB), R5 // 3fe0000038bf0000
MOVD $foo+4009806848(FP), R5 // 3ca1ef0138a5cc20
MOVD $foo(SB), R5 // 3ca0000038a50000
MOVDU 8(R3), R4 // e8830009
MOVDU (R3)(R4), R5 // 7ca4186a

View file

@ -1638,6 +1638,8 @@ func (p *Package) gccCmd() []string {
c = append(c, "-maix64")
c = append(c, "-mcmodel=large")
}
// disable LTO so we get an object whose symbols we can read
c = append(c, "-fno-lto")
c = append(c, "-") //read input from standard input
return c
}

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Cgo; see gmp.go for an overview.
// Cgo; see doc.go for an overview.
// TODO(rsc):
// Emit correct line number annotations.

View file

@ -167,9 +167,19 @@ func (p *Package) writeDefs() {
if !cVars[n.C] {
if *gccgo {
fmt.Fprintf(fc, "extern byte *%s;\n", n.C)
} else {
// Force a reference to all symbols so that
// the external linker will add DT_NEEDED
// entries as needed on ELF systems.
// Treat function variables differently
// to avoid type conflict errors from LTO
// (Link Time Optimization).
if n.Kind == "fpvar" {
fmt.Fprintf(fm, "extern void %s();\n", n.C)
} else {
fmt.Fprintf(fm, "extern char %s[];\n", n.C)
fmt.Fprintf(fm, "void *_cgohack_%s = %s;\n\n", n.C, n.C)
}
fmt.Fprintf(fgo2, "//go:linkname __cgo_%s %s\n", n.C, n.C)
fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", n.C)
fmt.Fprintf(fgo2, "var __cgo_%s byte\n", n.C)
@ -1042,7 +1052,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
// This unpacks the argument struct above and calls the Go function.
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a *%s) {\n", cPrefix, exp.ExpName, gotype)
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
fmt.Fprintf(fm, "void _cgoexp%s_%s(void* p){}\n", cPrefix, exp.ExpName)
if gccResult != "void" {
// Write results back to frame.

View file

@ -233,7 +233,7 @@ stack frame is laid out in the following sequence:
r1.x uintptr
r1.y [2]uintptr
a1Spill uint8
a2Spill uint8
a3Spill uint8
_ [6]uint8 // alignment padding
In the stack frame, only the `a2` field is initialized on entry; the
@ -402,7 +402,7 @@ without corrupting arguments or results.
Special-purpose registers are as follows:
| Register | Call meaning | Return meaning | Body meaning |
| --- | --- | --- |
| --- | --- | --- | --- |
| RSP | Stack pointer | Same | Same |
| RBP | Frame pointer | Same | Same |
| RDX | Closure context pointer | Scratch | Scratch |
@ -505,6 +505,128 @@ control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
### arm64 architecture
The arm64 architecture uses R0 – R15 for integer arguments and results.
It uses F0 – F15 for floating-point arguments and results.
*Rationale*: 16 integer registers and 16 floating-point registers are
more than enough for passing arguments and results for practically all
functions (see Appendix). While there are more registers available,
using more registers provides little benefit. Additionally, it will add
overhead on code paths where the number of arguments is not statically
known (e.g. reflect call), and will consume more stack space when there
is only limited stack space available to fit in the nosplit limit.
Registers R16 and R17 are permanent scratch registers. They are also
used as scratch registers by the linker (Go linker and external
linker) in trampolines.
Register R18 is reserved and never used. It is reserved for the OS
on some platforms (e.g. macOS).
Registers R19 – R25 are permanent scratch registers. In addition,
R27 is a permanent scratch register used by the assembler when
expanding instructions.
Floating-point registers F16 – F31 are also permanent scratch
registers.
Special-purpose registers are as follows:
| Register | Call meaning | Return meaning | Body meaning |
| --- | --- | --- | --- |
| RSP | Stack pointer | Same | Same |
| R30 | Link register | Same | Scratch (non-leaf functions) |
| R29 | Frame pointer | Same | Same |
| R28 | Current goroutine | Same | Same |
| R27 | Scratch | Scratch | Scratch |
| R26 | Closure context pointer | Scratch | Scratch |
| R18 | Reserved (not used) | Same | Same |
| ZR | Zero value | Same | Same |
*Rationale*: These register meanings are compatible with Go's
stack-based calling convention.
*Rationale*: The link register, R30, holds the function return
address at the function entry. For functions that have frames
(including most non-leaf functions), R30 is saved to stack in the
function prologue and restored in the epilogue. Within the function
body, R30 can be used as a scratch register.
*Implementation note*: Registers with fixed meaning at calls but not
in function bodies must be initialized by "injected" calls such as
signal-based panics.
#### Stack layout
The stack pointer, RSP, grows down and is always aligned to 16 bytes.
*Rationale*: The arm64 architecture requires the stack pointer to be
16-byte aligned.
A function's stack frame, after the frame is created, is laid out as
follows:
+------------------------------+
| ... locals ...               |
| ... outgoing arguments ...   |
| return PC                    | ← RSP points to
| frame pointer on entry       |
+------------------------------+ ↓ lower addresses
The "return PC" is loaded to the link register, R30, as part of the
arm64 `CALL` operation.
On entry, a function subtracts from RSP to open its stack frame, and
saves the values of R30 and R29 at the bottom of the frame.
Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
after RSP is updated.
A leaf function that does not require any stack space may omit the
saved R30 and R29.
The Go ABI's use of R29 as a frame pointer register is compatible with
the arm64 architecture requirement, so that Go can interoperate with
platform debuggers and profilers.
This stack layout is used by both register-based (ABIInternal) and
stack-based (ABI0) calling conventions.
#### Flags
The arithmetic status flags (NZCV) are treated like scratch registers
and not preserved across calls.
All other bits in PSTATE are system flags and are not modified by Go.
The floating-point status register (FPSR) is treated like scratch
registers and not preserved across calls.
At calls, the floating-point control register (FPCR) bits are always
set as follows:
| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| DN | 25 | 0 | Propagate NaN operands |
| FZ | 24 | 0 | Do not flush to zero |
| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
| IDE | 15 | 0 | Denormal operations trap disabled |
| IXE | 12 | 0 | Inexact trap disabled |
| UFE | 11 | 0 | Underflow trap disabled |
| OFE | 10 | 0 | Overflow trap disabled |
| DZE | 9 | 0 | Divide-by-zero trap disabled |
| IOE | 8 | 0 | Invalid operations trap disabled |
| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
| AH | 1 | 0 | No alternate handling of de-normal inputs |
| FIZ | 0 | 0 | Do not zero de-normals |
*Rationale*: Having a fixed FPCR control configuration allows Go
functions to use floating-point and vector (SIMD) operations without
modifying or saving the FPCR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.
## Future directions
### Spill path improvements

View file

@ -446,35 +446,20 @@ func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResul
return result
}
// parameterUpdateMu protects the Offset field of function/method parameters (a subset of structure Fields)
var parameterUpdateMu sync.Mutex
// FieldOffsetOf returns a concurrency-safe version of f.Offset
func FieldOffsetOf(f *types.Field) int64 {
parameterUpdateMu.Lock()
defer parameterUpdateMu.Unlock()
return f.Offset
}
func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn, setNname bool) {
// Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
if !isReturn || len(a.Registers) == 0 {
// The type frame offset DOES NOT show effects of minimum frame size.
// Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set
parameterUpdateMu.Lock()
defer parameterUpdateMu.Unlock()
off := a.FrameOffset(result)
fOffset := f.Offset
if fOffset == types.BOGUS_FUNARG_OFFSET {
// Set the Offset the first time. After that, we may recompute it, but it should never change.
f.Offset = off
if f.Nname != nil {
// always set it in this case.
if setNname && f.Nname != nil {
f.Nname.(*ir.Name).SetFrameOffset(off)
f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
}
} else if fOffset != off {
base.Fatalf("offset for %s at %s changed from %d to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset, off)
} else {
base.Fatalf("field offset for %s at %s has been set to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset)
}
} else {
if setNname && f.Nname != nil {

View file

@ -18,11 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
arch.LoadRegResults = loadRegResults
arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}

View file

@ -57,7 +57,6 @@ func dzDI(b int64) int64 {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
r13 = 1 << iota // if R13 is already zeroed.
x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
)
if cnt == 0 {
@ -85,11 +84,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
}
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
@ -98,10 +92,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
}
// Save DI to r12. With the amd64 Go register abi, DI can contain
// an incoming parameter, whereas R12 is always scratch.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)

View file

@ -823,7 +823,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off64())
case ssa.OpAMD64MOVOstorezero:
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@ -914,7 +914,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64DUFFZERO:
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@ -997,22 +997,26 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
if s.ABI == obj.ABIInternal {
v.Fatalf("LoweredGetG should not appear in ABIInternal")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@ -1304,9 +1308,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@ -1348,20 +1354,15 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
}
}
func loadRegResults(s *ssagen.State, f *ssa.Func) {
for _, o := range f.OwnAux.ABIInfo().OutParams() {
n := o.Name.(*ir.Name)
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
p := s.Prog(loadByType(rts[i]))
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p := s.Prog(loadByType(t))
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_AUTO
p.From.Sym = n.Linksym()
p.From.Offset = n.FrameOffset() + offs[i]
p.From.Offset = n.FrameOffset() + off
p.To.Type = obj.TYPE_REG
p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
}
}
p.To.Reg = reg
return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {

View file

@ -18,7 +18,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOARM == 5
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue

View file

@ -18,9 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.PadFrame = padframe
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}

View file

@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
@ -161,6 +162,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
// The loop only runs once.
for _, a := range v.Block.Func.RegArgs {
// Pass the spill/unspill information along to the assembler, offset by size of
// the saved LR slot.
addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
s.FuncInfo().AddSpill(
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
}
v.Block.Func.RegArgs = nil
ssagen.CheckArgReg(v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@ -1101,8 +1114,34 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpClobber, ssa.OpClobberReg:
// TODO: implement for clobberdead experiment. Nop is ok for now.
case ssa.OpClobber:
// MOVW $0xdeaddead, REGTMP
// MOVW REGTMP, (slot)
// MOVW REGTMP, 4(slot)
p := s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
p = s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGTMP
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
ssagen.AddAux(&p.To, v)
p = s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGTMP
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
ssagen.AddAux2(&p.To, v, v.AuxInt+4)
case ssa.OpClobberReg:
x := uint64(0xdeaddeaddeaddead)
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(x)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
@ -1266,3 +1305,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p := s.Prog(loadByType(t))
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_AUTO
p.From.Sym = n.Linksym()
p.From.Offset = n.FrameOffset() + off
p.To.Type = obj.TYPE_REG
p.To.Reg = reg
return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
p.To.Name = obj.NAME_PARAM
p.To.Sym = n.Linksym()
p.Pos = p.Pos.WithNotStmt()
return p
}

View file

@ -0,0 +1,12 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !compiler_bootstrap
// +build !compiler_bootstrap
package base
// CompilerBootstrap reports whether the current compiler binary was
// built with -tags=compiler_bootstrap.
const CompilerBootstrap = false

View file

@ -0,0 +1,12 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build compiler_bootstrap
// +build compiler_bootstrap
package base
// CompilerBootstrap reports whether the current compiler binary was
// built with -tags=compiler_bootstrap.
const CompilerBootstrap = true

View file

@ -44,8 +44,11 @@ type DebugFlags struct {
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
Unified int `help:"enable unified IR construction"`
UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`

View file

@ -159,7 +159,11 @@ func ParseFlags() {
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
Debug.InlFuncsWithClosures = 1
if buildcfg.Experiment.Unified {
Debug.Unified = 1
}
Debug.Checkptr = -1 // so we can tell whether it is set explicitly

View file

@ -5,7 +5,7 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd
package typecheck
package base
import (
"os"
@ -19,7 +19,7 @@ import (
// mapFile returns length bytes from the file starting at the
// specified offset as a string.
func mapFile(f *os.File, offset, length int64) (string, error) {
func MapFile(f *os.File, offset, length int64) (string, error) {
// POSIX mmap: "The implementation may require that off is a
// multiple of the page size."
x := offset & int64(os.Getpagesize()-1)

View file

@ -5,14 +5,14 @@
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package typecheck
package base
import (
"io"
"os"
)
func mapFile(f *os.File, offset, length int64) (string, error) {
func MapFile(f *os.File, offset, length int64) (string, error) {
buf := make([]byte, length)
_, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
if err != nil {

View file

@ -233,6 +233,27 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
ErrorExit()
}
// Assert reports "assertion failed" with Fatalf, unless b is true.
func Assert(b bool) {
if !b {
Fatalf("assertion failed")
}
}
// Assertf reports a fatal error with Fatalf, unless b is true.
func Assertf(b bool, format string, args ...interface{}) {
if !b {
Fatalf(format, args...)
}
}
// AssertfAt reports a fatal error with FatalfAt, unless b is true.
func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
if !b {
FatalfAt(pos, format, args...)
}
}
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {
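As a rough usage sketch, call sites for the Assert helpers added above replace hand-written if !cond { base.Fatalf(...) } blocks; the standalone analogue below panics instead of calling the compiler's Fatalf, and its names and conditions are invented:

```go
package main

import "fmt"

// Stand-ins with the same shape as base.Assert and base.Assertf.
func assert(b bool) {
	if !b {
		panic("assertion failed")
	}
}

func assertf(b bool, format string, args ...interface{}) {
	if !b {
		panic(fmt.Sprintf(format, args...))
	}
}

func main() {
	params, args := 2, 2
	assert(params == args)
	assertf(args >= 0, "negative argument count %d", args)
	fmt.Println("ok")
}
```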

View file

@ -222,9 +222,64 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
// Sort decls and vars.
sortDeclsAndVars(fn, decls, vars)
return decls, vars
}
// sortDeclsAndVars sorts the decl and dwarf var lists according to
// parameter declaration order, so as to ensure that when a subprogram
// DIE is emitted, its parameter children appear in declaration order.
// Prior to the advent of the register ABI, sorting by frame offset
// would achieve this; with the register ABI we now need to go back to the
// original function signature.
func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) {
paramOrder := make(map[*ir.Name]int)
idx := 1
for _, selfn := range types.RecvsParamsResults {
fsl := selfn(fn.Type()).FieldSlice()
for _, f := range fsl {
if n, ok := f.Nname.(*ir.Name); ok {
paramOrder[n] = idx
idx++
}
}
}
sort.Stable(varsAndDecls{decls, vars, paramOrder})
}
type varsAndDecls struct {
decls []*ir.Name
vars []*dwarf.Var
paramOrder map[*ir.Name]int
}
func (v varsAndDecls) Len() int {
return len(v.decls)
}
func (v varsAndDecls) Less(i, j int) bool {
nameLT := func(ni, nj *ir.Name) bool {
oi, foundi := v.paramOrder[ni]
oj, foundj := v.paramOrder[nj]
if foundi {
if foundj {
return oi < oj
} else {
return true
}
}
return false
}
return nameLT(v.decls[i], v.decls[j])
}
func (v varsAndDecls) Swap(i, j int) {
v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
v.decls[i], v.decls[j] = v.decls[j], v.decls[i]
}
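The same stable-sort-by-lookup pattern in a runnable sketch; the names and the ordering map are invented, and unlike the real code only a single slice is sorted:

```go
package main

import (
	"fmt"
	"sort"
)

// byParamOrder sorts names so that those present in order (parameters keyed
// by declaration index) come first, in declaration order; everything else
// keeps its original relative position thanks to sort.Stable.
type byParamOrder struct {
	names []string
	order map[string]int
}

func (v byParamOrder) Len() int      { return len(v.names) }
func (v byParamOrder) Swap(i, j int) { v.names[i], v.names[j] = v.names[j], v.names[i] }
func (v byParamOrder) Less(i, j int) bool {
	oi, foundi := v.order[v.names[i]]
	oj, foundj := v.order[v.names[j]]
	if foundi {
		if foundj {
			return oi < oj
		}
		return true
	}
	return false
}

func main() {
	names := []string{"local1", "b", "local2", "a"}
	order := map[string]int{"a": 1, "b": 2} // declaration order of parameters
	sort.Stable(byParamOrder{names, order})
	fmt.Println(names) // [a b local1 local2]
}
```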
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
@ -476,6 +531,14 @@ func RecordFlags(flags ...string) {
fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
// Adds flag to producer string signalling whether regabi is turned on or
// off.
// Once regabi is turned on across the board and the relative GOEXPERIMENT
// knobs no longer exist this code should be removed.
if buildcfg.Experiment.RegabiArgs {
cmd.Write([]byte(" regabi"))
}
if cmd.Len() == 0 {
return
}

View file

@ -0,0 +1,120 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
// addr evaluates an addressable expression n and returns a hole
// that represents storing into the represented location.
func (e *escape) addr(n ir.Node) hole {
if n == nil || ir.IsBlank(n) {
// Can happen in select case, range, maybe others.
return e.discardHole()
}
k := e.heapHole()
switch n.Op() {
default:
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
case ir.OLINKSYMOFFSET:
break
case ir.ODOT:
n := n.(*ir.SelectorExpr)
k = e.addr(n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
e.discard(n.Index)
if n.X.Type().IsArray() {
k = e.addr(n.X)
} else {
e.discard(n.X)
}
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.X)
e.assignHeap(n.Index, "key of map put", n)
}
return k
}
func (e *escape) addrs(l ir.Nodes) []hole {
var ks []hole
for _, n := range l {
ks = append(ks, e.addr(n))
}
return ks
}
func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// assignList evaluates the assignment dsts... = srcs....
func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
ks := e.addrs(dsts)
for i, k := range ks {
var src ir.Node
if i < len(srcs) {
src = srcs[i]
}
if dst := dsts[i]; dst != nil {
// Detect implicit conversion of uintptr to unsafe.Pointer when
// storing into reflect.{Slice,String}Header.
if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
continue
}
// Filter out some no-op assignments for escape analysis.
if src != nil && isSelfAssign(dst, src) {
if base.Flag.LowerM != 0 {
base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
}
k = e.discardHole()
}
}
e.expr(k.note(where, why), src)
}
e.reassigned(ks, where)
}
// reassigned marks the locations associated with the given holes as
// reassigned, unless the location represents a variable declared and
// assigned exactly once by where.
func (e *escape) reassigned(ks []hole, where ir.Node) {
if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
// Zero-value assignment for variable declared without an
// explicit initial value. Assume this is its initialization
// statement.
return
}
}
for _, k := range ks {
loc := k.dst
// Variables declared by range statements are assigned on every iteration.
if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
continue
}
loc.reassigned = true
}
}

View file

@ -0,0 +1,428 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow.
func (e *escape) call(ks []hole, call ir.Node) {
var init ir.Nodes
e.callCommon(ks, call, &init, nil)
if len(init) != 0 {
call.(*ir.CallExpr).PtrInit().Append(init...)
}
}
func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
// argumentFunc handles escape analysis of argument *argp to the
// given hole. If the function callee is known, pragma is the
// function's pragma flags; otherwise 0.
argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
e.rewriteArgument(argp, init, call, fn, wrapper)
e.expr(k.note(call, "call parameter"), *argp)
}
argument := func(k hole, argp *ir.Node) {
argumentFunc(nil, k, argp)
}
switch call.Op() {
default:
ir.Dump("esc", call)
base.Fatalf("unexpected call op: %v", call.Op())
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
call := call.(*ir.CallExpr)
typecheck.FixVariadicCall(call)
typecheck.FixMethodCall(call)
// Pick out the function callee, if statically known.
//
// TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
// functions (e.g., runtime builtins, method wrappers, generated
// eq/hash functions) don't have it set. Investigate whether
// that's a concern.
var fn *ir.Name
switch call.Op() {
case ir.OCALLFUNC:
// If we have a direct call to a closure (not just one we were
// able to statically resolve with ir.StaticValue), mark it as
// such so batch.outlives can optimize the flow results.
if call.X.Op() == ir.OCLOSURE {
call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
}
switch v := ir.StaticValue(call.X); v.Op() {
case ir.ONAME:
if v := v.(*ir.Name); v.Class == ir.PFUNC {
fn = v
}
case ir.OCLOSURE:
fn = v.(*ir.ClosureExpr).Func.Nname
case ir.OMETHEXPR:
fn = ir.MethodExprName(v)
}
case ir.OCALLMETH:
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
fntype := call.X.Type()
if fn != nil {
fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
for i, result := range fn.Type().Results().FieldSlice() {
e.expr(ks[i], ir.AsNode(result.Nname))
}
}
var recvp *ir.Node
if call.Op() == ir.OCALLFUNC {
// Evaluate callee function expression.
//
// Note: We use argument and not argumentFunc, because while
// call.X here may be an argument to runtime.{new,defer}proc,
// it's not an argument to fn itself.
argument(e.discardHole(), &call.X)
} else {
recvp = &call.X.(*ir.SelectorExpr).X
}
args := call.Args
if recv := fntype.Recv(); recv != nil {
if recvp == nil {
// Function call using method expression. Receiver argument is
// at the front of the regular arguments list.
recvp = &args[0]
args = args[1:]
}
argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
}
for i, param := range fntype.Params().FieldSlice() {
argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
}
case ir.OINLCALL:
call := call.(*ir.InlinedCallExpr)
e.stmts(call.Body)
for i, result := range call.ReturnVars {
k := e.discardHole()
if ks != nil {
k = ks[i]
}
e.expr(k, result)
}
case ir.OAPPEND:
call := call.(*ir.CallExpr)
args := call.Args
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, &args[0])
if call.IsDDD {
appendedK := e.discardHole()
if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, &args[1])
} else {
for i := 1; i < len(args); i++ {
argument(e.heapHole(), &args[i])
}
}
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), &call.X)
copiedK := e.discardHole()
if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
argument(copiedK, &call.Y)
case ir.OPANIC:
call := call.(*ir.UnaryExpr)
argument(e.heapHole(), &call.X)
case ir.OCOMPLEX:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), &call.X)
argument(e.discardHole(), &call.Y)
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
call := call.(*ir.CallExpr)
fixRecoverCall(call)
for i := range call.Args {
argument(e.discardHole(), &call.Args[i])
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
call := call.(*ir.UnaryExpr)
argument(e.discardHole(), &call.X)
case ir.OUNSAFEADD, ir.OUNSAFESLICE:
call := call.(*ir.BinaryExpr)
argument(ks[0], &call.X)
argument(e.discardHole(), &call.Y)
}
}
// goDeferStmt analyzes a "go" or "defer" statement.
//
// In the process, it also normalizes the statement to always use a
// simple function call with no arguments and no results. For example,
// it rewrites:
//
// defer f(x, y)
//
// into:
//
// x1, y1 := x, y
// defer func() { f(x1, y1) }()
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
k := e.heapHole()
if n.Op() == ir.ODEFER && e.loopDepth == 1 {
// Top-level defer arguments don't escape to the heap,
// but they do need to last until they're invoked.
k = e.later(e.discardHole())
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
n.SetEsc(ir.EscNever)
}
call := n.Call
init := n.PtrInit()
init.Append(ir.TakeInit(call)...)
e.stmts(*init)
// If the function is already a zero argument/result function call,
// just escape analyze it normally.
if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
clo.IsGoWrap = true
}
e.expr(k, call.X)
return
}
}
// Create a new no-argument function that we'll hand off to defer.
fn := ir.NewClosureFunc(n.Pos(), true)
fn.SetWrapper(true)
fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
fn.Body = []ir.Node{call}
clo := fn.OClosure
if n.Op() == ir.OGO {
clo.IsGoWrap = true
}
e.callCommon(nil, call, init, fn)
e.closures = append(e.closures, closure{e.spill(k, clo), clo})
// Create new top level call to closure.
n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
ir.WithFunc(e.curfn, func() {
typecheck.Stmt(n.Call)
})
}
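// The user-visible semantics the rewrite must preserve can be pictured at
// the source level (a minimal sketch with hypothetical code, not taken
// from any test in this CL):
//
//	x := 1
//	defer fmt.Println(x) // the argument is evaluated here ...
//	x = 2                // ... so this later write is not observed
//
// After goDeferStmt, the argument evaluation lives in the init list and
// the call is wrapped in a fresh zero-argument closure, roughly
//
//	x1 := x
//	defer func() { fmt.Println(x1) }()
//
// which still prints 1, matching the Go spec's rule that deferred call
// arguments are evaluated when the defer statement executes.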
// rewriteArgument rewrites the argument *argp of the given call expression.
// fn is the static callee function, if known.
// wrapper is the go/defer wrapper function for call, if any.
func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
var pragma ir.PragmaFlag
if fn != nil && fn.Func != nil {
pragma = fn.Func.Pragma
}
// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
// functions, so that ptr is kept alive and/or escaped as
// appropriate. unsafeUintptr also reports whether it modified arg0.
unsafeUintptr := func(arg0 ir.Node) bool {
if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
return false
}
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
return false
}
arg := arg0.(*ir.ConvExpr)
if !arg.X.Type().IsUnsafePtr() {
return false
}
// Create and declare a new pointer-typed temp variable.
tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
if pragma&ir.UintptrEscapes != 0 {
e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
}
if pragma&ir.UintptrKeepAlive != 0 {
call := call.(*ir.CallExpr)
// SSA implements CallExpr.KeepAlive using OpVarLive, which
// doesn't support PAUTOHEAP variables. I tried changing it to
// use OpKeepAlive, but that ran into issues of its own.
// For now, the easy solution is to explicitly copy to (yet
// another) new temporary variable.
keep := tmp
if keep.Class == ir.PAUTOHEAP {
keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
}
keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
call.KeepAlive = append(call.KeepAlive, keep)
}
return true
}
visit := func(pos src.XPos, argp *ir.Node) {
// Optimize a few common constant expressions. By leaving these
// untouched in the call expression, we let the wrapper handle
// evaluating them, rather than taking up closure context space.
switch arg := *argp; arg.Op() {
case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
return
case ir.ONAME:
if arg.(*ir.Name).Class == ir.PFUNC {
return
}
}
if unsafeUintptr(*argp) {
return
}
if wrapper != nil {
e.wrapExpr(pos, argp, init, call, wrapper)
}
}
// Peel away any slice lits.
if arg := *argp; arg.Op() == ir.OSLICELIT {
list := arg.(*ir.CompLitExpr).List
for i := range list {
visit(arg.Pos(), &list[i])
}
} else {
visit(call.Pos(), argp)
}
}
// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
// is non-nil, the variable will be captured for use within that
// function.
func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
if wrapper != nil {
// Currently, for "defer i.M()", if i is nil it panics at the point
// of the defer statement, not when the deferred function is called. We
// need to do the nil check outside of the wrapper.
if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
init.Append(typecheck.Stmt(check))
}
e.oldLoc(tmp).captured = true
tmp = ir.NewClosureVar(pos, wrapper, tmp)
}
*exprp = tmp
return tmp
}
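// A small sketch of the behavior that the OCHECKNIL above preserves
// (hypothetical example, assuming fmt is imported):
//
//	var i interface{ M() } // i == nil
//	defer i.M()            // panics here, at the defer statement ...
//	fmt.Println("reached") // ... so this line never runs
//
// Without the explicit nil check, the panic would be delayed until the
// wrapper closure runs at function exit, changing observable behavior.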
// copyExpr creates and returns a new temporary variable within fn;
// appends statements to init to declare and initialize it to expr;
// and escape analyzes the data flow if analyze is true.
func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
if ir.HasUniquePos(expr) {
pos = expr.Pos()
}
tmp := typecheck.TempAt(pos, fn, expr.Type())
stmts := []ir.Node{
ir.NewDecl(pos, ir.ODCL, tmp),
ir.NewAssignStmt(pos, tmp, expr),
}
typecheck.Stmts(stmts)
init.Append(stmts...)
if analyze {
e.newLoc(tmp, false)
e.stmts(stmts)
}
return tmp
}
// tagHole returns a hole for evaluating an argument passed to param.
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically known callee function,
// if any.
func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
}
if e.inMutualBatch(fn) {
return e.addr(ir.AsNode(param.Nname))
}
// Call to previously tagged function.
var tagKs []hole
esc := parseLeaks(param.Note)
if x := esc.Heap(); x >= 0 {
tagKs = append(tagKs, e.heapHole().shift(x))
}
if ks != nil {
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
tagKs = append(tagKs, ks[i].shift(x))
}
}
}
return e.teeHole(tagKs...)
}
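// For example (made-up tag values): if the parsed leaks for param report
// Heap() == 1 and Result(0) == 0, the argument gets
// teeHole(heapHole().shift(1), ks[0]): what the argument points to may
// reach the heap, and the argument itself may be returned as result 0.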

View file

@ -0,0 +1,37 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
// but for now it's the most convenient place for some rewrites.
// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
// adding an explicit frame pointer argument.
// If call is not an ORECOVER call, it's left unmodified.
func fixRecoverCall(call *ir.CallExpr) {
if call.Op() != ir.ORECOVER {
return
}
pos := call.Pos()
// FP is equal to caller's SP plus FixedFrameSize().
var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
if off := base.Ctxt.FixedFrameSize(); off != 0 {
fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
}
// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
call.SetOp(ir.ORECOVERFP)
call.Args = []ir.Node{typecheck.Expr(fp)}
}
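// At the source level the rewrite corresponds to turning a plain
//
//	recover()
//
// into a call that receives the caller's frame pointer explicitly,
// computed as getcallersp() plus FixedFrameSize (illustrative description;
// the ORECOVERFP form is lowered to the actual runtime call later).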

File diff suppressed because it is too large.

View file

@ -0,0 +1,335 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// expr models evaluating an expression n and flowing the result into
// hole k.
func (e *escape) expr(k hole, n ir.Node) {
if n == nil {
return
}
e.stmts(n.Init())
e.exprSkipInit(k, n)
}
func (e *escape) exprSkipInit(k hole, n ir.Node) {
if n == nil {
return
}
lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
if k.derefs >= 0 && !n.Type().HasPointers() {
k.dst = &e.blankLoc
}
switch n.Op() {
default:
base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
// nop
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n := n.(*ir.BinaryExpr)
e.discard(n.X)
e.discard(n.Y)
case ir.OANDAND, ir.OOROR:
n := n.(*ir.LogicalExpr)
e.discard(n.X)
e.discard(n.Y)
case ir.OADDR:
n := n.(*ir.AddrExpr)
e.expr(k.addr(n, "address-of"), n.X) // "address-of"
case ir.ODEREF:
n := n.(*ir.StarExpr)
e.expr(k.deref(n, "indirection"), n.X) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
n := n.(*ir.SelectorExpr)
e.expr(k.note(n, "dot"), n.X)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
n := n.(*ir.TypeAssertExpr)
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
n := n.(*ir.DynamicTypeAssertExpr)
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
// n.T doesn't need to be tracked; it always points to read-only storage.
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsArray() {
e.expr(k.note(n, "fixed-array-index-of"), n.X)
} else {
// TODO(mdempsky): Fix why reason text.
e.expr(k.deref(n, "dot of pointer"), n.X)
}
e.discard(n.Index)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.X)
e.discard(n.Index)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
n := n.(*ir.SliceExpr)
e.expr(k.note(n, "slice"), n.X)
e.discard(n.Low)
e.discard(n.High)
e.discard(n.Max)
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
e.unsafeValue(k, n.X)
} else {
e.expr(k, n.X)
}
case ir.OCONVIFACE, ir.OCONVIDATA:
n := n.(*ir.ConvExpr)
if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.X)
case ir.OEFACE:
n := n.(*ir.BinaryExpr)
// Note: n.X is not needed because it can never point to memory that might escape.
e.expr(k, n.Y)
case ir.OIDATA, ir.OSPTR:
n := n.(*ir.UnaryExpr)
e.expr(k, n.X)
case ir.OSLICE2ARRPTR:
// the slice pointer flows directly to the result
n := n.(*ir.ConvExpr)
e.expr(k, n.X)
case ir.ORECV:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
e.call([]hole{k}, n)
case ir.ONEW:
n := n.(*ir.UnaryExpr)
e.spill(k, n)
case ir.OMAKESLICE:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Len)
e.discard(n.Cap)
case ir.OMAKECHAN:
n := n.(*ir.MakeExpr)
e.discard(n.Len)
case ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Len)
case ir.OMETHVALUE:
// Flow the receiver argument to both the closure and
// to the receiver parameter.
n := n.(*ir.SelectorExpr)
closureK := e.spill(k, n)
m := n.Selection
// We don't know how the method value will be called
// later, so conservatively assume the result
// parameters all flow to the heap.
//
// TODO(mdempsky): Change ks into a callback, so that
// we don't have to create this slice?
var ks []hole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
name, _ := m.Nname.(*ir.Name)
paramK := e.tagHole(ks, name, m.Type.Recv())
e.expr(e.teeHole(paramK, closureK), n.X)
case ir.OPTRLIT:
n := n.(*ir.AddrExpr)
e.expr(e.spill(k, n), n.X)
case ir.OARRAYLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List {
if elt.Op() == ir.OKEY {
elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "array literal element"), elt)
}
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
k = e.spill(k, n)
for _, elt := range n.List {
if elt.Op() == ir.OKEY {
elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List {
e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
}
case ir.OMAPLIT:
n := n.(*ir.CompLitExpr)
e.spill(k, n)
// Map keys and values are always stored in the heap.
for _, elt := range n.List {
elt := elt.(*ir.KeyExpr)
e.assignHeap(elt.Key, "map literal key", n)
e.assignHeap(elt.Value, "map literal value", n)
}
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
k = e.spill(k, n)
e.closures = append(e.closures, closure{k, n})
if fn := n.Func; fn.IsHiddenClosure() {
for _, cv := range fn.ClosureVars {
if loc := e.oldLoc(cv); !loc.captured {
loc.captured = true
// Ignore reassignments to the variable in straightline code
// preceding the first capture by a closure.
if loc.loopDepth == e.loopDepth {
loc.reassigned = false
}
}
}
for _, n := range fn.Dcl {
// Add locations for local variables of the
// closure, if needed, in case we're not including
// the closure func in the batch for escape
// analysis (happens for escape analysis called
// from reflectdata.methodWrapper)
if n.Op() == ir.ONAME && n.Opt == nil {
e.with(fn).newLoc(n, false)
}
}
e.walkFunc(fn)
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
n := n.(*ir.ConvExpr)
e.spill(k, n)
e.discard(n.X)
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
e.discards(n.List)
case ir.ODYNAMICTYPE:
// Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
func (e *escape) unsafeValue(k hole, n ir.Node) {
if n.Type().Kind() != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
if k.addrtaken {
base.Fatalf("unexpected addrtaken")
}
e.stmts(n.Init())
switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.X.Type().IsUnsafePtr() {
e.expr(k, n.X)
} else {
e.discard(n.X)
}
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
if ir.IsReflectHeaderDataField(n) {
e.expr(k.deref(n, "reflect.Header.Data"), n.X)
} else {
e.discard(n.X)
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
e.unsafeValue(k, n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.X)
e.unsafeValue(k, n.Y)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.X)
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
e.discard(n.Y)
default:
e.exprSkipInit(e.discardHole(), n)
}
}
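// The tracking above matters for single-expression unsafe arithmetic such
// as (illustrative, with p and q of some pointer type *T):
//
//	q := (*T)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 8))
//
// where p's value still flows to q through the OADD and the two
// conversions. For shifts only the left operand is tracked; the shift
// count is discarded because it cannot carry a pointer (see #32959).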
// discard evaluates an expression n for side-effects, but discards
// its value.
func (e *escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
func (e *escape) discards(l ir.Nodes) {
for _, n := range l {
e.discard(n)
}
}
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
func (e *escape) spill(k hole, n ir.Node) hole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
}

View file

@ -0,0 +1,324 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"fmt"
)
// Below we implement the methods for walking the AST and recording
// data flow edges. Note that because a sub-expression might have
// side-effects, it's important to always visit the entire AST.
//
// For example, write either:
//
// if x {
// e.discard(n.Left)
// } else {
// e.value(k, n.Left)
// }
//
// or
//
// if x {
// k = e.discardHole()
// }
// e.value(k, n.Left)
//
// Do NOT write:
//
// // BAD: possibly loses side-effects within n.Left
// if !x {
// e.value(k, n.Left)
// }
// A location represents an abstract location that stores a Go
// variable.
type location struct {
n ir.Node // represented variable or expression, if any
curfn *ir.Func // enclosing function
edges []edge // incoming edges
loopDepth int // loopDepth at declaration
// resultIndex records the tuple index (starting at 1) for
// PPARAMOUT variables within their function's result type.
// For non-PPARAMOUT variables it's 0.
resultIndex int
// derefs and walkgen are used during walkOne to track the
// minimal dereferences from the walk root.
derefs int // >= -1
walkgen uint32
// dst and dstEdgeIdx track the next immediate assignment
// destination location during walkOne, along with the index
// of the edge pointing back to this location.
dst *location
dstEdgeIdx int
// queued is used by walkAll to track whether this location is
// in the walk queue.
queued bool
// escapes reports whether the represented variable's address
// escapes; that is, whether the variable must be heap
// allocated.
escapes bool
// transient reports whether the represented expression's
// address does not outlive the statement; that is, whether
// its storage can be immediately reused.
transient bool
// paramEsc records the represented parameter's leak set.
paramEsc leaks
captured bool // has a closure captured this variable?
reassigned bool // has this variable been reassigned?
addrtaken bool // has this variable's address been taken?
}
// An edge represents an assignment edge between two Go variables.
type edge struct {
src *location
derefs int // >= -1
notes *note
}
func (l *location) asHole() hole {
return hole{dst: l}
}
// leakTo records that parameter l leaks to sink.
func (l *location) leakTo(sink *location, derefs int) {
// If sink is a result parameter that doesn't escape (#44614)
// and we can fit return bits into the escape analysis tag,
// then record as a result leak.
if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
ri := sink.resultIndex - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
return
}
}
// Otherwise, record as heap leak.
l.paramEsc.AddHeap(derefs)
}
func (l *location) isName(c ir.Class) bool {
return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
}
// A hole represents a context for evaluating a Go
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
// with dst==x and derefs==2.
type hole struct {
dst *location
derefs int // >= -1
notes *note
// addrtaken indicates whether this context is taking the address of
// the expression, independent of whether the address will actually
// be stored into a variable.
addrtaken bool
}
type note struct {
next *note
where ir.Node
why string
}
func (k hole) note(where ir.Node, why string) hole {
if where == nil || why == "" {
base.Fatalf("note: missing where/why")
}
if base.Flag.LowerM >= 2 || logopt.Enabled() {
k.notes = &note{
next: k.notes,
where: where,
why: why,
}
}
return k
}
func (k hole) shift(delta int) hole {
k.derefs += delta
if k.derefs < -1 {
base.Fatalf("derefs underflow: %v", k.derefs)
}
k.addrtaken = delta < 0
return k
}
func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
if !t.IsInterface() && !types.IsDirectIface(t) {
k = k.shift(1)
}
return k.note(where, why)
}
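// Deref counts can be read off simple assignments (an illustrative
// sketch, not exhaustive):
//
//	x = **p   // p is evaluated with a hole {dst: x, derefs: 2}
//	x = *p    // derefs == 1, via deref
//	x = p     // derefs == 0
//	x = &p    // derefs == -1, via addr; addrtaken is set
//
// derefs never drops below -1 because Go has no way to take the address
// of an address, which is why shift treats anything smaller as a fatal
// error.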
func (b *batch) flow(k hole, src *location) {
if k.addrtaken {
src.addrtaken = true
}
dst := k.dst
if dst == &b.blankLoc {
return
}
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
if base.Flag.LowerM >= 2 || logopt.Enabled() {
pos := base.FmtPos(src.n.Pos())
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
src.escapes = true
return
}
// TODO(mdempsky): Deduplicate edges?
dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
}
func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
func (b *batch) oldLoc(n *ir.Name) *location {
if n.Canonical().Opt == nil {
base.Fatalf("%v has no location", n)
}
return n.Canonical().Opt.(*location)
}
func (e *escape) newLoc(n ir.Node, transient bool) *location {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
if n != nil && n.Type() != nil && n.Type().NotInHeap() {
base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
if n != nil && n.Op() == ir.ONAME {
if canon := n.(*ir.Name).Canonical(); n != canon {
base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
}
}
loc := &location{
n: n,
curfn: e.curfn,
loopDepth: e.loopDepth,
transient: transient,
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PPARAM && n.Curfn == nil {
// ok; hidden parameter
} else if n.Curfn != e.curfn {
base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
}
if n.Opt != nil {
base.Fatalf("%v already has a location", n)
}
n.Opt = loc
}
}
return loc
}
// teeHole returns a new hole that flows into each hole of ks,
// similar to the Unix tee(1) command.
func (e *escape) teeHole(ks ...hole) hole {
if len(ks) == 0 {
return e.discardHole()
}
if len(ks) == 1 {
return ks[0]
}
// TODO(mdempsky): Optimize if there's only one non-discard hole?
// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
// new temporary location ltmp, wire it into place, and return
// a hole for "ltmp = _".
loc := e.newLoc(nil, true)
for _, k := range ks {
// N.B., "p = &q" and "p = &tmp; tmp = q" are not
// semantically equivalent. To combine holes like "l1
// = _" and "l2 = &_", we'd need to wire them as "l1 =
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
// instead.
if k.derefs < 0 {
base.Fatalf("teeHole: negative derefs")
}
e.flow(k, loc)
}
return loc.asHole()
}
// later returns a new hole that flows into k, but some time later.
// Its main effect is to prevent immediate reuse of temporary
// variables introduced during Order.
func (e *escape) later(k hole) hole {
loc := e.newLoc(nil, false)
e.flow(k, loc)
return loc.asHole()
}
// Fmt is called from node printing to print information about escape analysis results.
func Fmt(n ir.Node) string {
text := ""
switch n.Esc() {
case ir.EscUnknown:
break
case ir.EscHeap:
text = "esc(h)"
case ir.EscNone:
text = "esc(no)"
case ir.EscNever:
text = "esc(N)"
default:
text = fmt.Sprintf("esc(%d)", n.Esc())
}
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
if text != "" {
text += " "
}
text += fmt.Sprintf("ld(%d)", loc.loopDepth)
}
}
return text
}

View file

@ -0,0 +1,106 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"math"
"strings"
)
const numEscResults = 7
// A leaks represents a set of assignment flows from a parameter
// to the heap or to any of its function's (first numEscResults)
// result parameters.
type leaks [1 + numEscResults]uint8
// Empty reports whether l is an empty set (i.e., no assignment flows).
func (l leaks) Empty() bool { return l == leaks{} }
// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
func (l leaks) Heap() int { return l.get(0) }
// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
func (l leaks) Result(i int) int { return l.get(1 + i) }
// AddHeap adds an assignment flow from l to the heap.
func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
func (l leaks) get(i int) int { return int(l[i]) - 1 }
func (l *leaks) add(i, derefs int) {
if old := l.get(i); old < 0 || derefs < old {
l.set(i, derefs)
}
}
func (l *leaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
base.Fatalf("invalid derefs count: %v", derefs)
}
if v > math.MaxUint8 {
v = math.MaxUint8
}
l[i] = uint8(v)
}
// Optimize removes result flow paths that are equal in length or
// longer than the shortest heap flow path.
func (l *leaks) Optimize() {
// If we have a path to the heap, then there's no use in
// keeping equal or longer paths elsewhere.
if x := l.Heap(); x >= 0 {
for i := 0; i < numEscResults; i++ {
if l.Result(i) >= x {
l.setResult(i, -1)
}
}
}
}
var leakTagCache = map[leaks]string{}
// Encode converts l into a binary string for export data.
func (l leaks) Encode() string {
if l.Heap() == 0 {
// Space optimization: empty string encodes more
// efficiently in export data.
return ""
}
if s, ok := leakTagCache[l]; ok {
return s
}
n := len(l)
for n > 0 && l[n-1] == 0 {
n--
}
s := "esc:" + string(l[:n])
leakTagCache[l] = s
return s
}
// parseLeaks parses a binary string representing a leaks set.
func parseLeaks(s string) leaks {
var l leaks
if !strings.HasPrefix(s, "esc:") {
l.AddHeap(0)
return l
}
copy(l[:], s[4:])
return l
}
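// A short usage sketch of the API above (the concrete deref counts are
// made up for illustration):
//
//	var l leaks
//	l.AddResult(0, 2) // flows to result 0 through two dereferences
//	l.AddHeap(1)      // also flows to the heap through one dereference
//	l.Optimize()      // drops the result flow: it is no shorter than the heap flow
//	tag := l.Encode() // stored in the parameter's Note and read back by tagHole
//	heapDerefs := parseLeaks(tag).Heap() // == 1 on the importing side
//
// An empty tag is intentionally meaningful: Encode returns "" when the
// value leaks to the heap with zero dereferences, and parseLeaks maps any
// string without the "esc:" prefix back to exactly that leak set.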

View file

@ -0,0 +1,289 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/internal/src"
"fmt"
"strings"
)
// walkAll computes the minimal dereferences between all pairs of
// locations.
func (b *batch) walkAll() {
// We use a work queue to keep track of locations that we need
// to visit, and repeatedly walk until we reach a fixed point.
//
// We walk once from each location (including the heap), and
// then re-enqueue each location on its transition from
// transient->!transient and !escapes->escapes, which can each
// happen at most once. So we take Θ(len(b.allLocs)) walks.
// LIFO queue, has enough room for b.allLocs and b.heapLoc.
todo := make([]*location, 0, len(b.allLocs)+1)
enqueue := func(loc *location) {
if !loc.queued {
todo = append(todo, loc)
loc.queued = true
}
}
for _, loc := range b.allLocs {
enqueue(loc)
}
enqueue(&b.heapLoc)
var walkgen uint32
for len(todo) > 0 {
root := todo[len(todo)-1]
todo = todo[:len(todo)-1]
root.queued = false
walkgen++
b.walkOne(root, walkgen, enqueue)
}
}
// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
// The data flow graph has negative edges (from addressing
// operations), so we use the Bellman-Ford algorithm. However,
// we don't have to worry about infinite negative cycles since
// we bound intermediate dereference counts to 0.
root.walkgen = walkgen
root.derefs = 0
root.dst = nil
todo := []*location{root} // LIFO queue
for len(todo) > 0 {
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
derefs := l.derefs
// If l.derefs < 0, then l's address flows to root.
addressOf := derefs < 0
if addressOf {
// For a flow path like "root = &l; l = x",
// l's address flows to root, but x's does
// not. We recognize this by lower bounding
// derefs at 0.
derefs = 0
// If l's address flows to a non-transient
// location, then l can't be transiently
// allocated.
if !root.transient && l.transient {
l.transient = false
enqueue(l)
}
}
if b.outlives(root, l) {
// l's value flows to root. If l is a function
// parameter and root is the heap or a
// corresponding result parameter, then record
// that value flow for tagging the function
// later.
if l.isName(ir.PPARAM) {
if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
}
}
l.leakTo(root, derefs)
}
// If l's address flows somewhere that
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
if logopt.Enabled() || base.Flag.LowerM >= 2 {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
enqueue(l)
continue
}
}
for i, edge := range l.edges {
if edge.src.escapes {
continue
}
d := derefs + edge.derefs
if edge.src.walkgen != walkgen || edge.src.derefs > d {
edge.src.walkgen = walkgen
edge.src.derefs = d
edge.src.dst = l
edge.src.dstEdgeIdx = i
todo = append(todo, edge.src)
}
}
}
}
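// A worked example of the relaxation above, using two hypothetical
// assignments:
//
//	p = &x   // gives p an edge {src: x, derefs: -1}
//	y = *p   // gives y an edge {src: p, derefs: +1}
//
// Walking from root y reaches p with derefs 1 and then x with
// 1 + (-1) == 0, so only x's value flows to y and x need not escape.
// Walking from root p reaches x with derefs -1, i.e. addressOf is true,
// so if p outlives x, x is marked escaping and re-enqueued.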
// explainPath prints an explanation of how src flows to the walk root.
func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
visited := make(map[*location]bool)
pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if visited[src] {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
}
visited[src] = true
dst := src.dst
edge := &dst.edges[src.dstEdgeIdx]
if edge.src != src {
base.Fatalf("path inconsistency: %v != %v", edge.src, src)
}
explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
if dst == root {
break
}
src = dst
}
return explanation
}
func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
ops := "&"
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
print := base.Flag.LowerM >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
if print {
fmt.Printf("%s:%s\n", pos, flow)
}
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
epos = srcloc.n.Pos()
}
var e_curfn *ir.Func // TODO(mdempsky): Fix.
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
return explanation
}
func (b *batch) explainLoc(l *location) string {
if l == &b.heapLoc {
return "{heap}"
}
if l.n == nil {
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
}
// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (b *batch) outlives(l, other *location) bool {
// The heap outlives everything.
if l.escapes {
return true
}
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
if l.isName(ir.PPARAMOUT) {
// Exception: Directly called closures can return
// locations allocated outside of them without forcing
// them to the heap. For example:
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
return false
}
return true
}
// If l and other are within the same function, then l
// outlives other if it was declared outside other's loop
// scope. For example:
//
// var l *int
// for {
// l = new(int)
// }
if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
return true
}
// If other is declared within a child closure of where l is
// declared, then l outlives it. For example:
//
// var l *int
// func() {
// l = new(int)
// }
if containsClosure(l.curfn, other.curfn) {
return true
}
return false
}
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Func) bool {
// Common case.
if f == c {
return false
}
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
fn := f.Sym().Name
cn := c.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
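// For example, a closure declared inside a function whose symbol is named
// "Foo" gets a name like "Foo.func1", which passes the prefix-plus-dot
// test above, while an unrelated function named "Foobar" fails it because
// the character after "Foo" is not a '.'.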

View file

@ -0,0 +1,207 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"fmt"
)
// stmt evaluates a single Go statement.
func (e *escape) stmt(n ir.Node) {
if n == nil {
return
}
lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
if base.Flag.LowerM > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
}
e.stmts(n.Init())
switch n.Op() {
default:
base.Fatalf("unexpected stmt: %v", n)
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
// nop
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
// TODO(mdempsky): Handle dead code?
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
e.stmts(n.List)
case ir.ODCL:
// Record loop depth at declaration.
n := n.(*ir.Decl)
if !ir.IsBlank(n.X) {
e.dcl(n.X)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
switch e.labels[n.Label] {
case nonlooping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
}
case looping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
}
e.loopDepth++
default:
base.Fatalf("label missing tag")
}
delete(e.labels, n.Label)
case ir.OIF:
n := n.(*ir.IfStmt)
e.discard(n.Cond)
e.block(n.Body)
e.block(n.Else)
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
e.loopDepth++
e.discard(n.Cond)
e.stmt(n.Post)
e.block(n.Body)
e.loopDepth--
case ir.ORANGE:
// for Key, Value = range X { Body }
n := n.(*ir.RangeStmt)
// X is evaluated outside the loop.
tmp := e.newLoc(nil, false)
e.expr(tmp.asHole(), n.X)
e.loopDepth++
ks := e.addrs([]ir.Node{n.Key, n.Value})
if n.X.Type().IsArray() {
e.flow(ks[1].note(n, "range"), tmp)
} else {
e.flow(ks[1].deref(n, "range-deref"), tmp)
}
e.reassigned(ks, n)
e.block(n.Body)
e.loopDepth--
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
var ks []hole
if guard.Tag != nil {
for _, cas := range n.Cases {
cv := cas.Var
k := e.dcl(cv) // type switch variables have no ODCL.
if cv.Type().HasPointers() {
ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
}
e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
} else {
e.discard(n.Tag)
}
for _, cas := range n.Cases {
e.discards(cas.List)
e.block(cas.Body)
}
case ir.OSELECT:
n := n.(*ir.SelectStmt)
for _, cas := range n.Cases {
e.stmt(cas.Comm)
e.block(cas.Body)
}
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
n := n.(*ir.UnaryExpr)
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
n := n.(*ir.SendStmt)
e.discard(n.Chan)
e.assignHeap(n.Value, "send", n)
case ir.OAS:
n := n.(*ir.AssignStmt)
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
case ir.OASOP:
n := n.(*ir.AssignOpStmt)
// TODO(mdempsky): Worry about OLSH/ORSH?
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
case ir.OAS2:
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
case ir.OAS2DOTTYPE: // v, ok = x.(type)
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
e.stmts(n.Rhs[0].Init())
ks := e.addrs(n.Lhs)
e.call(ks, n.Rhs[0])
e.reassigned(ks, n)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
results := e.curfn.Type().Results().FieldSlice()
dsts := make([]ir.Node, len(results))
for i, res := range results {
dsts[i] = res.Nname.(*ir.Name)
}
e.assignList(dsts, n.Results, "return", n)
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
e.goDeferStmt(n)
case ir.OTAILCALL:
// TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
}
}
func (e *escape) stmts(l ir.Nodes) {
for _, n := range l {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
func (e *escape) block(l ir.Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
}
func (e *escape) dcl(n *ir.Name) hole {
if n.Curfn != e.curfn || n.IsClosureVar() {
base.Fatalf("bad declaration of %v", n)
}
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
}

View file

@ -0,0 +1,215 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
)
func isSliceSelfAssign(dst, src ir.Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis,
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to ODEREF/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise the base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
var dstX ir.Node
switch dst.Op() {
default:
return false
case ir.ODEREF:
dst := dst.(*ir.StarExpr)
dstX = dst.X
case ir.ODOTPTR:
dst := dst.(*ir.SelectorExpr)
dstX = dst.X
}
if dstX.Op() != ir.ONAME {
return false
}
// src is a slice operation.
switch src.Op() {
case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
// OK.
case ir.OSLICEARR, ir.OSLICE3ARR:
// Since arrays are embedded into containing object,
// slice of non-pointer array will introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
src := src.(*ir.SliceExpr)
if src.X.Op() == ir.OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
var baseX ir.Node
switch base := src.(*ir.SliceExpr).X; base.Op() {
default:
return false
case ir.ODEREF:
base := base.(*ir.StarExpr)
baseX = base.X
case ir.ODOTPTR:
base := base.(*ir.SelectorExpr)
baseX = base.X
}
if baseX.Op() != ir.ONAME {
return false
}
// dst and src reference the same base ONAME.
return dstX.(*ir.Name) == baseX.(*ir.Name)
}
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src ir.Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op() != src.Op() {
return false
}
// The expression prefix must be both "safe" and identical.
switch dst.Op() {
case ir.ODOT, ir.ODOTPTR:
// Safe trailing accessors that are permitted to differ.
dst := dst.(*ir.SelectorExpr)
src := src.(*ir.SelectorExpr)
return ir.SameSafeExpr(dst.X, src.X)
case ir.OINDEX:
dst := dst.(*ir.IndexExpr)
src := src.(*ir.IndexExpr)
if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
return false
}
return ir.SameSafeExpr(dst.X, src.X)
default:
return false
}
}
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n ir.Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
// TODO(rsc): It seems like it should be possible to replace this with
// an ir.Any looking for any op that's not the ones in the case statement.
// But that produces changes in the compiled output detected by buildall.
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return false
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
n := n.(*ir.BinaryExpr)
return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
case ir.OCONVNOP, ir.OCONV:
n := n.(*ir.ConvExpr)
return mayAffectMemory(n.X)
case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
n := n.(*ir.UnaryExpr)
return mayAffectMemory(n.X)
case ir.ODOT, ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
return mayAffectMemory(n.X)
case ir.ODEREF:
n := n.(*ir.StarExpr)
return mayAffectMemory(n.X)
default:
return true
}
}
// HeapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it does not need to be.
func HeapAllocReason(n ir.Node) string {
if n == nil || n.Type() == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
return ""
}
}
if n.Type().Width > ir.MaxStackVarSize {
return "too large for stack"
}
if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMAKESLICE {
n := n.(*ir.MakeExpr)
r := n.Cap
if r == nil {
r = n.Len
}
if !ir.IsSmallIntConst(r) {
return "non-constant size"
}
if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
return ""
}
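// A sketch of how these limits surface in user code, with sizes chosen to
// exceed the current ir.MaxStackVarSize and ir.MaxImplicitStackVarSize
// constants (the exact thresholds are implementation details):
//
//	var big [64 << 20]byte  // declared variable: "too large for stack"
//	p := new([1 << 20]byte) // implicit allocation: "too large for stack"
//	s := make([]byte, n)    // n not a constant: "non-constant size"
//
// In each case HeapAllocReason returns a non-empty reason and the value
// cannot be stack allocated.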

View file

@ -119,38 +119,51 @@ func compileFunctions() {
})
}
// We queue up a goroutine per function that needs to be
// compiled, but require them to grab an available worker ID
// before doing any substantial work to limit parallelism.
workerIDs := make(chan int, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
// By default, we perform work right away on the current goroutine
// as the solo worker.
queue := func(work func(int)) {
work(0)
}
if nWorkers := base.Flag.LowerC; nWorkers > 1 {
// For concurrent builds, we create a goroutine per task, but
// require them to hold a unique worker ID while performing work
// to limit parallelism.
workerIDs := make(chan int, nWorkers)
for i := 0; i < nWorkers; i++ {
workerIDs <- i
}
var wg sync.WaitGroup
var asyncCompile func(*ir.Func)
asyncCompile = func(fn *ir.Func) {
wg.Add(1)
queue = func(work func(int)) {
go func() {
worker := <-workerIDs
ssagen.Compile(fn, worker)
work(worker)
workerIDs <- worker
// Done compiling fn. Schedule its closures for compilation.
for _, closure := range fn.Closures {
asyncCompile(closure)
}
wg.Done()
}()
}
}
var wg sync.WaitGroup
var compile func([]*ir.Func)
compile = func(fns []*ir.Func) {
wg.Add(len(fns))
for _, fn := range fns {
fn := fn
queue(func(worker int) {
ssagen.Compile(fn, worker)
compile(fn.Closures)
wg.Done()
})
}
}
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
base.Ctxt.InParallel = true
for _, fn := range compilequeue {
asyncCompile(fn)
}
compile(compilequeue)
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
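// The queueing scheme above generalizes to a small reusable pattern: a
// buffered channel of worker IDs acts as a counting semaphore that also
// hands each task a stable slot number. A minimal standalone sketch
// (hypothetical helper, not part of this package; assumes sync is
// imported):
//
//	func parallel(nWorkers int, tasks []func(worker int)) {
//		ids := make(chan int, nWorkers)
//		for i := 0; i < nWorkers; i++ {
//			ids <- i
//		}
//		var wg sync.WaitGroup
//		wg.Add(len(tasks))
//		for _, task := range tasks {
//			task := task
//			go func() {
//				id := <-ids // block until a worker slot is free
//				task(id)
//				ids <- id // return the slot
//				wg.Done()
//			}()
//		}
//		wg.Wait()
//	}
//
// Unlike the compiler's version, this sketch does not let tasks enqueue
// more work; compileFunctions needs that because compiling a function can
// discover closures that must be compiled too.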

View file

@ -5,46 +5,16 @@
package gc
import (
"fmt"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
"fmt"
"go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range typecheck.Target.Exports {
// Must catch it here rather than in Export(), because the type may
// not be fully set (still TFORW) when Export() is called.
if n.Type() != nil && n.Type().HasTParam() {
base.Fatalf("Cannot (yet) export a generic type: %v", n)
}
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
typecheck.WriteExports(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
if base.Debug.Export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
@ -79,83 +49,3 @@ func dumpasmhdr() {
b.Close()
}
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
inline.Inline_Flood(n, typecheck.Export)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.AllMethods().Slice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}

View file

@ -32,6 +32,7 @@ import (
"log"
"os"
"runtime"
"sort"
)
func hidePanic() {
@ -159,9 +160,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
if base.Debug.SoftFloat != 0 {
if buildcfg.Experiment.RegabiArgs {
log.Fatalf("softfloat mode with GOEXPERIMENT=regabiargs not implemented ")
}
ssagen.Arch.SoftFloat = true
}
@ -181,23 +179,41 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
typecheck.InitUniverse()
typecheck.InitRuntime()
// Parse and typecheck input.
noder.LoadPackage(flag.Args())
dwarfgen.RecordPackageName()
// Prepare for backend processing. This must happen before pkginit,
// because it generates itabs for initializing global variables.
ssagen.InitConfig()
// Build init task.
if initTask := pkginit.Task(); initTask != nil {
typecheck.Export(initTask)
}
// Stability quirk: sort top-level declarations, so we're not
// sensitive to the order that functions are added. In particular,
// the order that noder+typecheck add function closures is very
// subtle, and not important to reproduce.
//
// Note: This needs to happen after pkginit.Task, otherwise it risks
// changing the order in which top-level variables are initialized.
if base.Debug.UnifiedQuirks != 0 {
s := typecheck.Target.Decls
sort.SliceStable(s, func(i, j int) bool {
return s[i].Pos().Before(s[j].Pos())
})
}
// Eliminate some obviously dead code.
// Must happen after typechecking.
for _, n := range typecheck.Target.Decls {
@ -252,6 +268,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "escapes")
escape.Funcs(typecheck.Target.Decls)
// TODO(mdempsky): This is a hack. We need a proper, global work
// queue for scheduling function compilation so components don't
// need to adjust their behavior depending on when they're called.
reflectdata.AfterGlobalEscapeAnalysis = true
// Collect information for go:nowritebarrierrec
// checking. This must happen before transforming closures during Walk
// We'll do the final check after write barriers are
@ -260,17 +281,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ssagen.EnableNoWriteBarrierRecCheck()
}
// Prepare for SSA compilation.
// This must be before CompileITabs, because CompileITabs
// can trigger function compilation.
typecheck.InitRuntime()
ssagen.InitConfig()
// Just before compilation, compile itabs found on
// the right side of OCONVIFACE so that methods
// can be de-virtualized during compilation.
ir.CurFunc = nil
reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.

View file

@ -7,6 +7,7 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/noder"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
@ -103,7 +104,7 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
func dumpCompilerObj(bout *bio.Writer) {
printObjHeader(bout)
dumpexport(bout)
noder.WriteExports(bout)
}
func dumpdata() {
@ -116,7 +117,7 @@ func dumpdata() {
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
numPTabs, numITabs := reflectdata.CountTabs()
numPTabs := reflectdata.CountPTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
@ -148,6 +149,7 @@ func dumpdata() {
if reflectdata.ZeroSize > 0 {
zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
zero.Set(obj.AttrStatic, true)
}
staticdata.WriteFuncSyms()
@ -156,13 +158,10 @@ func dumpdata() {
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
newNumPTabs, newNumITabs := reflectdata.CountTabs()
newNumPTabs := reflectdata.CountPTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
if newNumITabs != numITabs {
base.Fatalf("itabs changed after compile functions loop")
}
}
func dumpLinkerObj(bout *bio.Writer) {

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -156,7 +155,7 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err = iImportData(packages, data[1:], id)
pkg, err = ImportData(packages, string(data[1:]), id)
} else {
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
}

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -10,7 +9,6 @@ import (
"cmd/compile/internal/types2"
"fmt"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -64,7 +62,7 @@ const maxTime = 30 * time.Second
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
list, err := ioutil.ReadDir(dirname)
list, err := os.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
@ -92,7 +90,7 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
}
func mktmpdir(t *testing.T) string {
tmpdir, err := ioutil.TempDir("", "gcimporter_test")
tmpdir, err := os.MkdirTemp("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
@ -142,7 +140,7 @@ func TestVersionHandling(t *testing.T) {
}
const dir = "./testdata/versions"
list, err := ioutil.ReadDir(dir)
list, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
@ -195,7 +193,7 @@ func TestVersionHandling(t *testing.T) {
// create file with corrupted export data
// 1) read file
data, err := ioutil.ReadFile(filepath.Join(dir, name))
data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
@ -212,7 +210,7 @@ func TestVersionHandling(t *testing.T) {
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
ioutil.WriteFile(filename, data, 0666)
os.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
@ -261,8 +259,7 @@ var importedObjectTests = []struct {
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
// go/types.Type has grown much larger - excluded for now
// {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
{"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
func TestImportedTypes(t *testing.T) {
@ -457,17 +454,17 @@ func TestIssue13898(t *testing.T) {
t.Fatal("go/types not found")
}
// look for go/types2.Object type
// look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types2.Named)
if !ok {
t.Fatalf("go/types2.Object type is %v; wanted named type", typ)
t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
// lookup go/types2.Object.Pkg method
// lookup go/types.Object.Pkg method
m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
t.Fatalf("go/types2.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -9,7 +8,6 @@
package importer
import (
"bytes"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"encoding/binary"
@ -19,10 +17,11 @@ import (
"io"
"math/big"
"sort"
"strings"
)
type intReader struct {
*bytes.Reader
*strings.Reader
path string
}
@ -42,6 +41,21 @@ func (r *intReader) uint64() uint64 {
return i
}
// Keep this in sync with constants in iexport.go.
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
// TODO: before release, change this back to 2.
iexportVersionGenerics = iexportVersionPosCol
iexportVersionCurrent = iexportVersionGenerics
)
type ident struct {
pkg string
name string
}
const predeclReserved = 32
type itag uint64
@ -57,6 +71,9 @@ const (
signatureType
structType
interfaceType
typeParamType
instType
unionType
)
const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
@ -65,8 +82,8 @@ const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func iImportData(imports map[string]*types2.Package, data []byte, path string) (_ int, pkg *types2.Package, err error) {
const currentVersion = 1
func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@ -78,14 +95,18 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
}
}()
r := &intReader{bytes.NewReader(data), path}
r := &intReader{strings.NewReader(data), path}
version = int64(r.uint64())
switch version {
case currentVersion, 0:
case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
default:
if version > iexportVersionGenerics {
errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
} else {
errorf("unknown iexport format version %d", version)
}
}
sLen := int64(r.uint64())
dLen := int64(r.uint64())
@ -96,16 +117,20 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
r.Seek(sLen+dLen, io_SeekCurrent)
p := iimporter{
exportVersion: version,
ipath: path,
version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types2.Package),
posBaseCache: make(map[uint64]*syntax.PosBase),
declData: declData,
pkgIndex: make(map[*types2.Package]map[string]uint64),
typCache: make(map[uint64]types2.Type),
// Separate map for typeparams, keyed by their package and unique
// name (name with subscript).
tparamIndex: make(map[ident]types2.Type),
}
for i, pt := range predeclared {
@ -117,18 +142,23 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
pkgHeight := int(r.uint64())
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types2.NewPackage(pkgPath, pkgName)
pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
} else {
if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
if pkg.Height() != pkgHeight {
errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
}
}
p.pkgCache[pkgPathOff] = pkg
@ -153,10 +183,6 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
p.doDecl(localpkg, name)
}
for _, typ := range p.interfaceList {
typ.Complete()
}
// record all referenced packages as imports
list := append(([]*types2.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
@ -165,21 +191,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
// package was imported completely and without errors
localpkg.MarkComplete()
consumed, _ := r.Seek(0, io_SeekCurrent)
return int(consumed), localpkg, nil
return localpkg, nil
}
type iimporter struct {
exportVersion int64
ipath string
version int
stringData []byte
stringCache map[uint64]string
stringData string
pkgCache map[uint64]*types2.Package
posBaseCache map[uint64]*syntax.PosBase
declData []byte
declData string
pkgIndex map[*types2.Package]map[string]uint64
typCache map[uint64]types2.Type
tparamIndex map[ident]types2.Type
interfaceList []*types2.Interface
}
@ -199,24 +226,21 @@ func (p *iimporter) doDecl(pkg *types2.Package, name string) {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off:])
r.declReader = *bytes.NewReader(p.declData[off:])
r.declReader = *strings.NewReader(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
var x [binary.MaxVarintLen64]byte
n := copy(x[:], p.stringData[off:])
slen, n := binary.Uvarint(p.stringData[off:])
slen, n := binary.Uvarint(x[:n])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
return p.stringData[spos : spos+slen]
}
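// Layout sketch (an assumption for illustration, not part of this change):
// each entry in the export data's string section is a uvarint length
// followed by that many bytes, so decoding the string at offset off is
// roughly:
//
//	buf := []byte(data[off:])
//	slen, w := binary.Uvarint(buf) // length and varint width; w <= 0 means corrupt data
//	s := string(buf[w : uint64(w)+slen])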
func (p *iimporter) pkgAt(off uint64) *types2.Package {
@ -228,6 +252,16 @@ func (p *iimporter) pkgAt(off uint64) *types2.Package {
return nil
}
func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
if posBase, ok := p.posBaseCache[off]; ok {
return posBase
}
filename := p.stringAt(off)
posBase := syntax.NewFileBase(filename)
p.posBaseCache[off] = posBase
return posBase
}
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
@ -241,7 +275,7 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off-predeclReserved:])
r.declReader = *bytes.NewReader(p.declData[off-predeclReserved:])
r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
@ -252,9 +286,9 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
type importReader struct {
p *iimporter
declReader bytes.Reader
declReader strings.Reader
currPkg *types2.Package
prevFile string
prevPosBase *syntax.PosBase
prevLine int64
prevColumn int64
}
@ -274,16 +308,28 @@ func (r *importReader) obj(name string) {
r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
case 'F', 'G':
var tparams []*types2.TypeName
if tag == 'G' {
tparams = r.tparamList()
}
sig := r.signature(nil)
sig.SetTParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
case 'T':
case 'T', 'U':
var tparams []*types2.TypeName
if tag == 'U' {
tparams = r.tparamList()
}
		// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
if tag == 'U' {
named.SetTParams(tparams)
}
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
@ -296,10 +342,43 @@ func (r *importReader) obj(name string) {
recv := r.param()
msig := r.signature(recv)
// If the receiver has any targs, set those as the
// rparams of the method (since those are the
// typeparams being used in the method sig/body).
targs := baseType(msig.Recv().Type()).TArgs()
if len(targs) > 0 {
rparams := make([]*types2.TypeName, len(targs))
for i, targ := range targs {
rparams[i] = types2.AsTypeParam(targ).Obj()
}
msig.SetRParams(rparams)
}
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'P':
// We need to "declare" a typeparam in order to have a name that
// can be referenced recursively (if needed) in the type param's
// bound.
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected type param type")
}
name0, sub := parseSubscript(name)
tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
t := (*types2.Checker)(nil).NewTypeParam(tn, nil)
if sub == 0 {
errorf("missing subscript")
}
t.SetId(sub)
// To handle recursive references to the typeparam within its
// bound, save the partial type in tparamIndex before reading the bounds.
id := ident{r.currPkg.Name(), name}
r.p.tparamIndex[id] = t
t.SetConstraint(r.typ())
case 'V':
typ := r.typ()
@ -439,12 +518,11 @@ func (r *importReader) pos() syntax.Pos {
r.posv0()
}
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
return syntax.Pos{}
}
// TODO(gri) fix this
// return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
return syntax.Pos{}
return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
}
func (r *importReader) posv0() {
@ -454,7 +532,7 @@ func (r *importReader) posv0() {
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevPosBase = r.posBase()
r.prevLine = l
}
}
@ -466,7 +544,7 @@ func (r *importReader) posv1() {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
r.prevFile = r.string()
r.prevPosBase = r.posBase()
}
}
}
@ -482,6 +560,7 @@ func isInterface(t types2.Type) bool {
func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
func (r *importReader) doType(base *types2.Named) types2.Type {
switch k := r.kind(); k {
@ -554,6 +633,47 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
typ := types2.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
case typeParamType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected type param type")
}
pkg, name := r.qualifiedIdent()
id := ident{pkg.Name(), name}
if t, ok := r.p.tparamIndex[id]; ok {
// We're already in the process of importing this typeparam.
return t
}
// Otherwise, import the definition of the typeparam now.
r.p.doDecl(pkg, name)
return r.p.tparamIndex[id]
case instType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected instantiation type")
}
pos := r.pos()
len := r.uint64()
targs := make([]types2.Type, len)
for i := range targs {
targs[i] = r.typ()
}
baseType := r.typ()
// The imported instantiated type doesn't include any methods, so
// we must always use the methods of the base (orig) type.
var check *types2.Checker // TODO provide a non-nil *Checker
t := check.Instantiate(pos, baseType, targs, nil, false)
return t
case unionType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected instantiation type")
}
terms := make([]*types2.Term, r.uint64())
for i := range terms {
terms[i] = types2.NewTerm(r.bool(), r.typ())
}
return types2.NewUnion(terms)
}
}
@ -568,6 +688,19 @@ func (r *importReader) signature(recv *types2.Var) *types2.Signature {
return types2.NewSignature(recv, params, results, variadic)
}
func (r *importReader) tparamList() []*types2.TypeName {
n := r.uint64()
if n == 0 {
return nil
}
xs := make([]*types2.TypeName, n)
for i := range xs {
typ := r.typ()
xs[i] = types2.AsTypeParam(typ).Obj()
}
return xs
}
func (r *importReader) paramList() *types2.Tuple {
xs := make([]*types2.Var, r.uint64())
for i := range xs {
@ -610,3 +743,33 @@ func (r *importReader) byte() byte {
}
return x
}
func baseType(typ types2.Type) *types2.Named {
// pointer receivers are never types2.Named types
if p, _ := typ.(*types2.Pointer); p != nil {
typ = p.Elem()
}
// receiver base types are always (possibly generic) types2.Named types
n, _ := typ.(*types2.Named)
return n
}
func parseSubscript(name string) (string, uint64) {
// Extract the subscript value from the type param name. We export
// and import the subscript value, so that all type params have
// unique names.
sub := uint64(0)
startsub := -1
for i, r := range name {
if '₀' <= r && r < '₀'+10 {
if startsub == -1 {
startsub = i
}
sub = sub*10 + uint64(r-'₀')
}
}
if startsub >= 0 {
name = name[:startsub]
}
return name, sub
}
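// Illustrative behavior (hypothetical inputs): type parameter names are
// exported with a Unicode-subscript suffix so that every type param has a
// unique name, and parseSubscript splits that suffix back off, e.g.
//
//	parseSubscript("T₁")     // -> ("T", 1)
//	parseSubscript("Elem₁₂") // -> ("Elem", 12)
//	parseSubscript("T")      // -> ("T", 0); callers treat subscript 0 as an error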

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -120,6 +119,9 @@ var predeclared = []types2.Type{
// used internally by gc; never used by this package or in .a files
anyType{},
// comparable
types2.Universe.Lookup("comparable").Type(),
}
type anyType struct{}

View file

@ -179,6 +179,8 @@ func CanInline(fn *ir.Func) {
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
CanDelayResults: canDelayResults(fn),
}
if base.Flag.LowerM > 1 {
@ -191,60 +193,36 @@ func CanInline(fn *ir.Func) {
}
}
// Inline_Flood marks n's inline body for export and recursively ensures
// all called functions are marked too.
func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
if n == nil {
return
// canDelayResults reports whether inlined calls to fn can delay
// declaring the result parameter until the "return" statement.
func canDelayResults(fn *ir.Func) bool {
// We can delay declaring+initializing result parameters if:
// (1) there's exactly one "return" statement in the inlined function;
// (2) it's not an empty return statement (#44355); and
// (3) the result parameters aren't named.
nreturns := 0
ir.VisitList(fn.Body, func(n ir.Node) {
if n, ok := n.(*ir.ReturnStmt); ok {
nreturns++
if len(n.Results) == 0 {
nreturns++ // empty return statement (case 2)
}
if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
}
fn := n.Func
if fn == nil {
base.Fatalf("Inline_Flood: missing Func on %v", n)
}
if fn.Inl == nil {
return
})
if nreturns != 1 {
return false // not exactly one return statement (case 1)
}
if fn.ExportInline() {
return
}
fn.SetExportInline(true)
typecheck.ImportedBody(fn)
var doFlood func(n ir.Node)
doFlood = func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
Inline_Flood(ir.MethodExprName(n), exportsym)
case ir.ONAME:
n := n.(*ir.Name)
switch n.Class {
case ir.PFUNC:
Inline_Flood(n, exportsym)
exportsym(n)
case ir.PEXTERN:
exportsym(n)
}
case ir.OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
case ir.OCLOSURE:
// VisitList doesn't visit closure bodies, so force a
// recursive call to VisitList on the body of the closure.
ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
// temporaries for return values.
for _, param := range fn.Type().Results().FieldSlice() {
if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
return false // found a named result parameter (case 3)
}
}
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
return true
}
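// Illustrative cases (an assumption, not taken from this change):
//
//	func a(x int) int { return x + 1 }                     // one non-empty return, unnamed result: delayable
//	func b(x int) (r int) { r = x + 1; return }            // empty "return" with a named result: not delayable
//	func c(x int) int { if x > 0 { return 1 }; return 0 }  // more than one return: not delayable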
// hairyVisitor visits a function body to determine its inlining
@ -295,6 +273,19 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
}
}
}
if n.X.Op() == ir.OMETHEXPR {
if meth := ir.MethodExprName(n.X); meth != nil {
fn := meth.Func
if fn != nil && types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
// runtime.heapBits.next even though
// it calls slow-path
// runtime.heapBits.nextArena.
break
}
}
}
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
@ -309,28 +300,8 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
n := n.(*ir.CallExpr)
t := n.X.Type()
if t == nil {
base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
}
fn := ir.MethodExprName(n.X).Func
if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
// runtime.heapBits.next even though
// it calls slow-path
// runtime.heapBits.nextArena.
break
}
if fn.Inl != nil {
v.budget -= fn.Inl.Cost
break
}
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
// Things that are too hairy, irrespective of the budget
case ir.OCALL, ir.OCALLINTER:
@ -445,7 +416,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
v.budget++
case ir.OCALLPART, ir.OSLICELIT:
case ir.OMETHVALUE, ir.OSLICELIT:
v.budget-- // Hack for toolstash -cmp.
case ir.OMETHEXPR:
@ -499,9 +470,6 @@ func inlcopy(n ir.Node) ir.Node {
// x.Func.Body for iexport and local inlining.
oldfn := x.Func
newfn := ir.NewFunc(oldfn.Pos())
if oldfn.ClosureCalled() {
newfn.SetClosureCalled(true)
}
m.(*ir.ClosureExpr).Func = newfn
newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
// XXX OK to share fn.Type() ??
@ -544,37 +512,6 @@ func InlineCalls(fn *ir.Func) {
ir.CurFunc = savefn
}
// Turn an OINLCALL into a statement.
func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
n := ir.NewBlockStmt(inlcall.Pos(), nil)
n.List = inlcall.Init()
n.List.Append(inlcall.Body.Take()...)
return n
}
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
r := n.ReturnVars[0]
return ir.InitExpr(append(n.Init(), n.Body...), r)
}
// Turn the rlist (with the return values) of the OINLCALL in
// n into an expression list lumping the ninit and body
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
s := n.ReturnVars
s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0])
return s
}
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
@ -597,7 +534,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
switch call := n.Call; call.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
case ir.OCALLMETH:
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
case ir.OCALLFUNC:
call := call.(*ir.CallExpr)
call.NoInline = true
}
@ -607,43 +546,37 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.OCLOSURE:
return n
case ir.OCALLMETH:
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if n.X.Op() == ir.OMETHEXPR {
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
n := n.(*ir.CallExpr)
if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
if meth := ir.MethodExprName(n.X); meth != nil {
s := meth.Sym()
if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
}
}
lno := ir.SetPos(n)
ir.EditChildren(n, edit)
if as := n; as.Op() == ir.OAS2FUNC {
as := as.(*ir.AssignListStmt)
if as.Rhs[0].Op() == ir.OINLCALL {
as.Rhs = inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))
as.SetOp(ir.OAS2)
as.SetTypecheck(0)
n = typecheck.Stmt(as)
}
}
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
switch n.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
n := n.(*ir.CallExpr)
if n.NoInline {
return n
}
}
case ir.OCALLMETH:
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
var call *ir.CallExpr
switch n.Op() {
case ir.OCALLFUNC:
call = n.(*ir.CallExpr)
call := n.(*ir.CallExpr)
if call.NoInline {
break
}
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
}
@ -653,38 +586,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
case ir.OCALLMETH:
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
if call.X.Type() == nil {
base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
}
n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
}
base.Pos = lno
if n.Op() == ir.OINLCALL {
ic := n.(*ir.InlinedCallExpr)
switch call.Use {
default:
ir.Dump("call", call)
base.Fatalf("call missing use")
case ir.CallUseExpr:
n = inlconv2expr(ic)
case ir.CallUseStmt:
n = inlconv2stmt(ic)
case ir.CallUseList:
// leave for caller to convert
}
}
return n
}
@ -740,7 +645,12 @@ var inlgen int
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
// NewInline allows the inliner implementation to be overridden.
// If it returns nil, the legacy inliner will handle this call
// instead.
var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
// If n is a OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
@ -793,38 +703,90 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
defer func() {
inlMap[fn] = false
}()
if base.Debug.TypecheckInl == 0 {
typecheck.ImportedBody(fn)
typecheck.FixVariadicCall(n)
parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
sym := fn.Linksym()
inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
if base.Flag.GenDwarfInl > 0 {
if !sym.WasInlined() {
base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
sym.Set(obj.AttrWasInlined, true)
}
}
// We have a function node, and it has an inlineable body.
if base.Flag.LowerM > 1 {
fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
} else if base.Flag.LowerM != 0 {
if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
res := NewInline(n, fn, inlIndex)
if res == nil {
res = oldInline(n, fn, inlIndex)
}
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
ir.EditChildren(res, edit)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
}
return res
}
// CalleeEffects appends any side effects from evaluating callee to init.
func CalleeEffects(init *ir.Nodes, callee ir.Node) {
for {
switch callee.Op() {
case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
return // done
case ir.OCONVNOP:
conv := callee.(*ir.ConvExpr)
init.Append(ir.TakeInit(conv)...)
callee = conv.X
case ir.OINLCALL:
ic := callee.(*ir.InlinedCallExpr)
init.Append(ir.TakeInit(ic)...)
init.Append(ic.Body.Take()...)
callee = ic.SingleResult()
default:
base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
}
}
}
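// Illustration (an assumption): when the callee expression itself carries
// side effects, e.g. it is the result of another inlined call, CalleeEffects
// hoists those effects into init and peels the callee down to a plain
// ONAME/OCLOSURE/OMETHEXPR:
//
//	getF()() // if getF was inlined, its body is appended to init and the
//	         // callee becomes getF's single result value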
// oldInline creates an InlinedCallExpr to replace the given call
// expression. fn is the callee function to be inlined. inlIndex is
// the inlining tree position index, for use with src.NewInliningBase
// when rewriting positions.
func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
if base.Debug.TypecheckInl == 0 {
typecheck.ImportedBody(fn)
}
SSADumpInline(fn)
ninit := n.Init()
ninit := call.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// may contain side effects. Make sure to preserve these,
// if necessary (#42703).
if n.Op() == ir.OCALLFUNC {
callee := n.X
for callee.Op() == ir.OCONVNOP {
conv := callee.(*ir.ConvExpr)
ninit.Append(ir.TakeInit(conv)...)
callee = conv.X
}
if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
base.Fatalf("unexpected callee expression: %v", callee)
}
if call.Op() == ir.OCALLFUNC {
CalleeEffects(&ninit, call.X)
}
// Make temp names to use instead of the originals.
@ -854,25 +816,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
}
// We can delay declaring+initializing result parameters if:
// (1) there's exactly one "return" statement in the inlined function;
// (2) it's not an empty return statement (#44355); and
// (3) the result parameters aren't named.
delayretvars := true
nreturns := 0
ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
if n, ok := n.(*ir.ReturnStmt); ok {
nreturns++
if len(n.Results) == 0 {
delayretvars = false // empty return statement (case 2)
}
}
})
if nreturns != 1 {
delayretvars = false // not exactly one return statement (case 1)
}
// temporaries for return values.
var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
@ -882,7 +825,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@ -905,61 +847,23 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
// Assign arguments to the parameters' temp names.
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Def = true
if n.Op() == ir.OCALLMETH {
sel := n.X.(*ir.SelectorExpr)
if sel.X == nil {
base.Fatalf("method call without receiver: %+v", n)
if call.Op() == ir.OCALLMETH {
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
as.Rhs.Append(sel.X)
}
as.Rhs.Append(n.Args...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *ir.AssignStmt
as.Rhs.Append(call.Args...)
if recv := fn.Type().Recv(); recv != nil {
as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD {
as.Lhs.Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
x := len(as.Lhs)
for len(as.Lhs) < len(as.Rhs) {
as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
}
varargs := as.Lhs[x:]
vas = ir.NewAssignStmt(base.Pos, nil, nil)
vas.X = inlParam(param, vas, inlvars)
if len(varargs) == 0 {
vas.Y = typecheck.NodNil()
vas.Y.SetType(param.Type)
} else {
lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
lit.List = varargs
vas.Y = lit
}
}
if len(as.Rhs) != 0 {
ninit.Append(typecheck.Stmt(as))
}
if vas != nil {
ninit.Append(typecheck.Stmt(vas))
}
if !delayretvars {
if !fn.Inl.CanDelayResults {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
@ -972,39 +876,20 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlgen++
parent := -1
if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
sym := fn.Linksym()
newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
inlMark.SetPos(n.Pos().WithIsStmt())
inlMark.Index = int64(newIndex)
ninit.Append(inlMark)
if base.Flag.GenDwarfInl > 0 {
if !sym.WasInlined() {
base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
sym.Set(obj.AttrWasInlined, true)
}
}
ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
subst := inlsubst{
retlabel: retlabel,
retvars: retvars,
delayretvars: delayretvars,
inlvars: inlvars,
defnMarker: ir.NilExpr{},
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
newInlIndex: inlIndex,
fn: fn,
}
subst.edit = subst.node
@ -1026,26 +911,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
//dumplist("ninit post", ninit);
call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
*call.PtrInit() = ninit
call.Body = body
call.ReturnVars = retvars
call.SetType(n.Type())
call.SetTypecheck(1)
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
ir.EditChildren(call, edit)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
}
return call
res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
res.SetInit(ninit)
res.SetType(call.Type())
res.SetTypecheck(1)
return res
}
// Every time we expand a function we generate a new set of tmpnames,
@ -1058,8 +928,10 @@ func inlvar(var_ *ir.Name) *ir.Name {
n := typecheck.NewName(var_.Sym())
n.SetType(var_.Type())
n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
n.SetAutoTemp(var_.AutoTemp())
n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
@ -1071,18 +943,7 @@ func inlvar(var_ *ir.Name) *ir.Name {
func retvar(t *types.Field, i int) *ir.Name {
n := typecheck.NewName(typecheck.LookupNum("~R", i))
n.SetType(t.Type)
n.Class = ir.PAUTO
n.SetUsed(true)
n.Curfn = ir.CurFunc // the calling function, not the called one
ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
func argvar(t *types.Type, i int) ir.Node {
n := typecheck.NewName(typecheck.LookupNum("~arg", i))
n.SetType(t.Elem())
n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
n.Curfn = ir.CurFunc // the calling function, not the called one
@ -1099,10 +960,6 @@ type inlsubst struct {
// Temporary result variables.
retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
inlvars map[*ir.Name]*ir.Name
// defnMarker is used to mark a Node for reassignment.
// inlsubst.clovar set this during creating new ONAME.
@ -1124,6 +981,10 @@ type inlsubst struct {
newclofn *ir.Func
fn *ir.Func // For debug -- the func that is being inlined
// If true, then don't update source positions during substitution
// (retain old source positions).
noPosUpdate bool
}
// list inlines a list of nodes.
@ -1153,17 +1014,21 @@ func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// TODO(danscales): want to get rid of this shallow copy, with code like the
// following, but it is hard to copy all the necessary flags in a maintainable way.
// m := ir.NewNameAt(n.Pos(), n.Sym())
// m.Class = n.Class
// m.SetType(n.Type())
// m.SetTypecheck(1)
//if n.IsClosureVar() {
// m.SetIsClosureVar(true)
//}
m := &ir.Name{}
*m = *n
m := ir.NewNameAt(n.Pos(), n.Sym())
m.Class = n.Class
m.SetType(n.Type())
m.SetTypecheck(1)
if n.IsClosureVar() {
m.SetIsClosureVar(true)
}
if n.Addrtaken() {
m.SetAddrtaken(true)
}
if n.Used() {
m.SetUsed(true)
}
m.Defn = n.Defn
m.Curfn = subst.newclofn
switch defn := n.Defn.(type) {
@ -1218,31 +1083,23 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// closure does the necessary substitutions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
m := ir.Copy(n)
m.SetPos(subst.updatedPos(m.Pos()))
ir.EditChildren(m, subst.edit)
// Prior to the subst edit, set a flag in the inlsubst to
	// indicate that we don't want to update the source positions in
// the new closure. If we do this, it will appear that the closure
// itself has things inlined into it, which is not the case. See
// issue #46234 for more details.
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
// The following is similar to funcLit
oldfn := n.Func
newfn := ir.NewFunc(oldfn.Pos())
// These three lines are not strictly necessary, but just to be clear
// that new function needs to redo typechecking and inlinability.
newfn.SetTypecheck(0)
newfn.SetInlinabilityChecked(false)
newfn.Inl = nil
newfn.SetIsHiddenClosure(true)
newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
newfn.Nname.Func = newfn
newfn := ir.NewClosureFunc(oldfn.Pos(), true)
// Ntype can be nil for -G=3 mode.
if oldfn.Nname.Ntype != nil {
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
}
newfn.Nname.Defn = newfn
m.(*ir.ClosureExpr).Func = newfn
newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
@ -1292,13 +1149,9 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
m.SetTypecheck(0)
if oldfn.ClosureCalled() {
typecheck.Callee(m)
} else {
typecheck.Expr(m)
}
return m
newclo := newfn.OClosure
newclo.SetInit(subst.list(n.Init()))
return typecheck.Expr(newclo)
}
// node recursively copies a node from the saved pristine body of the
@ -1380,7 +1233,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
as.Rhs = subst.list(n.Results)
if subst.delayretvars {
if subst.fn.Inl.CanDelayResults {
for _, n := range as.Lhs {
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
n.Name().Defn = as
@ -1445,6 +1298,9 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
if subst.noPosUpdate {
return xpos
}
pos := base.Ctxt.PosTable.Pos(xpos)
oldbase := pos.Base() // can be nil
newbase := subst.bases[oldbase]

View file

@ -142,17 +142,6 @@ func (n *BinaryExpr) SetOp(op Op) {
}
}
// A CallUse records how the result of the call is used:
type CallUse byte
const (
_ CallUse = iota
CallUseExpr // single expression result is used
CallUseList // list of results are used
CallUseStmt // results not used - call is a statement
)
// A CallExpr is a function call X(Args).
type CallExpr struct {
miniExpr
@ -161,9 +150,7 @@ type CallExpr struct {
Args Nodes
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
Use CallUse
NoInline bool
PreserveClosure bool // disable directClosureCall for this call
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
@ -181,8 +168,12 @@ func (n *CallExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
case OAPPEND,
OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
ODELETE,
OGETG, OGETCALLERPC, OGETCALLERSP,
OMAKE, OPRINT, OPRINTN,
ORECOVER, ORECOVERFP:
n.op = op
}
}
@ -192,8 +183,10 @@ type ClosureExpr struct {
miniExpr
Func *Func `mknode:"-"`
Prealloc *Name
IsGoWrap bool // whether this is wrapper closure of a go statement
}
// Deprecated: Use NewClosureFunc instead.
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
@ -277,7 +270,7 @@ func (n *ConvExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
n.op = op
}
}
@ -323,26 +316,24 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
// A StructKeyExpr is an Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
Field *types.Sym
Field *types.Field
Value Node
Offset int64
}
func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
n.Offset = types.BADWIDTH
return n
}
func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
miniExpr
Body Nodes
ReturnVars Nodes
ReturnVars Nodes // must be side-effect free
}
func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
@ -354,6 +345,21 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
return n
}
func (n *InlinedCallExpr) SingleResult() Node {
if have := len(n.ReturnVars); have != 1 {
base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have)
}
if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() {
// If the type of the call is not a shape, but the type of the return value
// is a shape, we need to do an implicit conversion, so the real type
// of n is maintained.
r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0])
r.SetTypecheck(1)
return r
}
return n.ReturnVars[0]
}
// A LogicalExpr is an expression X Op Y where Op is && or ||.
// It is separate from BinaryExpr to make room for statements
// that must be executed before Y but after X.
@ -448,6 +454,20 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
t.SetNod(n)
}
// A RawOrigExpr represents an arbitrary Go expression as a string value.
// When printed in diagnostics, the string value is written out exactly as-is.
type RawOrigExpr struct {
miniExpr
Raw string
}
func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
n := &RawOrigExpr{Raw: raw}
n.pos = pos
n.op = op
return n
}
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
@ -495,9 +515,14 @@ func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type)
type SelectorExpr struct {
miniExpr
X Node
// Sel is the name of the field or method being selected, without (in the
// case of methods) any preceding type specifier. If the field/method is
	// exported, then the Sym uses the local package regardless of the package
// of the containing type.
Sel *types.Sym
// The actual selected field - may not be filled in until typechecking.
Selection *types.Field
Prealloc *Name // preallocated storage for OCALLPART, if any
Prealloc *Name // preallocated storage for OMETHVALUE, if any
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
@ -511,7 +536,7 @@ func (n *SelectorExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR:
case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
n.op = op
}
}
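// Illustration (not from this change) of which op a selector gets for a
// concrete type T with method M:
//
//	var t T
//	t.M()    // direct call: ODOTMETH (ODOTINTER for interface receivers)
//	f := t.M // method value, not called: OMETHVALUE
//	g := T.M // method expression: OMETHEXPR (the receiver becomes the first argument)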
@ -652,6 +677,38 @@ func (n *TypeAssertExpr) SetOp(op Op) {
}
}
// A DynamicTypeAssertExpr asserts that X is of dynamic type T.
type DynamicTypeAssertExpr struct {
miniExpr
X Node
// N = not an interface
// E = empty interface
// I = nonempty interface
// For E->N, T is a *runtime.type for N
// For I->N, T is a *runtime.itab for N+I
// For E->I, T is a *runtime.type for I
// For I->I, ditto
// For I->E, T is a *runtime.type for interface{} (unnecessary, but just to fill in the slot)
// For E->E, ditto
T Node
}
func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, t Node) *DynamicTypeAssertExpr {
n := &DynamicTypeAssertExpr{X: x, T: t}
n.pos = pos
n.op = op
return n
}
func (n *DynamicTypeAssertExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2:
n.op = op
}
}
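// Source-level sketch (illustrative assumption): inside a generic function, a
// type assertion against a type parameter cannot be resolved statically, so it
// is lowered to a dynamic assertion with a runtime type/itab operand, e.g.
//
//	func first[T any](xs []any) (T, bool) {
//		v, ok := xs[0].(T) // ODYNAMICDOTTYPE2
//		return v, ok
//	}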
// A UnaryExpr is a unary expression Op X,
// or Op(X) for a builtin function that does not end up being a call.
type UnaryExpr struct {
@ -678,6 +735,11 @@ func (n *UnaryExpr) SetOp(op Op) {
}
}
// Probably temporary: using Implicit() flag to mark generic function nodes that
// are called to make getGfInfo analysis easier in one pre-order pass.
func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
// An InstExpr is a generic function or type instantiation.
type InstExpr struct {
miniExpr
@ -773,6 +835,11 @@ func StaticValue(n Node) Node {
continue
}
if n.Op() == OINLCALL {
n = n.(*InlinedCallExpr).SingleResult()
continue
}
n1 := staticValue1(n)
if n1 == nil {
return n
@ -1071,7 +1138,7 @@ func MethodExprName(n Node) *Name {
// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
func MethodExprFunc(n Node) *types.Field {
switch n.Op() {
case ODOTMETH, OMETHEXPR, OCALLPART:
case ODOTMETH, OMETHEXPR, OMETHVALUE:
return n.(*SelectorExpr).Selection
}
base.Fatalf("unexpected node: %v (%v)", n, n.Op())

View file

@ -185,6 +185,7 @@ var OpPrec = []int{
OCLOSE: 8,
OCOMPLIT: 8,
OCONVIFACE: 8,
OCONVIDATA: 8,
OCONVNOP: 8,
OCONV: 8,
OCOPY: 8,
@ -237,7 +238,7 @@ var OpPrec = []int{
ODOTTYPE: 8,
ODOT: 8,
OXDOT: 8,
OCALLPART: 8,
OMETHVALUE: 8,
OMETHEXPR: 8,
OPLUS: 7,
ONOT: 7,
@ -546,7 +547,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n = nn.X
continue
}
case OCONV, OCONVNOP, OCONVIFACE:
case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.X
@ -567,6 +568,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
if n, ok := n.(*RawOrigExpr); ok {
fmt.Fprint(s, n.Raw)
return
}
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
@ -709,6 +715,10 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "... argument")
return
}
if typ := n.Type(); typ != nil {
fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
return
}
if n.Ntype != nil {
fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
return
@ -752,7 +762,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR:
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
n := n.(*SelectorExpr)
exprFmt(n.X, s, nprec)
if n.Sel == nil {
@ -804,6 +814,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OCONV,
OCONVIFACE,
OCONVIDATA,
OCONVNOP,
OBYTES2STR,
ORUNES2STR,
@ -854,6 +865,15 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
fmt.Fprintf(s, "(%.v)", n.Args)
case OINLCALL:
n := n.(*InlinedCallExpr)
// TODO(mdempsky): Print Init and/or Body?
if len(n.ReturnVars) == 1 {
fmt.Fprintf(s, "%v", n.ReturnVars[0])
return
}
fmt.Fprintf(s, "(.%v)", n.ReturnVars)
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
n := n.(*MakeExpr)
if n.Cap != nil {
@ -986,7 +1006,7 @@ func (l Nodes) Format(s fmt.State, verb rune) {
// Dump prints the message s followed by a debug dump of n.
func Dump(s string, n Node) {
fmt.Printf("%s [%p]%+v\n", s, n, n)
fmt.Printf("%s%+v\n", s, n)
}
// DumpList prints the message s followed by a debug dump of each node in the list.
@ -1114,16 +1134,21 @@ func dumpNodeHeader(w io.Writer, n Node) {
}
if n.Pos().IsKnown() {
pfx := ""
fmt.Fprint(w, " # ")
switch n.Pos().IsStmt() {
case src.PosNotStmt:
pfx = "_" // "-" would be confusing
fmt.Fprint(w, "_") // "-" would be confusing
case src.PosIsStmt:
pfx = "+"
fmt.Fprint(w, "+")
}
pos := base.Ctxt.PosTable.Pos(n.Pos())
for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
if i > 0 {
fmt.Fprint(w, ",")
}
// TODO(mdempsky): Print line pragma details too.
file := filepath.Base(pos.Filename())
fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
}
}

View file

@ -9,6 +9,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
)
// A Func corresponds to a single function in a Go program
@ -39,14 +40,14 @@ import (
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
// when it is called directly and by OCALLPART otherwise.
// when it is called directly and by OMETHVALUE otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
// Each OCALLPART ends up being implemented as a new
// Each OMETHVALUE ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
// The OCALLPART uses n.Func to record the linkage to
// The OMETHVALUE uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
// pointer from the Func back to the OCALLPART.
// pointer from the Func back to the OMETHVALUE.
type Func struct {
miniNode
Body Nodes
@ -166,6 +167,11 @@ type Inline struct {
// another package is imported.
Dcl []*Name
Body []Node
// CanDelayResults reports whether it's safe for the inliner to delay
// initializing the result parameters until immediately before the
// "return" statement.
CanDelayResults bool
}
// A Mark represents a scope boundary.
@ -196,7 +202,7 @@ const (
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
funcClosureCalled // closure is only immediately called
funcClosureCalled // closure is only immediately called; used by escape analysis
)
type SymAndPos struct {
@ -272,6 +278,17 @@ func PkgFuncName(f *Func) string {
var CurFunc *Func
// WithFunc invokes do with CurFunc and base.Pos set to curfn and
// curfn.Pos(), respectively, and then restores their previous values
// before returning.
func WithFunc(curfn *Func, do func()) {
oldfn, oldpos := CurFunc, base.Pos
defer func() { CurFunc, base.Pos = oldfn, oldpos }()
CurFunc, base.Pos = curfn, curfn.Pos()
do()
}
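// Usage sketch (an assumption): WithFunc scopes work to a particular function
// without callers having to save and restore the globals by hand.
//
//	ir.WithFunc(fn, func() {
//		// Here ir.CurFunc == fn and base.Pos == fn.Pos().
//	})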
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
@ -279,7 +296,7 @@ func FuncSymName(s *types.Sym) string {
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
}
n.Class = PFUNC
@ -296,8 +313,8 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
}
}
@ -306,3 +323,109 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
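// Examples (illustrative): a closure is trivial exactly when it captures no
// variables from its enclosing function.
//
//	f := func() { println("hi") } // trivial: ClosureVars is empty
//	x := 1
//	g := func() { println(x) }    // not trivial: captures x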
// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int32
// closureName generates a new unique name for a closure within outerfn.
func closureName(outerfn *Func) *types.Sym {
pkg := types.LocalPkg
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfn != nil {
if outerfn.OClosure != nil {
prefix = ""
}
pkg = outerfn.Sym().Pkg
outer = FuncName(outerfn)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !IsBlank(outerfn.Nname) {
gen = &outerfn.Closgen
}
}
*gen++
return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
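// Naming sketch (the exact spelling is an assumption): closures are numbered
// per enclosing function, so the literals in foo below get symbols along the
// lines of "foo.func1" and "foo.func2", while a package-scope literal is
// numbered against the global counter with the "glob." prefix instead.
//
//	func foo() {
//		_ = func() {} // ~ foo.func1
//		_ = func() {} // ~ foo.func2
//	}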
// NewClosureFunc creates a new Func to represent a function literal.
// If hidden is true, then the closure is marked hidden (i.e., as a
// function literal contained within another function, rather than a
// package-scope variable initialization expression).
func NewClosureFunc(pos src.XPos, hidden bool) *Func {
fn := NewFunc(pos)
fn.SetIsHiddenClosure(hidden)
fn.Nname = NewNameAt(pos, BlankNode.Sym())
fn.Nname.Func = fn
fn.Nname.Defn = fn
fn.OClosure = NewClosureExpr(pos, fn)
return fn
}
// NameClosure generates a unique name for the given function literal,
// which must have appeared within outerfn.
func NameClosure(clo *ClosureExpr, outerfn *Func) {
fn := clo.Func
if fn.IsHiddenClosure() != (outerfn != nil) {
base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
}
name := fn.Nname
if !IsBlank(name) {
base.FatalfAt(clo.Pos(), "closure already named: %v", name)
}
name.SetSym(closureName(outerfn))
MarkFunc(name)
}
// UseClosure checks that the given function literal has been set up
// correctly, and then returns it as an expression.
// It must be called after clo.Func.ClosureVars has been set.
func UseClosure(clo *ClosureExpr, pkg *Package) Node {
fn := clo.Func
name := fn.Nname
if IsBlank(name) {
base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
}
// Caution: clo.Typecheck() is still 0 when UseClosure is called by
// tcClosure.
if fn.Typecheck() != 1 || name.Typecheck() != 1 {
base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
}
if clo.Type() == nil || name.Type() == nil {
base.FatalfAt(fn.Pos(), "missing types: %v", fn)
}
if !types.Identical(clo.Type(), name.Type()) {
base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
}
if base.Flag.W > 1 {
s := fmt.Sprintf("new closure func: %v", fn)
Dump(s, fn)
}
if pkg != nil {
pkg.Decls = append(pkg.Decls, fn)
}
if false && IsTrivialClosure(clo) {
// TODO(mdempsky): Investigate if we can/should optimize this
// case. walkClosure already handles it later, but it could be
// useful to recognize earlier (e.g., it might allow multiple
// inlined calls to a function to share a common trivial closure
// func, rather than cloning it for each inlined call).
}
return clo
}

View file

@ -358,39 +358,74 @@ func (n *Name) Byval() bool {
return n.Canonical().flags&nameByval != 0
}
// NewClosureVar returns a new closure variable for fn to refer to
// outer variable n.
func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
c := NewNameAt(pos, n.Sym())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n.Canonical()
c.Outer = n
c.SetType(n.Type())
c.SetTypecheck(n.Typecheck())
fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
// NewHiddenParam returns a new hidden parameter for fn with the given
// name and type.
func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name {
if fn.OClosure != nil {
base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures")
}
fn.SetNeedctxt(true)
// Create a fake parameter, disassociated from any real function, to
// pretend to capture.
fake := NewNameAt(pos, sym)
fake.Class = PPARAM
fake.SetType(typ)
fake.SetByval(true)
return NewClosureVar(pos, fn, fake)
}
// CaptureName returns a Name suitable for referring to n from within function
// fn or from the package block if fn is nil. If n is a free variable declared
// within a function that encloses fn, then CaptureName returns a closure
// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
// returns n.
// within a function that encloses fn, then CaptureName returns the closure
// variable that refers to n within fn, creating it if necessary.
// Otherwise, it simply returns n.
func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
if n.Op() != ONAME || n.Curfn == nil {
return n // okay to use directly
}
if n.IsClosureVar() {
base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
}
if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
return n // okay to use directly
c := n.Innermost
if c == nil {
c = n
}
if c.Curfn == fn {
return c
}
if fn == nil {
base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
}
c := n.Innermost
if c != nil && c.Curfn == fn {
return c
}
// Do not have a closure var for the active closure yet; make one.
c = NewNameAt(pos, n.Sym())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n
c = NewClosureVar(pos, fn, c)
// Link into list of active closure variables.
// Popped from list in FinishCaptureNames.
c.Outer = n.Innermost
n.Innermost = c
fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
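// Source-level illustration (not from this change): when compiling the
// literal returned by counter below, the reference to x inside it is
// rewritten by CaptureName into a closure variable whose Defn/Outer chain
// leads back to the x declared in counter.
//
//	func counter() func() int {
//		x := 0
//		return func() int {
//			x++ // this x is a closure var appended to the literal's ClosureVars
//			return x
//		}
//	}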

View file

@ -159,7 +159,6 @@ const (
OCALLFUNC // X(Args) (function call f(args))
OCALLMETH // X(Args) (direct method call x.Method(args))
OCALLINTER // X(Args) (interface method call x.Method(args))
OCALLPART // X.Sel (method expression x.Method, not called)
OCAP // cap(X)
OCLOSE // close(X)
OCLOSURE // func Type { Func.Closure.Body } (func literal)
@ -171,6 +170,7 @@ const (
OPTRLIT // &X (X is composite literal)
OCONV // Type(X) (type conversion)
OCONVIFACE // Type(X) (type conversion, to interface)
OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
OCONVNOP // Type(X) (type conversion, no effect)
OCOPY // copy(X, Y)
ODCL // var X (declares X of type X.Type)
@ -237,6 +237,7 @@ const (
OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
ORECOVER // recover()
ORECOVERFP // recover(Args) w/ explicit FP argument
ORECV // <-X
ORUNESTR // Type(X) (Type is string, X is rune)
OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
@ -249,14 +250,16 @@ const (
OSIZEOF // unsafe.Sizeof(X)
OUNSAFEADD // unsafe.Add(X, Y)
OUNSAFESLICE // unsafe.Slice(X, Y)
OMETHEXPR // method expression
OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
OMETHVALUE // X.Sel (method expression t.Method, not called)
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Label]
// OCASE: case List: Body (List==nil means default)
// For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// for nil) or an ODYNAMICTYPE indicating a runtime type for generics.
// If a type-switch variable is specified, Var is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
@ -317,9 +320,16 @@ const (
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a name
// opcodes for generics
ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter)
ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch)
// arch-specific opcodes
OTAILCALL // tail call to another function
OGETG // runtime.getg() (read g pointer)
OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
OEND
)
@ -436,7 +446,7 @@ func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
return res
}
type PragmaFlag int16
type PragmaFlag uint16
const (
// Func pragmas.
@ -447,6 +457,7 @@ const (
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
@ -563,7 +574,7 @@ func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
base.Fatalf("OXDOT in walk")
base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X

View file

@ -463,6 +463,62 @@ func (n *Decl) editChildren(edit func(Node) Node) {
}
}
func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *DynamicType) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *DynamicType) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.X != nil && do(n.X) {
return true
}
if n.ITab != nil && do(n.ITab) {
return true
}
return false
}
func (n *DynamicType) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.X != nil {
n.X = edit(n.X).(Node)
}
if n.ITab != nil {
n.ITab = edit(n.ITab).(Node)
}
}
func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *DynamicTypeAssertExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.X != nil && do(n.X) {
return true
}
if n.T != nil && do(n.T) {
return true
}
return false
}
func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.X != nil {
n.X = edit(n.X).(Node)
}
if n.T != nil {
n.T = edit(n.T).(Node)
}
}
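The generated copy, doChildren, and editChildren methods above for DynamicType and DynamicTypeAssertExpr follow the package's uniform traversal contract: each node reports or rewrites its own children, and generic walkers recurse through them. A standalone, hedged sketch of that contract with illustrative types, not the real ir API:

package main

import "fmt"

// node mirrors the doChildren contract: report each child to do and stop
// early if do returns true.
type node interface {
    doChildren(do func(node) bool) bool
}

type leaf struct{ name string }

func (l *leaf) doChildren(do func(node) bool) bool { return false }

type pair struct{ x, y node }

func (p *pair) doChildren(do func(node) bool) bool {
    if p.x != nil && do(p.x) {
        return true
    }
    if p.y != nil && do(p.y) {
        return true
    }
    return false
}

// visit calls f on n and on everything reachable from it.
func visit(n node, f func(node)) {
    var do func(node) bool
    do = func(c node) bool {
        f(c)
        return c.doChildren(do)
    }
    do(n)
}

func main() {
    root := &pair{x: &leaf{"X"}, y: &leaf{"ITab"}}
    visit(root, func(n node) { fmt.Printf("%T\n", n) })
}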
func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ForStmt) copy() Node {
c := *n
@ -947,6 +1003,22 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) {
}
}
func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *RawOrigExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
return false
}
func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
}
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n

View file

@ -41,18 +41,18 @@ func _() {
_ = x[OCALLFUNC-30]
_ = x[OCALLMETH-31]
_ = x[OCALLINTER-32]
_ = x[OCALLPART-33]
_ = x[OCAP-34]
_ = x[OCLOSE-35]
_ = x[OCLOSURE-36]
_ = x[OCOMPLIT-37]
_ = x[OMAPLIT-38]
_ = x[OSTRUCTLIT-39]
_ = x[OARRAYLIT-40]
_ = x[OSLICELIT-41]
_ = x[OPTRLIT-42]
_ = x[OCONV-43]
_ = x[OCONVIFACE-44]
_ = x[OCAP-33]
_ = x[OCLOSE-34]
_ = x[OCLOSURE-35]
_ = x[OCOMPLIT-36]
_ = x[OMAPLIT-37]
_ = x[OSTRUCTLIT-38]
_ = x[OARRAYLIT-39]
_ = x[OSLICELIT-40]
_ = x[OPTRLIT-41]
_ = x[OCONV-42]
_ = x[OCONVIFACE-43]
_ = x[OCONVIDATA-44]
_ = x[OCONVNOP-45]
_ = x[OCOPY-46]
_ = x[ODCL-47]
@ -109,65 +109,72 @@ func _() {
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV2-103]
_ = x[OIOTA-104]
_ = x[OREAL-105]
_ = x[OIMAG-106]
_ = x[OCOMPLEX-107]
_ = x[OALIGNOF-108]
_ = x[OOFFSETOF-109]
_ = x[OSIZEOF-110]
_ = x[OUNSAFEADD-111]
_ = x[OUNSAFESLICE-112]
_ = x[OMETHEXPR-113]
_ = x[OBLOCK-114]
_ = x[OBREAK-115]
_ = x[OCASE-116]
_ = x[OCONTINUE-117]
_ = x[ODEFER-118]
_ = x[OFALL-119]
_ = x[OFOR-120]
_ = x[OFORUNTIL-121]
_ = x[OGOTO-122]
_ = x[OIF-123]
_ = x[OLABEL-124]
_ = x[OGO-125]
_ = x[ORANGE-126]
_ = x[ORETURN-127]
_ = x[OSELECT-128]
_ = x[OSWITCH-129]
_ = x[OTYPESW-130]
_ = x[OFUNCINST-131]
_ = x[OTCHAN-132]
_ = x[OTMAP-133]
_ = x[OTSTRUCT-134]
_ = x[OTINTER-135]
_ = x[OTFUNC-136]
_ = x[OTARRAY-137]
_ = x[OTSLICE-138]
_ = x[OINLCALL-139]
_ = x[OEFACE-140]
_ = x[OITAB-141]
_ = x[OIDATA-142]
_ = x[OSPTR-143]
_ = x[OCFUNC-144]
_ = x[OCHECKNIL-145]
_ = x[OVARDEF-146]
_ = x[OVARKILL-147]
_ = x[OVARLIVE-148]
_ = x[ORESULT-149]
_ = x[OINLMARK-150]
_ = x[OLINKSYMOFFSET-151]
_ = x[OTAILCALL-152]
_ = x[OGETG-153]
_ = x[OEND-154]
_ = x[ORECOVERFP-101]
_ = x[ORECV-102]
_ = x[ORUNESTR-103]
_ = x[OSELRECV2-104]
_ = x[OIOTA-105]
_ = x[OREAL-106]
_ = x[OIMAG-107]
_ = x[OCOMPLEX-108]
_ = x[OALIGNOF-109]
_ = x[OOFFSETOF-110]
_ = x[OSIZEOF-111]
_ = x[OUNSAFEADD-112]
_ = x[OUNSAFESLICE-113]
_ = x[OMETHEXPR-114]
_ = x[OMETHVALUE-115]
_ = x[OBLOCK-116]
_ = x[OBREAK-117]
_ = x[OCASE-118]
_ = x[OCONTINUE-119]
_ = x[ODEFER-120]
_ = x[OFALL-121]
_ = x[OFOR-122]
_ = x[OFORUNTIL-123]
_ = x[OGOTO-124]
_ = x[OIF-125]
_ = x[OLABEL-126]
_ = x[OGO-127]
_ = x[ORANGE-128]
_ = x[ORETURN-129]
_ = x[OSELECT-130]
_ = x[OSWITCH-131]
_ = x[OTYPESW-132]
_ = x[OFUNCINST-133]
_ = x[OTCHAN-134]
_ = x[OTMAP-135]
_ = x[OTSTRUCT-136]
_ = x[OTINTER-137]
_ = x[OTFUNC-138]
_ = x[OTARRAY-139]
_ = x[OTSLICE-140]
_ = x[OINLCALL-141]
_ = x[OEFACE-142]
_ = x[OITAB-143]
_ = x[OIDATA-144]
_ = x[OSPTR-145]
_ = x[OCFUNC-146]
_ = x[OCHECKNIL-147]
_ = x[OVARDEF-148]
_ = x[OVARKILL-149]
_ = x[OVARLIVE-150]
_ = x[ORESULT-151]
_ = x[OINLMARK-152]
_ = x[OLINKSYMOFFSET-153]
_ = x[ODYNAMICDOTTYPE-154]
_ = x[ODYNAMICDOTTYPE2-155]
_ = x[ODYNAMICTYPE-156]
_ = x[OTAILCALL-157]
_ = x[OGETG-158]
_ = x[OGETCALLERPC-159]
_ = x[OGETCALLERSP-160]
_ = x[OEND-161]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 213, 216, 221, 228, 235, 241, 250, 258, 266, 272, 276, 285, 292, 296, 299, 306, 314, 321, 327, 330, 336, 343, 351, 355, 362, 370, 372, 374, 376, 378, 380, 382, 387, 392, 400, 403, 412, 415, 419, 427, 434, 443, 456, 459, 462, 465, 468, 471, 474, 480, 483, 486, 492, 496, 499, 503, 508, 513, 519, 524, 528, 533, 541, 549, 555, 564, 575, 582, 586, 593, 601, 605, 609, 613, 620, 627, 635, 641, 650, 661, 669, 674, 679, 683, 691, 696, 700, 703, 711, 715, 717, 722, 724, 729, 735, 741, 747, 753, 761, 766, 770, 777, 783, 788, 794, 800, 807, 812, 816, 821, 825, 830, 838, 844, 851, 858, 864, 871, 884, 892, 896, 899}
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 286, 293, 297, 300, 307, 315, 322, 328, 331, 337, 344, 352, 356, 363, 371, 373, 375, 377, 379, 381, 383, 388, 393, 401, 404, 413, 416, 420, 428, 435, 444, 457, 460, 463, 466, 469, 472, 475, 481, 484, 487, 493, 497, 500, 504, 509, 514, 520, 525, 529, 534, 542, 550, 556, 565, 576, 583, 592, 596, 603, 611, 615, 619, 623, 630, 637, 645, 651, 660, 671, 679, 688, 693, 698, 702, 710, 715, 719, 722, 730, 734, 736, 741, 743, 748, 754, 760, 766, 772, 780, 785, 789, 796, 802, 807, 813, 819, 826, 831, 835, 840, 844, 849, 857, 863, 870, 877, 883, 890, 903, 917, 932, 943, 951, 955, 966, 977, 980}
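op_string.go is output of the stringer generator: _Op_name packs every op name into one string, and _Op_index records the offsets delimiting each name, so String simply slices between consecutive offsets. A self-contained miniature of the same scheme, with made-up ops:

package main

import "fmt"

type Op int

const (
    OXXX Op = iota
    ONAME
    ONONAME
)

// One packed string plus an offset table, the same shape used above.
const _Op_name = "XXXNAMENONAME"

var _Op_index = [...]uint8{0, 3, 7, 13}

func (i Op) String() string {
    if i < 0 || int(i) >= len(_Op_index)-1 {
        return fmt.Sprintf("Op(%d)", i)
    }
    return _Op_name[_Op_index[i]:_Op_index[i+1]]
}

func main() {
    fmt.Println(ONAME, ONONAME) // NAME NONAME
}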
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {

View file

@ -32,7 +32,4 @@ type Package struct {
// Exported (or re-exported) symbols.
Exports []*Name
// Map from function names of stencils to already-created stencils.
Stencils map[*types.Sym]*Func
}

View file

@ -90,7 +90,7 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 {
if n := n.(*Name); n.Class == PFUNC {
do(n.Defn)
}
case ODOTMETH, OCALLPART, OMETHEXPR:
case ODOTMETH, OMETHVALUE, OMETHEXPR:
if fn := MethodExprName(n); fn != nil {
do(fn.Defn)
}

View file

@ -300,11 +300,36 @@ func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
return TypeNodeAt(src.NoXPos, t)
}
// TypeNodeAt is like TypeNode, but allows specifying the position
// information if a new OTYPE needs to be constructed.
//
// Deprecated: Use TypeNode instead. For typical use, the position for
// an anonymous OTYPE node should not matter. However, TypeNodeAt is
// available for use with toolstash -cmp to refactor existing code
// that is sensitive to OTYPE position.
func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
return newTypeNode(src.NoXPos, t)
return newTypeNode(pos, t)
}
// A DynamicType represents the target type in a type switch.
type DynamicType struct {
miniExpr
X Node // a *runtime._type for the targeted type
ITab Node // for type switches from nonempty interfaces to non-interfaces, this is the itab for that pair.
}
func NewDynamicType(pos src.XPos, x Node) *DynamicType {
n := &DynamicType{X: x}
n.pos = pos
n.op = ODYNAMICTYPE
return n
}
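DynamicType carries the runtime type descriptor in X and, when switching from a nonempty interface to a non-interface, the corresponding itab in ITab, for switch targets that are only known per instantiation. A hedged, plain-Go illustration of the kind of source construct this serves; illustrative only and needs a generics-capable toolchain:

package main

import "fmt"

// contains reports whether x is a T equal to want, or a []T containing it.
// The case types are derived from the type parameter, so after stenciling
// their targets are the dynamic types described above.
func contains[T comparable](x any, want T) bool {
    switch v := x.(type) {
    case T:
        return v == want
    case []T:
        for _, e := range v {
            if e == want {
                return true
            }
        }
    }
    return false
}

func main() {
    fmt.Println(contains[int](3, 3))           // true
    fmt.Println(contains[int]([]int{1, 2}, 2)) // true
    fmt.Println(contains[string](42, "x"))     // false
}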

View file

@ -66,7 +66,7 @@ func Float64Val(v constant.Value) float64 {
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
base.Fatalf("%v does not represent %v", t, v)
base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
}
}

View file

@ -1082,6 +1082,10 @@ func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
if lv.fn.Wrapper() || lv.fn.Dupok() {
// Skip reporting liveness information for compiler-generated wrappers.
return
}
if !(v == nil || v.Op.IsCall()) {
// Historically we only printed this information at
// calls. Keep doing so.

View file

@ -209,7 +209,7 @@ func s15a8(x *[15]int64) [15]int64 {
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
// escape analysis explanation
want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
@ -220,8 +220,8 @@ func s15a8(x *[15]int64) [15]int64 {
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~R0 (return)"}]}`)
})
}

View file

@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock

View file

@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue

View file

@ -0,0 +1,124 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
type code interface {
marker() syncMarker
value() int
}
type codeVal int
func (c codeVal) marker() syncMarker { return syncVal }
func (c codeVal) value() int { return int(c) }
const (
valBool codeVal = iota
valString
valInt64
valBigInt
valBigRat
valBigFloat
)
type codeType int
func (c codeType) marker() syncMarker { return syncType }
func (c codeType) value() int { return int(c) }
const (
typeBasic codeType = iota
typeNamed
typePointer
typeSlice
typeArray
typeChan
typeMap
typeSignature
typeStruct
typeInterface
typeUnion
typeTypeParam
)
type codeObj int
func (c codeObj) marker() syncMarker { return syncCodeObj }
func (c codeObj) value() int { return int(c) }
const (
objAlias codeObj = iota
objConst
objType
objFunc
objVar
objStub
)
type codeStmt int
func (c codeStmt) marker() syncMarker { return syncStmt1 }
func (c codeStmt) value() int { return int(c) }
const (
stmtEnd codeStmt = iota
stmtLabel
stmtBlock
stmtExpr
stmtSend
stmtAssign
stmtAssignOp
stmtIncDec
stmtBranch
stmtCall
stmtReturn
stmtIf
stmtFor
stmtSwitch
stmtSelect
// TODO(mdempsky): Remove after we don't care about toolstash -cmp.
stmtTypeDeclHack
)
type codeExpr int
func (c codeExpr) marker() syncMarker { return syncExpr }
func (c codeExpr) value() int { return int(c) }
// TODO(mdempsky): Split expr into addr, for lvalues.
const (
exprNone codeExpr = iota
exprConst
exprType // type expression
exprLocal // local variable
exprName // global variable or function
exprBlank
exprCompLit
exprFuncLit
exprSelector
exprIndex
exprSlice
exprAssert
exprUnaryOp
exprBinaryOp
exprCall
exprConvert
)
type codeDecl int
func (c codeDecl) marker() syncMarker { return syncDecl }
func (c codeDecl) value() int { return int(c) }
const (
declEnd codeDecl = iota
declFunc
declMethod
declVar
declOther
)
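Every enum in this new file implements the small code interface, so one encoder or decoder helper can handle any of them as a sync marker followed by an integer value. A standalone, hedged sketch of that pattern with illustrative names, not the real noder encoder:

package main

import "fmt"

type syncMarker int

const (
    syncVal syncMarker = iota
    syncStmt1
)

// code mirrors the interface above: every enum knows its marker and value.
type code interface {
    marker() syncMarker
    value() int
}

type codeStmt int

func (c codeStmt) marker() syncMarker { return syncStmt1 }
func (c codeStmt) value() int         { return int(c) }

const (
    stmtEnd codeStmt = iota
    stmtLabel
    stmtBlock
)

// writeCode stands in for a hypothetical encoder hook: it records the
// marker and then the value, so a reader can check both in lockstep.
func writeCode(c code) {
    fmt.Printf("marker=%d value=%d\n", c.marker(), c.value())
}

func main() {
    writeCode(stmtBlock) // marker=1 value=2
}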

Some files were not shown because too many files have changed in this diff.