Mirror of https://github.com/golang/go.git, synced 2025-12-08 06:10:04 +00:00

[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch

The only major fixup is that duffzero changed from 8-byte writes to 16-byte writes.

Change-Id: I1762b74ce67a8e4b81c11568027cdb3572f7f87c
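Background for that fixup: duffzero is the runtime's unrolled block-zeroing helper (runtime/duff_*.s, generated by runtime/mkduff.go), and the compiler jumps into it at an offset computed from the size being cleared, so the per-step store width matters to the SSA backend. The following is a minimal, illustrative Go sketch of the difference between zeroing 8 bytes per step and 16 bytes per step; the function names are invented for the example and this is not the actual runtime code.

package main

import "fmt"

// zero8 models the old granularity: one 8-byte (uint64) store per step.
func zero8(buf []uint64) {
	for i := range buf {
		buf[i] = 0
	}
}

// zero16 models the new granularity: two uint64s (16 bytes) per step,
// so the same block needs half as many steps; an odd trailing word
// still gets a single 8-byte store.
func zero16(buf []uint64) {
	i := 0
	for ; i+1 < len(buf); i += 2 {
		buf[i] = 0
		buf[i+1] = 0
	}
	if i < len(buf) {
		buf[i] = 0
	}
}

func main() {
	a := []uint64{1, 2, 3, 4, 5}
	b := []uint64{1, 2, 3, 4, 5}
	zero8(a)
	zero16(b)
	fmt.Println(a) // [0 0 0 0 0]
	fmt.Println(b) // [0 0 0 0 0]
}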
commit 7c4fbb650c
545 changed files with 26683 additions and 12849 deletions
AUTHORS (61 changed lines)

@@ -11,10 +11,12 @@
 A Medium Corporation
 Aamir Khan <syst3m.w0rm@gmail.com>
 Aaron France <aaron.l.france@gmail.com>
+Aaron Torres <tcboox@gmail.com>
 Abhinav Gupta <abhinav.g90@gmail.com>
 Adrian Nos <nos.adrian@gmail.com>
 Adrian O'Grady <elpollouk@gmail.com>
 Adrien Bustany <adrien-xx-google@bustany.org>
+Aécio Júnior <aeciodantasjunior@gmail.com>
 Ahmed Waheed Moanes <oneofone@gmail.com>
 Ainar Garipov <gugl.zadolbal@gmail.com>
 Akshat Kumar <seed@mail.nanosouffle.net>
@@ -31,6 +33,7 @@ Alex Schroeder <alex@gnu.org>
 Alex Sergeyev <abc@alexsergeyev.com>
 Alexander Larsson <alexander.larsson@gmail.com>
 Alexander Morozov <lk4d4math@gmail.com>
+Alexander Neumann <alexander@bumpern.de>
 Alexander Orlov <alexander.orlov@loxal.net>
 Alexander Reece <awreece@gmail.com>
 Alexander Surma <surma@surmair.de>
@@ -41,6 +44,7 @@ Alexandre Normand <alexandre.normand@gmail.com>
 Alexei Sholik <alcosholik@gmail.com>
 Alexey Borzenkov <snaury@gmail.com>
 Alexey Palazhchenko <alexey.palazhchenko@gmail.com>
+Alif Rachmawadi <subosito@gmail.com>
 Amir Mohammad Saied <amir@gluegadget.com>
 Amrut Joshi <amrut.joshi@gmail.com>
 Andrei Korzhevskii <a.korzhevskiy@gmail.com>
@@ -49,6 +53,7 @@ Andrew Balholm <andybalholm@gmail.com>
 Andrew Bonventre <andybons@chromium.org>
 Andrew Bursavich <abursavich@gmail.com>
 Andrew Ekstedt <andrew.ekstedt@gmail.com>
+Andrew Etter <andrew.etter@gmail.com>
 Andrew Harding <andrew@spacemonkey.com>
 Andrew Lutomirski <andy@luto.us>
 Andrew Pritchard <awpritchard@gmail.com>
@@ -64,6 +69,7 @@ Andy Davis <andy@bigandian.com>
 Andy Maloney <asmaloney@gmail.com>
 Anfernee Yongkun Gui <anfernee.gui@gmail.com>
 Anh Hai Trinh <anh.hai.trinh@gmail.com>
+Anmol Sethi <anmol@aubble.com>
 Anschel Schaffer-Cohen <anschelsc@gmail.com>
 Anthony Eufemio <anthony.eufemio@gmail.com>
 Anthony Martin <ality@pbrane.org>
@@ -74,6 +80,7 @@ Areski Belaid <areski@gmail.com>
 Arnaud Ysmal <arnaud.ysmal@gmail.com>
 Arne Hormann <arnehormann@gmail.com>
 Aron Nopanen <aron.nopanen@gmail.com>
+Artyom Pervukhin <artyom.pervukhin@gmail.com>
 Arvindh Rajesh Tamilmani <art@a-30.net>
 Ato Araki <ato.araki@gmail.com>
 Aulus Egnatius Varialus <varialus@gmail.com>
@@ -112,6 +119,7 @@ Charles L. Dorian <cldorian@gmail.com>
 Charles Lee <zombie.fml@gmail.com>
 Chris Dollin <ehog.hedge@gmail.com>
 Chris Farmiloe <chrisfarms@gmail.com>
+Chris Hines <chris.cs.guy@gmail.com>
 Chris Howey <howeyc@gmail.com>
 Chris Jones <chris@cjones.org>
 Chris Kastorff <encryptio@gmail.com>
@@ -130,6 +138,7 @@ Clement Skau <clementskau@gmail.com>
 CloudFlare Inc.
 Colin Kennedy <moshen.colin@gmail.com>
 Conrad Meyer <cemeyer@cs.washington.edu>
+CoreOS, Inc.
 Corey Thomasson <cthom.lists@gmail.com>
 Cristian Staretu <unclejacksons@gmail.com>
 Damian Gryski <dgryski@gmail.com>
@@ -137,9 +146,12 @@ Dan Callahan <dan.callahan@gmail.com>
 Dan Peterson <dpiddy@gmail.com>
 Dan Sinclair <dan.sinclair@gmail.com>
 Daniel Fleischman <danielfleischman@gmail.com>
+Daniel Johansson <dajo2002@gmail.com>
+Daniel Kerwin <d.kerwin@gini.net>
 Daniel Krech <eikeon@eikeon.com>
 Daniel Lidén <daniel.liden.87@gmail.com>
 Daniel Morsing <daniel.morsing@gmail.com>
+Daniel Ortiz Pereira da Silva <daniel.particular@gmail.com>
 Daniel Theophanes <kardianos@gmail.com>
 Darren Elwood <darren@textnode.com>
 Dave Cheney <dave@cheney.net>
@@ -150,13 +162,16 @@ David Forsythe <dforsythe@gmail.com>
 David G. Andersen <dave.andersen@gmail.com>
 David Jakob Fritz <david.jakob.fritz@gmail.com>
 David Leon Gil <coruus@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
 David Thomas <davidthomas426@gmail.com>
 David Titarenco <david.titarenco@gmail.com>
 Davies Liu <davies.liu@gmail.com>
 Dean Prichard <dean.prichard@gmail.com>
+Denis Bernard <db047h@gmail.com>
 Denis Brandolini <denis.brandolini@gmail.com>
 Derek Buitenhuis <derek.buitenhuis@gmail.com>
 Derek Parker <parkerderek86@gmail.com>
+Develer SRL
 Devon H. O'Dell <devon.odell@gmail.com>
 Dhiru Kholia <dhiru.kholia@gmail.com>
 Didier Spezia <didier.06@gmail.com>
@@ -166,6 +181,7 @@ Dmitriy Shelenin <deemok@googlemail.com> <deemok@gmail.com>
 Dmitry Chestnykh <dchest@gmail.com>
 Dmitry Savintsev <dsavints@gmail.com>
 Dominik Honnef <dominik.honnef@gmail.com>
+Donald Huang <don.hcd@gmail.com>
 Donovan Hide <donovanhide@gmail.com>
 Dropbox, Inc.
 Duncan Holm <mail@frou.org>
@@ -180,6 +196,7 @@ Elias Naur <elias.naur@gmail.com>
 Emil Hessman <c.emil.hessman@gmail.com> <emil@hessman.se>
 Eoghan Sherry <ejsherry@gmail.com>
 Eric Clark <zerohp@gmail.com>
+Eric Lagergren <ericscottlagergren@gmail.com>
 Eric Milliken <emilliken@gmail.com>
 Eric Roshan-Eisner <eric.d.eisner@gmail.com>
 Erik Aigner <aigner.erik@gmail.com>
@@ -189,6 +206,7 @@ Esko Luontola <esko.luontola@gmail.com>
 Evan Phoenix <evan@phx.io>
 Evan Shaw <chickencha@gmail.com>
 Ewan Chou <coocood@gmail.com>
+Fabian Wickborn <fabian@wickborn.net>
 Fabrizio Milo <mistobaan@gmail.com>
 Fan Hongjian <fan.howard@gmail.com>
 Fastly, Inc.
@@ -234,6 +252,7 @@ Henning Schmiedehausen <henning@schmiedehausen.org>
 Henrik Edwards <henrik.edwards@gmail.com>
 Herbert Georg Fischer <herbert.fischer@gmail.com>
 Hong Ruiqi <hongruiqi@gmail.com>
+Hu Keping <hukeping@huawei.com>
 IBM
 Icarus Sparry <golang@icarus.freeuk.com>
 Igneous Systems, Inc.
@@ -241,6 +260,7 @@ Igor Dolzhikov <bluesriverz@gmail.com>
 INADA Naoki <songofacandy@gmail.com>
 Ingo Krabbe <ikrabbe.ask@gmail.com>
 Ingo Oeser <nightlyone@googlemail.com>
+Intel Corporation
 Isaac Wagner <ibw@isaacwagner.me>
 Ivan Ukhov <ivan.ukhov@gmail.com>
 Jae Kwon <jae@tendermint.com>
@@ -251,6 +271,8 @@ James Fysh <james.fysh@gmail.com>
 James Gray <james@james4k.com>
 James Meneghello <rawrz0r@gmail.com>
 James P. Cooper <jamespcooper@gmail.com>
+James Schofield <james@shoeboxapp.com>
+James Sweet <james.sweet88@googlemail.com>
 James Toy <nil@opensesame.st>
 James Whitehead <jnwhiteh@gmail.com>
 Jan H. Hosang <jan.hosang@gmail.com>
@@ -269,6 +291,7 @@ Jeff Sickel <jas@corpus-callosum.com>
 Jeff Wendling <jeff@spacemonkey.com>
 Jens Frederich <jfrederich@gmail.com>
 Jeremy Jackins <jeremyjackins@gmail.com>
+Jihyun Yu <yjh0502@gmail.com>
 Jim McGrath <jimmc2@gmail.com>
 Jimmy Zelinskie <jimmyzelinskie@gmail.com>
 Jingcheng Zhang <diogin@gmail.com>
@@ -297,6 +320,7 @@ Josh Bleecher Snyder <josharian@gmail.com>
 Josh Goebel <dreamer3@gmail.com>
 Josh Holland <jrh@joshh.co.uk>
 Joshua Chase <jcjoshuachase@gmail.com>
+Jostein Stuhaug <js@solidsystem.no>
 JT Olds <jtolds@xnet5.com>
 Jukka-Pekka Kekkonen <karatepekka@gmail.com>
 Julian Phillips <julian@quantumfyre.co.uk>
@@ -308,13 +332,18 @@ Kang Hu <hukangustc@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
 Katrina Owen <katrina.owen@gmail.com>
 Kei Son <hey.calmdown@gmail.com>
+Keith Ball <inflatablewoman@gmail.com>
 Keith Rarick <kr@xph.us>
 Kelsey Hightower <kelsey.hightower@gmail.com>
 Kelvin Foo Chuan Lyi <vmirage@gmail.com>
 Ken Friedenbach <kenliz@cruzio.com>
 Ken Rockot <ken@oz.gs>
+Ken Sedgwick <ken@bonsai.com>
 Kevin Ballard <kevin@sb.org>
 Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
+KPCompass, Inc.
+Kristopher Watts <traetox@gmail.com>
+Kun Li <likunarmstrong@gmail.com>
 Kyle Consalus <consalus@gmail.com>
 Kyle Isom <kyle@gokyle.net>
 Kyle Lemons <kyle@kylelemons.net>
@@ -322,14 +351,17 @@ L Campbell <unpantsu@gmail.com>
 Lai Jiangshan <eag0628@gmail.com>
 Larz Conwell <larzconwell@gmail.com>
 Lee Packham <lpackham@gmail.com>
+Liberty Fund Inc
 Linaro Limited
 Lloyd Dewolf <foolswisdom@gmail.com>
 Lorenzo Stoakes <lstoakes@gmail.com>
 Luca Greco <luca.greco@alcacoop.it>
+Lucien Stuker <lucien.stuker@gmail.com>
 Lucio De Re <lucio.dere@gmail.com>
 Luit van Drongelen <luitvd@gmail.com>
 Luka Zakrajšek <tr00.g33k@gmail.com>
 Luke Curley <qpingu@gmail.com>
+Mal Curtis <mal@mal.co.nz>
 Manuel Mendez <mmendez534@gmail.com>
 Marc Weistroff <marc@weistroff.net>
 Marco Hennings <marco.hennings@freiheit.com>
@@ -344,6 +376,7 @@ Markus Zimmermann <zimmski@gmail.com>
 Martin Möhrmann <martisch@uos.de>
 Martin Neubauer <m.ne@gmx.net>
 Martin Olsson <martin@minimum.se>
+Marvin Stenger <marvin.stenger94@gmail.com>
 Mateusz Czapliński <czapkofan@gmail.com>
 Mathias Beke <git@denbeke.be>
 Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
@@ -362,6 +395,7 @@ Matthew Holt <Matthew.Holt+git@gmail.com>
 Matthew Horsnell <matthew.horsnell@gmail.com>
 Maxim Khitrov <max@mxcrypt.com>
 Meir Fischer <meirfischer@gmail.com>
+Meteor Development Group
 Micah Stetson <micah.stetson@gmail.com>
 Michael Chaten <mchaten@gmail.com>
 Michael Elkins <michael.elkins@gmail.com>
@@ -376,6 +410,7 @@ Michael Schaller <michael@5challer.de>
 Michael Stapelberg <michael@stapelberg.de>
 Michael Teichgräber <mteichgraeber@gmx.de>
 Michael Vetter <g.bluehut@gmail.com>
+Michal Bohuslávek <mbohuslavek@gmail.com>
 Michał Derkacz <ziutek@lnet.pl>
 Miek Gieben <miek@miek.nl>
 Mihai Borobocea <MihaiBorobocea@gmail.com>
@@ -394,6 +429,7 @@ Nan Deng <monnand@gmail.com>
 Nathan John Youngman <nj@nathany.com>
 Nathan P Finch <nate.finch@gmail.com>
 Nathan Youngman <git@nathany.com>
+Neelesh Chandola <neelesh.c98@gmail.com>
 Nevins Bartolomeo <nevins.bartolomeo@gmail.com>
 ngmoco, LLC
 Nicholas Katsaros <nick@nickkatsaros.com>
@@ -405,7 +441,10 @@ Nicolas Kaiser <nikai@nikai.net>
 Nicolas Owens <mischief@offblast.org>
 Nicolas S. Dade <nic.dade@gmail.com>
 Nigel Kerr <nigel.kerr@gmail.com>
+Nikolay Turpitko <nikolay@turpitko.com>
 Noah Campbell <noahcampbell@gmail.com>
+Norberto Lopes <nlopes.ml@gmail.com>
+Oleku Konko <oleku.konko@gmail.com>
 Oling Cat <olingcat@gmail.com>
 Oliver Hookins <ohookins@gmail.com>
 Olivier Antoine <olivier.antoine@gmail.com>
@@ -426,11 +465,14 @@ Patrick Smith <pat42smith@gmail.com>
 Paul A Querna <paul.querna@gmail.com>
 Paul Hammond <paul@paulhammond.org>
 Paul Lalonde <paul.a.lalonde@gmail.com>
+Paul Rosania <paul.rosania@gmail.com>
 Paul Sbarra <Sbarra.Paul@gmail.com>
 Paul Smith <paulsmith@pobox.com> <paulsmith@gmail.com>
 Paul van Brouwershaven <paul@vanbrouwershaven.com>
 Pavel Zinovkin <pavel.zinovkin@gmail.com>
+Pawel Knap <pawelknap88@gmail.com>
 Percy Wegmann <ox.to.a.cart@gmail.com>
+Perry Abbott <perry.j.abbott@gmail.com>
 Petar Maymounkov <petarm@gmail.com>
 Peter Armitage <peter.armitage@gmail.com>
 Peter Froehlich <peter.hans.froehlich@gmail.com>
@@ -443,6 +485,7 @@ Peter Waldschmidt <peter@waldschmidt.com>
 Peter Waller <peter.waller@gmail.com>
 Peter Williams <pwil3058@gmail.com>
 Philip K. Warren <pkwarren@gmail.com>
+Pierre Roullon <pierre.roullon@gmail.com>
 Pieter Droogendijk <pieter@binky.org.uk>
 Pietro Gagliardi <pietro10@mac.com>
 Preetam Jinka <pj@preet.am>
@@ -451,6 +494,7 @@ Quoc-Viet Nguyen <afelion@gmail.com>
 Raif S. Naffah <go@naffah-raif.name>
 Rajat Goel <rajat.goel2010@gmail.com>
 Red Hat, Inc.
+Reinaldo de Souza Jr <juniorz@gmail.com>
 Rémy Oudompheng <oudomphe@phare.normalesup.org>
 Richard Barnes <rlb@ipv.sx>
 Richard Crowley <r@rcrowley.org>
@@ -463,6 +507,7 @@ Robert Dinu <r@varp.se>
 Robert Figueiredo <robfig@gmail.com>
 Robert Hencke <robert.hencke@gmail.com>
 Robert Obryk <robryk@gmail.com>
+Robert Stepanek <robert.stepanek@gmail.com>
 Robin Eklind <r.eklind.87@gmail.com>
 Rodrigo Moraes de Oliveira <rodrigo.moraes@gmail.com>
 Rodrigo Rafael Monti Kochenburger <divoxx@gmail.com>
@@ -472,10 +517,13 @@ Ron Hashimoto <mail@h2so5.net>
 Ron Minnich <rminnich@gmail.com>
 Ross Light <rlight2@gmail.com>
 Rowan Worth <sqweek@gmail.com>
+Russell Haering <russellhaering@gmail.com>
 Ryan Hitchman <hitchmanr@gmail.com>
+Ryan Lower <rpjlower@gmail.com>
 Ryan Seys <ryan@ryanseys.com>
 Ryan Slade <ryanslade@gmail.com>
 S.Çağlar Onur <caglar@10ur.org>
+Salmān Aljammāz <s@0x65.net>
 Sanjay Menakuru <balasanjay@gmail.com>
 Scott Barron <scott.barron@github.com>
 Scott Ferguson <scottwferg@gmail.com>
@@ -486,6 +534,7 @@ Sergei Skorobogatov <skorobo@rambler.ru>
 Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
 Sergio Luis O. B. Correia <sergio@correia.cc>
 Shane Hansen <shanemhansen@gmail.com>
+Shaozhen Ding <dsz0111@gmail.com>
 Shawn Smith <shawn.p.smith@gmail.com>
 Shenghou Ma <minux.ma@gmail.com>
 Shivakumar GN <shivakumar.gn@gmail.com>
@@ -493,6 +542,7 @@ Silvan Jegen <s.jegen@gmail.com>
 Simon Whitehead <chemnova@gmail.com>
 Sokolov Yura <funny.falcon@gmail.com>
 Spring Mc <heresy.mc@gmail.com>
+Square, Inc.
 StalkR <stalkr@stalkr.net>
 Stan Schwertly <stan@schwertly.com>
 Stefan Nilsson <snilsson@nada.kth.se> <trolleriprofessorn@gmail.com>
@@ -508,13 +558,17 @@ Sven Almgren <sven@tras.se>
 Szabolcs Nagy <nsz@port70.net>
 Tad Glines <tad.glines@gmail.com>
 Taj Khattra <taj.khattra@gmail.com>
+Takeshi YAMANASHI <9.nashi@gmail.com>
 Tamir Duberstein <tamird@gmail.com>
 Tarmigan Casebolt <tarmigan@gmail.com>
 Taru Karttunen <taruti@taruti.net>
+Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
 Tetsuo Kiso <tetsuokiso9@gmail.com>
 Thiago Fransosi Farina <thiago.farina@gmail.com>
 Thomas Alan Copeland <talan.copeland@gmail.com>
+Thomas Desrosiers <thomasdesr@gmail.com>
 Thomas Kappler <tkappler@gmail.com>
+Tim Cooijmans <timcooijmans@gmail.com>
 Timo Savola <timo.savola@gmail.com>
 Timo Truyts <alkaloid.btx@gmail.com>
 Tobias Columbus <tobias.columbus@gmail.com>
@@ -523,12 +577,15 @@ Tom Heng <zhm20070928@gmail.com>
 Tom Linford <tomlinford@gmail.com>
 Tommy Schaefer <tommy.schaefer@teecom.com>
 Tor Andersson <tor.andersson@gmail.com>
+Totoro W <tw19881113@gmail.com>
 Travis Cline <travis.cline@gmail.com>
+Trey Tacon <ttacon@gmail.com>
 Tudor Golubenco <tudor.g@gmail.com>
-Tw <tw19881113@gmail.com>
 Tyler Bunnell <tylerbunnell@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
 Ugorji Nwoke <ugorji@gmail.com>
 Ulf Holm Nielsen <doktor@dyregod.dk>
+Ulrich Kunitz <uli.kunitz@gmail.com>
 Uriel Mangado <uriel@berlinblue.org>
 Vadim Vygonets <unixdj@gmail.com>
 Vincent Ambo <tazjin@googlemail.com>
@@ -543,7 +600,9 @@ William Josephson <wjosephson@gmail.com>
 William Orr <will@worrbase.com> <ay1244@gmail.com>
 Xia Bin <snyh@snyh.org>
 Xing Xing <mikespook@gmail.com>
+Yann Kerhervé <yann.kerherve@gmail.com>
 Yasuhiro Matsumoto <mattn.jp@gmail.com>
+Yesudeep Mangalapilly <yesudeep@google.com>
 Yissakhar Z. Beck <yissakhar.beck@gmail.com>
 Yo-An Lin <yoanlin93@gmail.com>
 Yongjian Xu <i3dmaster@gmail.com>
CONTRIBUTORS (86 changed lines)

@@ -35,11 +35,13 @@ Aamir Khan <syst3m.w0rm@gmail.com>
 Aaron France <aaron.l.france@gmail.com>
 Aaron Jacobs <jacobsa@google.com>
 Aaron Kemp <kemp.aaron@gmail.com>
+Aaron Torres <tcboox@gmail.com>
 Abhinav Gupta <abhinav.g90@gmail.com>
 Adam Langley <agl@golang.org>
 Adrian Nos <nos.adrian@gmail.com>
 Adrian O'Grady <elpollouk@gmail.com>
 Adrien Bustany <adrien-xx-google@bustany.org>
+Aécio Júnior <aeciodantasjunior@gmail.com>
 Ahmed Waheed Moanes <oneofone@gmail.com>
 Ainar Garipov <gugl.zadolbal@gmail.com>
 Akshat Kumar <seed@mail.nanosouffle.net>
@@ -58,6 +60,7 @@ Alex Schroeder <alex@gnu.org>
 Alex Sergeyev <abc@alexsergeyev.com>
 Alexander Larsson <alexander.larsson@gmail.com>
 Alexander Morozov <lk4d4math@gmail.com>
+Alexander Neumann <alexander@bumpern.de>
 Alexander Orlov <alexander.orlov@loxal.net>
 Alexander Reece <awreece@gmail.com>
 Alexander Surma <surma@surmair.de>
@@ -70,6 +73,7 @@ Alexei Sholik <alcosholik@gmail.com>
 Alexey Borzenkov <snaury@gmail.com>
 Alexey Palazhchenko <alexey.palazhchenko@gmail.com>
 Alexis Imperial-Legrand <ail@google.com>
+Alif Rachmawadi <subosito@gmail.com>
 Amir Mohammad Saied <amir@gluegadget.com>
 Amrut Joshi <amrut.joshi@gmail.com>
 Andrea Spadaccini <spadaccio@google.com>
@@ -81,6 +85,7 @@ Andrew Balholm <andybalholm@gmail.com>
 Andrew Bonventre <andybons@chromium.org>
 Andrew Bursavich <abursavich@gmail.com>
 Andrew Ekstedt <andrew.ekstedt@gmail.com>
+Andrew Etter <andrew.etter@gmail.com>
 Andrew Gerrand <adg@golang.org>
 Andrew Harding <andrew@spacemonkey.com>
 Andrew Lutomirski <andy@luto.us>
@@ -98,6 +103,7 @@ Andy Davis <andy@bigandian.com>
 Andy Maloney <asmaloney@gmail.com>
 Anfernee Yongkun Gui <anfernee.gui@gmail.com>
 Anh Hai Trinh <anh.hai.trinh@gmail.com>
+Anmol Sethi <anmol@aubble.com>
 Anschel Schaffer-Cohen <anschelsc@gmail.com>
 Anthony Eufemio <anthony.eufemio@gmail.com>
 Anthony Martin <ality@pbrane.org>
@@ -108,6 +114,7 @@ Areski Belaid <areski@gmail.com>
 Arnaud Ysmal <arnaud.ysmal@gmail.com>
 Arne Hormann <arnehormann@gmail.com>
 Aron Nopanen <aron.nopanen@gmail.com>
+Artyom Pervukhin <artyom.pervukhin@gmail.com>
 Arvindh Rajesh Tamilmani <art@a-30.net>
 Asim Shankar <asimshankar@gmail.com>
 Ato Araki <ato.araki@gmail.com>
@@ -141,10 +148,11 @@ Brendan O'Dea <bod@golang.org>
 Brett Cannon <bcannon@gmail.com>
 Brian Dellisanti <briandellisanti@gmail.com>
 Brian G. Merrell <bgmerrell@gmail.com>
-Brian Gitonga Marete <marete@toshnix.com> <bgmarete@gmail.com>
+Brian Gitonga Marete <marete@toshnix.com> <bgmarete@gmail.com> <bgm@google.com>
 Brian Ketelsen <bketelsen@gmail.com>
 Brian Slesinsky <skybrian@google.com>
 Brian Smith <ohohvi@gmail.com>
+Bryan C. Mills <bcmills@google.com>
 Bryan Ford <brynosaurus@gmail.com>
 Burcu Dogan <jbd@google.com> <burcujdogan@gmail.com>
 Caine Tighe <arctanofyourface@gmail.com>
@@ -166,6 +174,7 @@ Charles Lee <zombie.fml@gmail.com>
 Chris Broadfoot <cbro@golang.org>
 Chris Dollin <ehog.hedge@gmail.com>
 Chris Farmiloe <chrisfarms@gmail.com>
+Chris Hines <chris.cs.guy@gmail.com>
 Chris Howey <howeyc@gmail.com>
 Chris Hundt <hundt@google.com>
 Chris Jones <chris@cjones.org> <chris.jones.yar@gmail.com>
@@ -197,13 +206,17 @@ Dan Peterson <dpiddy@gmail.com>
 Dan Pupius <dan@medium.com>
 Dan Sinclair <dan.sinclair@gmail.com>
 Daniel Fleischman <danielfleischman@gmail.com>
+Daniel Johansson <dajo2002@gmail.com>
+Daniel Kerwin <d.kerwin@gini.net>
 Daniel Krech <eikeon@eikeon.com>
 Daniel Lidén <daniel.liden.87@gmail.com>
 Daniel Morsing <daniel.morsing@gmail.com>
 Daniel Nadasi <dnadasi@google.com>
+Daniel Ortiz Pereira da Silva <daniel.particular@gmail.com>
 Daniel Theophanes <kardianos@gmail.com>
 Darren Elwood <darren@textnode.com>
 Dave Borowitz <dborowitz@google.com>
+Dave Bort <dbort@golang.org>
 Dave Cheney <dave@cheney.net>
 Dave Day <djd@golang.org>
 Dave Grijalva <dgrijalva@ngmoco.com>
@@ -217,15 +230,18 @@ David Crawshaw <david.crawshaw@zentus.com> <crawshaw@google.com> <crawshaw@golan
 David du Colombier <0intro@gmail.com>
 David Forsythe <dforsythe@gmail.com>
 David G. Andersen <dave.andersen@gmail.com>
+David Glasser <glasser@meteor.com>
 David Jakob Fritz <david.jakob.fritz@gmail.com>
 David Leon Gil <coruus@gmail.com>
 David McLeish <davemc@google.com>
 David Presotto <presotto@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
 David Symonds <dsymonds@golang.org>
 David Thomas <davidthomas426@gmail.com>
 David Titarenco <david.titarenco@gmail.com>
 Davies Liu <davies.liu@gmail.com>
 Dean Prichard <dean.prichard@gmail.com>
+Denis Bernard <db047h@gmail.com>
 Denis Brandolini <denis.brandolini@gmail.com>
 Derek Buitenhuis <derek.buitenhuis@gmail.com>
 Derek Parker <parkerderek86@gmail.com>
@@ -240,6 +256,7 @@ Dmitry Chestnykh <dchest@gmail.com>
 Dmitry Savintsev <dsavints@gmail.com>
 Dominik Honnef <dominik.honnef@gmail.com>
 Dominik Vogt <vogt@linux.vnet.ibm.com>
+Donald Huang <don.hcd@gmail.com>
 Donovan Hide <donovanhide@gmail.com>
 Drew Hintz <adhintz@google.com>
 Duncan Holm <mail@frou.org>
@@ -255,21 +272,26 @@ Elias Naur <elias.naur@gmail.com>
 Emil Hessman <c.emil.hessman@gmail.com> <emil@hessman.se>
 Eoghan Sherry <ejsherry@gmail.com>
 Eric Clark <zerohp@gmail.com>
+Eric Garrido <ekg@google.com>
+Eric Lagergren <ericscottlagergren@gmail.com>
 Eric Milliken <emilliken@gmail.com>
 Eric Roshan-Eisner <eric.d.eisner@gmail.com>
 Erik Aigner <aigner.erik@gmail.com>
 Erik St. Martin <alakriti@gmail.com>
 Erik Westrup <erik.westrup@gmail.com>
 Esko Luontola <esko.luontola@gmail.com>
+Evan Brown <evanbrown@google.com>
 Evan Kroske <evankroske@google.com>
 Evan Martin <evan.martin@gmail.com>
 Evan Phoenix <evan@phx.io>
 Evan Shaw <chickencha@gmail.com>
 Ewan Chou <coocood@gmail.com>
+Fabian Wickborn <fabian@wickborn.net>
 Fabrizio Milo <mistobaan@gmail.com>
 Fan Hongjian <fan.howard@gmail.com>
 Fatih Arslan <fatih@arslan.io>
 Fazlul Shahriar <fshahriar@gmail.com>
+Federico Simoncelli <fsimonce@redhat.com>
 Felix Geisendörfer <haimuiba@gmail.com>
 Firmansyah Adiputra <frm.adiputra@gmail.com>
 Florian Uekermann <florian@uekermann-online.de> <f1@uekermann-online.de>
@@ -284,6 +306,7 @@ Frithjof Schulze <schulze@math.uni-hannover.de> <sfrithjof@gmail.com>
 Fumitoshi Ukai <ukai@google.com>
 Gaal Yahas <gaal@google.com>
 Gabriel Aszalos <gabriel.aszalos@gmail.com>
+Garrick Evans <garrick@google.com>
 Gary Burd <gary@beagledreams.com> <gary.burd@gmail.com>
 Gautham Thambidorai <gautham.dorai@gmail.com>
 Geert-Johan Riemer <gjr19912@gmail.com>
@@ -292,7 +315,9 @@ George Shammas <george@shamm.as> <georgyo@gmail.com>
 Gerasimos Dimitriadis <gedimitr@gmail.com>
 Gideon Jan-Wessel Redelinghuys <gjredelinghuys@gmail.com>
 Giles Lean <giles.lean@pobox.com>
+Giovanni Bajo <rasky@develer.com>
 Giulio Iotti <dullgiulio@gmail.com>
+Glenn Brown <glennb@google.com>
 Glenn Lewis <gmlewis@google.com>
 Gordon Klaus <gordon.klaus@gmail.com>
 Graham King <graham4king@gmail.com>
@@ -317,6 +342,7 @@ Henrik Edwards <henrik.edwards@gmail.com>
 Herbert Georg Fischer <herbert.fischer@gmail.com>
 Hong Ruiqi <hongruiqi@gmail.com>
 Hossein Sheikh Attar <hattar@google.com>
+Hu Keping <hukeping@huawei.com>
 Hyang-Ah Hana Kim <hakim@google.com> <hyangah@gmail.com>
 Ian Lance Taylor <iant@golang.org>
 Icarus Sparry <golang@icarus.freeuk.com>
@@ -339,6 +365,8 @@ James Gray <james@james4k.com>
 James Meneghello <rawrz0r@gmail.com>
 James P. Cooper <jamespcooper@gmail.com>
 James Robinson <jamesr@google.com> <jamesr.gatech@gmail.com>
+James Schofield <james@shoeboxapp.com>
+James Sweet <james.sweet88@googlemail.com>
 James Toy <nil@opensesame.st>
 James Tucker <raggi@google.com>
 James Whitehead <jnwhiteh@gmail.com>
@@ -354,6 +382,7 @@ Jan Ziak <0xe2.0x9a.0x9b@gmail.com>
 Jani Monoses <jani.monoses@ubuntu.com> <jani.monoses@gmail.com>
 Jaroslavas Počepko <jp@webmaster.ms>
 Jason Del Ponte <delpontej@gmail.com>
+Jason Hall <jasonhall@google.com>
 Jason Travis <infomaniac7@gmail.com>
 Jay Weisskopf <jay@jayschwa.net>
 Jean-Marc Eurin <jmeurin@google.com>
@@ -366,6 +395,8 @@ Jens Frederich <jfrederich@gmail.com>
 Jeremiah Harmsen <jeremiah@google.com>
 Jeremy Jackins <jeremyjackins@gmail.com>
 Jeremy Schlatter <jeremy.schlatter@gmail.com>
+Jihyun Yu <yjh0502@gmail.com>
+Jim Cote <jfcote87@gmail.com>
 Jim McGrath <jimmc2@gmail.com>
 Jimmy Zelinskie <jimmyzelinskie@gmail.com>
 Jingcheng Zhang <diogin@gmail.com>
@@ -408,11 +439,13 @@ Josh Goebel <dreamer3@gmail.com>
 Josh Hoak <jhoak@google.com>
 Josh Holland <jrh@joshh.co.uk>
 Joshua Chase <jcjoshuachase@gmail.com>
+Jostein Stuhaug <js@solidsystem.no>
 JP Sugarbroad <jpsugar@google.com>
 JT Olds <jtolds@xnet5.com>
 Jukka-Pekka Kekkonen <karatepekka@gmail.com>
 Julian Phillips <julian@quantumfyre.co.uk>
 Julien Schmidt <google@julienschmidt.com>
+Jungho Ahn <jhahn@google.com>
 Justin Nuß <nuss.justin@gmail.com>
 Kai Backman <kaib@golang.org>
 Kamil Kisiel <kamil@kamilkisiel.net> <kamil.kisiel@gmail.com>
@@ -420,18 +453,23 @@ Kang Hu <hukangustc@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
 Katrina Owen <katrina.owen@gmail.com>
 Kay Zhu <kayzhu@google.com>
+KB Sriram <kbsriram@google.com>
 Kei Son <hey.calmdown@gmail.com>
+Keith Ball <inflatablewoman@gmail.com>
 Keith Randall <khr@golang.org>
 Keith Rarick <kr@xph.us>
 Kelsey Hightower <kelsey.hightower@gmail.com>
 Kelvin Foo Chuan Lyi <vmirage@gmail.com>
 Ken Friedenbach <kenliz@cruzio.com>
 Ken Rockot <ken@oz.gs> <ken.rockot@gmail.com>
+Ken Sedgwick <ken@bonsai.com>
 Ken Thompson <ken@golang.org>
 Kevin Ballard <kevin@sb.org>
 Kevin Klues <klueska@gmail.com> <klueska@google.com>
 Kirklin McDonald <kirklin.mcdonald@gmail.com>
 Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
+Kristopher Watts <traetox@gmail.com>
+Kun Li <likunarmstrong@gmail.com>
 Kyle Consalus <consalus@gmail.com>
 Kyle Isom <kyle@gokyle.net>
 Kyle Lemons <kyle@kylelemons.net> <kevlar@google.com>
@@ -444,6 +482,7 @@ Lloyd Dewolf <foolswisdom@gmail.com>
 Lorenzo Stoakes <lstoakes@gmail.com>
 Louis Kruger <louisk@google.com>
 Luca Greco <luca.greco@alcacoop.it>
+Lucien Stuker <lucien.stuker@gmail.com>
 Lucio De Re <lucio.dere@gmail.com>
 Luit van Drongelen <luitvd@gmail.com>
 Luka Zakrajšek <tr00.g33k@gmail.com>
@@ -451,12 +490,14 @@ Luke Curley <qpingu@gmail.com>
 Luna Duclos <luna.duclos@palmstonegames.com>
 Luuk van Dijk <lvd@golang.org> <lvd@google.com>
 Lynn Boger <laboger@linux.vnet.ibm.com>
+Mal Curtis <mal@mal.co.nz>
 Manoj Dayaram <platform-dev@moovweb.com> <manoj.dayaram@moovweb.com>
 Manu Garg <manugarg@google.com>
 Manuel Mendez <mmendez534@gmail.com>
 Marc Weistroff <marc@weistroff.net>
 Marcel van Lohuizen <mpvl@golang.org>
 Marco Hennings <marco.hennings@freiheit.com>
+Marga Manterola <marga@google.com>
 Marius Nuennerich <mnu@google.com>
 Mark Bucciarelli <mkbucc@gmail.com>
 Mark Theunissen <mark.theunissen@gmail.com>
@@ -470,6 +511,7 @@ Markus Zimmermann <zimmski@gmail.com>
 Martin Möhrmann <martisch@uos.de>
 Martin Neubauer <m.ne@gmx.net>
 Martin Olsson <martin@minimum.se>
+Marvin Stenger <marvin.stenger94@gmail.com>
 Mateusz Czapliński <czapkofan@gmail.com>
 Mathias Beke <git@denbeke.be>
 Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
@@ -504,6 +546,7 @@ Michael Käufl <golang@c.michael-kaeufl.de>
 Michael Kelly <mjk@google.com>
 Michael Lewis <mikelikespie@gmail.com>
 Michael MacInnis <Michael.P.MacInnis@gmail.com>
+Michael Marineau <michael.marineau@coreos.com>
 Michael Matloob <matloob@google.com>
 Michael McGreevy <mcgreevy@golang.org>
 Michael Pearson <mipearson@gmail.com>
@@ -514,6 +557,7 @@ Michael Stapelberg <michael@stapelberg.de> <mstplbrg@googlemail.com>
 Michael T. Jones <mtj@google.com> <michael.jones@gmail.com>
 Michael Teichgräber <mteichgraeber@gmx.de> <mt4swm@googlemail.com>
 Michael Vetter <g.bluehut@gmail.com>
+Michal Bohuslávek <mbohuslavek@gmail.com>
 Michal Cierniak <cierniak@google.com>
 Michał Derkacz <ziutek@lnet.pl>
 Michalis Kargakis <michaliskargakis@gmail.com>
@@ -536,6 +580,8 @@ Nan Deng <monnand@gmail.com>
 Nathan John Youngman <nj@nathany.com>
 Nathan P Finch <nate.finch@gmail.com>
 Nathan Youngman <git@nathany.com>
+Nathan(yinian) Hu <nathanhu@google.com>
+Neelesh Chandola <neelesh.c98@gmail.com>
 Nevins Bartolomeo <nevins.bartolomeo@gmail.com>
 Nicholas Katsaros <nick@nickkatsaros.com>
 Nicholas Presta <nick@nickpresta.ca> <nick1presta@gmail.com>
@@ -548,7 +594,11 @@ Nicolas Owens <mischief@offblast.org>
 Nicolas S. Dade <nic.dade@gmail.com>
 Nigel Kerr <nigel.kerr@gmail.com>
 Nigel Tao <nigeltao@golang.org>
+Nikolay Turpitko <nikolay@turpitko.com>
 Noah Campbell <noahcampbell@gmail.com>
+Nodir Turakulov <nodir@google.com>
+Norberto Lopes <nlopes.ml@gmail.com>
+Oleku Konko <oleku.konko@gmail.com>
 Oling Cat <olingcat@gmail.com>
 Oliver Hookins <ohookins@gmail.com>
 Olivier Antoine <olivier.antoine@gmail.com>
@@ -572,12 +622,15 @@ Paul Hammond <paul@paulhammond.org>
 Paul Lalonde <paul.a.lalonde@gmail.com>
 Paul Marks <pmarks@google.com>
 Paul Nasrat <pnasrat@google.com>
+Paul Rosania <paul.rosania@gmail.com>
 Paul Sbarra <Sbarra.Paul@gmail.com>
 Paul Smith <paulsmith@pobox.com> <paulsmith@gmail.com>
 Paul van Brouwershaven <paul@vanbrouwershaven.com>
 Pavel Zinovkin <pavel.zinovkin@gmail.com>
+Pawel Knap <pawelknap88@gmail.com>
 Pawel Szczur <filemon@google.com>
 Percy Wegmann <ox.to.a.cart@gmail.com>
+Perry Abbott <perry.j.abbott@gmail.com>
 Petar Maymounkov <petarm@gmail.com>
 Peter Armitage <peter.armitage@gmail.com>
 Peter Collingbourne <pcc@google.com>
@@ -589,12 +642,14 @@ Peter Mundy <go.peter.90@gmail.com>
 Péter Surányi <speter.go1@gmail.com>
 Péter Szabó <pts@google.com>
 Péter Szilágyi <peterke@gmail.com>
+Peter Tseng <ptseng@squareup.com>
 Peter Waldschmidt <peter@waldschmidt.com>
 Peter Waller <peter.waller@gmail.com>
 Peter Weinberger <pjw@golang.org>
 Peter Williams <pwil3058@gmail.com>
 Phil Pennock <pdp@golang.org>
 Philip K. Warren <pkwarren@gmail.com>
+Pierre Roullon <pierre.roullon@gmail.com>
 Pieter Droogendijk <pieter@binky.org.uk>
 Pietro Gagliardi <pietro10@mac.com>
 Preetam Jinka <pj@preet.am>
@@ -605,6 +660,7 @@ Raif S. Naffah <go@naffah-raif.name>
 Rajat Goel <rajat.goel2010@gmail.com>
 Raph Levien <raph@google.com>
 Raul Silvera <rsilvera@google.com>
+Reinaldo de Souza Jr <juniorz@gmail.com>
 Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com>
 Richard Barnes <rlb@ipv.sx>
 Richard Crowley <r@rcrowley.org>
@@ -613,15 +669,18 @@ Richard Musiol <mail@richard-musiol.de> <neelance@gmail.com>
 Rick Arnold <rickarnoldjr@gmail.com>
 Rick Hudson <rlh@golang.org>
 Risto Jaakko Saarelma <rsaarelm@gmail.com>
+Rob Earhart <earhart@google.com>
 Rob Pike <r@golang.org>
 Robert Daniel Kortschak <dan.kortschak@adelaide.edu.au>
 Robert Dinu <r@varp.se>
 Robert Figueiredo <robfig@gmail.com>
 Robert Griesemer <gri@golang.org>
 Robert Hencke <robert.hencke@gmail.com>
+Robert Iannucci <iannucci@google.com>
 Robert Obryk <robryk@gmail.com>
 Robert Sesek <rsesek@google.com>
 Robert Snedegar <roberts@google.com>
+Robert Stepanek <robert.stepanek@gmail.com>
 Robin Eklind <r.eklind.87@gmail.com>
 Rodrigo Moraes de Oliveira <rodrigo.moraes@gmail.com>
 Rodrigo Rafael Monti Kochenburger <divoxx@gmail.com>
@@ -633,30 +692,39 @@ Ross Light <light@google.com> <rlight2@gmail.com>
 Rowan Worth <sqweek@gmail.com>
 Rui Ueyama <ruiu@google.com>
 Russ Cox <rsc@golang.org>
+Russell Haering <russellhaering@gmail.com>
 Ryan Barrett <ryanb@google.com>
 Ryan Brown <ribrdb@google.com>
 Ryan Hitchman <hitchmanr@gmail.com>
+Ryan Lower <rpjlower@gmail.com>
 Ryan Seys <ryan@ryanseys.com>
 Ryan Slade <ryanslade@gmail.com>
 S.Çağlar Onur <caglar@10ur.org>
+Salmān Aljammāz <s@0x65.net>
 Sam Thorogood <thorogood@google.com> <sam.thorogood@gmail.com>
 Sameer Ajmani <sameer@golang.org> <ajmani@gmail.com>
 Sanjay Menakuru <balasanjay@gmail.com>
+Sasha Lionheart <lionhearts@google.com>
 Scott Barron <scott.barron@github.com>
 Scott Ferguson <scottwferg@gmail.com>
 Scott Lawrence <bytbox@gmail.com>
 Scott Schwartz <scotts@golang.org>
+Scott Van Woudenberg <scottvw@google.com>
 Sean Burford <sburford@google.com>
+Sean Dolphin <Sean.Dolphin@kpcompass.com>
 Sebastien Binet <seb.binet@gmail.com>
 Sébastien Paolacci <sebastien.paolacci@gmail.com>
 Sergei Skorobogatov <skorobo@rambler.ru>
 Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
 Sergio Luis O. B. Correia <sergio@correia.cc>
 Shane Hansen <shanemhansen@gmail.com>
+Shaozhen Ding <dsz0111@gmail.com>
 Shawn Ledbetter <sledbetter@google.com>
 Shawn Smith <shawn.p.smith@gmail.com>
+Shawn Walker-Salas <shawn.walker@oracle.com>
 Shenghou Ma <minux@golang.org> <minux.ma@gmail.com>
 Shivakumar GN <shivakumar.gn@gmail.com>
+Shun Fan <sfan@google.com>
 Silvan Jegen <s.jegen@gmail.com>
 Simon Whitehead <chemnova@gmail.com>
 Sokolov Yura <funny.falcon@gmail.com>
|
Sokolov Yura <funny.falcon@gmail.com>
|
||||||
|
|
@ -678,14 +746,20 @@ Sven Almgren <sven@tras.se>
|
||||||
Szabolcs Nagy <nsz@port70.net>
|
Szabolcs Nagy <nsz@port70.net>
|
||||||
Tad Glines <tad.glines@gmail.com>
|
Tad Glines <tad.glines@gmail.com>
|
||||||
Taj Khattra <taj.khattra@gmail.com>
|
Taj Khattra <taj.khattra@gmail.com>
|
||||||
|
Takashi Matsuo <tmatsuo@google.com>
|
||||||
|
Takeshi YAMANASHI <9.nashi@gmail.com>
|
||||||
Tamir Duberstein <tamird@gmail.com>
|
Tamir Duberstein <tamird@gmail.com>
|
||||||
Tarmigan Casebolt <tarmigan@gmail.com>
|
Tarmigan Casebolt <tarmigan@gmail.com>
|
||||||
Taru Karttunen <taruti@taruti.net>
|
Taru Karttunen <taruti@taruti.net>
|
||||||
|
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
|
||||||
Tetsuo Kiso <tetsuokiso9@gmail.com>
|
Tetsuo Kiso <tetsuokiso9@gmail.com>
|
||||||
Thiago Fransosi Farina <thiago.farina@gmail.com> <tfarina@chromium.org>
|
Thiago Fransosi Farina <thiago.farina@gmail.com> <tfarina@chromium.org>
|
||||||
Thomas Alan Copeland <talan.copeland@gmail.com>
|
Thomas Alan Copeland <talan.copeland@gmail.com>
|
||||||
|
Thomas Desrosiers <thomasdesr@gmail.com>
|
||||||
Thomas Habets <habets@google.com>
|
Thomas Habets <habets@google.com>
|
||||||
Thomas Kappler <tkappler@gmail.com>
|
Thomas Kappler <tkappler@gmail.com>
|
||||||
|
Tim Cooijmans <timcooijmans@gmail.com>
|
||||||
|
Tim Hockin <thockin@google.com>
|
||||||
Timo Savola <timo.savola@gmail.com>
|
Timo Savola <timo.savola@gmail.com>
|
||||||
Timo Truyts <alkaloid.btx@gmail.com>
|
Timo Truyts <alkaloid.btx@gmail.com>
|
||||||
Tobias Columbus <tobias.columbus@gmail.com> <tobias.columbus@googlemail.com>
|
Tobias Columbus <tobias.columbus@gmail.com> <tobias.columbus@googlemail.com>
|
||||||
|
|
@ -696,14 +770,18 @@ Tom Linford <tomlinford@gmail.com>
|
||||||
Tom Szymanski <tgs@google.com>
|
Tom Szymanski <tgs@google.com>
|
||||||
Tommy Schaefer <tommy.schaefer@teecom.com>
|
Tommy Schaefer <tommy.schaefer@teecom.com>
|
||||||
Tor Andersson <tor.andersson@gmail.com>
|
Tor Andersson <tor.andersson@gmail.com>
|
||||||
|
Totoro W <tw19881113@gmail.com>
|
||||||
Travis Cline <travis.cline@gmail.com>
|
Travis Cline <travis.cline@gmail.com>
|
||||||
Trevor Strohman <trevor.strohman@gmail.com>
|
Trevor Strohman <trevor.strohman@gmail.com>
|
||||||
|
Trey Tacon <ttacon@gmail.com>
|
||||||
Tudor Golubenco <tudor.g@gmail.com>
|
Tudor Golubenco <tudor.g@gmail.com>
|
||||||
Tw <tw19881113@gmail.com>
|
|
||||||
Tyler Bunnell <tylerbunnell@gmail.com>
|
Tyler Bunnell <tylerbunnell@gmail.com>
|
||||||
|
Tyler Treat <ttreat31@gmail.com>
|
||||||
Ugorji Nwoke <ugorji@gmail.com>
|
Ugorji Nwoke <ugorji@gmail.com>
|
||||||
Ulf Holm Nielsen <doktor@dyregod.dk>
|
Ulf Holm Nielsen <doktor@dyregod.dk>
|
||||||
|
Ulrich Kunitz <uli.kunitz@gmail.com>
|
||||||
Uriel Mangado <uriel@berlinblue.org>
|
Uriel Mangado <uriel@berlinblue.org>
|
||||||
|
Uttam C Pawar <uttam.c.pawar@intel.com>
|
||||||
Vadim Vygonets <unixdj@gmail.com>
|
Vadim Vygonets <unixdj@gmail.com>
|
||||||
Vega Garcia Luis Alfonso <vegacom@gmail.com>
|
Vega Garcia Luis Alfonso <vegacom@gmail.com>
|
||||||
Vincent Ambo <tazjin@googlemail.com>
|
Vincent Ambo <tazjin@googlemail.com>
|
||||||
|
|
@ -723,7 +801,9 @@ William Orr <will@worrbase.com> <ay1244@gmail.com>
|
||||||
Xia Bin <snyh@snyh.org>
|
Xia Bin <snyh@snyh.org>
|
||||||
Xing Xing <mikespook@gmail.com>
|
Xing Xing <mikespook@gmail.com>
|
||||||
Yan Zou <yzou@google.com>
|
Yan Zou <yzou@google.com>
|
||||||
|
Yann Kerhervé <yann.kerherve@gmail.com>
|
||||||
Yasuhiro Matsumoto <mattn.jp@gmail.com>
|
Yasuhiro Matsumoto <mattn.jp@gmail.com>
|
||||||
|
Yesudeep Mangalapilly <yesudeep@google.com>
|
||||||
Yissakhar Z. Beck <yissakhar.beck@gmail.com>
|
Yissakhar Z. Beck <yissakhar.beck@gmail.com>
|
||||||
Yo-An Lin <yoanlin93@gmail.com>
|
Yo-An Lin <yoanlin93@gmail.com>
|
||||||
Yongjian Xu <i3dmaster@gmail.com>
|
Yongjian Xu <i3dmaster@gmail.com>
|
||||||
|
|
@ -731,7 +811,7 @@ Yoshiyuki Kanno <nekotaroh@gmail.com> <yoshiyuki.kanno@stoic.co.jp>
|
||||||
Yusuke Kagiwada <block.rxckin.beats@gmail.com>
|
Yusuke Kagiwada <block.rxckin.beats@gmail.com>
|
||||||
Yuusei Kuwana <kuwana@kumama.org>
|
Yuusei Kuwana <kuwana@kumama.org>
|
||||||
Yuval Pavel Zholkover <paulzhol@gmail.com>
|
Yuval Pavel Zholkover <paulzhol@gmail.com>
|
||||||
Yves Junqueira <yves.junqueira@gmail.com>
|
Yves Junqueira <yvesj@google.com> <yves.junqueira@gmail.com>
|
||||||
Ziad Hatahet <hatahet@gmail.com>
|
Ziad Hatahet <hatahet@gmail.com>
|
||||||
Zorion Arrizabalaga <zorionk@gmail.com>
|
Zorion Arrizabalaga <zorionk@gmail.com>
|
||||||
申习之 <bronze1man@gmail.com>
|
申习之 <bronze1man@gmail.com>
|
||||||
|
|
|
||||||
|
|
@@ -12,14 +12,12 @@ in your web browser.
 Our canonical Git repository is located at https://go.googlesource.com/go.
 There is a mirror of the repository at https://github.com/golang/go.
 
-Please report issues here: https://golang.org/issue/new
-
 Go is the work of hundreds of contributors. We appreciate your help!
 
 To contribute, please read the contribution guidelines:
 	https://golang.org/doc/contribute.html
 
-##### Please note that we do not use pull requests.
+##### Note that we do not accept pull requests and that we use the issue tracker for bug reports and proposals only. Please ask questions on https://forum.golangbridge.org or https://groups.google.com/forum/#!forum/golang-nuts.
 
 Unless otherwise noted, the Go source files are distributed
 under the BSD-style license found in the LICENSE file.
53	api/next.txt
@@ -0,0 +1,53 @@
+pkg bufio, method (*Scanner) Buffer([]uint8, int)
+pkg bufio, var ErrFinalToken error
+pkg debug/dwarf, const ClassUnknown = 0
+pkg debug/dwarf, const ClassUnknown Class
+pkg html/template, func IsTrue(interface{}) (bool, bool)
+pkg image, func NewNYCbCrA(Rectangle, YCbCrSubsampleRatio) *NYCbCrA
+pkg image, method (*NYCbCrA) AOffset(int, int) int
+pkg image, method (*NYCbCrA) At(int, int) color.Color
+pkg image, method (*NYCbCrA) Bounds() Rectangle
+pkg image, method (*NYCbCrA) COffset(int, int) int
+pkg image, method (*NYCbCrA) ColorModel() color.Model
+pkg image, method (*NYCbCrA) NYCbCrAAt(int, int) color.NYCbCrA
+pkg image, method (*NYCbCrA) Opaque() bool
+pkg image, method (*NYCbCrA) SubImage(Rectangle) Image
+pkg image, method (*NYCbCrA) YCbCrAt(int, int) color.YCbCr
+pkg image, method (*NYCbCrA) YOffset(int, int) int
+pkg image, type NYCbCrA struct
+pkg image, type NYCbCrA struct, A []uint8
+pkg image, type NYCbCrA struct, AStride int
+pkg image, type NYCbCrA struct, embedded YCbCr
+pkg image/color, method (NYCbCrA) RGBA() (uint32, uint32, uint32, uint32)
+pkg image/color, type NYCbCrA struct
+pkg image/color, type NYCbCrA struct, A uint8
+pkg image/color, type NYCbCrA struct, embedded YCbCr
+pkg image/color, var NYCbCrAModel Model
+pkg math/big, method (*Float) MarshalText() ([]uint8, error)
+pkg math/big, method (*Float) UnmarshalText([]uint8) error
+pkg math/big, method (*Int) Append([]uint8, int) []uint8
+pkg math/big, method (*Int) Text(int) string
+pkg math/rand, func Read([]uint8) (int, error)
+pkg math/rand, method (*Rand) Read([]uint8) (int, error)
+pkg net, type DNSError struct, IsTemporary bool
+pkg net/http, const StatusNetworkAuthenticationRequired = 511
+pkg net/http, const StatusNetworkAuthenticationRequired ideal-int
+pkg net/http, const StatusPreconditionRequired = 428
+pkg net/http, const StatusPreconditionRequired ideal-int
+pkg net/http, const StatusRequestHeaderFieldsTooLarge = 431
+pkg net/http, const StatusRequestHeaderFieldsTooLarge ideal-int
+pkg net/http, const StatusTooManyRequests = 429
+pkg net/http, const StatusTooManyRequests ideal-int
+pkg net/http/httptest, method (*ResponseRecorder) WriteString(string) (int, error)
+pkg net/url, method (*Error) Temporary() bool
+pkg net/url, method (*Error) Timeout() bool
+pkg strconv, func AppendQuoteRuneToGraphic([]uint8, int32) []uint8
+pkg strconv, func AppendQuoteToGraphic([]uint8, string) []uint8
+pkg strconv, func IsGraphic(int32) bool
+pkg strconv, func QuoteRuneToGraphic(int32) string
+pkg strconv, func QuoteToGraphic(string) string
+pkg text/template, func IsTrue(interface{}) (bool, bool)
+pkg text/template, method (ExecError) Error() string
+pkg text/template, type ExecError struct
+pkg text/template, type ExecError struct, Err error
+pkg text/template, type ExecError struct, Name string
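For orientation only (this sketch is not part of the commit): the image entries above describe the new NYCbCrA type, a Y'CbCr image with an additional non-alpha-premultiplied alpha plane. A minimal usage sketch, assuming Go 1.6 or later:

package main

import (
	"fmt"
	"image"
)

func main() {
	r := image.Rect(0, 0, 4, 4)
	// NewNYCbCrA allocates the Y, Cb, Cr, and A planes for the rectangle.
	m := image.NewNYCbCrA(r, image.YCbCrSubsampleRatio420)
	// The alpha plane starts out zeroed, so the image is not opaque.
	fmt.Println(m.Bounds(), m.Opaque(), m.NYCbCrAAt(1, 1).A)
}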
@@ -577,7 +577,7 @@ might turn up:
 <<<<<<< HEAD
 	if arg < 1e9 {
 =======
-	if arg &lh; 1e10 {
+	if arg < 1e10 {
 >>>>>>> mcgillicutty
 	largeReduce(arg)
 </pre>
@@ -19,6 +19,16 @@ Go 1.5 is a major release of Go.
 Read the <a href="/doc/go1.5">Go 1.5 Release Notes</a> for more information.
 </p>
 
+<h3 id="go1.5.minor">Minor revisions</h3>
+
+<p>
+go1.5.1 (released 2015/09/08) includes bug fixes to the compiler, assembler, and
+the <code>fmt</code>, <code>net/textproto</code>, <code>net/http</code>, and
+<code>runtime</code> packages.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.1">Go
+1.5.1 milestone</a> on our issue tracker for details.
+</p>
+
 <h2 id="go1.4">go1.4 (released 2014/12/10)</h2>
 
 <p>
@@ -38,6 +48,11 @@ go1.4.2 (released 2015/02/17) includes bug fixes to the <code>go</code> command,
 See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.2">Go 1.4.2 milestone on our issue tracker</a> for details.
 </p>
 
+<p>
+go1.4.3 (released 2015/09/22) includes security fixes to the <code>net/http</code> package and bug fixes to the <code>runtime</code> package.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.3">Go 1.4.3 milestone on our issue tracker</a> for details.
+</p>
+
 <h2 id="go1.3">go1.3 (released 2014/06/18)</h2>
 
 <p>
@@ -1,6 +1,7 @@
 Tools:
 
 cmd/go: vendoring enabled by default (https://golang.org/cl/13967/)
+cmd/go: flags for tests must precede package name if present; also makes it easier to pass flags to test binaries (https://golang.org/cl/14826)
 
 Ports:
 
@@ -8,5 +9,15 @@ NaCl is no longer restricted to pepper_41 (https://golang.org/cl/13958/)
 
 API additions and behavior changes:
 
-strconv: QuoteTOGraphic (https://golang.org/cl/14184/)
+bufio: add Scanner.Buffer (https://golang.org/cl/14599/)
+bufio: add ErrFinalToken as a sentinel value for Scan's split functions (https://golang.org/cl/14924)
+fmt: allow any integer type as an argument to the * operator (https://golang.org/cl/14491/)
+math/rand: add Read (https://golang.org/cl/14522)
+net/url: make *url.Error implement net.Error (https://golang.org/cl/15672)
+strconv: QuoteToGraphic (https://golang.org/cl/14184/)
 text/template: ExecError (https://golang.org/cl/13957/)
+text/template: trimming spaces (https://golang.org/cl/14391/)
+text/template: Funcs check names (https://golang.org/cl/14562/)
+text/template: IsTrue (https://golang.org/cl/14562/)
+text/template: blocks and permit redefinition (https://golang.org/cl/14005)
+time: allow one and two-digit days of the month during Parse (https://golang.org/cl/14123/)
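As a rough illustration (not part of the commit), the sketch below exercises three of the additions listed above — bufio.Scanner.Buffer, strconv.QuoteToGraphic, and math/rand.Read — and assumes Go 1.6 or later:

package main

import (
	"bufio"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

func main() {
	// Scanner.Buffer lets the caller supply the initial buffer and raise
	// the maximum token size above the default.
	sc := bufio.NewScanner(strings.NewReader("one two three"))
	sc.Buffer(make([]byte, 0, 64), 1<<20)
	sc.Split(bufio.ScanWords)
	for sc.Scan() {
		// QuoteToGraphic escapes non-graphic characters only.
		fmt.Println(strconv.QuoteToGraphic(sc.Text()))
	}

	// math/rand.Read fills the slice with pseudo-random bytes.
	buf := make([]byte, 8)
	rand.Read(buf)
	fmt.Printf("%x\n", buf)
}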
@@ -1,6 +1,6 @@
 <!--{
 	"Title": "The Go Programming Language Specification",
-	"Subtitle": "Version of August 5, 2015",
+	"Subtitle": "Version of September 24, 2015",
 	"Path": "/ref/spec"
 }-->
 
@@ -2210,9 +2210,8 @@ math.Sin // denotes the Sin function in package math
 <p>
 Composite literals construct values for structs, arrays, slices, and maps
 and create a new value each time they are evaluated.
-They consist of the type of the value
-followed by a brace-bound list of composite elements. An element may be
-a single expression or a key-value pair.
+They consist of the type of the literal followed by a brace-bound list of elements.
+Each element may optionally be preceded by a corresponding key.
 </p>
 
 <pre class="ebnf">
@@ -2220,19 +2219,19 @@ CompositeLit  = LiteralType LiteralValue .
 LiteralType   = StructType | ArrayType | "[" "..." "]" ElementType |
                 SliceType | MapType | TypeName .
 LiteralValue  = "{" [ ElementList [ "," ] ] "}" .
-ElementList   = Element { "," Element } .
-Element       = [ Key ":" ] Value .
+ElementList   = KeyedElement { "," KeyedElement } .
+KeyedElement  = [ Key ":" ] Element .
 Key           = FieldName | Expression | LiteralValue .
 FieldName     = identifier .
-Value         = Expression | LiteralValue .
+Element       = Expression | LiteralValue .
 </pre>
 
 <p>
-The LiteralType must be a struct, array, slice, or map type
+The LiteralType's underlying type must be a struct, array, slice, or map type
 (the grammar enforces this constraint except when the type is given
 as a TypeName).
-The types of the expressions must be <a href="#Assignability">assignable</a>
-to the respective field, element, and key types of the LiteralType;
+The types of the elements and keys must be <a href="#Assignability">assignable</a>
+to the respective field, element, and key types of the literal type;
 there is no additional conversion.
 The key is interpreted as a field name for struct literals,
 an index for array and slice literals, and a key for map literals.
@@ -2245,7 +2244,7 @@ constant key value.
 For struct literals the following rules apply:
 </p>
 <ul>
-	<li>A key must be a field name declared in the LiteralType.
+	<li>A key must be a field name declared in the struct type.
 	</li>
 	<li>An element list that does not contain any keys must
 	list an element for each struct field in the
@@ -2307,7 +2306,7 @@ var pointer *Point3D = &Point3D{y: 1000}
 </pre>
 
 <p>
-The length of an array literal is the length specified in the LiteralType.
+The length of an array literal is the length specified in the literal type.
 If fewer elements than the length are provided in the literal, the missing
 elements are set to the zero value for the array element type.
 It is an error to provide elements with index values outside the index range
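To make the renamed productions concrete, here is a small example (not taken from the spec diff) in which each element of a composite literal may be preceded by a key: a field name in a struct literal, an index in an array literal, and a key in a map literal:

package main

import "fmt"

type Point3D struct{ x, y, z float64 }

func main() {
	// Struct literal: keys are field names; omitted fields get zero values.
	p := Point3D{y: 1000}
	// Array literal: keys are indices; unkeyed elements follow the previous index.
	primes := [...]int{2, 3, 5, 7, 9: 31}
	// Map literal: every element has a key.
	weights := map[string]float64{"a": 1.5, "b": 2.5}
	fmt.Println(p, primes, weights)
}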
@@ -167,7 +167,7 @@ Then clone the repository and check out the latest release tag:</p>
 <pre>
 $ git clone https://go.googlesource.com/go
 $ cd go
-$ git checkout go1.5
+$ git checkout go1.5.1
 </pre>
 
 <h2 id="head">(Optional) Switch to the master branch</h2>
@@ -346,7 +346,7 @@ New releases are announced on the
 <a href="//groups.google.com/group/golang-announce">golang-announce</a>
 mailing list.
 Each announcement mentions the latest release tag, for instance,
-<code>go1.5</code>.
+<code>go1.5.1</code>.
 </p>
 
 <p>
15	misc/cgo/errors/issue11097a.go (new file)
@@ -0,0 +1,15 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+//enum test { foo, bar };
+*/
+import "C"
+
+func main() {
+	var a = C.enum_test(1) // ERROR HERE
+	_ = a
+}

15	misc/cgo/errors/issue11097b.go (new file)
@@ -0,0 +1,15 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+//enum test { foo, bar };
+*/
+import "C"
+
+func main() {
+	p := new(C.enum_test) // ERROR HERE
+	_ = p
+}

@@ -31,6 +31,8 @@ check err2.go
 check err3.go
 check issue7757.go
 check issue8442.go
+check issue11097a.go
+check issue11097b.go
 
 rm -rf errs _obj
 exit 0
@@ -22,7 +22,7 @@ func testBuildID(t *testing.T) {
 		if os.IsNotExist(err) {
 			t.Skip("no /proc/self/exe")
 		}
-		t.Fatalf("opening /proc/self/exe: ", err)
+		t.Fatal("opening /proc/self/exe: ", err)
 	}
 	defer f.Close()
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -177,7 +177,7 @@ func testCallbackCallers(t *testing.T) {
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
f := runtime.FuncForPC(pc[i])
|
f := runtime.FuncForPC(pc[i])
|
||||||
if f == nil {
|
if f == nil {
|
||||||
t.Fatalf("expected non-nil Func for pc %p", pc[i])
|
t.Fatalf("expected non-nil Func for pc %d", pc[i])
|
||||||
}
|
}
|
||||||
fname := f.Name()
|
fname := f.Name()
|
||||||
// Remove the prepended pathname from automatically
|
// Remove the prepended pathname from automatically
|
||||||
|
|
|
||||||
|
|
@ -65,5 +65,6 @@ func Test9026(t *testing.T) { test9026(t) }
|
||||||
func Test9557(t *testing.T) { test9557(t) }
|
func Test9557(t *testing.T) { test9557(t) }
|
||||||
func Test10303(t *testing.T) { test10303(t, 10) }
|
func Test10303(t *testing.T) { test10303(t, 10) }
|
||||||
func Test11925(t *testing.T) { test11925(t) }
|
func Test11925(t *testing.T) { test11925(t) }
|
||||||
|
func Test12030(t *testing.T) { test12030(t) }
|
||||||
|
|
||||||
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
|
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
|
||||||
|
|
|
||||||
|
|
@ -31,7 +31,7 @@ func testSetEnv(t *testing.T) {
|
||||||
keyc := C.CString(key)
|
keyc := C.CString(key)
|
||||||
defer C.free(unsafe.Pointer(keyc))
|
defer C.free(unsafe.Pointer(keyc))
|
||||||
v := C.getenv(keyc)
|
v := C.getenv(keyc)
|
||||||
if v == (*C.char)(unsafe.Pointer(uintptr(0))) {
|
if uintptr(unsafe.Pointer(v)) == 0 {
|
||||||
t.Fatal("getenv returned NULL")
|
t.Fatal("getenv returned NULL")
|
||||||
}
|
}
|
||||||
vs := C.GoString(v)
|
vs := C.GoString(v)
|
||||||
|
|
|
||||||
35
misc/cgo/test/issue12030.go
Normal file
35
misc/cgo/test/issue12030.go
Normal file
|
|
@ -0,0 +1,35 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Issue 12030. sprintf is defined in both ntdll and msvcrt,
|
||||||
|
// Normally we want the one in the msvcrt.
|
||||||
|
|
||||||
|
package cgotest
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
void issue12030conv(char *buf, double x) {
|
||||||
|
sprintf(buf, "d=%g", x);
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func test12030(t *testing.T) {
|
||||||
|
buf := (*C.char)(C.malloc(256))
|
||||||
|
defer C.free(unsafe.Pointer(buf))
|
||||||
|
for _, f := range []float64{1.0, 2.0, 3.14} {
|
||||||
|
C.issue12030conv(buf, C.double(f))
|
||||||
|
got := C.GoString(buf)
|
||||||
|
if want := fmt.Sprintf("d=%g", f); got != want {
|
||||||
|
t.Fatalf("C.sprintf failed for %g: %q != %q", f, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// Test that pthread_cancel works as expected
|
// Test that pthread_cancel works as expected
|
||||||
// (NPTL uses SIGRTMIN to implement thread cancellation)
|
// (NPTL uses SIGRTMIN to implement thread cancelation)
|
||||||
// See https://golang.org/issue/6997
|
// See https://golang.org/issue/6997
|
||||||
package cgotest
|
package cgotest
|
||||||
|
|
||||||
|
|
@ -32,7 +32,7 @@ func test6997(t *testing.T) {
|
||||||
select {
|
select {
|
||||||
case r = <-c:
|
case r = <-c:
|
||||||
if r == 0 {
|
if r == 0 {
|
||||||
t.Error("pthread finished but wasn't cancelled??")
|
t.Error("pthread finished but wasn't canceled??")
|
||||||
}
|
}
|
||||||
case <-time.After(30 * time.Second):
|
case <-time.After(30 * time.Second):
|
||||||
t.Error("hung in pthread_cancel/pthread_join")
|
t.Error("hung in pthread_cancel/pthread_join")
|
||||||
|
|
|
||||||
|
|
@ -19,7 +19,7 @@ var bad7665 unsafe.Pointer = C.f7665
|
||||||
var good7665 uintptr = uintptr(C.f7665)
|
var good7665 uintptr = uintptr(C.f7665)
|
||||||
|
|
||||||
func test7665(t *testing.T) {
|
func test7665(t *testing.T) {
|
||||||
if bad7665 == nil || bad7665 != unsafe.Pointer(good7665) {
|
if bad7665 == nil || uintptr(bad7665) != good7665 {
|
||||||
t.Errorf("ptrs = %p, %#x, want same non-nil pointer", bad7665, good7665)
|
t.Errorf("ptrs = %p, %#x, want same non-nil pointer", bad7665, good7665)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,7 @@
|
||||||
|
|
||||||
#include "textflag.h"
|
#include "textflag.h"
|
||||||
|
|
||||||
TEXT ·RewindAndSetgid(SB),NOSPLIT,$-8-0
|
TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0
|
||||||
// Rewind stack pointer so anything that happens on the stack
|
// Rewind stack pointer so anything that happens on the stack
|
||||||
// will clobber the test pattern created by the caller
|
// will clobber the test pattern created by the caller
|
||||||
ADD $(1024 * 8), R1
|
ADD $(1024 * 8), R1
|
||||||
|
|
|
||||||
|
|
@ -14,11 +14,14 @@ package cgotest
|
||||||
import "C"
|
import "C"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testSetgid(t *testing.T) {
|
func runTestSetgid() bool {
|
||||||
c := make(chan bool)
|
c := make(chan bool)
|
||||||
go func() {
|
go func() {
|
||||||
C.setgid(0)
|
C.setgid(0)
|
||||||
|
|
@ -26,7 +29,21 @@ func testSetgid(t *testing.T) {
|
||||||
}()
|
}()
|
||||||
select {
|
select {
|
||||||
case <-c:
|
case <-c:
|
||||||
|
return true
|
||||||
case <-time.After(5 * time.Second):
|
case <-time.After(5 * time.Second):
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSetgid(t *testing.T) {
|
||||||
|
if !runTestSetgid() {
|
||||||
t.Error("setgid hung")
|
t.Error("setgid hung")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Now try it again after using signal.Notify.
|
||||||
|
signal.Notify(make(chan os.Signal, 1), syscall.SIGINT)
|
||||||
|
if !runTestSetgid() {
|
||||||
|
t.Error("setgid hung after signal.Notify")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ int main(void) {
|
||||||
n = read(fd, buf, sizeof buf);
|
n = read(fd, buf, sizeof buf);
|
||||||
if (n >= 0)
|
if (n >= 0)
|
||||||
break;
|
break;
|
||||||
if (errno != EBADF) {
|
if (errno != EBADF && errno != EINVAL) {
|
||||||
fprintf(stderr, "BUG: read: %s\n", strerror(errno));
|
fprintf(stderr, "BUG: read: %s\n", strerror(errno));
|
||||||
return 2;
|
return 2;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ goarch=$(go env GOARCH)
|
||||||
# Directory where cgo headers and outputs will be installed.
|
# Directory where cgo headers and outputs will be installed.
|
||||||
# The installation directory format varies depending on the platform.
|
# The installation directory format varies depending on the platform.
|
||||||
installdir=pkg/${goos}_${goarch}_testcshared_shared
|
installdir=pkg/${goos}_${goarch}_testcshared_shared
|
||||||
if [ "${goos}/${goarch}" == "android/arm" ] || [ "${goos}/${goarch}" == "darwin/amd64" ]; then
|
if [ "${goos}/${goarch}" == "darwin/amd64" ]; then
|
||||||
installdir=pkg/${goos}_${goarch}_testcshared
|
installdir=pkg/${goos}_${goarch}_testcshared
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
@ -81,9 +81,21 @@ GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
|
||||||
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
|
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
|
||||||
binpush libgo.$libext
|
binpush libgo.$libext
|
||||||
|
|
||||||
|
if [ "$goos" == "linux" ]; then
|
||||||
|
if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then
|
||||||
|
echo "libgo.$libext has TEXTREL set"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
GOGCCFLAGS=$(go env GOGCCFLAGS)
|
||||||
|
if [ "$goos" == "android" ]; then
|
||||||
|
GOGCCFLAGS="${GOGCCFLAGS} -pie"
|
||||||
|
fi
|
||||||
|
|
||||||
# test0: exported symbols in shared lib are accessible.
|
# test0: exported symbols in shared lib are accessible.
|
||||||
# TODO(iant): using _shared here shouldn't really be necessary.
|
# TODO(iant): using _shared here shouldn't really be necessary.
|
||||||
$(go env CC) $(go env GOGCCFLAGS) -I ${installdir} -o testp main0.c libgo.$libext
|
$(go env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c libgo.$libext
|
||||||
binpush testp
|
binpush testp
|
||||||
|
|
||||||
output=$(run LD_LIBRARY_PATH=. ./testp)
|
output=$(run LD_LIBRARY_PATH=. ./testp)
|
||||||
|
|
@ -93,7 +105,7 @@ if [ "$output" != "PASS" ]; then
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# test1: shared library can be dynamically loaded and exported symbols are accessible.
|
# test1: shared library can be dynamically loaded and exported symbols are accessible.
|
||||||
$(go env CC) $(go env GOGCCFLAGS) -o testp main1.c -ldl
|
$(go env CC) ${GOGCCFLAGS} -o testp main1.c -ldl
|
||||||
binpush testp
|
binpush testp
|
||||||
output=$(run ./testp ./libgo.$libext)
|
output=$(run ./testp ./libgo.$libext)
|
||||||
if [ "$output" != "PASS" ]; then
|
if [ "$output" != "PASS" ]; then
|
||||||
|
|
@ -108,7 +120,7 @@ linkflags="-Wl,--no-as-needed"
|
||||||
if [ "$goos" == "darwin" ]; then
|
if [ "$goos" == "darwin" ]; then
|
||||||
linkflags=""
|
linkflags=""
|
||||||
fi
|
fi
|
||||||
$(go env CC) $(go env GOGCCFLAGS) -o testp2 main2.c $linkflags libgo2.$libext
|
$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
|
||||||
binpush testp2
|
binpush testp2
|
||||||
output=$(run LD_LIBRARY_PATH=. ./testp2)
|
output=$(run LD_LIBRARY_PATH=. ./testp2)
|
||||||
if [ "$output" != "PASS" ]; then
|
if [ "$output" != "PASS" ]; then
|
||||||
|
|
@ -118,7 +130,7 @@ fi
|
||||||
|
|
||||||
# test3: tests main.main is exported on android.
|
# test3: tests main.main is exported on android.
|
||||||
if [ "$goos" == "android" ]; then
|
if [ "$goos" == "android" ]; then
|
||||||
$(go env CC) $(go env GOGCCFLAGS) -o testp3 main3.c -ldl
|
$(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
|
||||||
binpush testp3
|
binpush testp3
|
||||||
output=$(run ./testp ./libgo.so)
|
output=$(run ./testp ./libgo.so)
|
||||||
if [ "$output" != "PASS" ]; then
|
if [ "$output" != "PASS" ]; then
|
||||||
|
|
|
||||||
34
misc/cgo/testsanitizers/msan.go
Normal file
34
misc/cgo/testsanitizers/msan.go
Normal file
|
|
@ -0,0 +1,34 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
/*
|
||||||
|
#cgo CFLAGS: -fsanitize=memory
|
||||||
|
#cgo LDFLAGS: -fsanitize=memory
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
void f(int32_t *p, int n) {
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < n; i++) {
|
||||||
|
p[i] = (int32_t)i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
a := make([]int32, 10)
|
||||||
|
C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
|
||||||
|
for i, v := range a {
|
||||||
|
if i != int(v) {
|
||||||
|
fmt.Println("bad %d: %v\n", i, a)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
34
misc/cgo/testsanitizers/test.bash
Executable file
34
misc/cgo/testsanitizers/test.bash
Executable file
|
|
@ -0,0 +1,34 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style
|
||||||
|
# license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
# This directory is intended to test the use of Go with sanitizers
|
||||||
|
# like msan, asan, etc. See https://github.com/google/sanitizers .
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# The sanitizers were originally developed with clang, so prefer it.
|
||||||
|
CC=cc
|
||||||
|
if test "$(type -p clang)" != ""; then
|
||||||
|
CC=clang
|
||||||
|
fi
|
||||||
|
export CC
|
||||||
|
|
||||||
|
if $CC -fsanitize=memory 2>&1 | grep "unrecognized" >& /dev/null; then
|
||||||
|
echo "skipping msan test: -fsanitize=memory not supported"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# The memory sanitizer in versions of clang before 3.6 don't work with Go.
|
||||||
|
if $CC --version | grep clang >& /dev/null; then
|
||||||
|
ver=$($CC --version | sed -e 's/.* version \([0-9.-]*\).*/\1/')
|
||||||
|
major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/')
|
||||||
|
minor=$(echo $ver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/')
|
||||||
|
if test $major -lt 3 || test $major -eq 3 -a $minor -lt 6; then
|
||||||
|
echo "skipping msan test; clang version $major.$minor older than 3.6"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
go run msan.go
|
||||||
|
|
@ -163,6 +163,45 @@ func TestSOBuilt(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hasDynTag(f *elf.File, tag elf.DynTag) bool {
|
||||||
|
ds := f.SectionByType(elf.SHT_DYNAMIC)
|
||||||
|
if ds == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
d, err := ds.Data()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for len(d) > 0 {
|
||||||
|
var t elf.DynTag
|
||||||
|
switch f.Class {
|
||||||
|
case elf.ELFCLASS32:
|
||||||
|
t = elf.DynTag(f.ByteOrder.Uint32(d[0:4]))
|
||||||
|
d = d[8:]
|
||||||
|
case elf.ELFCLASS64:
|
||||||
|
t = elf.DynTag(f.ByteOrder.Uint64(d[0:8]))
|
||||||
|
d = d[16:]
|
||||||
|
}
|
||||||
|
if t == tag {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// The shared library does not have relocations against the text segment.
|
||||||
|
func TestNoTextrel(t *testing.T) {
|
||||||
|
sopath := filepath.Join(gorootInstallDir, soname)
|
||||||
|
f, err := elf.Open(sopath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("elf.Open failed: ", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if hasDynTag(f, elf.DT_TEXTREL) {
|
||||||
|
t.Errorf("%s has DT_TEXTREL set", soname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// The install command should have created a "shlibname" file for the
|
// The install command should have created a "shlibname" file for the
|
||||||
// listed packages (and runtime/cgo) indicating the name of the shared
|
// listed packages (and runtime/cgo) indicating the name of the shared
|
||||||
// library containing it.
|
// library containing it.
|
||||||
|
|
|
||||||
|
|
@ -55,6 +55,9 @@ func detectDevID() string {
|
||||||
if !bytes.Contains(line, []byte("iPhone Developer")) {
|
if !bytes.Contains(line, []byte("iPhone Developer")) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if bytes.Contains(line, []byte("REVOKED")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
fields := bytes.Fields(line)
|
fields := bytes.Fields(line)
|
||||||
return string(fields[1])
|
return string(fields[1])
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -12,6 +12,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
@ -49,12 +50,36 @@ type regFileReader struct {
|
||||||
nb int64 // number of unread bytes for current file entry
|
nb int64 // number of unread bytes for current file entry
|
||||||
}
|
}
|
||||||
|
|
||||||
// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
|
// A sparseFileReader is a numBytesReader for reading sparse file data from a
|
||||||
|
// tar archive.
|
||||||
type sparseFileReader struct {
|
type sparseFileReader struct {
|
||||||
rfr *regFileReader // reads the sparse-encoded file data
|
rfr numBytesReader // Reads the sparse-encoded file data
|
||||||
sp []sparseEntry // the sparse map for the file
|
sp []sparseEntry // The sparse map for the file
|
||||||
pos int64 // keeps track of file position
|
pos int64 // Keeps track of file position
|
||||||
tot int64 // total size of the file
|
total int64 // Total size of the file
|
||||||
|
}
|
||||||
|
|
||||||
|
// A sparseEntry holds a single entry in a sparse file's sparse map.
|
||||||
|
//
|
||||||
|
// Sparse files are represented using a series of sparseEntrys.
|
||||||
|
// Despite the name, a sparseEntry represents an actual data fragment that
|
||||||
|
// references data found in the underlying archive stream. All regions not
|
||||||
|
// covered by a sparseEntry are logically filled with zeros.
|
||||||
|
//
|
||||||
|
// For example, if the underlying raw file contains the 10-byte data:
|
||||||
|
// var compactData = "abcdefgh"
|
||||||
|
//
|
||||||
|
// And the sparse map has the following entries:
|
||||||
|
// var sp = []sparseEntry{
|
||||||
|
// {offset: 2, numBytes: 5} // Data fragment for [2..7]
|
||||||
|
// {offset: 18, numBytes: 3} // Data fragment for [18..21]
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Then the content of the resulting sparse file with a "real" size of 25 is:
|
||||||
|
// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
|
||||||
|
type sparseEntry struct {
|
||||||
|
offset int64 // Starting position of the fragment
|
||||||
|
numBytes int64 // Length of the fragment
|
||||||
}
|
}
|
||||||
|
|
||||||
// Keywords for GNU sparse files in a PAX extended header
|
// Keywords for GNU sparse files in a PAX extended header
|
||||||
|
|
@ -128,7 +153,10 @@ func (tr *Reader) Next() (*Header, error) {
|
||||||
if sp != nil {
|
if sp != nil {
|
||||||
// Current file is a PAX format GNU sparse file.
|
// Current file is a PAX format GNU sparse file.
|
||||||
// Set the current file reader to a sparse file reader.
|
// Set the current file reader to a sparse file reader.
|
||||||
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
|
||||||
|
if tr.err != nil {
|
||||||
|
return nil, tr.err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return hdr, nil
|
return hdr, nil
|
||||||
case TypeGNULongName:
|
case TypeGNULongName:
|
||||||
|
|
@ -137,18 +165,24 @@ func (tr *Reader) Next() (*Header, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
hdr, err := tr.Next()
|
hdr, tr.err = tr.Next()
|
||||||
|
if tr.err != nil {
|
||||||
|
return nil, tr.err
|
||||||
|
}
|
||||||
hdr.Name = cString(realname)
|
hdr.Name = cString(realname)
|
||||||
return hdr, err
|
return hdr, nil
|
||||||
case TypeGNULongLink:
|
case TypeGNULongLink:
|
||||||
// We have a GNU long link header.
|
// We have a GNU long link header.
|
||||||
realname, err := ioutil.ReadAll(tr)
|
realname, err := ioutil.ReadAll(tr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
hdr, err := tr.Next()
|
hdr, tr.err = tr.Next()
|
||||||
|
if tr.err != nil {
|
||||||
|
return nil, tr.err
|
||||||
|
}
|
||||||
hdr.Linkname = cString(realname)
|
hdr.Linkname = cString(realname)
|
||||||
return hdr, err
|
return hdr, nil
|
||||||
}
|
}
|
||||||
return hdr, tr.err
|
return hdr, tr.err
|
||||||
}
|
}
|
||||||
|
|
@ -541,21 +575,17 @@ func (tr *Reader) readHeader() *Header {
|
||||||
if tr.err != nil {
|
if tr.err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Current file is a GNU sparse file. Update the current file reader.
|
// Current file is a GNU sparse file. Update the current file reader.
|
||||||
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
|
||||||
|
if tr.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return hdr
|
return hdr
|
||||||
}
|
}
|
||||||
|
|
||||||
// A sparseEntry holds a single entry in a sparse file's sparse map.
|
|
||||||
// A sparse entry indicates the offset and size in a sparse file of a
|
|
||||||
// block of data.
|
|
||||||
type sparseEntry struct {
|
|
||||||
offset int64
|
|
||||||
numBytes int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
|
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
|
||||||
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
|
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
|
||||||
// then one or more extension headers are used to store the rest of the sparse map.
|
// then one or more extension headers are used to store the rest of the sparse map.
|
||||||
|
|
@ -688,40 +718,37 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
|
||||||
return sp, nil
|
return sp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
|
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
|
||||||
// The sparse map is stored in the PAX headers.
|
// version 0.1. The sparse map is stored in the PAX headers.
|
||||||
func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
|
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
|
||||||
// Get number of entries
|
// Get number of entries.
|
||||||
numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
|
// Use integer overflow resistant math to check this.
|
||||||
if !ok {
|
numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
|
||||||
return nil, ErrHeader
|
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
|
||||||
}
|
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
|
||||||
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ErrHeader
|
return nil, ErrHeader
|
||||||
}
|
}
|
||||||
|
|
||||||
sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
|
// There should be two numbers in sparseMap for each entry.
|
||||||
|
sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
|
||||||
// There should be two numbers in sparseMap for each entry
|
|
||||||
if int64(len(sparseMap)) != 2*numEntries {
|
if int64(len(sparseMap)) != 2*numEntries {
|
||||||
return nil, ErrHeader
|
return nil, ErrHeader
|
||||||
}
|
}
|
||||||
|
|
||||||
// Loop through the entries in the sparse map
|
// Loop through the entries in the sparse map.
|
||||||
|
// numEntries is trusted now.
|
||||||
sp := make([]sparseEntry, 0, numEntries)
|
sp := make([]sparseEntry, 0, numEntries)
|
||||||
for i := int64(0); i < numEntries; i++ {
|
for i := int64(0); i < numEntries; i++ {
|
||||||
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
|
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ErrHeader
|
return nil, ErrHeader
|
||||||
}
|
}
|
||||||
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
|
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ErrHeader
|
return nil, ErrHeader
|
||||||
}
|
}
|
||||||
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
|
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
|
||||||
}
|
}
|
||||||
|
|
||||||
return sp, nil
|
return sp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -739,9 +766,13 @@ func (tr *Reader) numBytes() int64 {
|
||||||
// It returns 0, io.EOF when it reaches the end of that entry,
|
// It returns 0, io.EOF when it reaches the end of that entry,
|
||||||
// until Next is called to advance to the next entry.
|
// until Next is called to advance to the next entry.
|
||||||
func (tr *Reader) Read(b []byte) (n int, err error) {
|
func (tr *Reader) Read(b []byte) (n int, err error) {
|
||||||
|
if tr.err != nil {
|
||||||
|
return 0, tr.err
|
||||||
|
}
|
||||||
if tr.curr == nil {
|
if tr.curr == nil {
|
||||||
return 0, io.EOF
|
return 0, io.EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
n, err = tr.curr.Read(b)
|
n, err = tr.curr.Read(b)
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
tr.err = err
|
tr.err = err
|
||||||
|
|
@ -771,9 +802,33 @@ func (rfr *regFileReader) numBytes() int64 {
|
||||||
return rfr.nb
|
return rfr.nb
|
||||||
}
|
}
|
||||||
|
|
||||||
// readHole reads a sparse file hole ending at offset toOffset
|
// newSparseFileReader creates a new sparseFileReader, but validates all of the
|
||||||
func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
|
// sparse entries before doing so.
|
||||||
n64 := toOffset - sfr.pos
|
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
|
||||||
|
if total < 0 {
|
||||||
|
return nil, ErrHeader // Total size cannot be negative
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate all sparse entries. These are the same checks as performed by
|
||||||
|
// the BSD tar utility.
|
||||||
|
for i, s := range sp {
|
||||||
|
switch {
|
||||||
|
case s.offset < 0 || s.numBytes < 0:
|
||||||
|
return nil, ErrHeader // Negative values are never okay
|
||||||
|
case s.offset > math.MaxInt64-s.numBytes:
|
||||||
|
return nil, ErrHeader // Integer overflow with large length
|
||||||
|
case s.offset+s.numBytes > total:
|
||||||
|
return nil, ErrHeader // Region extends beyond the "real" size
|
||||||
|
case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
|
||||||
|
return nil, ErrHeader // Regions can't overlap and must be in order
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readHole reads a sparse hole ending at endOffset.
|
||||||
|
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
|
||||||
|
n64 := endOffset - sfr.pos
|
||||||
if n64 > int64(len(b)) {
|
if n64 > int64(len(b)) {
|
||||||
n64 = int64(len(b))
|
n64 = int64(len(b))
|
||||||
}
|
}
|
||||||
|
|
@ -787,49 +842,54 @@ func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
|
||||||
|
|
||||||
// Read reads the sparse file data in expanded form.
|
// Read reads the sparse file data in expanded form.
|
||||||
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
|
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
|
||||||
if len(sfr.sp) == 0 {
|
// Skip past all empty fragments.
|
||||||
// No more data fragments to read from.
|
for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
|
||||||
if sfr.pos < sfr.tot {
|
sfr.sp = sfr.sp[1:]
|
||||||
// We're in the last hole
|
|
||||||
n = sfr.readHole(b, sfr.tot)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Otherwise, we're at the end of the file
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
if sfr.tot < sfr.sp[0].offset {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if sfr.pos < sfr.sp[0].offset {
|
|
||||||
// We're in a hole
|
|
||||||
n = sfr.readHole(b, sfr.sp[0].offset)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We're not in a hole, so we'll read from the next data fragment
|
// If there are no more fragments, then it is possible that there
|
||||||
posInFragment := sfr.pos - sfr.sp[0].offset
|
// is one last sparse hole.
|
||||||
bytesLeft := sfr.sp[0].numBytes - posInFragment
|
if len(sfr.sp) == 0 {
|
||||||
|
// This behavior matches the BSD tar utility.
|
||||||
|
// However, GNU tar stops returning data even if sfr.total is unmet.
|
||||||
|
if sfr.pos < sfr.total {
|
||||||
|
return sfr.readHole(b, sfr.total), nil
|
||||||
|
}
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// In front of a data fragment, so read a hole.
|
||||||
|
if sfr.pos < sfr.sp[0].offset {
|
||||||
|
return sfr.readHole(b, sfr.sp[0].offset), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// In a data fragment, so read from it.
|
||||||
|
// This math is overflow free since we verify that offset and numBytes can
|
||||||
|
// be safely added when creating the sparseFileReader.
|
||||||
|
endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
|
||||||
|
bytesLeft := endPos - sfr.pos // Bytes left in fragment
|
||||||
if int64(len(b)) > bytesLeft {
|
if int64(len(b)) > bytesLeft {
|
||||||
b = b[0:bytesLeft]
|
b = b[:bytesLeft]
|
||||||
}
|
}
|
||||||
|
|
||||||
n, err = sfr.rfr.Read(b)
|
n, err = sfr.rfr.Read(b)
|
||||||
sfr.pos += int64(n)
|
sfr.pos += int64(n)
|
||||||
|
if err == io.EOF {
|
||||||
if int64(n) == bytesLeft {
|
if sfr.pos < endPos {
|
||||||
// We're done with this fragment
|
err = io.ErrUnexpectedEOF // There was supposed to be more data
|
||||||
sfr.sp = sfr.sp[1:]
|
} else if sfr.pos < sfr.total {
|
||||||
|
err = nil // There is still an implicit sparse hole at the end
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == io.EOF && sfr.pos < sfr.tot {
|
if sfr.pos == endPos {
|
||||||
// We reached the end of the last fragment's data, but there's a final hole
|
sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
|
||||||
err = nil
|
|
||||||
}
|
}
|
||||||
return
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// numBytes returns the number of bytes left to read in the sparse file's
|
// numBytes returns the number of bytes left to read in the sparse file's
|
||||||
// sparse-encoded data in the tar archive.
|
// sparse-encoded data in the tar archive.
|
||||||
func (sfr *sparseFileReader) numBytes() int64 {
|
func (sfr *sparseFileReader) numBytes() int64 {
|
||||||
return sfr.rfr.nb
|
return sfr.rfr.numBytes()
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
@ -18,9 +19,10 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
type untarTest struct {
|
type untarTest struct {
|
||||||
file string
|
file string // Test input file
|
||||||
headers []*Header
|
headers []*Header // Expected output headers
|
||||||
cksums []string
|
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||||
|
err error // Expected error to occur
|
||||||
}
|
}
|
||||||
|
|
||||||
var gnuTarTest = &untarTest{
|
var gnuTarTest = &untarTest{
|
||||||
|
|
@ -49,7 +51,7 @@ var gnuTarTest = &untarTest{
|
||||||
Gname: "eng",
|
Gname: "eng",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
cksums: []string{
|
chksums: []string{
|
||||||
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||||
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||||
},
|
},
|
||||||
|
|
@ -129,7 +131,7 @@ var sparseTarTest = &untarTest{
|
||||||
Devminor: 0,
|
Devminor: 0,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
cksums: []string{
|
chksums: []string{
|
||||||
"6f53234398c2449fe67c1812d993012f",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"6f53234398c2449fe67c1812d993012f",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
"6f53234398c2449fe67c1812d993012f",
|
"6f53234398c2449fe67c1812d993012f",
|
||||||
|
|
@ -286,37 +288,101 @@ var untarTests = []*untarTest{
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
file: "testdata/neg-size.tar",
|
||||||
|
err: ErrHeader,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "testdata/issue10968.tar",
|
||||||
|
err: ErrHeader,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "testdata/issue11169.tar",
|
||||||
|
// TODO(dsnet): Currently the library does not detect that this file is
|
||||||
|
// malformed. Instead it incorrectly believes that file just ends.
|
||||||
|
// At least the library doesn't crash anymore.
|
||||||
|
// err: ErrHeader,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "testdata/issue12435.tar",
|
||||||
|
// TODO(dsnet): Currently the library does not detect that this file is
|
||||||
|
// malformed. Instead, it incorrectly believes that file just ends.
|
||||||
|
// At least the library doesn't crash anymore.
|
||||||
|
// err: ErrHeader,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
 func TestReader(t *testing.T) {
-testLoop:
-	for i, test := range untarTests {
-		f, err := os.Open(test.file)
+	for i, v := range untarTests {
+		f, err := os.Open(v.file)
 		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
+			t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err)
 			continue
 		}
 		defer f.Close()
-		tr := NewReader(f)
-		for j, header := range test.headers {
-			hdr, err := tr.Next()
-			if err != nil || hdr == nil {
-				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
-				f.Close()
-				continue testLoop
+
+		// Capture all headers and checksums.
+		var (
+			tr      = NewReader(f)
+			hdrs    []*Header
+			chksums []string
+			rdbuf   = make([]byte, 8)
+		)
+		for {
+			var hdr *Header
+			hdr, err = tr.Next()
+			if err != nil {
+				if err == io.EOF {
+					err = nil // Expected error
+				}
+				break
 			}
-			if !reflect.DeepEqual(*hdr, *header) {
-				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
-					i, j, *hdr, *header)
+			hdrs = append(hdrs, hdr)
+
+			if v.chksums == nil {
+				continue
+			}
+			h := md5.New()
+			_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
+			if err != nil {
+				break
+			}
+			chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil)))
+		}
+
+		for j, hdr := range hdrs {
+			if j >= len(v.headers) {
+				t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v",
+					v.file, i, j, *hdr)
+				continue
+			}
+			if !reflect.DeepEqual(*hdr, *v.headers[j]) {
+				t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v",
+					v.file, i, j, *hdr, *v.headers[j])
 			}
 		}
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			continue testLoop
+		if len(hdrs) != len(v.headers) {
+			t.Errorf("file %s, test %d: got %d headers, want %d headers",
+				v.file, i, len(hdrs), len(v.headers))
 		}
-		if hdr != nil || err != nil {
-			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
+
+		for j, sum := range chksums {
+			if j >= len(v.chksums) {
+				t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s",
+					v.file, i, j, sum)
+				continue
+			}
+			if sum != v.chksums[j] {
+				t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s",
+					v.file, i, j, sum, v.chksums[j])
+			}
 		}
+
+		if err != v.err {
+			t.Errorf("file %s, test %d: unexpected error: got %v, want %v",
+				v.file, i, err, v.err)
+		}
+		f.Close()
 	}
 }

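A minimal standalone sketch (not part of this change) of the io.CopyBuffer pattern the rewritten TestReader relies on; the input string and buffer size here are illustrative assumptions.

// Hypothetical sketch of the io.CopyBuffer pattern used above: copying through
// a tiny scratch buffer forces the source reader to serve many small Read calls,
// which is what makes the checksum pass act as an "incremental" read test.
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("some file contents to checksum")
	h := md5.New()
	rdbuf := make([]byte, 8) // deliberately small scratch buffer
	if _, err := io.CopyBuffer(h, src, rdbuf); err != nil {
		fmt.Println("copy error:", err)
		return
	}
	fmt.Printf("%x\n", h.Sum(nil))
}
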
@@ -356,60 +422,6 @@ func TestPartialRead(t *testing.T) {
 	}
 }

-func TestIncrementalRead(t *testing.T) {
-	test := gnuTarTest
-	f, err := os.Open(test.file)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	tr := NewReader(f)
-
-	headers := test.headers
-	cksums := test.cksums
-	nread := 0
-
-	// loop over all files
-	for ; ; nread++ {
-		hdr, err := tr.Next()
-		if hdr == nil || err == io.EOF {
-			break
-		}
-
-		// check the header
-		if !reflect.DeepEqual(*hdr, *headers[nread]) {
-			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
-				*hdr, headers[nread])
-		}
-
-		// read file contents in little chunks EOF,
-		// checksumming all the way
-		h := md5.New()
-		rdbuf := make([]uint8, 8)
-		for {
-			nr, err := tr.Read(rdbuf)
-			if err == io.EOF {
-				break
-			}
-			if err != nil {
-				t.Errorf("Read: unexpected error %v\n", err)
-				break
-			}
-			h.Write(rdbuf[0:nr])
-		}
-		// verify checksum
-		have := fmt.Sprintf("%x", h.Sum(nil))
-		want := cksums[nread]
-		if want != have {
-			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
-		}
-	}
-	if nread != len(headers) {
-		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
-	}
-}
-
 func TestNonSeekable(t *testing.T) {
 	test := gnuTarTest
 	f, err := os.Open(test.file)
@ -514,187 +526,232 @@ func TestMergePAX(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSparseEndToEnd(t *testing.T) {
|
|
||||||
test := sparseTarTest
|
|
||||||
f, err := os.Open(test.file)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
tr := NewReader(f)
|
|
||||||
|
|
||||||
headers := test.headers
|
|
||||||
cksums := test.cksums
|
|
||||||
nread := 0
|
|
||||||
|
|
||||||
// loop over all files
|
|
||||||
for ; ; nread++ {
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if hdr == nil || err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// check the header
|
|
||||||
if !reflect.DeepEqual(*hdr, *headers[nread]) {
|
|
||||||
t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
|
|
||||||
*hdr, headers[nread])
|
|
||||||
}
|
|
||||||
|
|
||||||
// read and checksum the file data
|
|
||||||
h := md5.New()
|
|
||||||
_, err = io.Copy(h, tr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// verify checksum
|
|
||||||
have := fmt.Sprintf("%x", h.Sum(nil))
|
|
||||||
want := cksums[nread]
|
|
||||||
if want != have {
|
|
||||||
t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if nread != len(headers) {
|
|
||||||
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type sparseFileReadTest struct {
|
|
||||||
sparseData []byte
|
|
||||||
sparseMap []sparseEntry
|
|
||||||
realSize int64
|
|
||||||
expected []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
var sparseFileReadTests = []sparseFileReadTest{
|
|
||||||
{
|
|
||||||
sparseData: []byte("abcde"),
|
|
||||||
sparseMap: []sparseEntry{
|
|
||||||
{offset: 0, numBytes: 2},
|
|
||||||
{offset: 5, numBytes: 3},
|
|
||||||
},
|
|
||||||
realSize: 8,
|
|
||||||
expected: []byte("ab\x00\x00\x00cde"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
sparseData: []byte("abcde"),
|
|
||||||
sparseMap: []sparseEntry{
|
|
||||||
{offset: 0, numBytes: 2},
|
|
||||||
{offset: 5, numBytes: 3},
|
|
||||||
},
|
|
||||||
realSize: 10,
|
|
||||||
expected: []byte("ab\x00\x00\x00cde\x00\x00"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
sparseData: []byte("abcde"),
|
|
||||||
sparseMap: []sparseEntry{
|
|
||||||
{offset: 1, numBytes: 3},
|
|
||||||
{offset: 6, numBytes: 2},
|
|
||||||
},
|
|
||||||
realSize: 8,
|
|
||||||
expected: []byte("\x00abc\x00\x00de"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
sparseData: []byte("abcde"),
|
|
||||||
sparseMap: []sparseEntry{
|
|
||||||
{offset: 1, numBytes: 3},
|
|
||||||
{offset: 6, numBytes: 2},
|
|
||||||
},
|
|
||||||
realSize: 10,
|
|
||||||
expected: []byte("\x00abc\x00\x00de\x00\x00"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
sparseData: []byte(""),
|
|
||||||
sparseMap: nil,
|
|
||||||
realSize: 2,
|
|
||||||
expected: []byte("\x00\x00"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSparseFileReader(t *testing.T) {
|
func TestSparseFileReader(t *testing.T) {
|
||||||
for i, test := range sparseFileReadTests {
|
var vectors = []struct {
|
||||||
r := bytes.NewReader(test.sparseData)
|
realSize int64 // Real size of the output file
|
||||||
nb := int64(r.Len())
|
sparseMap []sparseEntry // Input sparse map
|
||||||
sfr := &sparseFileReader{
|
sparseData string // Input compact data
|
||||||
rfr: ®FileReader{r: r, nb: nb},
|
expected string // Expected output data
|
||||||
sp: test.sparseMap,
|
err error // Expected error outcome
|
||||||
pos: 0,
|
}{{
|
||||||
tot: test.realSize,
|
realSize: 8,
|
||||||
}
|
sparseMap: []sparseEntry{
|
||||||
if sfr.numBytes() != nb {
|
{offset: 0, numBytes: 2},
|
||||||
t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
|
{offset: 5, numBytes: 3},
|
||||||
}
|
},
|
||||||
buf, err := ioutil.ReadAll(sfr)
|
sparseData: "abcde",
|
||||||
|
expected: "ab\x00\x00\x00cde",
|
||||||
|
}, {
|
||||||
|
realSize: 10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 0, numBytes: 2},
|
||||||
|
{offset: 5, numBytes: 3},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
expected: "ab\x00\x00\x00cde\x00\x00",
|
||||||
|
}, {
|
||||||
|
realSize: 8,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 2},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
expected: "\x00abc\x00\x00de",
|
||||||
|
}, {
|
||||||
|
realSize: 8,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 0},
|
||||||
|
{offset: 6, numBytes: 0},
|
||||||
|
{offset: 6, numBytes: 2},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
expected: "\x00abc\x00\x00de",
|
||||||
|
}, {
|
||||||
|
realSize: 10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 2},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
expected: "\x00abc\x00\x00de\x00\x00",
|
||||||
|
}, {
|
||||||
|
realSize: 10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 2},
|
||||||
|
{offset: 8, numBytes: 0},
|
||||||
|
{offset: 8, numBytes: 0},
|
||||||
|
{offset: 8, numBytes: 0},
|
||||||
|
{offset: 8, numBytes: 0},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
expected: "\x00abc\x00\x00de\x00\x00",
|
||||||
|
}, {
|
||||||
|
realSize: 2,
|
||||||
|
sparseMap: []sparseEntry{},
|
||||||
|
sparseData: "",
|
||||||
|
expected: "\x00\x00",
|
||||||
|
}, {
|
||||||
|
realSize: -2,
|
||||||
|
sparseMap: []sparseEntry{},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
realSize: -10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 2},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
realSize: 10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 5},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
realSize: 35,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: 5},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: io.ErrUnexpectedEOF,
|
||||||
|
}, {
|
||||||
|
realSize: 35,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: -5},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
realSize: 35,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: math.MaxInt64, numBytes: 3},
|
||||||
|
{offset: 6, numBytes: -5},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
realSize: 10,
|
||||||
|
sparseMap: []sparseEntry{
|
||||||
|
{offset: 1, numBytes: 3},
|
||||||
|
{offset: 2, numBytes: 2},
|
||||||
|
},
|
||||||
|
sparseData: "abcde",
|
||||||
|
err: ErrHeader,
|
||||||
|
}}
|
||||||
|
|
||||||
|
for i, v := range vectors {
|
||||||
|
r := bytes.NewReader([]byte(v.sparseData))
|
||||||
|
rfr := ®FileReader{r: r, nb: int64(len(v.sparseData))}
|
||||||
|
|
||||||
|
var sfr *sparseFileReader
|
||||||
|
var err error
|
||||||
|
var buf []byte
|
||||||
|
|
||||||
|
sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
goto fail
|
||||||
}
|
}
|
||||||
if e := test.expected; !bytes.Equal(buf, e) {
|
if sfr.numBytes() != int64(len(v.sparseData)) {
|
||||||
t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
|
t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData))
|
||||||
|
}
|
||||||
|
buf, err = ioutil.ReadAll(sfr)
|
||||||
|
if err != nil {
|
||||||
|
goto fail
|
||||||
|
}
|
||||||
|
if string(buf) != v.expected {
|
||||||
|
t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected)
|
||||||
}
|
}
|
||||||
if sfr.numBytes() != 0 {
|
if sfr.numBytes() != 0 {
|
||||||
t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
|
t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSparseIncrementalRead(t *testing.T) {
|
fail:
|
||||||
sparseMap := []sparseEntry{{10, 2}}
|
if err != v.err {
|
||||||
sparseData := []byte("Go")
|
t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
|
||||||
expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
|
|
||||||
|
|
||||||
r := bytes.NewReader(sparseData)
|
|
||||||
nb := int64(r.Len())
|
|
||||||
sfr := &sparseFileReader{
|
|
||||||
rfr: ®FileReader{r: r, nb: nb},
|
|
||||||
sp: sparseMap,
|
|
||||||
pos: 0,
|
|
||||||
tot: int64(len(expected)),
|
|
||||||
}
|
|
||||||
|
|
||||||
// We'll read the data 6 bytes at a time, with a hole of size 10 at
|
|
||||||
// the beginning and one of size 8 at the end.
|
|
||||||
var outputBuf bytes.Buffer
|
|
||||||
buf := make([]byte, 6)
|
|
||||||
for {
|
|
||||||
n, err := sfr.Read(buf)
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Read: unexpected error %v\n", err)
|
|
||||||
}
|
|
||||||
if n > 0 {
|
|
||||||
_, err := outputBuf.Write(buf[:n])
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Write: unexpected error %v\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
got := outputBuf.String()
|
|
||||||
if got != expected {
|
|
||||||
t.Errorf("Contents = %v, want %v", got, expected)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
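For readers unfamiliar with the sparse format exercised by the vectors above, a hedged standalone sketch of how a sparse map expands compact data into the logical file; the sparseEntry type and expand helper below are local stand-ins, not the package's own API.

// Hypothetical sketch of sparse-map expansion matching the vectors above:
// data "abcde" with entries {0,2} and {5,3} and a real size of 8 expands to
// "ab\x00\x00\x00cde".
package main

import "fmt"

type sparseEntry struct {
	offset   int64
	numBytes int64
}

func expand(data string, sp []sparseEntry, realSize int64) []byte {
	out := make([]byte, realSize) // holes default to NUL bytes
	pos := 0
	for _, s := range sp {
		copy(out[s.offset:s.offset+s.numBytes], data[pos:pos+int(s.numBytes)])
		pos += int(s.numBytes)
	}
	return out
}

func main() {
	got := expand("abcde", []sparseEntry{{0, 2}, {5, 3}}, 8)
	fmt.Printf("%q\n", got) // "ab\x00\x00\x00cde"
}
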
func TestReadGNUSparseMap0x1(t *testing.T) {
|
func TestReadGNUSparseMap0x1(t *testing.T) {
|
||||||
headers := map[string]string{
|
const (
|
||||||
paxGNUSparseNumBlocks: "4",
|
maxUint = ^uint(0)
|
||||||
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
maxInt = int(maxUint >> 1)
|
||||||
}
|
)
|
||||||
expected := []sparseEntry{
|
var (
|
||||||
{offset: 0, numBytes: 5},
|
big1 = fmt.Sprintf("%d", int64(maxInt))
|
||||||
{offset: 10, numBytes: 5},
|
big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1)
|
||||||
{offset: 20, numBytes: 5},
|
big3 = fmt.Sprintf("%d", (int64(maxInt) / 3))
|
||||||
{offset: 30, numBytes: 5},
|
)
|
||||||
}
|
|
||||||
|
|
||||||
sp, err := readGNUSparseMap0x1(headers)
|
var vectors = []struct {
|
||||||
if err != nil {
|
extHdrs map[string]string // Input data
|
||||||
t.Errorf("Unexpected error: %v", err)
|
sparseMap []sparseEntry // Expected sparse entries to be outputted
|
||||||
}
|
err error // Expected errors that may be raised
|
||||||
if !reflect.DeepEqual(sp, expected) {
|
}{{
|
||||||
t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
|
extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: big1,
|
||||||
|
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: big2,
|
||||||
|
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: big3,
|
||||||
|
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: "4",
|
||||||
|
paxGNUSparseMap: "0.5,5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: "4",
|
||||||
|
paxGNUSparseMap: "0,5.5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: "4",
|
||||||
|
paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
err: ErrHeader,
|
||||||
|
}, {
|
||||||
|
extHdrs: map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: "4",
|
||||||
|
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||||
|
},
|
||||||
|
sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}},
|
||||||
|
}}
|
||||||
|
|
||||||
|
for i, v := range vectors {
|
||||||
|
sp, err := readGNUSparseMap0x1(v.extHdrs)
|
||||||
|
if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) {
|
||||||
|
t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap)
|
||||||
|
}
|
||||||
|
if err != v.err {
|
||||||
|
t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
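A hedged standalone sketch of decoding the GNU PAX 0.1 sparse map format seen in the vectors above ("0,5,10,5,..."); the parseSparseMap0x1 helper is hypothetical and omits the numBlocks cross-check that the real readGNUSparseMap0x1 performs.

// Hypothetical sketch of decoding a GNU PAX 0.1 sparse map such as
// "0,5,10,5,20,5,30,5" into offset/length pairs; error handling is reduced
// to the bare minimum.
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

type sparseEntry struct {
	offset   int64
	numBytes int64
}

func parseSparseMap0x1(s string) ([]sparseEntry, error) {
	fields := strings.Split(s, ",")
	if len(fields)%2 != 0 {
		return nil, errors.New("odd number of fields in sparse map")
	}
	var sp []sparseEntry
	for i := 0; i < len(fields); i += 2 {
		off, err1 := strconv.ParseInt(fields[i], 10, 64)
		n, err2 := strconv.ParseInt(fields[i+1], 10, 64)
		if err1 != nil || err2 != nil {
			return nil, errors.New("malformed sparse map entry")
		}
		sp = append(sp, sparseEntry{offset: off, numBytes: n})
	}
	return sp, nil
}

func main() {
	sp, err := parseSparseMap0x1("0,5,10,5,20,5,30,5")
	fmt.Println(sp, err) // [{0 5} {10 5} {20 5} {30 5}] <nil>
}
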
|
|
@ -746,53 +803,3 @@ func TestUninitializedRead(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Negative header size should not cause panic.
|
|
||||||
// Issues 10959 and 10960.
|
|
||||||
func TestNegativeHdrSize(t *testing.T) {
|
|
||||||
f, err := os.Open("testdata/neg-size.tar")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
r := NewReader(f)
|
|
||||||
_, err = r.Next()
|
|
||||||
if err != ErrHeader {
|
|
||||||
t.Error("want ErrHeader, got", err)
|
|
||||||
}
|
|
||||||
io.Copy(ioutil.Discard, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This used to hang in (*sparseFileReader).readHole due to missing
|
|
||||||
// verification of sparse offsets against file size.
|
|
||||||
func TestIssue10968(t *testing.T) {
|
|
||||||
f, err := os.Open("testdata/issue10968.tar")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
r := NewReader(f)
|
|
||||||
_, err = r.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = io.Copy(ioutil.Discard, r)
|
|
||||||
if err != io.ErrUnexpectedEOF {
|
|
||||||
t.Fatalf("expected %q, got %q", io.ErrUnexpectedEOF, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do not panic if there are errors in header blocks after the pax header.
|
|
||||||
// Issue 11169
|
|
||||||
func TestIssue11169(t *testing.T) {
|
|
||||||
f, err := os.Open("testdata/issue11169.tar")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
r := NewReader(f)
|
|
||||||
_, err = r.Next()
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("Unexpected success")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
BIN src/archive/tar/testdata/issue12435.tar vendored (new file; binary content not shown)

@@ -23,7 +23,6 @@ var (
 	ErrWriteTooLong    = errors.New("archive/tar: write too long")
 	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
 	ErrWriteAfterClose = errors.New("archive/tar: write after close")
-	errNameTooLong     = errors.New("archive/tar: name too long")
 	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
 )

@@ -215,26 +214,14 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	_, paxPathUsed := paxHeaders[paxPath]
 	// try to use a ustar header when only the name is too long
 	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
-		suffix := hdr.Name
-		prefix := ""
-		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
-			var err error
-			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
-			if err == nil {
-				// ok we can use a ustar long name instead of pax, now correct the fields
-
-				// remove the path field from the pax header. this will suppress the pax header
-				delete(paxHeaders, paxPath)
-
-				// update the path fields
-				tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
-				tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
-
-				// Use the ustar magic if we used ustar long names.
-				if len(prefix) > 0 && !tw.usedBinary {
-					copy(header[257:265], []byte("ustar\x00"))
-				}
-			}
+		prefix, suffix, ok := splitUSTARPath(hdr.Name)
+		if ok {
+			// Since we can encode in USTAR format, disable PAX header.
+			delete(paxHeaders, paxPath)
+
+			// Update the path fields
+			tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
+			tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
 		}
 	}

@@ -270,28 +257,25 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	return tw.err
 }

-// writeUSTARLongName splits a USTAR long name hdr.Name.
-// name must be < 256 characters. errNameTooLong is returned
-// if hdr.Name can't be split. The splitting heuristic
-// is compatible with gnu tar.
-func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
 	length := len(name)
-	if length > fileNamePrefixSize+1 {
+	if length <= fileNameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > fileNamePrefixSize+1 {
 		length = fileNamePrefixSize + 1
 	} else if name[length-1] == '/' {
 		length--
 	}

 	i := strings.LastIndex(name[:length], "/")
-	// nlen contains the resulting length in the name field.
-	nlen := len(name) - i - 1
-	// plen contains the resulting length in the prefix field.
-	plen := i
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
 	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
-		err = errNameTooLong
-		return
+		return "", "", false
 	}
-	prefix, suffix = name[:i], name[i+1:]
-	return
+	return name[:i], name[i+1:], true
 }

 // writePaxHeader writes an extended pax header to the

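A hedged standalone sketch of the USTAR split rule implemented above; splitUSTARPath is re-declared locally here, the isASCII check is omitted for brevity, and the 100/155 byte limits are the usual USTAR field sizes assumed for illustration.

// Hypothetical standalone sketch: names of at most 100 bytes stay in the USTAR
// name field; longer names may be split at a '/' into a prefix (<= 155 bytes)
// and a suffix (<= 100 bytes), otherwise the split fails.
package main

import (
	"fmt"
	"strings"
)

const (
	fileNameSize       = 100 // assumed max length of the USTAR name field
	fileNamePrefixSize = 155 // assumed max length of the USTAR prefix field
)

func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length <= fileNameSize {
		return "", "", false
	} else if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}
	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1
	plen := i
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}

func main() {
	long := strings.Repeat("a", 120) + "/file.txt"
	prefix, suffix, ok := splitUSTARPath(long)
	fmt.Println(ok, len(prefix), suffix) // true 120 file.txt
}
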
@@ -544,3 +544,37 @@ func TestWriteAfterClose(t *testing.T) {
 		t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
 	}
 }
+
+func TestSplitUSTARPath(t *testing.T) {
+	var sr = strings.Repeat
+
+	var vectors = []struct {
+		input  string // Input path
+		prefix string // Expected output prefix
+		suffix string // Expected output suffix
+		ok     bool   // Split success?
+	}{
+		{"", "", "", false},
+		{"abc", "", "", false},
+		{"用戶名", "", "", false},
+		{sr("a", fileNameSize), "", "", false},
+		{sr("a", fileNameSize) + "/", "", "", false},
+		{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
+		{sr("a", fileNamePrefixSize) + "/", "", "", false},
+		{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
+		{sr("a", fileNameSize+1), "", "", false},
+		{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
+		{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
+			sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
+		{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
+		{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
+	}
+
+	for _, v := range vectors {
+		prefix, suffix, ok := splitUSTARPath(v.input)
+		if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
+			t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
+				v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
+		}
+	}
+}

@@ -376,14 +376,16 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error)
 	}
 	d.comment = string(b[:l])

-	p, err := findDirectory64End(r, directoryEndOffset)
-	if err == nil && p >= 0 {
-		err = readDirectory64End(r, p, d)
+	// These values mean that the file can be a zip64 file
+	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
+		p, err := findDirectory64End(r, directoryEndOffset)
+		if err == nil && p >= 0 {
+			err = readDirectory64End(r, p, d)
+		}
+		if err != nil {
+			return nil, err
+		}
 	}
-	if err != nil {
-		return nil, err
-	}

 	// Make sure directoryOffset points to somewhere in our file.
 	if o := int64(d.directoryOffset); o < 0 || o >= size {
 		return nil, ErrFormat
@@ -407,8 +409,13 @@ func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error)
 	if sig := b.uint32(); sig != directory64LocSignature {
 		return -1, nil
 	}
-	b = b[4:]       // skip number of the disk with the start of the zip64 end of central directory
-	p := b.uint64() // relative offset of the zip64 end of central directory record
+	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
+		return -1, nil // the file is not a valid zip64-file
+	}
+	p := b.uint64() // relative offset of the zip64 end of central directory record
+	if b.uint32() != 1 { // total number of disks
+		return -1, nil // the file is not a valid zip64-file
+	}
 	return int64(p), nil
 }

||||||
|
|
@ -605,3 +605,40 @@ func TestIssue11146(t *testing.T) {
|
||||||
}
|
}
|
||||||
r.Close()
|
r.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Verify we do not treat non-zip64 archives as zip64
|
||||||
|
func TestIssue12449(t *testing.T) {
|
||||||
|
data := []byte{
|
||||||
|
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
|
||||||
|
0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
|
||||||
|
0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
|
||||||
|
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
|
||||||
|
0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
|
||||||
|
0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
|
||||||
|
0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
|
||||||
|
0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
|
||||||
|
0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
|
||||||
|
0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
|
||||||
|
0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
|
||||||
|
0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
|
||||||
|
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
|
||||||
|
0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
|
||||||
|
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
|
||||||
|
0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
|
||||||
|
0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
|
||||||
|
0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
|
||||||
|
0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
|
||||||
|
0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
|
||||||
|
0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
|
||||||
|
0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
|
||||||
|
0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
// Read in the archive.
|
||||||
|
_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error reading the archive: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||
@@ -80,3 +80,32 @@ func ExampleScanner_custom() {
 	// 5678
 	// Invalid input: strconv.ParseInt: parsing "1234567901234567890": value out of range
 }
+
+// Use a Scanner with a custom split function to parse a comma-separated
+// list with an empty final value.
+func ExampleScanner_emptyFinalToken() {
+	// Comma-separated list; last entry is empty.
+	const input = "1,2,3,4,"
+	scanner := bufio.NewScanner(strings.NewReader(input))
+	// Define a split function that separates on commas.
+	onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		for i := 0; i < len(data); i++ {
+			if data[i] == ',' {
+				return i + 1, data[:i], nil
+			}
+		}
+		// There is one final token to be delivered, which may be the empty string.
+		// Returning bufio.ErrFinalToken here tells Scan there are no more tokens after this
+		// but does not trigger an error to be returned from Scan itself.
+		return 0, data, bufio.ErrFinalToken
+	}
+	scanner.Split(onComma)
+	// Scan.
+	for scanner.Scan() {
+		fmt.Printf("%q ", scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		fmt.Fprintln(os.Stderr, "reading input:", err)
+	}
+	// Output: "1" "2" "3" "4" ""
+}

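A further hedged sketch, not taken from this change: ErrFinalToken can also stop scanning early at a sentinel token.

// Hypothetical sketch: stop the Scanner at a sentinel line by returning
// bufio.ErrFinalToken from the split function; Scan reports no error afterwards.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	input := "alpha\nbeta\nSTOP\ngamma\n"
	scanner := bufio.NewScanner(strings.NewReader(input))
	scanner.Split(func(data []byte, atEOF bool) (int, []byte, error) {
		advance, token, err := bufio.ScanLines(data, atEOF)
		if err == nil && token != nil && bytes.Equal(token, []byte("STOP")) {
			// Deliver "STOP" as the last token and stop scanning.
			return advance, token, bufio.ErrFinalToken
		}
		return advance, token, err
	})
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	fmt.Println("err:", scanner.Err()) // err: <nil>
}
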
|
|
@ -37,6 +37,8 @@ type Scanner struct {
|
||||||
end int // End of data in buf.
|
end int // End of data in buf.
|
||||||
err error // Sticky error.
|
err error // Sticky error.
|
||||||
empties int // Count of successive empty tokens.
|
empties int // Count of successive empty tokens.
|
||||||
|
scanCalled bool // Scan has been called; buffer is in use.
|
||||||
|
done bool // Scan has finished.
|
||||||
}
|
}
|
||||||
|
|
||||||
// SplitFunc is the signature of the split function used to tokenize the
|
// SplitFunc is the signature of the split function used to tokenize the
|
||||||
|
|
@ -65,10 +67,13 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// MaxScanTokenSize is the maximum size used to buffer a token.
|
// MaxScanTokenSize is the maximum size used to buffer a token
|
||||||
|
// unless the user provides an explicit buffer with Scan.Buffer.
|
||||||
// The actual maximum token size may be smaller as the buffer
|
// The actual maximum token size may be smaller as the buffer
|
||||||
// may need to include, for instance, a newline.
|
// may need to include, for instance, a newline.
|
||||||
MaxScanTokenSize = 64 * 1024
|
MaxScanTokenSize = 64 * 1024
|
||||||
|
|
||||||
|
startBufSize = 4096 // Size of initial allocation for buffer.
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewScanner returns a new Scanner to read from r.
|
// NewScanner returns a new Scanner to read from r.
|
||||||
|
|
@ -78,7 +83,6 @@ func NewScanner(r io.Reader) *Scanner {
|
||||||
r: r,
|
r: r,
|
||||||
split: ScanLines,
|
split: ScanLines,
|
||||||
maxTokenSize: MaxScanTokenSize,
|
maxTokenSize: MaxScanTokenSize,
|
||||||
buf: make([]byte, 4096), // Plausible starting size; needn't be large.
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -103,6 +107,16 @@ func (s *Scanner) Text() string {
|
||||||
return string(s.token)
|
return string(s.token)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ErrFinalToken is a special sentinel error value. It is intended to be
|
||||||
|
// returned by a Split function to indicate that the token being delivered
|
||||||
|
// with the error is the last token and scanning should stop after this one.
|
||||||
|
// After ErrFinalToken is received by Scan, scanning stops with no error.
|
||||||
|
// The value is useful to stop processing early or when it is necessary to
|
||||||
|
// deliver a final empty token. One could achieve the same behavior
|
||||||
|
// with a custom error value but providing one here is tidier.
|
||||||
|
// See the emptyFinalToken example for a use of this value.
|
||||||
|
var ErrFinalToken = errors.New("final token")
|
||||||
|
|
||||||
// Scan advances the Scanner to the next token, which will then be
|
// Scan advances the Scanner to the next token, which will then be
|
||||||
// available through the Bytes or Text method. It returns false when the
|
// available through the Bytes or Text method. It returns false when the
|
||||||
// scan stops, either by reaching the end of the input or an error.
|
// scan stops, either by reaching the end of the input or an error.
|
||||||
|
|
@ -112,6 +126,10 @@ func (s *Scanner) Text() string {
|
||||||
// Scan panics if the split function returns 100 empty tokens without
|
// Scan panics if the split function returns 100 empty tokens without
|
||||||
// advancing the input. This is a common error mode for scanners.
|
// advancing the input. This is a common error mode for scanners.
|
||||||
func (s *Scanner) Scan() bool {
|
func (s *Scanner) Scan() bool {
|
||||||
|
if s.done {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s.scanCalled = true
|
||||||
// Loop until we have a token.
|
// Loop until we have a token.
|
||||||
for {
|
for {
|
||||||
// See if we can get a token with what we already have.
|
// See if we can get a token with what we already have.
|
||||||
|
|
@ -120,6 +138,11 @@ func (s *Scanner) Scan() bool {
|
||||||
if s.end > s.start || s.err != nil {
|
if s.end > s.start || s.err != nil {
|
||||||
advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
|
advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == ErrFinalToken {
|
||||||
|
s.token = token
|
||||||
|
s.done = true
|
||||||
|
return true
|
||||||
|
}
|
||||||
s.setErr(err)
|
s.setErr(err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
@ -158,11 +181,16 @@ func (s *Scanner) Scan() bool {
|
||||||
}
|
}
|
||||||
// Is the buffer full? If so, resize.
|
// Is the buffer full? If so, resize.
|
||||||
if s.end == len(s.buf) {
|
if s.end == len(s.buf) {
|
||||||
if len(s.buf) >= s.maxTokenSize {
|
// Guarantee no overflow in the multiplication below.
|
||||||
|
const maxInt = int(^uint(0) >> 1)
|
||||||
|
if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
|
||||||
s.setErr(ErrTooLong)
|
s.setErr(ErrTooLong)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
newSize := len(s.buf) * 2
|
newSize := len(s.buf) * 2
|
||||||
|
if newSize == 0 {
|
||||||
|
newSize = startBufSize
|
||||||
|
}
|
||||||
if newSize > s.maxTokenSize {
|
if newSize > s.maxTokenSize {
|
||||||
newSize = s.maxTokenSize
|
newSize = s.maxTokenSize
|
||||||
}
|
}
|
||||||
|
|
@ -217,9 +245,31 @@ func (s *Scanner) setErr(err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Split sets the split function for the Scanner. If called, it must be
|
// Buffer sets the initial buffer to use when scanning and the maximum
|
||||||
// called before Scan. The default split function is ScanLines.
|
// size of buffer that may be allocated during scanning. The maximum
|
||||||
|
// token size is the larger of max and cap(buf). If max <= cap(buf),
|
||||||
|
// Scan will use this buffer only and do no allocation.
|
||||||
|
//
|
||||||
|
// By default, Scan uses an internal buffer and sets the
|
||||||
|
// maximum token size to MaxScanTokenSize.
|
||||||
|
//
|
||||||
|
// Buffer panics if it is called after scanning has started.
|
||||||
|
func (s *Scanner) Buffer(buf []byte, max int) {
|
||||||
|
if s.scanCalled {
|
||||||
|
panic("Buffer called after Scan")
|
||||||
|
}
|
||||||
|
s.buf = buf[0:cap(buf)]
|
||||||
|
s.maxTokenSize = max
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split sets the split function for the Scanner.
|
||||||
|
// The default split function is ScanLines.
|
||||||
|
//
|
||||||
|
// Split panics if it is called after scanning has started.
|
||||||
func (s *Scanner) Split(split SplitFunc) {
|
func (s *Scanner) Split(split SplitFunc) {
|
||||||
|
if s.scanCalled {
|
||||||
|
panic("Split called after Scan")
|
||||||
|
}
|
||||||
s.split = split
|
s.split = split
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
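A hedged usage sketch of the new Scanner.Buffer method documented above; the buffer sizes are illustrative assumptions.

// Hypothetical sketch of bufio.Scanner.Buffer: start from a small buffer but
// allow tokens up to four times the default limit before ErrTooLong is reported.
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	huge := strings.Repeat("x", 2*bufio.MaxScanTokenSize) // one very long line
	s := bufio.NewScanner(strings.NewReader(huge + "\n"))
	s.Buffer(make([]byte, 1024), 4*bufio.MaxScanTokenSize) // must be called before Scan
	for s.Scan() {
		fmt.Println("token length:", len(s.Text()))
	}
	if err := s.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
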
|
|
@ -429,33 +429,37 @@ func commaSplit(data []byte, atEOF bool) (advance int, token []byte, err error)
|
||||||
return i + 1, data[:i], nil
|
return i + 1, data[:i], nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !atEOF {
|
return 0, data, ErrFinalToken
|
||||||
return 0, nil, nil
|
|
||||||
}
|
|
||||||
return 0, data, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyTokens(t *testing.T) {
|
func testEmptyTokens(t *testing.T, text string, values []string) {
|
||||||
s := NewScanner(strings.NewReader("1,2,3,"))
|
s := NewScanner(strings.NewReader(text))
|
||||||
values := []string{"1", "2", "3", ""}
|
|
||||||
s.Split(commaSplit)
|
s.Split(commaSplit)
|
||||||
var i int
|
var i int
|
||||||
for i = 0; i < len(values); i++ {
|
for i = 0; s.Scan(); i++ {
|
||||||
if !s.Scan() {
|
if i >= len(values) {
|
||||||
break
|
t.Fatalf("got %d fields, expected %d", i+1, len(values))
|
||||||
}
|
}
|
||||||
if s.Text() != values[i] {
|
if s.Text() != values[i] {
|
||||||
t.Errorf("%d: expected %q got %q", i, values[i], s.Text())
|
t.Errorf("%d: expected %q got %q", i, values[i], s.Text())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if i != len(values) {
|
if i != len(values) {
|
||||||
t.Errorf("got %d fields, expected %d", i, len(values))
|
t.Fatalf("got %d fields, expected %d", i, len(values))
|
||||||
}
|
}
|
||||||
if err := s.Err(); err != nil {
|
if err := s.Err(); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestEmptyTokens(t *testing.T) {
|
||||||
|
testEmptyTokens(t, "1,2,3,", []string{"1", "2", "3", ""})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithNoEmptyTokens(t *testing.T) {
|
||||||
|
testEmptyTokens(t, "1,2,3", []string{"1", "2", "3"})
|
||||||
|
}
|
||||||
|
|
||||||
func loopAtEOFSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
func loopAtEOFSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||||
if len(data) > 0 {
|
if len(data) > 0 {
|
||||||
return 1, data[:1], nil
|
return 1, data[:1], nil
|
||||||
|
|
@ -522,3 +526,19 @@ func TestEmptyLinesOK(t *testing.T) {
|
||||||
t.Fatalf("stopped with %d left to process", c)
|
t.Fatalf("stopped with %d left to process", c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Make sure we can read a huge token if a big enough buffer is provided.
|
||||||
|
func TestHugeBuffer(t *testing.T) {
|
||||||
|
text := strings.Repeat("x", 2*MaxScanTokenSize)
|
||||||
|
s := NewScanner(strings.NewReader(text + "\n"))
|
||||||
|
s.Buffer(make([]byte, 100), 3*MaxScanTokenSize)
|
||||||
|
for s.Scan() {
|
||||||
|
token := s.Text()
|
||||||
|
if token != text {
|
||||||
|
t.Errorf("scan got incorrect token of length %d", len(token))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if s.Err() != nil {
|
||||||
|
t.Fatal("after scan:", s.Err())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||
@@ -428,10 +428,15 @@ func (w *Walker) Import(name string) (*types.Package, error) {
 	}
 	w.imported[name] = &importing

+	root := w.root
+	if strings.HasPrefix(name, "golang.org/x/") {
+		root = filepath.Join(root, "vendor")
+	}
+
 	// Determine package files.
-	dir := filepath.Join(w.root, filepath.FromSlash(name))
+	dir := filepath.Join(root, filepath.FromSlash(name))
 	if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
-		log.Fatalf("no source in tree for package %q", pkg)
+		log.Fatalf("no source in tree for import %q: %v", name, err)
 	}

 	context := w.context

@@ -252,7 +252,9 @@ func archArm64() *Arch {
 	register["EQ"] = arm64.COND_EQ
 	register["NE"] = arm64.COND_NE
 	register["HS"] = arm64.COND_HS
+	register["CS"] = arm64.COND_HS
 	register["LO"] = arm64.COND_LO
+	register["CC"] = arm64.COND_LO
 	register["MI"] = arm64.COND_MI
 	register["PL"] = arm64.COND_PL
 	register["VS"] = arm64.COND_VS

|
|
@ -27,15 +27,18 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
|
||||||
case '5':
|
case '5':
|
||||||
if !arch.ARMConditionCodes(prog, cond) {
|
if !arch.ARMConditionCodes(prog, cond) {
|
||||||
p.errorf("unrecognized condition code .%q", cond)
|
p.errorf("unrecognized condition code .%q", cond)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
case '7':
|
case '7':
|
||||||
if !arch.ARM64Suffix(prog, cond) {
|
if !arch.ARM64Suffix(prog, cond) {
|
||||||
p.errorf("unrecognized suffix .%q", cond)
|
p.errorf("unrecognized suffix .%q", cond)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
p.errorf("unrecognized suffix .%q", cond)
|
p.errorf("unrecognized suffix .%q", cond)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if p.firstProg == nil {
|
if p.firstProg == nil {
|
||||||
|
|
@ -49,6 +52,7 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
|
||||||
for _, label := range p.pendingLabels {
|
for _, label := range p.pendingLabels {
|
||||||
if p.labels[label] != nil {
|
if p.labels[label] != nil {
|
||||||
p.errorf("label %q multiply defined", label)
|
p.errorf("label %q multiply defined", label)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
p.labels[label] = prog
|
p.labels[label] = prog
|
||||||
}
|
}
|
||||||
|
|
@ -63,14 +67,17 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateSymbol checks that addr represents a valid name for a pseudo-op.
|
// validSymbol checks that addr represents a valid name for a pseudo-op.
|
||||||
func (p *Parser) validateSymbol(pseudo string, addr *obj.Addr, offsetOk bool) {
|
func (p *Parser) validSymbol(pseudo string, addr *obj.Addr, offsetOk bool) bool {
|
||||||
if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
|
if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
|
||||||
p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr))
|
p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr))
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
if !offsetOk && addr.Offset != 0 {
|
if !offsetOk && addr.Offset != 0 {
|
||||||
p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr))
|
p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr))
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// evalInteger evaluates an integer constant for a pseudo-op.
|
// evalInteger evaluates an integer constant for a pseudo-op.
|
||||||
|
|
@ -79,11 +86,13 @@ func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 {
|
||||||
return p.getConstantPseudo(pseudo, &addr)
|
return p.getConstantPseudo(pseudo, &addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateImmediate checks that addr represents an immediate constant.
|
// validImmediate checks that addr represents an immediate constant.
|
||||||
func (p *Parser) validateImmediate(pseudo string, addr *obj.Addr) {
|
func (p *Parser) validImmediate(pseudo string, addr *obj.Addr) bool {
|
||||||
if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
|
if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
|
||||||
p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
|
p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// asmText assembles a TEXT pseudo-op.
|
// asmText assembles a TEXT pseudo-op.
|
||||||
|
|
@ -102,7 +111,9 @@ func (p *Parser) asmText(word string, operands [][]lex.Token) {
|
||||||
// Operand 0 is the symbol name in the form foo(SB).
|
// Operand 0 is the symbol name in the form foo(SB).
|
||||||
// That means symbol plus indirect on SB and no offset.
|
// That means symbol plus indirect on SB and no offset.
|
||||||
nameAddr := p.address(operands[0])
|
nameAddr := p.address(operands[0])
|
||||||
p.validateSymbol("TEXT", &nameAddr, false)
|
if !p.validSymbol("TEXT", &nameAddr, false) {
|
||||||
|
return
|
||||||
|
}
|
||||||
name := symbolName(&nameAddr)
|
name := symbolName(&nameAddr)
|
||||||
next := 1
|
next := 1
|
||||||
|
|
||||||
|
|
@ -144,6 +155,7 @@ func (p *Parser) asmText(word string, operands [][]lex.Token) {
|
||||||
// There is an argument size. It must be a minus sign followed by a non-negative integer literal.
|
// There is an argument size. It must be a minus sign followed by a non-negative integer literal.
|
||||||
if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
|
if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
|
||||||
p.errorf("TEXT %s: argument size must be of form -integer", name)
|
p.errorf("TEXT %s: argument size must be of form -integer", name)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
argSize = p.positiveAtoi(op[1].String())
|
argSize = p.positiveAtoi(op[1].String())
|
||||||
}
|
}
|
||||||
|
|
@ -185,7 +197,9 @@ func (p *Parser) asmData(word string, operands [][]lex.Token) {
|
||||||
scale := p.parseScale(op[n-1].String())
|
scale := p.parseScale(op[n-1].String())
|
||||||
op = op[:n-2]
|
op = op[:n-2]
|
||||||
nameAddr := p.address(op)
|
nameAddr := p.address(op)
|
||||||
p.validateSymbol("DATA", &nameAddr, true)
|
if !p.validSymbol("DATA", &nameAddr, true) {
|
||||||
|
return
|
||||||
|
}
|
||||||
name := symbolName(&nameAddr)
|
name := symbolName(&nameAddr)
|
||||||
|
|
||||||
// Operand 1 is an immediate constant or address.
|
// Operand 1 is an immediate constant or address.
|
||||||
|
|
@ -195,11 +209,13 @@ func (p *Parser) asmData(word string, operands [][]lex.Token) {
|
||||||
// OK
|
// OK
|
||||||
default:
|
default:
|
||||||
p.errorf("DATA value must be an immediate constant or address")
|
p.errorf("DATA value must be an immediate constant or address")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// The addresses must not overlap. Easiest test: require monotonicity.
|
// The addresses must not overlap. Easiest test: require monotonicity.
|
||||||
if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
|
if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
|
||||||
p.errorf("overlapping DATA entry for %s", name)
|
p.errorf("overlapping DATA entry for %s", name)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
p.dataAddr[name] = nameAddr.Offset + int64(scale)
|
p.dataAddr[name] = nameAddr.Offset + int64(scale)
|
||||||
|
|
||||||
|
|
@ -228,7 +244,9 @@ func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
|
||||||
|
|
||||||
// Operand 0 has the general form foo<>+0x04(SB).
|
// Operand 0 has the general form foo<>+0x04(SB).
|
||||||
nameAddr := p.address(operands[0])
|
nameAddr := p.address(operands[0])
|
||||||
p.validateSymbol("GLOBL", &nameAddr, false)
|
if !p.validSymbol("GLOBL", &nameAddr, false) {
|
||||||
|
return
|
||||||
|
}
|
||||||
next := 1
|
next := 1
|
||||||
|
|
||||||
// Next operand is the optional flag, a literal integer.
|
// Next operand is the optional flag, a literal integer.
|
||||||
|
|
@ -240,7 +258,9 @@ func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
|
||||||
|
|
||||||
// Final operand is an immediate constant.
|
// Final operand is an immediate constant.
|
||||||
addr := p.address(operands[next])
|
addr := p.address(operands[next])
|
||||||
p.validateImmediate("GLOBL", &addr)
|
if !p.validImmediate("GLOBL", &addr) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// log.Printf("GLOBL %s %d, $%d", name, flag, size)
|
// log.Printf("GLOBL %s %d, $%d", name, flag, size)
|
||||||
prog := &obj.Prog{
|
prog := &obj.Prog{
|
||||||
|
|
@ -266,11 +286,15 @@ func (p *Parser) asmPCData(word string, operands [][]lex.Token) {
|
||||||
|
|
||||||
// Operand 0 must be an immediate constant.
|
// Operand 0 must be an immediate constant.
|
||||||
key := p.address(operands[0])
|
key := p.address(operands[0])
|
||||||
p.validateImmediate("PCDATA", &key)
|
if !p.validImmediate("PCDATA", &key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Operand 1 must be an immediate constant.
|
// Operand 1 must be an immediate constant.
|
||||||
value := p.address(operands[1])
|
value := p.address(operands[1])
|
||||||
p.validateImmediate("PCDATA", &value)
|
if !p.validImmediate("PCDATA", &value) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
|
// log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
|
||||||
prog := &obj.Prog{
|
prog := &obj.Prog{
|
||||||
|
|
@ -293,11 +317,15 @@ func (p *Parser) asmFuncData(word string, operands [][]lex.Token) {
|
||||||
|
|
||||||
// Operand 0 must be an immediate constant.
|
// Operand 0 must be an immediate constant.
|
||||||
valueAddr := p.address(operands[0])
|
valueAddr := p.address(operands[0])
|
||||||
p.validateImmediate("FUNCDATA", &valueAddr)
|
if !p.validImmediate("FUNCDATA", &valueAddr) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Operand 1 is a symbol name in the form foo(SB).
|
// Operand 1 is a symbol name in the form foo(SB).
|
||||||
nameAddr := p.address(operands[1])
|
nameAddr := p.address(operands[1])
|
||||||
p.validateSymbol("FUNCDATA", &nameAddr, true)
|
if !p.validSymbol("FUNCDATA", &nameAddr, true) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
prog := &obj.Prog{
|
prog := &obj.Prog{
|
||||||
Ctxt: p.ctxt,
|
Ctxt: p.ctxt,
|
||||||
|
|
@ -340,6 +368,7 @@ func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
|
||||||
reg, ok := p.arch.RegisterNumber("R", int16(reg))
|
reg, ok := p.arch.RegisterNumber("R", int16(reg))
|
||||||
if !ok {
|
if !ok {
|
||||||
p.errorf("bad register number %d", reg)
|
p.errorf("bad register number %d", reg)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
prog.Reg = reg
|
prog.Reg = reg
|
||||||
break
|
break
|
||||||
|
|
@ -390,6 +419,7 @@ func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
|
||||||
prog.To = a[0]
|
prog.To = a[0]
|
||||||
default:
|
default:
|
||||||
p.errorf("cannot assemble jump %+v", target)
|
p.errorf("cannot assemble jump %+v", target)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.append(prog, cond, true)
|
p.append(prog, cond, true)
|
||||||
|
|
@ -400,9 +430,9 @@ func (p *Parser) patch() {
|
||||||
targetProg := p.labels[patch.label]
|
targetProg := p.labels[patch.label]
|
||||||
if targetProg == nil {
|
if targetProg == nil {
|
||||||
p.errorf("undefined label %s", patch.label)
|
p.errorf("undefined label %s", patch.label)
|
||||||
} else {
|
return
|
||||||
p.branch(patch.prog, targetProg)
|
|
||||||
}
|
}
|
||||||
|
p.branch(patch.prog, targetProg)
|
||||||
}
|
}
|
||||||
p.toPatch = p.toPatch[:0]
|
p.toPatch = p.toPatch[:0]
|
||||||
}
|
}
|
||||||
|
|
@ -468,6 +498,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
p.errorf("unrecognized addressing for %s", obj.Aconv(op))
|
p.errorf("unrecognized addressing for %s", obj.Aconv(op))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
if arch.IsARMFloatCmp(op) {
|
if arch.IsARMFloatCmp(op) {
|
||||||
prog.From = a[0]
|
prog.From = a[0]
|
||||||
|
|
@ -506,6 +537,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
prog.To = a[1]
|
prog.To = a[1]
|
||||||
if a[2].Type != obj.TYPE_REG {
|
if a[2].Type != obj.TYPE_REG {
|
||||||
p.errorf("invalid addressing modes for third operand to %s instruction, must be register", obj.Aconv(op))
|
p.errorf("invalid addressing modes for third operand to %s instruction, must be register", obj.Aconv(op))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
prog.RegTo2 = a[2].Reg
|
prog.RegTo2 = a[2].Reg
|
||||||
break
|
break
|
||||||
|
|
@ -541,9 +573,11 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
prog.To = a[2]
|
prog.To = a[2]
|
||||||
default:
|
default:
|
||||||
p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
|
p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
p.errorf("TODO: implement three-operand instructions for this architecture")
|
p.errorf("TODO: implement three-operand instructions for this architecture")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
case 4:
|
case 4:
|
||||||
if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
|
if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
|
||||||
|
|
@ -577,6 +611,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
|
p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
|
||||||
|
return
|
||||||
case 5:
|
case 5:
|
||||||
if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
|
if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
|
||||||
// Always reg, reg, con, con, reg. (con, con is a 'mask').
|
// Always reg, reg, con, con, reg. (con, con is a 'mask').
|
||||||
|
|
@ -598,6 +633,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op))
|
p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op))
|
||||||
|
return
|
||||||
case 6:
|
case 6:
|
||||||
if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
|
if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
|
||||||
// Strange special case: MCR, MRC.
|
// Strange special case: MCR, MRC.
|
||||||
|
|
@ -621,6 +657,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
||||||
fallthrough
|
fallthrough
|
||||||
default:
|
default:
|
||||||
p.errorf("can't handle %s instruction with %d operands", obj.Aconv(op), len(a))
|
p.errorf("can't handle %s instruction with %d operands", obj.Aconv(op), len(a))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.append(prog, cond, true)
|
p.append(prog, cond, true)
|
||||||
|
|
|
||||||
|
|
@@ -293,6 +293,7 @@ var armOperandTests = []operandTest{
 	{"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
 	{"[):[o-FP", ""},     // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
 	{"[):[R0-FP", ""},
+	{"(", ""}, // Issue 12466 - backed up before beginning of line.
 }

 var ppc64OperandTests = []operandTest{
@@ -38,6 +38,7 @@ type Parser struct {
 	firstProg   *obj.Prog
 	lastProg    *obj.Prog
 	dataAddr    map[string]int64 // Most recent address for DATA for this symbol.
+	isJump      bool             // Instruction being assembled is a jump.
 	errorWriter io.Writer
 }

@@ -155,6 +156,7 @@ func (p *Parser) line() bool {
 			// Remember this location so we can swap the operands below.
 			if colon >= 0 {
 				p.errorf("invalid ':' in operand")
+				return true
 			}
 			colon = len(operands)
 		}
@@ -196,15 +198,15 @@ func (p *Parser) line() bool {

 func (p *Parser) instruction(op int, word, cond string, operands [][]lex.Token) {
 	p.addr = p.addr[0:0]
-	isJump := p.arch.IsJump(word)
+	p.isJump = p.arch.IsJump(word)
 	for _, op := range operands {
 		addr := p.address(op)
-		if !isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
+		if !p.isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
 			p.errorf("illegal use of pseudo-register in %s", word)
 		}
 		p.addr = append(p.addr, addr)
 	}
-	if isJump {
+	if p.isJump {
 		p.asmJump(op, cond, p.addr)
 		return
 	}
@@ -338,8 +340,13 @@ func (p *Parser) operand(a *obj.Addr) bool {
 	case scanner.Int, scanner.Float, scanner.String, scanner.Char, '+', '-', '~':
 		haveConstant = true
 	case '(':
-		// Could be parenthesized expression or (R).
-		rname := p.next().String()
+		// Could be parenthesized expression or (R). Must be something, though.
+		tok := p.next()
+		if tok.ScanToken == scanner.EOF {
+			p.errorf("missing right parenthesis")
+			return false
+		}
+		rname := tok.String()
 		p.back()
 		haveConstant = !p.atStartOfRegister(rname)
 		if !haveConstant {
@@ -361,6 +368,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
 	if p.have(scanner.String) {
 		if prefix != '$' {
 			p.errorf("string constant must be an immediate")
+			return false
 		}
 		str, err := strconv.Unquote(p.get(scanner.String).String())
 		if err != nil {
@@ -568,12 +576,14 @@ func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
 	}
 	a.Sym = obj.Linklookup(p.ctxt, name, isStatic)
 	if p.peek() == scanner.EOF {
-		if prefix != 0 {
-			p.errorf("illegal addressing mode for symbol %s", name)
+		if prefix == 0 && p.isJump {
+			// Symbols without prefix or suffix are jump labels.
+			return
 		}
+		p.errorf("illegal or missing addressing mode for symbol %s", name)
 		return
 	}
-	// Expect (SB) or (FP), (PC), (SB), or (SP)
+	// Expect (SB), (FP), (PC), or (SP)
 	p.get('(')
 	reg := p.get(scanner.Ident).String()
 	p.get(')')
@@ -952,7 +962,11 @@ func (p *Parser) next() lex.Token {
 }

 func (p *Parser) back() {
-	p.inputPos--
+	if p.inputPos == 0 {
+		p.errorf("internal error: backing up before BOL")
+	} else {
+		p.inputPos--
+	}
 }

 func (p *Parser) peek() lex.ScanToken {
@@ -35,6 +35,8 @@ func TestErroneous(t *testing.T) {
 		{"TEXT", "%", "expect two or three operands for TEXT"},
 		{"TEXT", "1, 1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
 		{"TEXT", "$\"foo\", 0, $1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
+		{"TEXT", "$0É:0, 0, $1", "expected EOF, found É"},   // Issue #12467.
+		{"TEXT", "$:0:(SB, 0, $1", "expected '(', found 0"}, // Issue 12468.
 		{"FUNCDATA", "", "expect two operands for FUNCDATA"},
 		{"FUNCDATA", "(SB ", "expect two operands for FUNCDATA"},
 		{"DATA", "", "expect two operands for DATA"},
src/cmd/asm/internal/asm/testdata/arm64.out (vendored)
@@ -37,7 +37,7 @@
 147 00037 (testdata/arm64.s:147) CSEL LT, R1, R2, ZR
 148 00038 (testdata/arm64.s:148) CSINC GT, R1, ZR, R3
 149 00039 (testdata/arm64.s:149) CSNEG MI, R1, R2, R3
-150 00040 (testdata/arm64.s:150) CSINV 0, R1, R2, R3
+150 00040 (testdata/arm64.s:150) CSINV HS, R1, R2, R3
 156 00041 (testdata/arm64.s:156) CSEL LT, R1, R2
 164 00042 (testdata/arm64.s:164) CCMN MI, ZR, R1, $4
 173 00043 (testdata/arm64.s:173) FADDD $(0.5), F1
@@ -63,7 +63,12 @@ func predefine(defines flags.MultiFlag) map[string]*Macro {
 	return macros
 }

+var panicOnError bool // For testing.
+
 func (in *Input) Error(args ...interface{}) {
+	if panicOnError {
+		panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...)))
+	}
 	fmt.Fprintf(os.Stderr, "%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))
 	os.Exit(1)
 }
@@ -113,6 +118,10 @@ func (in *Input) Next() ScanToken {
 		}
 		fallthrough
 	default:
+		if tok == scanner.EOF && len(in.ifdefStack) > 0 {
+			// We're skipping text but have run out of input with no #endif.
+			in.Error("unclosed #ifdef or #ifndef")
+		}
 		in.beginningOfLine = tok == '\n'
 		if in.enabled() {
 			in.text = in.Stack.Text()
@@ -251,6 +260,9 @@ func (in *Input) macroDefinition(name string) ([]string, []Token) {
 	var tokens []Token
 	// Scan to newline. Backslashes escape newlines.
 	for tok != '\n' {
+		if tok == scanner.EOF {
+			in.Error("missing newline in macro definition for %q\n", name)
+		}
 		if tok == '\\' {
 			tok = in.Stack.Next()
 			if tok != '\n' && tok != '\\' {
@@ -226,6 +226,35 @@ var lexTests = []lexTest{
 		),
 		"C.\n",
 	},
+	{
+		"nested #define",
+		lines(
+			"#define A #define B THIS",
+			"A",
+			"B",
+		),
+		"THIS.\n",
+	},
+	{
+		"nested #define with args",
+		lines(
+			"#define A #define B(x) x",
+			"A",
+			"B(THIS)",
+		),
+		"THIS.\n",
+	},
+	/* This one fails. See comment in Slice.Col.
+	{
+		"nested #define with args",
+		lines(
+			"#define A #define B (x) x",
+			"A",
+			"B(THIS)",
+		),
+		"x.\n",
+	},
+	*/
 }

 func TestLex(t *testing.T) {
@@ -258,3 +287,76 @@ func drain(input *Input) string {
 		buf.WriteString(input.Text())
 	}
 }
+
+type badLexTest struct {
+	input string
+	error string
+}
+
+var badLexTests = []badLexTest{
+	{
+		"3 #define foo bar\n",
+		"'#' must be first item on line",
+	},
+	{
+		"#ifdef foo\nhello",
+		"unclosed #ifdef or #ifndef",
+	},
+	{
+		"#ifndef foo\nhello",
+		"unclosed #ifdef or #ifndef",
+	},
+	{
+		"#ifdef foo\nhello\n#else\nbye",
+		"unclosed #ifdef or #ifndef",
+	},
+	{
+		"#define A() A()\nA()",
+		"recursive macro invocation",
+	},
+	{
+		"#define A a\n#define A a\n",
+		"redefinition of macro",
+	},
+	{
+		"#define A a",
+		"no newline after macro definition",
+	},
+}
+
+func TestBadLex(t *testing.T) {
+	for _, test := range badLexTests {
+		input := NewInput(test.error)
+		input.Push(NewTokenizer(test.error, strings.NewReader(test.input), nil))
+		err := firstError(input)
+		if err == nil {
+			t.Errorf("%s: got no error", test.error)
+			continue
+		}
+		if !strings.Contains(err.Error(), test.error) {
+			t.Errorf("got error %q expected %q", err.Error(), test.error)
+		}
+	}
+}
+
+// firstError returns the first error value triggered by the input.
+func firstError(input *Input) (err error) {
+	panicOnError = true
+	defer func() {
+		panicOnError = false
+		switch e := recover(); e := e.(type) {
+		case nil:
+		case error:
+			err = e
+		default:
+			panic(e)
+		}
+	}()
+
+	for {
+		tok := input.Next()
+		if tok == scanner.EOF {
+			return
+		}
+	}
+}
@@ -44,8 +44,16 @@ func (s *Slice) Line() int {
 }

 func (s *Slice) Col() int {
-	// Col is only called when defining a macro, which can't reach here.
-	panic("cannot happen: slice col")
+	// TODO: Col is only called when defining a macro and all it cares about is increasing
+	// position to discover whether there is a blank before the parenthesis.
+	// We only get here if defining a macro inside a macro.
+	// This imperfect implementation means we cannot tell the difference between
+	//	#define A #define B(x) x
+	// and
+	//	#define A #define B (x) x
+	// The first has definition of B has an argument, the second doesn't. Because we let
+	// text/scanner strip the blanks for us, this is extremely rare, hard to fix, and not worth it.
+	return s.pos
 }

 func (s *Slice) SetPos(line int, file string) {
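The replacement comment above spells out why Slice.Col cannot distinguish "#define A #define B(x) x" from "#define A #define B (x) x": by the time a Slice re-reads the tokens, text/scanner has already discarded the blank. The standalone Go sketch below (not part of the change) shows how column information does separate the two forms while a real scanner is still available; the helper name colGap is invented purely for illustration.

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

// colGap reports how many columns sit between the end of the macro name and
// the following token in a single-line definition body.
func colGap(def string) int {
	var s scanner.Scanner
	s.Init(strings.NewReader(def))
	s.Scan() // macro name, e.g. "B"
	nameEnd := s.Position.Column + len(s.TokenText())
	s.Scan() // next token, e.g. '('
	return s.Position.Column - nameEnd
}

func main() {
	fmt.Println(colGap("B(x) x"))  // 0: '(' is adjacent, so B takes an argument
	fmt.Println(colGap("B (x) x")) // 1: a blank intervenes, so it does not
}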
@@ -391,17 +391,13 @@ the translation process.

 Translating Go

-[The rest of this comment refers to 6g, the Go compiler that is part
-of the amd64 port of the gc Go toolchain. Everything here applies to
-another architecture's compilers as well.]
-
 Given the input Go files x.go and y.go, cgo generates these source
 files:

-	x.cgo1.go       # for 6g
-	y.cgo1.go       # for 6g
-	_cgo_gotypes.go # for 6g
-	_cgo_import.go  # for 6g (if -dynout _cgo_import.go)
+	x.cgo1.go       # for gc (cmd/compile)
+	y.cgo1.go       # for gc
+	_cgo_gotypes.go # for gc
+	_cgo_import.go  # for gc (if -dynout _cgo_import.go)
 	x.cgo2.c        # for gcc
 	y.cgo2.c        # for gcc
 	_cgo_defun.c    # for gcc (if -gccgo)
@@ -464,7 +460,7 @@ Linking

 Once the _cgo_export.c and *.cgo2.c files have been compiled with gcc,
 they need to be linked into the final binary, along with the libraries
-they might depend on (in the case of puts, stdio). 6l has been
+they might depend on (in the case of puts, stdio). cmd/link has been
 extended to understand basic ELF files, but it does not understand ELF
 in the full complexity that modern C libraries embrace, so it cannot
 in general generate direct references to the system libraries.
@@ -495,23 +491,23 @@ _cgo_import.go, which looks like:
 //go:cgo_import_dynamic _ _ "libc.so.6"

 In the end, the compiled Go package, which will eventually be
-presented to 6l as part of a larger program, contains:
+presented to cmd/link as part of a larger program, contains:

-	_go_.6        # 6g-compiled object for _cgo_gotypes.go, _cgo_import.go, *.cgo1.go
+	_go_.o        # gc-compiled object for _cgo_gotypes.go, _cgo_import.go, *.cgo1.go
 	_all.o        # gcc-compiled object for _cgo_export.c, *.cgo2.c

-The final program will be a dynamic executable, so that 6l can avoid
+The final program will be a dynamic executable, so that cmd/link can avoid
 needing to process arbitrary .o files. It only needs to process the .o
 files generated from C files that cgo writes, and those are much more
 limited in the ELF or other features that they use.

-In essence, the _cgo_import.6 file includes the extra linking
-directives that 6l is not sophisticated enough to derive from _all.o
+In essence, the _cgo_import.o file includes the extra linking
+directives that cmd/link is not sophisticated enough to derive from _all.o
 on its own. Similarly, the _all.o uses dynamic references to real
-system object code because 6l is not sophisticated enough to process
+system object code because cmd/link is not sophisticated enough to process
 the real code.

-The main benefits of this system are that 6l remains relatively simple
+The main benefits of this system are that cmd/link remains relatively simple
 (it does not need to implement a complete ELF and Mach-O linker) and
 that gcc is not needed after the package is compiled. For example,
 package net uses cgo for access to name resolution functions provided
@@ -540,17 +536,17 @@ system calls.

 Internal and External Linking

-The text above describes "internal" linking, in which 6l parses and
+The text above describes "internal" linking, in which cmd/link parses and
 links host object files (ELF, Mach-O, PE, and so on) into the final
-executable itself. Keeping 6l simple means we cannot possibly
+executable itself. Keeping cmd/link simple means we cannot possibly
 implement the full semantics of the host linker, so the kinds of
 objects that can be linked directly into the binary is limited (other
 code can only be used as a dynamic library). On the other hand, when
-using internal linking, 6l can generate Go binaries by itself.
+using internal linking, cmd/link can generate Go binaries by itself.

 In order to allow linking arbitrary object files without requiring
 dynamic libraries, cgo supports an "external" linking mode too. In
-external linking mode, 6l does not process any host object files.
+external linking mode, cmd/link does not process any host object files.
 Instead, it collects all the Go code and writes a single go.o object
 file containing it. Then it invokes the host linker (usually gcc) to
 combine the go.o object file and any supporting non-Go code into a
@@ -582,8 +578,8 @@ to be made when linking the final binary.

 Linking Directives

 In either linking mode, package-specific directives must be passed
-through to 6l. These are communicated by writing //go: directives in a
-Go source file compiled by 6g. The directives are copied into the .6
+through to cmd/link. These are communicated by writing //go: directives in a
+Go source file compiled by gc. The directives are copied into the .o
 object file and then processed by the linker.

 The directives are:
@@ -672,7 +668,7 @@ Example
 As a simple example, consider a package that uses cgo to call C.sin.
 The following code will be generated by cgo:

-	// compiled by 6g
+	// compiled by gc

 	//go:cgo_ldflag "-lm"

@@ -708,7 +704,7 @@ Otherwise the link will be an internal one.
 The linking directives are used according to the kind of final link
 used.

-In internal mode, 6l itself processes all the host object files, in
+In internal mode, cmd/link itself processes all the host object files, in
 particular foo.cgo2.o. To do so, it uses the cgo_import_dynamic and
 cgo_dynamic_linker directives to learn that the otherwise undefined
 reference to sin in foo.cgo2.o should be rewritten to refer to the
@@ -716,56 +712,56 @@ symbol sin with version GLIBC_2.2.5 from the dynamic library
 "libm.so.6", and the binary should request "/lib/ld-linux.so.2" as its
 runtime dynamic linker.

-In external mode, 6l does not process any host object files, in
-particular foo.cgo2.o. It links together the 6g-generated object
+In external mode, cmd/link does not process any host object files, in
+particular foo.cgo2.o. It links together the gc-generated object
 files, along with any other Go code, into a go.o file. While doing
-that, 6l will discover that there is no definition for
-_cgo_gcc_Cfunc_sin, referred to by the 6g-compiled source file. This
-is okay, because 6l also processes the cgo_import_static directive and
+that, cmd/link will discover that there is no definition for
+_cgo_gcc_Cfunc_sin, referred to by the gc-compiled source file. This
+is okay, because cmd/link also processes the cgo_import_static directive and
 knows that _cgo_gcc_Cfunc_sin is expected to be supplied by a host
-object file, so 6l does not treat the missing symbol as an error when
+object file, so cmd/link does not treat the missing symbol as an error when
 creating go.o. Indeed, the definition for _cgo_gcc_Cfunc_sin will be
 provided to the host linker by foo2.cgo.o, which in turn will need the
-symbol 'sin'. 6l also processes the cgo_ldflag directives, so that it
+symbol 'sin'. cmd/link also processes the cgo_ldflag directives, so that it
 knows that the eventual host link command must include the -lm
 argument, so that the host linker will be able to find 'sin' in the
 math library.

-6l Command Line Interface
+cmd/link Command Line Interface

-The go command and any other Go-aware build systems invoke 6l
-to link a collection of packages into a single binary. By default, 6l will
+The go command and any other Go-aware build systems invoke cmd/link
+to link a collection of packages into a single binary. By default, cmd/link will
 present the same interface it does today:

-	6l main.a
+	cmd/link main.a

-produces a file named 6.out, even if 6l does so by invoking the host
+produces a file named a.out, even if cmd/link does so by invoking the host
 linker in external linking mode.

-By default, 6l will decide the linking mode as follows: if the only
+By default, cmd/link will decide the linking mode as follows: if the only
 packages using cgo are those on a whitelist of standard library
-packages (net, os/user, runtime/cgo), 6l will use internal linking
-mode. Otherwise, there are non-standard cgo packages involved, and 6l
+packages (net, os/user, runtime/cgo), cmd/link will use internal linking
+mode. Otherwise, there are non-standard cgo packages involved, and cmd/link
 will use external linking mode. The first rule means that a build of
 the godoc binary, which uses net but no other cgo, can run without
 needing gcc available. The second rule means that a build of a
 cgo-wrapped library like sqlite3 can generate a standalone executable
 instead of needing to refer to a dynamic library. The specific choice
-can be overridden using a command line flag: 6l -linkmode=internal or
-6l -linkmode=external.
+can be overridden using a command line flag: cmd/link -linkmode=internal or
+cmd/link -linkmode=external.

-In an external link, 6l will create a temporary directory, write any
+In an external link, cmd/link will create a temporary directory, write any
 host object files found in package archives to that directory (renamed
 to avoid conflicts), write the go.o file to that directory, and invoke
 the host linker. The default value for the host linker is $CC, split
 into fields, or else "gcc". The specific host linker command line can
-be overridden using command line flags: 6l -extld=clang
+be overridden using command line flags: cmd/link -extld=clang
 -extldflags='-ggdb -O3'. If any package in a build includes a .cc or
 other file compiled by the C++ compiler, the go tool will use the
 -extld option to set the host linker to the C++ compiler.

 These defaults mean that Go-aware build systems can ignore the linking
-changes and keep running plain '6l' and get reasonable results, but
+changes and keep running plain 'cmd/link' and get reasonable results, but
 they can also control the linking details if desired.

 */
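The doc.go text above walks through how the cgo_ldflag, cgo_import_dynamic, cgo_import_static and cgo_dynamic_linker directives drive cmd/link for the C.sin example. Below is a rough sketch of how such directives can look in a gc-compiled Go source file; the local symbol name libc_sin and the package name are illustrative assumptions, not output captured from a real cgo run.

// Hypothetical excerpt of cgo-generated Go source for the sin example;
// the symbol spellings below are assumptions for illustration only.
package example

//go:cgo_ldflag "-lm"
//go:cgo_import_dynamic libc_sin sin#GLIBC_2.2.5 "libm.so.6"
//go:cgo_dynamic_linker "/lib/ld-linux.so.2"

In internal mode cmd/link consumes these directives itself; in external mode it only collects the -lm flag and leaves symbol resolution to the host linker, as the text explains.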
@@ -607,6 +607,10 @@ func (p *Package) rewriteRef(f *File) {
 			if r.Name.Kind != "func" {
 				if r.Name.Kind == "type" {
 					r.Context = "type"
+					if r.Name.Type == nil {
+						error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
+						break
+					}
 					expr = r.Name.Type.Go
 					break
 				}
@@ -658,6 +662,10 @@ func (p *Package) rewriteRef(f *File) {
 			}
 		} else if r.Name.Kind == "type" {
 			// Okay - might be new(T)
+			if r.Name.Type == nil {
+				error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
+				break
+			}
 			expr = r.Name.Type.Go
 		} else if r.Name.Kind == "var" {
 			expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
@@ -11,6 +11,7 @@ import (
 	"go/printer"
 	"go/token"
 	"os"
+	"path/filepath"
 	"strings"
 )

@@ -19,7 +20,7 @@ func (p *Package) godefs(f *File, srcfile string) string {
 	var buf bytes.Buffer

 	fmt.Fprintf(&buf, "// Created by cgo -godefs - DO NOT EDIT\n")
-	fmt.Fprintf(&buf, "// %s\n", strings.Join(os.Args, " "))
+	fmt.Fprintf(&buf, "// %s %s\n", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " "))
 	fmt.Fprintf(&buf, "\n")

 	override := make(map[string]string)
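A minimal sketch (not from the commit) of what the header change above does: printing filepath.Base(os.Args[0]) instead of the raw os.Args keeps the full, possibly temporary, path of the cgo binary out of the generated "// Created by cgo -godefs" comment, presumably so the emitted file does not vary with where the tool happens to live.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Old form: the complete tool path leaks into the generated header.
	fmt.Printf("// %s\n", strings.Join(os.Args, " "))
	// New form: only the base name of the binary plus its arguments.
	fmt.Printf("// %s %s\n", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " "))
}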
@@ -279,11 +279,7 @@ func main() {
 		if nerrors > 0 {
 			os.Exit(2)
 		}
-		pkg := f.Package
-		if dir := os.Getenv("CGOPKGPATH"); dir != "" {
-			pkg = filepath.Join(dir, pkg)
-		}
-		p.PackagePath = pkg
+		p.PackagePath = f.Package
 		p.Record(f)
 		if *godefs {
 			os.Stdout.WriteString(p.godefs(f, input))
@@ -933,23 +933,15 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
 			fmt.Fprintf(fgcch, "\n%s", exp.Doc)
 		}

+		fmt.Fprintf(fgcch, "extern %s %s %s;\n", cRet, exp.ExpName, cParams)
+
 		// We need to use a name that will be exported by the
 		// Go code; otherwise gccgo will make it static and we
 		// will not be able to link against it from the C
 		// code.
 		goName := "Cgoexp_" + exp.ExpName
-		fmt.Fprintf(fgcch, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
-		fmt.Fprint(fgcch, "\n")
+		fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
+		fmt.Fprint(fgcc, "\n")

-		// Use a #define so that the C code that includes
-		// cgo_export.h will be able to refer to the Go
-		// function using the expected name.
-		fmt.Fprintf(fgcch, "#define %s %s\n", exp.ExpName, goName)
-
-		// Use a #undef in _cgo_export.c so that we ignore the
-		// #define from cgo_export.h, since here we are
-		// defining the real function.
-		fmt.Fprintf(fgcc, "#undef %s\n", exp.ExpName)
-
 		fmt.Fprint(fgcc, "\n")
 		fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
@@ -80,17 +80,27 @@ func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
 		gins(x86.ACLD, nil, nil)
 	} else {
 		// normal direction
-		if q > 128 || (gc.Nacl && q >= 4) {
+		if q > 128 || (gc.Nacl && q >= 4) || (obj.Getgoos() == "plan9" && q >= 4) {
 			gconreg(movptr, q, x86.REG_CX)
 			gins(x86.AREP, nil, nil)   // repeat
 			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
 		} else if q >= 4 {
+			var oldx0 gc.Node
+			var x0 gc.Node
+			savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
+
 			p := gins(obj.ADUFFCOPY, nil, nil)
 			p.To.Type = obj.TYPE_ADDR
 			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

-			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
-			p.To.Offset = 14 * (128 - q)
+			// 64 blocks taking 14 bytes each
+			// see ../../../../runtime/mkduff.go
+			p.To.Offset = 14 * (64 - q/2)
+			restx(&x0, &oldx0)
+
+			if q%2 != 0 {
+				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+			}
 		} else if !gc.Nacl && c == 0 {
 			// We don't need the MOVSQ side-effect of updating SI and DI,
 			// and issuing a sequence of MOVQs directly is faster.
@ -28,6 +28,7 @@ func defframe(ptxt *obj.Prog) {
|
||||||
hi := int64(0)
|
hi := int64(0)
|
||||||
lo := hi
|
lo := hi
|
||||||
ax := uint32(0)
|
ax := uint32(0)
|
||||||
|
x0 := uint32(0)
|
||||||
|
|
||||||
// iterate through declarations - they are sorted in decreasing xoffset order.
|
// iterate through declarations - they are sorted in decreasing xoffset order.
|
||||||
for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
|
for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
|
||||||
|
|
@ -50,7 +51,7 @@ func defframe(ptxt *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// zero old range
|
// zero old range
|
||||||
p = zerorange(p, int64(frame), lo, hi, &ax)
|
p = zerorange(p, int64(frame), lo, hi, &ax, &x0)
|
||||||
|
|
||||||
// set new range
|
// set new range
|
||||||
hi = n.Xoffset + n.Type.Width
|
hi = n.Xoffset + n.Type.Width
|
||||||
|
|
@ -59,88 +60,104 @@ func defframe(ptxt *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// zero final range
|
// zero final range
|
||||||
zerorange(p, int64(frame), lo, hi, &ax)
|
zerorange(p, int64(frame), lo, hi, &ax, &x0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DUFFZERO consists of repeated blocks of 4 MOVs + ADD,
|
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
|
||||||
// with 4 STOSQs at the very end.
|
|
||||||
// The trailing STOSQs prevent the need for a DI preadjustment
|
|
||||||
// for small numbers of words to clear.
|
|
||||||
// See runtime/mkduff.go.
|
// See runtime/mkduff.go.
|
||||||
const (
|
const (
|
||||||
dzBlocks = 31 // number of MOV/ADD blocks
|
dzBlocks = 16 // number of MOV/ADD blocks
|
||||||
dzBlockLen = 4 // number of clears per block
|
dzBlockLen = 4 // number of clears per block
|
||||||
dzBlockSize = 19 // size of instructions in a single block
|
dzBlockSize = 19 // size of instructions in a single block
|
||||||
dzMovSize = 4 // size of single MOV instruction w/ offset
|
dzMovSize = 4 // size of single MOV instruction w/ offset
|
||||||
dzAddSize = 4 // size of single ADD instruction
|
dzAddSize = 4 // size of single ADD instruction
|
||||||
dzDIStep = 8 // number of bytes cleared by each MOV instruction
|
dzClearStep = 16 // number of bytes cleared by each MOV instruction
|
||||||
|
|
||||||
dzTailLen = 4 // number of final STOSQ instructions
|
dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
|
||||||
dzTailSize = 2 // size of single STOSQ instruction
|
dzSize = dzBlocks * dzBlockSize
|
||||||
|
|
||||||
dzSize = dzBlocks*dzBlockSize + dzTailLen*dzTailSize // total size of DUFFZERO routine
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
|
|
||||||
// q is the number of words to zero.
|
|
||||||
func dzDI(q int64) int64 {
|
|
||||||
if q < dzTailLen {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
q -= dzTailLen
|
|
||||||
if q%dzBlockLen == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return -dzDIStep * (dzBlockLen - q%dzBlockLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dzOff returns the offset for a jump into DUFFZERO.
|
// dzOff returns the offset for a jump into DUFFZERO.
|
||||||
// q is the number of words to zero.
|
// b is the number of bytes to zero.
|
||||||
func dzOff(q int64) int64 {
|
func dzOff(b int64) int64 {
|
||||||
off := int64(dzSize)
|
off := int64(dzSize)
|
||||||
if q < dzTailLen {
|
off -= b / dzClearLen * dzBlockSize
|
||||||
return off - q*dzTailSize
|
tailLen := b % dzClearLen
|
||||||
}
|
if tailLen >= dzClearStep {
|
||||||
off -= dzTailLen * dzTailSize
|
off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
|
||||||
q -= dzTailLen
|
|
||||||
blocks, steps := q/dzBlockLen, q%dzBlockLen
|
|
||||||
off -= dzBlockSize * blocks
|
|
||||||
if steps > 0 {
|
|
||||||
off -= dzAddSize + dzMovSize*steps
|
|
||||||
}
|
}
|
||||||
return off
|
return off
|
||||||
}
|
}
|
||||||
|
|
||||||
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
|
// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
|
||||||
|
// b is the number of bytes to zero.
|
||||||
|
func dzDI(b int64) int64 {
|
||||||
|
tailLen := b % dzClearLen
|
||||||
|
if tailLen < dzClearStep {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
tailSteps := tailLen / dzClearStep
|
||||||
|
return -dzClearStep * (dzBlockLen - tailSteps)
|
||||||
|
}
|
||||||
|
|
||||||
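The rewritten dzOff and dzDI helpers above pick the entry point into the new 16-byte-per-MOVUPS DUFFZERO and the DI pre-adjustment from a byte count. The standalone sketch below (not part of the commit) simply restates that arithmetic with the constants from this hunk so the numbers can be checked by hand; the chosen input of 96 bytes is only an example.

package main

import "fmt"

const (
	dzBlocks    = 16 // number of MOV/ADD blocks
	dzBlockLen  = 4  // number of clears per block
	dzBlockSize = 19 // size of instructions in a single block
	dzMovSize   = 4  // size of single MOV instruction w/ offset
	dzAddSize   = 4  // size of single ADD instruction
	dzClearStep = 16 // number of bytes cleared by each MOV instruction

	dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
	dzSize     = dzBlocks * dzBlockSize
)

// dzOff returns the offset for a jump into DUFFZERO for b bytes.
func dzOff(b int64) int64 {
	off := int64(dzSize)
	off -= b / dzClearLen * dzBlockSize
	tailLen := b % dzClearLen
	if tailLen >= dzClearStep {
		off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
	}
	return off
}

// dzDI returns the pre-adjustment to DI for a call to DUFFZERO for b bytes.
func dzDI(b int64) int64 {
	tailLen := b % dzClearLen
	if tailLen < dzClearStep {
		return 0
	}
	return -dzClearStep * (dzBlockLen - tailLen/dzClearStep)
}

func main() {
	// 96 bytes = one full 64-byte block plus a 32-byte tail (two 16-byte MOVs):
	// offset 304 - 19 - (4 + 4*2) = 273, and DI is pre-adjusted by -16*(4-2) = -32.
	fmt.Println(dzOff(96), dzDI(96)) // 273 -32
}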
|
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
|
||||||
cnt := hi - lo
|
cnt := hi - lo
|
||||||
if cnt == 0 {
|
if cnt == 0 {
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
if *ax == 0 {
|
|
||||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
|
||||||
*ax = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if cnt%int64(gc.Widthreg) != 0 {
|
if cnt%int64(gc.Widthreg) != 0 {
|
||||||
// should only happen with nacl
|
// should only happen with nacl
|
||||||
if cnt%int64(gc.Widthptr) != 0 {
|
if cnt%int64(gc.Widthptr) != 0 {
|
||||||
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
|
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
|
||||||
}
|
}
|
||||||
|
if *ax == 0 {
|
||||||
|
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||||
|
*ax = 1
|
||||||
|
}
|
||||||
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
|
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
|
||||||
lo += int64(gc.Widthptr)
|
lo += int64(gc.Widthptr)
|
||||||
cnt -= int64(gc.Widthptr)
|
cnt -= int64(gc.Widthptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
if cnt <= int64(4*gc.Widthreg) {
|
if cnt == 8 {
|
||||||
for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
|
if *ax == 0 {
|
||||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
|
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||||
|
*ax = 1
|
||||||
|
}
|
||||||
|
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
|
||||||
|
} else if cnt <= int64(8*gc.Widthreg) {
|
||||||
|
if *x0 == 0 {
|
||||||
|
p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
|
||||||
|
*x0 = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := int64(0); i < cnt/16; i++ {
|
||||||
|
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cnt%16 != 0 {
|
||||||
|
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
|
||||||
}
|
}
|
||||||
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
|
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
|
||||||
q := cnt / int64(gc.Widthreg)
|
if *x0 == 0 {
|
||||||
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
|
p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
|
||||||
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
|
*x0 = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
|
||||||
|
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
|
||||||
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
||||||
|
|
||||||
|
if cnt%16 != 0 {
|
||||||
|
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
|
if *ax == 0 {
|
||||||
|
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||||
|
*ax = 1
|
||||||
|
}
|
||||||
|
|
||||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
|
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
|
||||||
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
|
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
|
||||||
p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
|
p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
|
||||||
|
|
@ -537,106 +554,150 @@ func clearfat(nl *gc.Node) {
|
||||||
gc.Dump("\nclearfat", nl)
|
gc.Dump("\nclearfat", nl)
|
||||||
}
|
}
|
||||||
|
|
||||||
w := nl.Type.Width
|
|
||||||
|
|
||||||
// Avoid taking the address for simple enough types.
|
// Avoid taking the address for simple enough types.
|
||||||
if gc.Componentgen(nil, nl) {
|
if gc.Componentgen(nil, nl) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c := w % 8 // bytes
|
w := nl.Type.Width
|
||||||
q := w / 8 // quads
|
|
||||||
|
|
||||||
if q < 4 {
|
if w > 1024 || (gc.Nacl && w >= 64) {
|
||||||
// Write sequence of MOV 0, off(base) instead of using STOSQ.
|
var oldn1 gc.Node
|
||||||
// The hope is that although the code will be slightly longer,
|
|
||||||
// the MOVs will have no dependencies and pipeline better
|
|
||||||
// than the unrolled STOSQ loop.
|
|
||||||
// NOTE: Must use agen, not igen, so that optimizer sees address
|
|
||||||
// being taken. We are not writing on field boundaries.
|
|
||||||
var n1 gc.Node
|
var n1 gc.Node
|
||||||
gc.Agenr(nl, &n1, nil)
|
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
|
||||||
|
gc.Agen(nl, &n1)
|
||||||
|
|
||||||
n1.Op = gc.OINDREG
|
var ax gc.Node
|
||||||
var z gc.Node
|
var oldax gc.Node
|
||||||
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
|
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
|
||||||
for ; q > 0; q-- {
|
gconreg(x86.AMOVL, 0, x86.REG_AX)
|
||||||
n1.Type = z.Type
|
gconreg(movptr, w/8, x86.REG_CX)
|
||||||
gins(x86.AMOVQ, &z, &n1)
|
|
||||||
n1.Xoffset += 8
|
gins(x86.AREP, nil, nil) // repeat
|
||||||
|
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
|
||||||
|
|
||||||
|
if w%8 != 0 {
|
||||||
|
n1.Op = gc.OINDREG
|
||||||
|
clearfat_tail(&n1, w%8)
|
||||||
}
|
}
|
||||||
|
|
||||||
if c >= 4 {
|
restx(&n1, &oldn1)
|
||||||
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
|
restx(&ax, &oldax)
|
||||||
n1.Type = z.Type
|
|
||||||
gins(x86.AMOVL, &z, &n1)
|
|
||||||
n1.Xoffset += 4
|
|
||||||
c -= 4
|
|
||||||
}
|
|
||||||
|
|
||||||
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
|
|
||||||
for ; c > 0; c-- {
|
|
||||||
n1.Type = z.Type
|
|
||||||
gins(x86.AMOVB, &z, &n1)
|
|
||||||
n1.Xoffset++
|
|
||||||
}
|
|
||||||
|
|
||||||
gc.Regfree(&n1)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var oldn1 gc.Node
|
if w >= 64 {
|
||||||
var n1 gc.Node
|
var oldn1 gc.Node
|
||||||
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
|
var n1 gc.Node
|
||||||
gc.Agen(nl, &n1)
|
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
|
||||||
|
gc.Agen(nl, &n1)
|
||||||
|
|
||||||
var ax gc.Node
|
var vec_zero gc.Node
|
||||||
var oldax gc.Node
|
var old_x0 gc.Node
|
||||||
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
|
savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
|
||||||
gconreg(x86.AMOVL, 0, x86.REG_AX)
|
gins(x86.AXORPS, &vec_zero, &vec_zero)
|
||||||
|
|
||||||
if q > 128 || gc.Nacl {
|
if di := dzDI(w); di != 0 {
|
||||||
gconreg(movptr, q, x86.REG_CX)
|
|
||||||
gins(x86.AREP, nil, nil) // repeat
|
|
||||||
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
|
|
||||||
} else {
|
|
||||||
if di := dzDI(q); di != 0 {
|
|
||||||
gconreg(addptr, di, x86.REG_DI)
|
gconreg(addptr, di, x86.REG_DI)
|
||||||
}
|
}
|
||||||
p := gins(obj.ADUFFZERO, nil, nil)
|
p := gins(obj.ADUFFZERO, nil, nil)
|
||||||
p.To.Type = obj.TYPE_ADDR
|
p.To.Type = obj.TYPE_ADDR
|
||||||
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
||||||
p.To.Offset = dzOff(q)
|
p.To.Offset = dzOff(w)
|
||||||
|
|
||||||
|
if w%16 != 0 {
|
||||||
|
n1.Op = gc.OINDREG
|
||||||
|
n1.Xoffset -= 16 - w%16
|
||||||
|
gins(x86.AMOVUPS, &vec_zero, &n1)
|
||||||
|
}
|
||||||
|
|
||||||
|
restx(&vec_zero, &old_x0)
|
||||||
|
restx(&n1, &oldn1)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
z := ax
|
// NOTE: Must use agen, not igen, so that optimizer sees address
|
||||||
di := n1
|
// being taken. We are not writing on field boundaries.
|
||||||
if w >= 8 && c >= 4 {
|
var n1 gc.Node
|
||||||
di.Op = gc.OINDREG
|
gc.Agenr(nl, &n1, nil)
|
||||||
z.Type = gc.Types[gc.TINT64]
|
n1.Op = gc.OINDREG
|
||||||
di.Type = z.Type
|
|
||||||
p := gins(x86.AMOVQ, &z, &di)
|
clearfat_tail(&n1, w)
|
||||||
p.To.Scale = 1
|
|
||||||
p.To.Offset = c - 8
|
gc.Regfree(&n1)
|
||||||
} else if c >= 4 {
|
}
|
||||||
di.Op = gc.OINDREG
|
|
||||||
z.Type = gc.Types[gc.TINT32]
|
func clearfat_tail(n1 *gc.Node, b int64) {
|
||||||
di.Type = z.Type
|
if b >= 16 {
|
||||||
gins(x86.AMOVL, &z, &di)
|
var vec_zero gc.Node
|
||||||
if c > 4 {
|
gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
|
||||||
p := gins(x86.AMOVL, &z, &di)
|
gins(x86.AXORPS, &vec_zero, &vec_zero)
|
||||||
p.To.Scale = 1
|
|
||||||
p.To.Offset = c - 4
|
for b >= 16 {
|
||||||
|
gins(x86.AMOVUPS, &vec_zero, n1)
|
||||||
|
n1.Xoffset += 16
|
||||||
|
b -= 16
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
for c > 0 {
|
// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
|
||||||
gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
|
if b != 0 {
|
||||||
c--
|
n1.Xoffset -= 16 - b
|
||||||
|
gins(x86.AMOVUPS, &vec_zero, n1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
gc.Regfree(&vec_zero)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write sequence of MOV 0, off(base) instead of using STOSQ.
|
||||||
|
// The hope is that although the code will be slightly longer,
|
||||||
|
// the MOVs will have no dependencies and pipeline better
|
||||||
|
// than the unrolled STOSQ loop.
|
||||||
|
var z gc.Node
|
||||||
|
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
|
||||||
|
if b >= 8 {
|
||||||
|
n1.Type = z.Type
|
||||||
|
gins(x86.AMOVQ, &z, n1)
|
||||||
|
n1.Xoffset += 8
|
||||||
|
b -= 8
|
||||||
|
|
||||||
|
if b != 0 {
|
||||||
|
n1.Xoffset -= 8 - b
|
||||||
|
gins(x86.AMOVQ, &z, n1)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if b >= 4 {
|
||||||
|
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
|
||||||
|
n1.Type = z.Type
|
||||||
|
gins(x86.AMOVL, &z, n1)
|
||||||
|
n1.Xoffset += 4
|
||||||
|
b -= 4
|
||||||
|
|
||||||
|
if b != 0 {
|
||||||
|
n1.Xoffset -= 4 - b
|
||||||
|
gins(x86.AMOVL, &z, n1)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if b >= 2 {
|
||||||
|
gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
|
||||||
|
n1.Type = z.Type
|
||||||
|
gins(x86.AMOVW, &z, n1)
|
||||||
|
n1.Xoffset += 2
|
||||||
|
b -= 2
|
||||||
|
}
|
||||||
|
|
||||||
|
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
|
||||||
|
for b > 0 {
|
||||||
|
n1.Type = z.Type
|
||||||
|
gins(x86.AMOVB, &z, n1)
|
||||||
|
n1.Xoffset++
|
||||||
|
b--
|
||||||
}
|
}
|
||||||
|
|
||||||
restx(&n1, &oldn1)
|
|
||||||
restx(&ax, &oldax)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
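clearfat_tail above zeroes an odd-sized tail by moving the offset back so the last 16-byte (or 8-byte) store overlaps bytes that were already cleared, instead of falling back to narrower stores. The standalone sketch below imitates that overlap trick with an ordinary byte slice; the function names are invented for illustration and nothing here is compiler code.

package main

import "fmt"

// clearTail simulates 16-byte-wide zeroing stores into buf[:b].
func clearTail(buf []byte, b int) {
	off := 0
	for b >= 16 {
		zero16(buf, off)
		off += 16
		b -= 16
	}
	if b != 0 {
		off -= 16 - b // overlap the previous store rather than shrinking this one
		zero16(buf, off)
	}
}

func zero16(buf []byte, off int) {
	for i := 0; i < 16; i++ {
		buf[off+i] = 0
	}
}

func main() {
	buf := make([]byte, 40)
	for i := range buf {
		buf[i] = 0xFF
	}
	// 40 bytes = two full 16-byte stores plus an 8-byte tail folded into one
	// overlapping 16-byte store at offset 24.
	clearTail(buf, 40)
	fmt.Println(buf)
}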
// Called after regopt and peep have run.
|
// Called after regopt and peep have run.
|
||||||
|
|
|
||||||
|
|
@@ -136,6 +136,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
 	x86.AMOVL:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
 	x86.AMOVQ:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
 	x86.AMOVW:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
+	x86.AMOVUPS: {Flags: gc.LeftRead | gc.RightWrite | gc.Move},
 	x86.AMOVSB: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
 	x86.AMOVSL: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
 	x86.AMOVSQ: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
@@ -248,6 +249,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
 	x86.AXORL: {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
 	x86.AXORQ: {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
 	x86.AXORW: {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
+	x86.AXORPS: {Flags: gc.LeftRead | RightRdwr},
 }

 func progflags(p *obj.Prog) uint32 {
|
|
@ -467,30 +467,18 @@ hard:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func intLiteral(n *gc.Node) (x int64, ok bool) {
|
|
||||||
switch {
|
|
||||||
case n == nil:
|
|
||||||
return
|
|
||||||
case gc.Isconst(n, gc.CTINT):
|
|
||||||
return n.Int(), true
|
|
||||||
case gc.Isconst(n, gc.CTBOOL):
|
|
||||||
return int64(obj.Bool2int(n.Bool())), true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// gins is called by the front end.
|
// gins is called by the front end.
|
||||||
// It synthesizes some multiple-instruction sequences
|
// It synthesizes some multiple-instruction sequences
|
||||||
// so the front end can stay simpler.
|
// so the front end can stay simpler.
|
||||||
func gins(as int, f, t *gc.Node) *obj.Prog {
|
func gins(as int, f, t *gc.Node) *obj.Prog {
|
||||||
if as >= obj.A_ARCHSPECIFIC {
|
if as >= obj.A_ARCHSPECIFIC {
|
||||||
if x, ok := intLiteral(f); ok {
|
if x, ok := f.IntLiteral(); ok {
|
||||||
ginscon(as, x, t)
|
ginscon(as, x, t)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if as == arm64.ACMP {
|
if as == arm64.ACMP {
|
||||||
if x, ok := intLiteral(t); ok {
|
if x, ok := t.IntLiteral(); ok {
|
||||||
ginscon2(as, f, x)
|
ginscon2(as, f, x)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -1272,7 +1272,7 @@ func (z *Float) usub(x, y *Float) {
 		ex = ey
 	}

-	// operands may have cancelled each other out
+	// operands may have canceled each other out
 	if len(z.mant) == 0 {
 		z.acc = Exact
 		z.form = zero
@@ -698,7 +698,9 @@ func TestGcd(t *testing.T) {
 		testGcd(t, d, x, y, a, b)
 	}

-	quick.Check(checkGcd, nil)
+	if err := quick.Check(checkGcd, nil); err != nil {
+		t.Error(err)
+	}
 }

 var primes = []string{
|
|
@ -810,10 +810,7 @@ func cgen_wbptr(n, res *Node) {
|
||||||
a := &p.To
|
a := &p.To
|
||||||
a.Type = obj.TYPE_MEM
|
a.Type = obj.TYPE_MEM
|
||||||
a.Reg = int16(Thearch.REGSP)
|
a.Reg = int16(Thearch.REGSP)
|
||||||
a.Offset = 0
|
a.Offset = Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
a.Offset += int64(Widthptr)
|
|
||||||
}
|
|
||||||
p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
|
p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
|
||||||
p2.To = p.To
|
p2.To = p.To
|
||||||
p2.To.Offset += int64(Widthptr)
|
p2.To.Offset += int64(Widthptr)
|
||||||
|
|
@ -849,10 +846,7 @@ func cgen_wbfat(n, res *Node) {
|
||||||
a := &p.To
|
a := &p.To
|
||||||
a.Type = obj.TYPE_MEM
|
a.Type = obj.TYPE_MEM
|
||||||
a.Reg = int16(Thearch.REGSP)
|
a.Reg = int16(Thearch.REGSP)
|
||||||
a.Offset = 0
|
a.Offset = Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
a.Offset += int64(Widthptr)
|
|
||||||
}
|
|
||||||
if needType {
|
if needType {
|
||||||
a.Offset += int64(Widthptr)
|
a.Offset += int64(Widthptr)
|
||||||
}
|
}
|
||||||
|
|
@ -1686,10 +1680,7 @@ func Igen(n *Node, a *Node, res *Node) {
|
||||||
a.Op = OINDREG
|
a.Op = OINDREG
|
||||||
a.Reg = int16(Thearch.REGSP)
|
a.Reg = int16(Thearch.REGSP)
|
||||||
a.Addable = true
|
a.Addable = true
|
||||||
a.Xoffset = fp.Width
|
a.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
a.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
a.Type = n.Type
|
a.Type = n.Type
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
@ -2219,11 +2210,7 @@ func stkof(n *Node) int64 {
|
||||||
var flist Iter
|
var flist Iter
|
||||||
t = Structfirst(&flist, Getoutarg(t))
|
t = Structfirst(&flist, Getoutarg(t))
|
||||||
if t != nil {
|
if t != nil {
|
||||||
w := t.Width
|
return t.Width + Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
w += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
return w
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2379,17 +2366,11 @@ func Ginscall(f *Node, proc int) {
|
||||||
// size of arguments at 0(SP)
|
// size of arguments at 0(SP)
|
||||||
stk.Op = OINDREG
|
stk.Op = OINDREG
|
||||||
stk.Reg = int16(Thearch.REGSP)
|
stk.Reg = int16(Thearch.REGSP)
|
||||||
stk.Xoffset = 0
|
stk.Xoffset = Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
|
Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
|
||||||
|
|
||||||
// FuncVal* at 8(SP)
|
// FuncVal* at 8(SP)
|
||||||
stk.Xoffset = int64(Widthptr)
|
stk.Xoffset = int64(Widthptr) + Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
|
|
||||||
var reg Node
|
var reg Node
|
||||||
Nodreg(®, Types[Tptr], Thearch.REGCALLX2)
|
Nodreg(®, Types[Tptr], Thearch.REGCALLX2)
|
||||||
|
|
@ -2447,10 +2428,7 @@ func cgen_callinter(n *Node, res *Node, proc int) {
|
||||||
|
|
||||||
var nodsp Node
|
var nodsp Node
|
||||||
Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
|
Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
|
||||||
nodsp.Xoffset = 0
|
nodsp.Xoffset = Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
nodsp.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
if proc != 0 {
|
if proc != 0 {
|
||||||
nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
|
nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
|
||||||
}
|
}
|
||||||
|
|
@ -2541,11 +2519,6 @@ func cgen_call(n *Node, proc int) {
|
||||||
Ginscall(n.Left, proc)
|
Ginscall(n.Left, proc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func HasLinkRegister() bool {
|
|
||||||
c := Ctxt.Arch.Thechar
|
|
||||||
return c != '6' && c != '8'
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* call to n has already been generated.
|
* call to n has already been generated.
|
||||||
* generate:
|
* generate:
|
||||||
|
|
@ -2568,10 +2541,7 @@ func cgen_callret(n *Node, res *Node) {
|
||||||
nod.Reg = int16(Thearch.REGSP)
|
nod.Reg = int16(Thearch.REGSP)
|
||||||
nod.Addable = true
|
nod.Addable = true
|
||||||
|
|
||||||
nod.Xoffset = fp.Width
|
nod.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
nod.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
nod.Type = fp.Type
|
nod.Type = fp.Type
|
||||||
Cgen_as(res, &nod)
|
Cgen_as(res, &nod)
|
||||||
}
|
}
|
||||||
|
|
@ -2597,10 +2567,7 @@ func cgen_aret(n *Node, res *Node) {
|
||||||
nod1.Op = OINDREG
|
nod1.Op = OINDREG
|
||||||
nod1.Reg = int16(Thearch.REGSP)
|
nod1.Reg = int16(Thearch.REGSP)
|
||||||
nod1.Addable = true
|
nod1.Addable = true
|
||||||
nod1.Xoffset = fp.Width
|
nod1.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
nod1.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
nod1.Type = fp.Type
|
nod1.Type = fp.Type
|
||||||
|
|
||||||
if res.Op != OREGISTER {
|
if res.Op != OREGISTER {
|
||||||
|
|
@ -2858,10 +2825,7 @@ func cgen_append(n, res *Node) {
|
||||||
arg.Op = OINDREG
|
arg.Op = OINDREG
|
||||||
arg.Reg = int16(Thearch.REGSP)
|
arg.Reg = int16(Thearch.REGSP)
|
||||||
arg.Addable = true
|
arg.Addable = true
|
||||||
arg.Xoffset = 0
|
arg.Xoffset = Ctxt.FixedFrameSize()
|
||||||
if HasLinkRegister() {
|
|
||||||
arg.Xoffset = int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
arg.Type = Ptrto(Types[TUINT8])
|
arg.Type = Ptrto(Types[TUINT8])
|
||||||
Cgen(typename(res.Type), &arg)
|
Cgen(typename(res.Type), &arg)
|
||||||
arg.Xoffset += int64(Widthptr)
|
arg.Xoffset += int64(Widthptr)
|
||||||
|
|
|
||||||
|
|
@ -10,6 +10,19 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// IntLiteral returns the Node's literal value as an integer.
|
||||||
|
func (n *Node) IntLiteral() (x int64, ok bool) {
|
||||||
|
switch {
|
||||||
|
case n == nil:
|
||||||
|
return
|
||||||
|
case Isconst(n, CTINT):
|
||||||
|
return n.Int(), true
|
||||||
|
case Isconst(n, CTBOOL):
|
||||||
|
return int64(obj.Bool2int(n.Bool())), true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Int returns n as an int.
|
// Int returns n as an int.
|
||||||
// n must be an integer constant.
|
// n must be an integer constant.
|
||||||
func (n *Node) Int() int64 {
|
func (n *Node) Int() int64 {
|
||||||
|
|
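The new IntLiteral helper folds integer and boolean constants into an int64 plus an ok flag. Below is a rough standalone analogue; the constant type and its fields are illustrative stand-ins for the compiler's Node/Val machinery and are not part of this change.

package main

import "fmt"

// constant is an invented stand-in for the compiler's constant Node.
type constant struct {
	isInt  bool
	isBool bool
	i      int64
	b      bool
}

// IntLiteral mirrors the shape of the added helper: integer constants report
// their value, boolean constants fold to 0 or 1, anything else reports ok=false.
func (c *constant) IntLiteral() (x int64, ok bool) {
	switch {
	case c == nil:
		return
	case c.isInt:
		return c.i, true
	case c.isBool:
		if c.b {
			return 1, true
		}
		return 0, true
	}
	return
}

func main() {
	n := &constant{isBool: true, b: true}
	if x, ok := n.IntLiteral(); ok {
		fmt.Println(x) // prints 1
	}
}

Callers can then test constant operands with a single comma-ok check instead of switching on the Isconst cases themselves.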
@ -434,19 +447,8 @@ func overflow(v Val, t *Type) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !doesoverflow(v, t) {
|
if doesoverflow(v, t) {
|
||||||
return
|
Yyerror("constant %s overflows %v", Vconv(v, 0), t)
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Ctype() {
|
|
||||||
case CTINT, CTRUNE:
|
|
||||||
Yyerror("constant %v overflows %v", v.U.(*Mpint), t)
|
|
||||||
|
|
||||||
case CTFLT:
|
|
||||||
Yyerror("constant %v overflows %v", Fconv(v.U.(*Mpflt), obj.FmtSharp), t)
|
|
||||||
|
|
||||||
case CTCPLX:
|
|
||||||
Yyerror("constant %v overflows %v", Fconv(v.U.(*Mpflt), obj.FmtSharp), t)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -997,37 +999,37 @@ func evconst(n *Node) {
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case OEQ<<16 | CTSTR:
|
case OEQ<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) == 0 {
|
if strlit(nl) == strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case ONE<<16 | CTSTR:
|
case ONE<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) != 0 {
|
if strlit(nl) != strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case OLT<<16 | CTSTR:
|
case OLT<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) < 0 {
|
if strlit(nl) < strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case OLE<<16 | CTSTR:
|
case OLE<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) <= 0 {
|
if strlit(nl) <= strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case OGE<<16 | CTSTR:
|
case OGE<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) >= 0 {
|
if strlit(nl) >= strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
||||||
case OGT<<16 | CTSTR:
|
case OGT<<16 | CTSTR:
|
||||||
if cmpslit(nl, nr) > 0 {
|
if strlit(nl) > strlit(nr) {
|
||||||
goto settrue
|
goto settrue
|
||||||
}
|
}
|
||||||
goto setfalse
|
goto setfalse
|
||||||
|
|
@ -1352,8 +1354,9 @@ func defaultlit2(lp **Node, rp **Node, force int) {
|
||||||
Convlit(rp, Types[TINT])
|
Convlit(rp, Types[TINT])
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmpslit(l, r *Node) int {
|
// strlit returns the value of a literal string Node as a string.
|
||||||
return stringsCompare(l.Val().U.(string), r.Val().U.(string))
|
func strlit(n *Node) string {
|
||||||
|
return n.Val().U.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Smallintconst(n *Node) bool {
|
func Smallintconst(n *Node) bool {
|
||||||
|
|
|
||||||
|
|
@ -79,16 +79,6 @@ func popdcl() {
|
||||||
block = d.Block
|
block = d.Block
|
||||||
}
|
}
|
||||||
|
|
||||||
func poptodcl() {
|
|
||||||
// pop the old marker and push a new one
|
|
||||||
// (cannot reuse the existing one)
|
|
||||||
// because we use the markers to identify blocks
|
|
||||||
// for the goto restriction checks.
|
|
||||||
popdcl()
|
|
||||||
|
|
||||||
markdcl()
|
|
||||||
}
|
|
||||||
|
|
||||||
func markdcl() {
|
func markdcl() {
|
||||||
d := push()
|
d := push()
|
||||||
d.Name = "" // used as a mark in fifo
|
d.Name = "" // used as a mark in fifo
|
||||||
|
|
@ -192,7 +182,7 @@ func declare(n *Node, ctxt uint8) {
|
||||||
|
|
||||||
gen := 0
|
gen := 0
|
||||||
if ctxt == PEXTERN {
|
if ctxt == PEXTERN {
|
||||||
externdcl = list(externdcl, n)
|
externdcl = append(externdcl, n)
|
||||||
if dflag() {
|
if dflag() {
|
||||||
fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), s, n)
|
fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), s, n)
|
||||||
}
|
}
|
||||||
|
|
@ -1509,5 +1499,5 @@ func makefuncsym(s *Sym) {
|
||||||
s1 := funcsym(s)
|
s1 := funcsym(s)
|
||||||
s1.Def = newfuncname(s1)
|
s1.Def = newfuncname(s1)
|
||||||
s1.Def.Func.Shortname = newname(s)
|
s1.Def.Func.Shortname = newname(s)
|
||||||
funcsyms = list(funcsyms, s1.Def)
|
funcsyms = append(funcsyms, s1.Def)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -248,17 +248,6 @@ func satInc8(x int8) int8 {
|
||||||
return x + 1
|
return x + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func satAdd8(x, y int8) int8 {
|
|
||||||
z := x + y
|
|
||||||
if x^y < 0 || x^z >= 0 {
|
|
||||||
return z
|
|
||||||
}
|
|
||||||
if x < 0 {
|
|
||||||
return -128
|
|
||||||
}
|
|
||||||
return 127
|
|
||||||
}
|
|
||||||
|
|
||||||
func min8(a, b int8) int8 {
|
func min8(a, b int8) int8 {
|
||||||
if a < b {
|
if a < b {
|
||||||
return a
|
return a
|
||||||
|
|
@ -385,10 +374,9 @@ func escMax(e, etype uint16) uint16 {
|
||||||
// something whose address is returned -- but that implies stored into the heap,
|
// something whose address is returned -- but that implies stored into the heap,
|
||||||
// hence EscHeap, which means that the details are not currently relevant. )
|
// hence EscHeap, which means that the details are not currently relevant. )
|
||||||
const (
|
const (
|
||||||
bitsPerOutputInTag = 3 // For each output, the number of bits for a tag
|
bitsPerOutputInTag = 3 // For each output, the number of bits for a tag
|
||||||
bitsMaskForTag = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
|
bitsMaskForTag = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
|
||||||
outputsPerTag = (16 - EscReturnBits) / bitsPerOutputInTag // The number of outputs that can be tagged.
|
maxEncodedLevel = int(bitsMaskForTag - 1) // The largest level that can be stored in a tag.
|
||||||
maxEncodedLevel = int(bitsMaskForTag - 1) // The largest level that can be stored in a tag.
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type EscState struct {
|
type EscState struct {
|
||||||
|
|
@ -662,10 +650,15 @@ func esc(e *EscState, n *Node, up *Node) {
|
||||||
|
|
||||||
n.Left.Sym.Label = nil
|
n.Left.Sym.Label = nil
|
||||||
|
|
||||||
// Everything but fixed array is a dereference.
|
|
||||||
case ORANGE:
|
case ORANGE:
|
||||||
if n.List != nil && n.List.Next != nil {
|
if n.List != nil && n.List.Next != nil {
|
||||||
if Isfixedarray(n.Type) {
|
// Everything but fixed array is a dereference.
|
||||||
|
|
||||||
|
// If fixed array is really the address of fixed array,
|
||||||
|
// it is also a dereference, because it is implicitly
|
||||||
|
// dereferenced (see #12588)
|
||||||
|
if Isfixedarray(n.Type) &&
|
||||||
|
!(Isptr[n.Right.Type.Etype] && Eqtype(n.Right.Type.Type, n.Type)) {
|
||||||
escassign(e, n.List.Next.N, n.Right)
|
escassign(e, n.List.Next.N, n.Right)
|
||||||
} else {
|
} else {
|
||||||
escassignDereference(e, n.List.Next.N, n.Right)
|
escassignDereference(e, n.List.Next.N, n.Right)
|
||||||
|
|
@ -958,6 +951,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
|
||||||
OMAPLIT,
|
OMAPLIT,
|
||||||
OSTRUCTLIT,
|
OSTRUCTLIT,
|
||||||
OPTRLIT,
|
OPTRLIT,
|
||||||
|
ODDDARG,
|
||||||
OCALLPART:
|
OCALLPART:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
@ -1463,8 +1457,9 @@ func esccall(e *EscState, n *Node, up *Node) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var src *Node
|
||||||
for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
|
for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
|
||||||
src := ll.N
|
src = ll.N
|
||||||
if t.Isddd && !n.Isddd {
|
if t.Isddd && !n.Isddd {
|
||||||
// Introduce ODDDARG node to represent ... allocation.
|
// Introduce ODDDARG node to represent ... allocation.
|
||||||
src = Nod(ODDDARG, nil, nil)
|
src = Nod(ODDDARG, nil, nil)
|
||||||
|
|
@ -1505,17 +1500,17 @@ func esccall(e *EscState, n *Node, up *Node) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if src != ll.N {
|
if src != ll.N {
|
||||||
|
// This occurs when function parameter type Isddd and n not Isddd
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
t = t.Down
|
t = t.Down
|
||||||
}
|
}
|
||||||
|
|
||||||
// "..." arguments are untracked
|
|
||||||
for ; ll != nil; ll = ll.Next {
|
for ; ll != nil; ll = ll.Next {
|
||||||
escassign(e, &e.theSink, ll.N)
|
|
||||||
if Debug['m'] > 2 {
|
if Debug['m'] > 2 {
|
||||||
fmt.Printf("%v::esccall:: ... <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
|
fmt.Printf("%v::esccall:: ... <- %v\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
|
||||||
}
|
}
|
||||||
|
escassign(e, src, ll.N) // args to slice
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1701,6 +1696,16 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
|
||||||
case OAPPEND:
|
case OAPPEND:
|
||||||
escwalk(e, level, dst, src.List.N)
|
escwalk(e, level, dst, src.List.N)
|
||||||
|
|
||||||
|
case ODDDARG:
|
||||||
|
if leaks {
|
||||||
|
src.Esc = EscHeap
|
||||||
|
if Debug['m'] != 0 {
|
||||||
|
Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// similar to a slice arraylit and its args.
|
||||||
|
level = level.dec()
|
||||||
|
|
||||||
case OARRAYLIT:
|
case OARRAYLIT:
|
||||||
if Isfixedarray(src.Type) {
|
if Isfixedarray(src.Type) {
|
||||||
break
|
break
|
||||||
|
|
@ -1711,8 +1716,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
|
||||||
|
|
||||||
fallthrough
|
fallthrough
|
||||||
|
|
||||||
case ODDDARG,
|
case OMAKECHAN,
|
||||||
OMAKECHAN,
|
|
||||||
OMAKEMAP,
|
OMAKEMAP,
|
||||||
OMAKESLICE,
|
OMAKESLICE,
|
||||||
OARRAYRUNESTR,
|
OARRAYRUNESTR,
|
||||||
|
|
|
||||||
|
|
@ -253,21 +253,12 @@ func dumpexportvar(s *Sym) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// methodbyname sorts types by symbol name.
|
||||||
type methodbyname []*Type
|
type methodbyname []*Type
|
||||||
|
|
||||||
func (x methodbyname) Len() int {
|
func (x methodbyname) Len() int { return len(x) }
|
||||||
return len(x)
|
func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
}
|
func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name }
|
||||||
|
|
||||||
func (x methodbyname) Swap(i, j int) {
|
|
||||||
x[i], x[j] = x[j], x[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x methodbyname) Less(i, j int) bool {
|
|
||||||
a := x[i]
|
|
||||||
b := x[j]
|
|
||||||
return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func dumpexporttype(t *Type) {
|
func dumpexporttype(t *Type) {
|
||||||
if t == nil {
|
if t == nil {
|
||||||
|
|
@ -289,24 +280,15 @@ func dumpexporttype(t *Type) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
n := 0
|
var m []*Type
|
||||||
for f := t.Method; f != nil; f = f.Down {
|
for f := t.Method; f != nil; f = f.Down {
|
||||||
dumpexporttype(f)
|
dumpexporttype(f)
|
||||||
n++
|
m = append(m, f)
|
||||||
}
|
}
|
||||||
|
sort.Sort(methodbyname(m))
|
||||||
m := make([]*Type, n)
|
|
||||||
i := 0
|
|
||||||
for f := t.Method; f != nil; f = f.Down {
|
|
||||||
m[i] = f
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
sort.Sort(methodbyname(m[:n]))
|
|
||||||
|
|
||||||
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
|
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
|
||||||
var f *Type
|
for _, f := range m {
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
f = m[i]
|
|
||||||
if f.Nointerface {
|
if f.Nointerface {
|
||||||
fmt.Fprintf(bout, "\t//go:nointerface\n")
|
fmt.Fprintf(bout, "\t//go:nointerface\n")
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
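methodbyname is now a compact sort.Interface over a slice of methods that dumpexporttype collects in a single pass. A minimal, self-contained sketch of the same pattern; the Item type and sample data below are illustrative, not the compiler's *Type.

package main

import (
	"fmt"
	"sort"
)

// Item stands in for the compiler's *Type; only the Name matters here.
type Item struct{ Name string }

// methodByName mirrors the slimmed-down methodbyname sorter above.
type methodByName []Item

func (x methodByName) Len() int           { return len(x) }
func (x methodByName) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x methodByName) Less(i, j int) bool { return x[i].Name < x[j].Name }

func main() {
	m := methodByName{{"Write"}, {"Close"}, {"Read"}}
	sort.Sort(m)
	fmt.Println(m) // [{Close} {Read} {Write}]
}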
@ -1596,8 +1596,6 @@ func Sconv(s *Sym, flag int) string {
|
||||||
|
|
||||||
sf := flag
|
sf := flag
|
||||||
sm := setfmode(&flag)
|
sm := setfmode(&flag)
|
||||||
var r int
|
|
||||||
_ = r
|
|
||||||
str := symfmt(s, flag)
|
str := symfmt(s, flag)
|
||||||
flag = sf
|
flag = sf
|
||||||
fmtmode = sm
|
fmtmode = sm
|
||||||
|
|
@ -1632,8 +1630,6 @@ func Tconv(t *Type, flag int) string {
|
||||||
flag |= obj.FmtUnsigned
|
flag |= obj.FmtUnsigned
|
||||||
}
|
}
|
||||||
|
|
||||||
var r int
|
|
||||||
_ = r
|
|
||||||
str := typefmt(t, flag)
|
str := typefmt(t, flag)
|
||||||
|
|
||||||
if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
|
if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
|
||||||
|
|
@ -1660,8 +1656,6 @@ func Nconv(n *Node, flag int) string {
|
||||||
sf := flag
|
sf := flag
|
||||||
sm := setfmode(&flag)
|
sm := setfmode(&flag)
|
||||||
|
|
||||||
var r int
|
|
||||||
_ = r
|
|
||||||
var str string
|
var str string
|
||||||
switch fmtmode {
|
switch fmtmode {
|
||||||
case FErr, FExp:
|
case FErr, FExp:
|
||||||
|
|
@ -1694,8 +1688,6 @@ func Hconv(l *NodeList, flag int) string {
|
||||||
|
|
||||||
sf := flag
|
sf := flag
|
||||||
sm := setfmode(&flag)
|
sm := setfmode(&flag)
|
||||||
var r int
|
|
||||||
_ = r
|
|
||||||
sep := "; "
|
sep := "; "
|
||||||
if fmtmode == FDbg {
|
if fmtmode == FDbg {
|
||||||
sep = "\n"
|
sep = "\n"
|
||||||
|
|
|
||||||
|
|
@ -210,7 +210,7 @@ type Type struct {
|
||||||
Embedlineno int32 // first use of TFORW as embedded type
|
Embedlineno int32 // first use of TFORW as embedded type
|
||||||
|
|
||||||
// for TFORW, where to copy the eventual value to
|
// for TFORW, where to copy the eventual value to
|
||||||
Copyto *NodeList
|
Copyto []*Node
|
||||||
|
|
||||||
Lastfn *Node // for usefield
|
Lastfn *Node // for usefield
|
||||||
}
|
}
|
||||||
|
|
@ -376,18 +376,17 @@ type Sig struct {
|
||||||
type_ *Type
|
type_ *Type
|
||||||
mtype *Type
|
mtype *Type
|
||||||
offset int32
|
offset int32
|
||||||
link *Sig
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Io struct {
|
type Io struct {
|
||||||
infile string
|
infile string
|
||||||
bin *obj.Biobuf
|
bin *obj.Biobuf
|
||||||
nlsemi int
|
cp string // used for content when bin==nil
|
||||||
eofnl int
|
|
||||||
last int
|
last int
|
||||||
peekc int
|
peekc int
|
||||||
peekc1 int // second peekc for ...
|
peekc1 int // second peekc for ...
|
||||||
cp string // used for content when bin==nil
|
nlsemi bool
|
||||||
|
eofnl bool
|
||||||
importsafe bool
|
importsafe bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -584,13 +583,13 @@ var maxfltval [NTYPE]*Mpflt
|
||||||
|
|
||||||
var xtop *NodeList
|
var xtop *NodeList
|
||||||
|
|
||||||
var externdcl *NodeList
|
var externdcl []*Node
|
||||||
|
|
||||||
var exportlist []*Node
|
var exportlist []*Node
|
||||||
|
|
||||||
var importlist []*Node // imported functions and methods with inlinable bodies
|
var importlist []*Node // imported functions and methods with inlinable bodies
|
||||||
|
|
||||||
var funcsyms *NodeList
|
var funcsyms []*Node
|
||||||
|
|
||||||
var dclcontext uint8 // PEXTERN/PAUTO
|
var dclcontext uint8 // PEXTERN/PAUTO
|
||||||
|
|
||||||
|
|
@ -598,7 +597,7 @@ var incannedimport int
|
||||||
|
|
||||||
var statuniqgen int // name generator for static temps
|
var statuniqgen int // name generator for static temps
|
||||||
|
|
||||||
var loophack int
|
var loophack bool
|
||||||
|
|
||||||
var iota_ int32
|
var iota_ int32
|
||||||
|
|
||||||
|
|
@ -630,12 +629,6 @@ var typesw *Node
|
||||||
|
|
||||||
var nblank *Node
|
var nblank *Node
|
||||||
|
|
||||||
var hunk string
|
|
||||||
|
|
||||||
var nhunk int32
|
|
||||||
|
|
||||||
var thunk int32
|
|
||||||
|
|
||||||
var Funcdepth int32
|
var Funcdepth int32
|
||||||
|
|
||||||
var typecheckok bool
|
var typecheckok bool
|
||||||
|
|
|
||||||
|
|
@ -2311,6 +2311,6 @@ func fixlbrace(lbr int) {
|
||||||
// set up for another one now that we're done.
|
// set up for another one now that we're done.
|
||||||
// See comment in lex.C about loophack.
|
// See comment in lex.C about loophack.
|
||||||
if lbr == LBODY {
|
if lbr == LBODY {
|
||||||
loophack = 1
|
loophack = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -87,7 +87,9 @@ func Gbranch(as int, t *Type, likely int) *obj.Prog {
|
||||||
p.To.Val = nil
|
p.To.Val = nil
|
||||||
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
|
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
|
||||||
p.From.Type = obj.TYPE_CONST
|
p.From.Type = obj.TYPE_CONST
|
||||||
p.From.Offset = int64(obj.Bool2int(likely > 0))
|
if likely > 0 {
|
||||||
|
p.From.Offset = 1
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if Debug['g'] != 0 {
|
if Debug['g'] != 0 {
|
||||||
|
|
@ -576,9 +578,7 @@ fp:
|
||||||
n.Op = OINDREG
|
n.Op = OINDREG
|
||||||
|
|
||||||
n.Reg = int16(Thearch.REGSP)
|
n.Reg = int16(Thearch.REGSP)
|
||||||
if HasLinkRegister() {
|
n.Xoffset += Ctxt.FixedFrameSize()
|
||||||
n.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
|
||||||
}
|
|
||||||
|
|
||||||
case 1: // input arg
|
case 1: // input arg
|
||||||
n.Class = PPARAM
|
n.Class = PPARAM
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,7 @@ var yyprev int
|
||||||
|
|
||||||
var yylast int
|
var yylast int
|
||||||
|
|
||||||
var imported_unsafe int
|
var imported_unsafe bool
|
||||||
|
|
||||||
var (
|
var (
|
||||||
goos string
|
goos string
|
||||||
|
|
@ -60,26 +60,6 @@ var debugtab = []struct {
|
||||||
{"wb", &Debug_wb}, // print information about write barriers
|
{"wb", &Debug_wb}, // print information about write barriers
|
||||||
}
|
}
|
||||||
|
|
||||||
// Our own isdigit, isspace, isalpha, isalnum that take care
|
|
||||||
// of EOF and other out of range arguments.
|
|
||||||
func yy_isdigit(c int) bool {
|
|
||||||
return c >= 0 && c <= 0xFF && isdigit(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func yy_isspace(c int) bool {
|
|
||||||
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
|
|
||||||
}
|
|
||||||
|
|
||||||
func yy_isalpha(c int) bool {
|
|
||||||
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
|
|
||||||
}
|
|
||||||
|
|
||||||
func yy_isalnum(c int) bool {
|
|
||||||
return c >= 0 && c <= 0xFF && isalnum(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disallow use of isdigit etc.
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
EOF = -1
|
EOF = -1
|
||||||
)
|
)
|
||||||
|
|
@ -334,8 +314,8 @@ func Main() {
|
||||||
|
|
||||||
curio.peekc = 0
|
curio.peekc = 0
|
||||||
curio.peekc1 = 0
|
curio.peekc1 = 0
|
||||||
curio.nlsemi = 0
|
curio.nlsemi = false
|
||||||
curio.eofnl = 0
|
curio.eofnl = false
|
||||||
curio.last = 0
|
curio.last = 0
|
||||||
|
|
||||||
// Skip initial BOM if present.
|
// Skip initial BOM if present.
|
||||||
|
|
@ -346,7 +326,7 @@ func Main() {
|
||||||
block = 1
|
block = 1
|
||||||
iota_ = -1000000
|
iota_ = -1000000
|
||||||
|
|
||||||
imported_unsafe = 0
|
imported_unsafe = false
|
||||||
|
|
||||||
yyparse()
|
yyparse()
|
||||||
if nsyntaxerrors != 0 {
|
if nsyntaxerrors != 0 {
|
||||||
|
|
@ -484,9 +464,9 @@ func Main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Phase 9: Check external declarations.
|
// Phase 9: Check external declarations.
|
||||||
for l := externdcl; l != nil; l = l.Next {
|
for i, n := range externdcl {
|
||||||
if l.N.Op == ONAME {
|
if n.Op == ONAME {
|
||||||
typecheck(&l.N, Erv)
|
typecheck(&externdcl[i], Erv)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -587,7 +567,7 @@ func addidir(dir string) {
|
||||||
// is this path a local name? begins with ./ or ../ or /
|
// is this path a local name? begins with ./ or ../ or /
|
||||||
func islocalname(name string) bool {
|
func islocalname(name string) bool {
|
||||||
return strings.HasPrefix(name, "/") ||
|
return strings.HasPrefix(name, "/") ||
|
||||||
Ctxt.Windows != 0 && len(name) >= 3 && yy_isalpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
|
Ctxt.Windows != 0 && len(name) >= 3 && isAlpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
|
||||||
strings.HasPrefix(name, "./") || name == "." ||
|
strings.HasPrefix(name, "./") || name == "." ||
|
||||||
strings.HasPrefix(name, "../") || name == ".."
|
strings.HasPrefix(name, "../") || name == ".."
|
||||||
}
|
}
|
||||||
|
|
@ -615,9 +595,7 @@ func findpkg(name string) (file string, ok bool) {
|
||||||
// local imports should be canonicalized already.
|
// local imports should be canonicalized already.
|
||||||
// don't want to see "encoding/../encoding/base64"
|
// don't want to see "encoding/../encoding/base64"
|
||||||
// as different from "encoding/base64".
|
// as different from "encoding/base64".
|
||||||
var q string
|
if q := path.Clean(name); q != name {
|
||||||
_ = q
|
|
||||||
if path.Clean(name) != name {
|
|
||||||
Yyerror("non-canonical import path %q (should be %q)", name, q)
|
Yyerror("non-canonical import path %q (should be %q)", name, q)
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
@ -702,7 +680,7 @@ func importfile(f *Val, line int) {
|
||||||
|
|
||||||
importpkg = mkpkg(f.U.(string))
|
importpkg = mkpkg(f.U.(string))
|
||||||
cannedimports("unsafe.o", unsafeimport)
|
cannedimports("unsafe.o", unsafeimport)
|
||||||
imported_unsafe = 1
|
imported_unsafe = true
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -803,19 +781,18 @@ func importfile(f *Val, line int) {
|
||||||
curio.peekc = 0
|
curio.peekc = 0
|
||||||
curio.peekc1 = 0
|
curio.peekc1 = 0
|
||||||
curio.infile = file
|
curio.infile = file
|
||||||
curio.nlsemi = 0
|
curio.nlsemi = false
|
||||||
typecheckok = true
|
typecheckok = true
|
||||||
|
|
||||||
var c int32
|
|
||||||
for {
|
for {
|
||||||
c = int32(getc())
|
c := getc()
|
||||||
if c == EOF {
|
if c == EOF {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if c != '$' {
|
if c != '$' {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
c = int32(getc())
|
c = getc()
|
||||||
if c == EOF {
|
if c == EOF {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
@ -854,17 +831,44 @@ func cannedimports(file string, cp string) {
|
||||||
curio.peekc1 = 0
|
curio.peekc1 = 0
|
||||||
curio.infile = file
|
curio.infile = file
|
||||||
curio.cp = cp
|
curio.cp = cp
|
||||||
curio.nlsemi = 0
|
curio.nlsemi = false
|
||||||
curio.importsafe = false
|
curio.importsafe = false
|
||||||
|
|
||||||
typecheckok = true
|
typecheckok = true
|
||||||
incannedimport = 1
|
incannedimport = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isSpace(c int) bool {
|
||||||
|
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
|
||||||
|
}
|
||||||
|
|
||||||
|
func isAlpha(c int) bool {
|
||||||
|
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
|
||||||
|
}
|
||||||
|
|
||||||
|
func isDigit(c int) bool {
|
||||||
|
return '0' <= c && c <= '9'
|
||||||
|
}
|
||||||
|
func isAlnum(c int) bool {
|
||||||
|
return isAlpha(c) || isDigit(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func plan9quote(s string) string {
|
||||||
|
if s == "" {
|
||||||
|
return "''"
|
||||||
|
}
|
||||||
|
for _, c := range s {
|
||||||
|
if c <= ' ' || c == '\'' {
|
||||||
|
return "'" + strings.Replace(s, "'", "''", -1) + "'"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
func isfrog(c int) bool {
|
func isfrog(c int) bool {
|
||||||
// complain about possibly invisible control characters
|
// complain about possibly invisible control characters
|
||||||
if c < ' ' {
|
if c < ' ' {
|
||||||
return !yy_isspace(c) // exclude good white space
|
return !isSpace(c) // exclude good white space
|
||||||
}
|
}
|
||||||
|
|
||||||
if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
|
if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
|
||||||
|
|
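The added plan9quote helper wraps a string in single quotes when it contains whitespace, control characters, or an embedded quote, doubling any quotes, and otherwise returns it unchanged. A standalone copy with a short demo; the sample inputs are made up.

package main

import (
	"fmt"
	"strings"
)

// plan9quote is copied from the addition above for demonstration only.
func plan9quote(s string) string {
	if s == "" {
		return "''"
	}
	for _, c := range s {
		if c <= ' ' || c == '\'' {
			return "'" + strings.Replace(s, "'", "''", -1) + "'"
		}
	}
	return s
}

func main() {
	fmt.Println(plan9quote("gofmt"))       // gofmt
	fmt.Println(plan9quote("hello world")) // 'hello world'
	fmt.Println(plan9quote("it's"))        // 'it''s'
}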
@ -874,8 +878,8 @@ func isfrog(c int) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
type Loophack struct {
|
type Loophack struct {
|
||||||
v int
|
|
||||||
next *Loophack
|
next *Loophack
|
||||||
|
v bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var _yylex_lstk *Loophack
|
var _yylex_lstk *Loophack
|
||||||
|
|
@ -885,7 +889,6 @@ func _yylex(yylval *yySymType) int32 {
|
||||||
var escflag int
|
var escflag int
|
||||||
var v int64
|
var v int64
|
||||||
var cp *bytes.Buffer
|
var cp *bytes.Buffer
|
||||||
var rune_ uint
|
|
||||||
var s *Sym
|
var s *Sym
|
||||||
var h *Loophack
|
var h *Loophack
|
||||||
var str string
|
var str string
|
||||||
|
|
@ -894,8 +897,8 @@ func _yylex(yylval *yySymType) int32 {
|
||||||
|
|
||||||
l0:
|
l0:
|
||||||
c := getc()
|
c := getc()
|
||||||
if yy_isspace(c) {
|
if isSpace(c) {
|
||||||
if c == '\n' && curio.nlsemi != 0 {
|
if c == '\n' && curio.nlsemi {
|
||||||
ungetc(c)
|
ungetc(c)
|
||||||
if Debug['x'] != 0 {
|
if Debug['x'] != 0 {
|
||||||
fmt.Printf("lex: implicit semi\n")
|
fmt.Printf("lex: implicit semi\n")
|
||||||
|
|
@ -916,20 +919,20 @@ l0:
|
||||||
goto talph
|
goto talph
|
||||||
}
|
}
|
||||||
|
|
||||||
if yy_isalpha(c) {
|
if isAlpha(c) {
|
||||||
cp = &lexbuf
|
cp = &lexbuf
|
||||||
cp.Reset()
|
cp.Reset()
|
||||||
goto talph
|
goto talph
|
||||||
}
|
}
|
||||||
|
|
||||||
if yy_isdigit(c) {
|
if isDigit(c) {
|
||||||
cp = &lexbuf
|
cp = &lexbuf
|
||||||
cp.Reset()
|
cp.Reset()
|
||||||
if c != '0' {
|
if c != '0' {
|
||||||
for {
|
for {
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
c = getc()
|
c = getc()
|
||||||
if yy_isdigit(c) {
|
if isDigit(c) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if c == '.' {
|
if c == '.' {
|
||||||
|
|
@ -951,7 +954,7 @@ l0:
|
||||||
for {
|
for {
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
c = getc()
|
c = getc()
|
||||||
if yy_isdigit(c) {
|
if isDigit(c) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if c >= 'a' && c <= 'f' {
|
if c >= 'a' && c <= 'f' {
|
||||||
|
|
@ -976,7 +979,7 @@ l0:
|
||||||
|
|
||||||
c1 = 0
|
c1 = 0
|
||||||
for {
|
for {
|
||||||
if !yy_isdigit(c) {
|
if !isDigit(c) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if c < '0' || c > '7' {
|
if c < '0' || c > '7' {
|
||||||
|
|
@ -1014,7 +1017,7 @@ l0:
|
||||||
|
|
||||||
case '.':
|
case '.':
|
||||||
c1 = getc()
|
c1 = getc()
|
||||||
if yy_isdigit(c1) {
|
if isDigit(c1) {
|
||||||
cp = &lexbuf
|
cp = &lexbuf
|
||||||
cp.Reset()
|
cp.Reset()
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
|
|
@ -1048,8 +1051,7 @@ l0:
|
||||||
if v < utf8.RuneSelf || escflag != 0 {
|
if v < utf8.RuneSelf || escflag != 0 {
|
||||||
cp.WriteByte(byte(v))
|
cp.WriteByte(byte(v))
|
||||||
} else {
|
} else {
|
||||||
rune_ = uint(v)
|
cp.WriteRune(rune(v))
|
||||||
cp.WriteRune(rune(rune_))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1106,23 +1108,23 @@ l0:
|
||||||
case '/':
|
case '/':
|
||||||
c1 = getc()
|
c1 = getc()
|
||||||
if c1 == '*' {
|
if c1 == '*' {
|
||||||
nl := 0
|
nl := false
|
||||||
for {
|
for {
|
||||||
c = int(getr())
|
c = int(getr())
|
||||||
if c == '\n' {
|
if c == '\n' {
|
||||||
nl = 1
|
nl = true
|
||||||
}
|
}
|
||||||
for c == '*' {
|
for c == '*' {
|
||||||
c = int(getr())
|
c = int(getr())
|
||||||
if c == '/' {
|
if c == '/' {
|
||||||
if nl != 0 {
|
if nl {
|
||||||
ungetc('\n')
|
ungetc('\n')
|
||||||
}
|
}
|
||||||
goto l0
|
goto l0
|
||||||
}
|
}
|
||||||
|
|
||||||
if c == '\n' {
|
if c == '\n' {
|
||||||
nl = 1
|
nl = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1308,15 +1310,15 @@ l0:
|
||||||
*
|
*
|
||||||
* when we see the keyword, the next
|
* when we see the keyword, the next
|
||||||
* non-parenthesized '{' becomes an LBODY.
|
* non-parenthesized '{' becomes an LBODY.
|
||||||
* loophack is normally 0.
|
* loophack is normally false.
|
||||||
* a keyword makes it go up to 1.
|
* a keyword sets it to true.
|
||||||
* parens push loophack onto a stack and go back to 0.
|
* parens push loophack onto a stack and go back to false.
|
||||||
* a '{' with loophack == 1 becomes LBODY and disables loophack.
|
* a '{' with loophack == true becomes LBODY and disables loophack.
|
||||||
*
|
*
|
||||||
* i said it was clumsy.
|
* i said it was clumsy.
|
||||||
*/
|
*/
|
||||||
case '(', '[':
|
case '(', '[':
|
||||||
if loophack != 0 || _yylex_lstk != nil {
|
if loophack || _yylex_lstk != nil {
|
||||||
h = new(Loophack)
|
h = new(Loophack)
|
||||||
if h == nil {
|
if h == nil {
|
||||||
Flusherrors()
|
Flusherrors()
|
||||||
|
|
@ -1327,7 +1329,7 @@ l0:
|
||||||
h.v = loophack
|
h.v = loophack
|
||||||
h.next = _yylex_lstk
|
h.next = _yylex_lstk
|
||||||
_yylex_lstk = h
|
_yylex_lstk = h
|
||||||
loophack = 0
|
loophack = false
|
||||||
}
|
}
|
||||||
|
|
||||||
goto lx
|
goto lx
|
||||||
|
|
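The comment above describes how loophack, now a bool, disambiguates the '{' that opens a for/if/switch/select body from a composite-literal brace. A standalone sketch of that bookkeeping follows; the lexer type and method names are invented for illustration, and the real lexer keeps the saved flags in a linked list of Loophack nodes rather than a slice.

package main

import "fmt"

// lexer is an invented stand-in for the real lexer state.
type lexer struct {
	loophack bool
	stack    []bool
}

func (l *lexer) keyword() { l.loophack = true } // for/if/switch/select seen

func (l *lexer) open() { // '(' or '['
	l.stack = append(l.stack, l.loophack)
	l.loophack = false
}

func (l *lexer) close() { // ')' or ']'
	n := len(l.stack) - 1
	l.loophack = l.stack[n]
	l.stack = l.stack[:n]
}

func (l *lexer) brace() string { // '{'
	if l.loophack {
		l.loophack = false
		return "LBODY"
	}
	return "'{'"
}

func main() {
	var l lexer
	// Lexing something like: for x := range f(T{}) { ... }
	l.keyword()            // "for" arms loophack
	l.open()               // '(' saves the flag and clears it
	fmt.Println(l.brace()) // the brace inside the parens stays an ordinary '{'
	l.close()              // ')' restores the armed flag
	fmt.Println(l.brace()) // the loop-body brace becomes LBODY
}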
@ -1342,11 +1344,11 @@ l0:
|
||||||
goto lx
|
goto lx
|
||||||
|
|
||||||
case '{':
|
case '{':
|
||||||
if loophack == 1 {
|
if loophack {
|
||||||
if Debug['x'] != 0 {
|
if Debug['x'] != 0 {
|
||||||
fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
|
fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
|
||||||
}
|
}
|
||||||
loophack = 0
|
loophack = false
|
||||||
return LBODY
|
return LBODY
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1395,14 +1397,14 @@ talph:
|
||||||
for {
|
for {
|
||||||
if c >= utf8.RuneSelf {
|
if c >= utf8.RuneSelf {
|
||||||
ungetc(c)
|
ungetc(c)
|
||||||
rune_ = uint(getr())
|
r := rune(getr())
|
||||||
|
|
||||||
// 0xb7 · is used for internal names
|
// 0xb7 · is used for internal names
|
||||||
if !unicode.IsLetter(rune(rune_)) && !unicode.IsDigit(rune(rune_)) && (importpkg == nil || rune_ != 0xb7) {
|
if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (importpkg == nil || r != 0xb7) {
|
||||||
Yyerror("invalid identifier character U+%04x", rune_)
|
Yyerror("invalid identifier character U+%04x", r)
|
||||||
}
|
}
|
||||||
cp.WriteRune(rune(rune_))
|
cp.WriteRune(r)
|
||||||
} else if !yy_isalnum(c) && c != '_' {
|
} else if !isAlnum(c) && c != '_' {
|
||||||
break
|
break
|
||||||
} else {
|
} else {
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
|
|
@ -1419,7 +1421,7 @@ talph:
|
||||||
goto l0
|
goto l0
|
||||||
|
|
||||||
case LFOR, LIF, LSWITCH, LSELECT:
|
case LFOR, LIF, LSWITCH, LSELECT:
|
||||||
loophack = 1 // see comment about loophack above
|
loophack = true // see comment about loophack above
|
||||||
}
|
}
|
||||||
|
|
||||||
if Debug['x'] != 0 {
|
if Debug['x'] != 0 {
|
||||||
|
|
@ -1450,7 +1452,7 @@ casedot:
|
||||||
for {
|
for {
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
c = getc()
|
c = getc()
|
||||||
if !yy_isdigit(c) {
|
if !isDigit(c) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -1475,10 +1477,10 @@ caseep:
|
||||||
c = getc()
|
c = getc()
|
||||||
}
|
}
|
||||||
|
|
||||||
if !yy_isdigit(c) {
|
if !isDigit(c) {
|
||||||
Yyerror("malformed floating point constant exponent")
|
Yyerror("malformed floating point constant exponent")
|
||||||
}
|
}
|
||||||
for yy_isdigit(c) {
|
for isDigit(c) {
|
||||||
cp.WriteByte(byte(c))
|
cp.WriteByte(byte(c))
|
||||||
c = getc()
|
c = getc()
|
||||||
}
|
}
|
||||||
|
|
@ -1548,7 +1550,7 @@ func internString(b []byte) string {
|
||||||
|
|
||||||
func more(pp *string) bool {
|
func more(pp *string) bool {
|
||||||
p := *pp
|
p := *pp
|
||||||
for p != "" && yy_isspace(int(p[0])) {
|
for p != "" && isSpace(int(p[0])) {
|
||||||
p = p[1:]
|
p = p[1:]
|
||||||
}
|
}
|
||||||
*pp = p
|
*pp = p
|
||||||
|
|
@ -1594,7 +1596,7 @@ func getlinepragma() int {
|
||||||
}
|
}
|
||||||
|
|
||||||
if verb == "go:linkname" {
|
if verb == "go:linkname" {
|
||||||
if imported_unsafe == 0 {
|
if !imported_unsafe {
|
||||||
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
|
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
|
||||||
}
|
}
|
||||||
f := strings.Fields(cmd)
|
f := strings.Fields(cmd)
|
||||||
|
|
@ -1711,7 +1713,7 @@ func getimpsym(pp *string) string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
i := 0
|
i := 0
|
||||||
for i < len(p) && !yy_isspace(int(p[i])) && p[i] != '"' {
|
for i < len(p) && !isSpace(int(p[i])) && p[i] != '"' {
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
sym := p[:i]
|
sym := p[:i]
|
||||||
|
|
@ -1746,9 +1748,7 @@ func pragcgo(text string) {
|
||||||
verb := text[3:] // skip "go:"
|
verb := text[3:] // skip "go:"
|
||||||
|
|
||||||
if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
|
if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
|
||||||
var ok bool
|
p, ok := getquoted(&q)
|
||||||
var p string
|
|
||||||
p, ok = getquoted(&q)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
|
Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
|
||||||
return
|
return
|
||||||
|
|
@ -1830,9 +1830,7 @@ func pragcgo(text string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if verb == "cgo_ldflag" {
|
if verb == "cgo_ldflag" {
|
||||||
var ok bool
|
p, ok := getquoted(&q)
|
||||||
var p string
|
|
||||||
p, ok = getquoted(&q)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
Yyerror("usage: //go:cgo_ldflag \"arg\"")
|
Yyerror("usage: //go:cgo_ldflag \"arg\"")
|
||||||
return
|
return
|
||||||
|
|
@ -1866,7 +1864,7 @@ func yyparse() {
|
||||||
func yylex(yylval *yySymType) int32 {
|
func yylex(yylval *yySymType) int32 {
|
||||||
lx := int(_yylex(yylval))
|
lx := int(_yylex(yylval))
|
||||||
|
|
||||||
if curio.nlsemi != 0 && lx == EOF {
|
if curio.nlsemi && lx == EOF {
|
||||||
// Treat EOF as "end of line" for the purposes
|
// Treat EOF as "end of line" for the purposes
|
||||||
// of inserting a semicolon.
|
// of inserting a semicolon.
|
||||||
lx = ';'
|
lx = ';'
|
||||||
|
|
@ -1884,10 +1882,10 @@ func yylex(yylval *yySymType) int32 {
|
||||||
')',
|
')',
|
||||||
'}',
|
'}',
|
||||||
']':
|
']':
|
||||||
curio.nlsemi = 1
|
curio.nlsemi = true
|
||||||
|
|
||||||
default:
|
default:
|
||||||
curio.nlsemi = 0
|
curio.nlsemi = false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Track last two tokens returned by yylex.
|
// Track last two tokens returned by yylex.
|
||||||
|
|
@ -1942,10 +1940,10 @@ check:
|
||||||
|
|
||||||
// insert \n at EOF
|
// insert \n at EOF
|
||||||
case EOF:
|
case EOF:
|
||||||
if curio.eofnl != 0 || curio.last == '\n' {
|
if curio.eofnl || curio.last == '\n' {
|
||||||
return EOF
|
return EOF
|
||||||
}
|
}
|
||||||
curio.eofnl = 1
|
curio.eofnl = true
|
||||||
c = '\n'
|
c = '\n'
|
||||||
fallthrough
|
fallthrough
|
||||||
|
|
||||||
|
|
@ -2189,32 +2187,22 @@ var syms = []struct {
|
||||||
{"insofaras", LIGNORE, Txxx, OXXX},
|
{"insofaras", LIGNORE, Txxx, OXXX},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// lexinit initializes known symbols and the basic types.
|
||||||
func lexinit() {
|
func lexinit() {
|
||||||
var lex int
|
for _, s := range syms {
|
||||||
var s *Sym
|
lex := s.lexical
|
||||||
var s1 *Sym
|
s1 := Lookup(s.name)
|
||||||
var t *Type
|
s1.Lexical = uint16(lex)
|
||||||
var etype int
|
|
||||||
|
|
||||||
/*
|
if etype := s.etype; etype != Txxx {
|
||||||
* initialize basic types array
|
|
||||||
* initialize known symbols
|
|
||||||
*/
|
|
||||||
for i := 0; i < len(syms); i++ {
|
|
||||||
lex = syms[i].lexical
|
|
||||||
s = Lookup(syms[i].name)
|
|
||||||
s.Lexical = uint16(lex)
|
|
||||||
|
|
||||||
etype = syms[i].etype
|
|
||||||
if etype != Txxx {
|
|
||||||
if etype < 0 || etype >= len(Types) {
|
if etype < 0 || etype >= len(Types) {
|
||||||
Fatalf("lexinit: %s bad etype", s.Name)
|
Fatalf("lexinit: %s bad etype", s.name)
|
||||||
}
|
}
|
||||||
s1 = Pkglookup(syms[i].name, builtinpkg)
|
s2 := Pkglookup(s.name, builtinpkg)
|
||||||
t = Types[etype]
|
t := Types[etype]
|
||||||
if t == nil {
|
if t == nil {
|
||||||
t = typ(etype)
|
t = typ(etype)
|
||||||
t.Sym = s1
|
t.Sym = s2
|
||||||
|
|
||||||
if etype != TANY && etype != TSTRING {
|
if etype != TANY && etype != TSTRING {
|
||||||
dowidth(t)
|
dowidth(t)
|
||||||
|
|
@ -2222,19 +2210,18 @@ func lexinit() {
|
||||||
Types[etype] = t
|
Types[etype] = t
|
||||||
}
|
}
|
||||||
|
|
||||||
s1.Lexical = LNAME
|
s2.Lexical = LNAME
|
||||||
s1.Def = typenod(t)
|
s2.Def = typenod(t)
|
||||||
s1.Def.Name = new(Name)
|
s2.Def.Name = new(Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
etype = syms[i].op
|
if etype := s.op; etype != OXXX {
|
||||||
if etype != OXXX {
|
s2 := Pkglookup(s.name, builtinpkg)
|
||||||
s1 = Pkglookup(syms[i].name, builtinpkg)
|
s2.Lexical = LNAME
|
||||||
s1.Lexical = LNAME
|
s2.Def = Nod(ONAME, nil, nil)
|
||||||
s1.Def = Nod(ONAME, nil, nil)
|
s2.Def.Sym = s2
|
||||||
s1.Def.Sym = s1
|
s2.Def.Etype = uint8(etype)
|
||||||
s1.Def.Etype = uint8(etype)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2247,7 +2234,7 @@ func lexinit() {
|
||||||
|
|
||||||
idealbool = typ(TBOOL)
|
idealbool = typ(TBOOL)
|
||||||
|
|
||||||
s = Pkglookup("true", builtinpkg)
|
s := Pkglookup("true", builtinpkg)
|
||||||
s.Def = Nodbool(true)
|
s.Def = Nodbool(true)
|
||||||
s.Def.Sym = Lookup("true")
|
s.Def.Sym = Lookup("true")
|
||||||
s.Def.Name = new(Name)
|
s.Def.Name = new(Name)
|
||||||
|
|
@ -2446,136 +2433,121 @@ func lexfini() {
|
||||||
nodfp.Sym = Lookup(".fp")
|
nodfp.Sym = Lookup(".fp")
|
||||||
}
|
}
|
||||||
|
|
||||||
var lexn = []struct {
|
var lexn = map[int]string{
|
||||||
lex int
|
LANDAND: "ANDAND",
|
||||||
name string
|
LANDNOT: "ANDNOT",
|
||||||
}{
|
LASOP: "ASOP",
|
||||||
{LANDAND, "ANDAND"},
|
LBREAK: "BREAK",
|
||||||
{LANDNOT, "ANDNOT"},
|
LCASE: "CASE",
|
||||||
{LASOP, "ASOP"},
|
LCHAN: "CHAN",
|
||||||
{LBREAK, "BREAK"},
|
LCOLAS: "COLAS",
|
||||||
{LCASE, "CASE"},
|
LCOMM: "<-",
|
||||||
{LCHAN, "CHAN"},
|
LCONST: "CONST",
|
||||||
{LCOLAS, "COLAS"},
|
LCONTINUE: "CONTINUE",
|
||||||
{LCOMM, "<-"},
|
LDDD: "...",
|
||||||
{LCONST, "CONST"},
|
LDEC: "DEC",
|
||||||
{LCONTINUE, "CONTINUE"},
|
LDEFAULT: "DEFAULT",
|
||||||
{LDDD, "..."},
|
LDEFER: "DEFER",
|
||||||
{LDEC, "DEC"},
|
LELSE: "ELSE",
|
||||||
{LDEFAULT, "DEFAULT"},
|
LEQ: "EQ",
|
||||||
{LDEFER, "DEFER"},
|
LFALL: "FALL",
|
||||||
{LELSE, "ELSE"},
|
LFOR: "FOR",
|
||||||
{LEQ, "EQ"},
|
LFUNC: "FUNC",
|
||||||
{LFALL, "FALL"},
|
LGE: "GE",
|
||||||
{LFOR, "FOR"},
|
LGO: "GO",
|
||||||
{LFUNC, "FUNC"},
|
LGOTO: "GOTO",
|
||||||
{LGE, "GE"},
|
LGT: "GT",
|
||||||
{LGO, "GO"},
|
LIF: "IF",
|
||||||
{LGOTO, "GOTO"},
|
LIMPORT: "IMPORT",
|
||||||
{LGT, "GT"},
|
LINC: "INC",
|
||||||
{LIF, "IF"},
|
LINTERFACE: "INTERFACE",
|
||||||
{LIMPORT, "IMPORT"},
|
LLE: "LE",
|
||||||
{LINC, "INC"},
|
LLITERAL: "LITERAL",
|
||||||
{LINTERFACE, "INTERFACE"},
|
LLSH: "LSH",
|
||||||
{LLE, "LE"},
|
LLT: "LT",
|
||||||
{LLITERAL, "LITERAL"},
|
LMAP: "MAP",
|
||||||
{LLSH, "LSH"},
|
LNAME: "NAME",
|
||||||
{LLT, "LT"},
|
LNE: "NE",
|
||||||
{LMAP, "MAP"},
|
LOROR: "OROR",
|
||||||
{LNAME, "NAME"},
|
LPACKAGE: "PACKAGE",
|
||||||
{LNE, "NE"},
|
LRANGE: "RANGE",
|
||||||
{LOROR, "OROR"},
|
LRETURN: "RETURN",
|
||||||
{LPACKAGE, "PACKAGE"},
|
LRSH: "RSH",
|
||||||
{LRANGE, "RANGE"},
|
LSELECT: "SELECT",
|
||||||
{LRETURN, "RETURN"},
|
LSTRUCT: "STRUCT",
|
||||||
{LRSH, "RSH"},
|
LSWITCH: "SWITCH",
|
||||||
{LSELECT, "SELECT"},
|
LTYPE: "TYPE",
|
||||||
{LSTRUCT, "STRUCT"},
|
LVAR: "VAR",
|
||||||
{LSWITCH, "SWITCH"},
|
|
||||||
{LTYPE, "TYPE"},
|
|
||||||
{LVAR, "VAR"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func lexname(lex int) string {
|
func lexname(lex int) string {
|
||||||
for i := 0; i < len(lexn); i++ {
|
if s, ok := lexn[lex]; ok {
|
||||||
if lexn[i].lex == lex {
|
return s
|
||||||
return lexn[i].name
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("LEX-%d", lex)
|
return fmt.Sprintf("LEX-%d", lex)
|
||||||
}
|
}
|
||||||
|
|
||||||
var yytfix = []struct {
|
var yytfix = map[string]string{
|
||||||
have string
|
"$end": "EOF",
|
||||||
want string
|
"LASOP": "op=",
|
||||||
}{
|
"LBREAK": "break",
|
||||||
{"$end", "EOF"},
|
"LCASE": "case",
|
||||||
{"LASOP", "op="},
|
"LCHAN": "chan",
|
||||||
{"LBREAK", "break"},
|
"LCOLAS": ":=",
|
||||||
{"LCASE", "case"},
|
"LCONST": "const",
|
||||||
{"LCHAN", "chan"},
|
"LCONTINUE": "continue",
|
||||||
{"LCOLAS", ":="},
|
"LDDD": "...",
|
||||||
{"LCONST", "const"},
|
"LDEFAULT": "default",
|
||||||
{"LCONTINUE", "continue"},
|
"LDEFER": "defer",
|
||||||
{"LDDD", "..."},
|
"LELSE": "else",
|
||||||
{"LDEFAULT", "default"},
|
"LFALL": "fallthrough",
|
||||||
{"LDEFER", "defer"},
|
"LFOR": "for",
|
||||||
{"LELSE", "else"},
|
"LFUNC": "func",
|
||||||
{"LFALL", "fallthrough"},
|
"LGO": "go",
|
||||||
{"LFOR", "for"},
|
"LGOTO": "goto",
|
||||||
{"LFUNC", "func"},
|
"LIF": "if",
|
||||||
{"LGO", "go"},
|
"LIMPORT": "import",
|
||||||
{"LGOTO", "goto"},
|
"LINTERFACE": "interface",
|
||||||
{"LIF", "if"},
|
"LMAP": "map",
|
||||||
{"LIMPORT", "import"},
|
"LNAME": "name",
|
||||||
{"LINTERFACE", "interface"},
|
"LPACKAGE": "package",
|
||||||
{"LMAP", "map"},
|
"LRANGE": "range",
|
||||||
{"LNAME", "name"},
|
"LRETURN": "return",
|
||||||
{"LPACKAGE", "package"},
|
"LSELECT": "select",
|
||||||
{"LRANGE", "range"},
|
"LSTRUCT": "struct",
|
||||||
{"LRETURN", "return"},
|
"LSWITCH": "switch",
|
||||||
{"LSELECT", "select"},
|
"LTYPE": "type",
|
||||||
{"LSTRUCT", "struct"},
|
"LVAR": "var",
|
||||||
{"LSWITCH", "switch"},
|
"LANDAND": "&&",
|
||||||
{"LTYPE", "type"},
|
"LANDNOT": "&^",
|
||||||
{"LVAR", "var"},
|
"LBODY": "{",
|
||||||
{"LANDAND", "&&"},
|
"LCOMM": "<-",
|
||||||
{"LANDNOT", "&^"},
|
"LDEC": "--",
|
||||||
{"LBODY", "{"},
|
"LINC": "++",
|
||||||
{"LCOMM", "<-"},
|
"LEQ": "==",
|
||||||
{"LDEC", "--"},
|
"LGE": ">=",
|
||||||
{"LINC", "++"},
|
"LGT": ">",
|
||||||
{"LEQ", "=="},
|
"LLE": "<=",
|
||||||
{"LGE", ">="},
|
"LLT": "<",
|
||||||
{"LGT", ">"},
|
"LLSH": "<<",
|
||||||
{"LLE", "<="},
|
"LRSH": ">>",
|
||||||
{"LLT", "<"},
|
"LOROR": "||",
|
||||||
{"LLSH", "<<"},
|
"LNE": "!=",
|
||||||
{"LRSH", ">>"},
|
|
||||||
{"LOROR", "||"},
|
|
||||||
{"LNE", "!="},
|
|
||||||
// spell out to avoid confusion with punctuation in error messages
|
// spell out to avoid confusion with punctuation in error messages
|
||||||
{"';'", "semicolon or newline"},
|
"';'": "semicolon or newline",
|
||||||
{"','", "comma"},
|
"','": "comma",
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
yyErrorVerbose = true
|
yyErrorVerbose = true
|
||||||
|
|
||||||
Outer:
|
|
||||||
for i, s := range yyToknames {
|
for i, s := range yyToknames {
|
||||||
// Apply yytfix if possible.
|
// Apply yytfix if possible.
|
||||||
for _, fix := range yytfix {
|
if fix, ok := yytfix[s]; ok {
|
||||||
if s == fix.have {
|
yyToknames[i] = fix
|
||||||
yyToknames[i] = fix.want
|
} else if len(s) == 3 && s[0] == '\'' && s[2] == '\'' {
|
||||||
continue Outer
|
// Turn 'x' into x.
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn 'x' into x.
|
|
||||||
if len(s) == 3 && s[0] == '\'' && s[2] == '\'' {
|
|
||||||
yyToknames[i] = s[1:2]
|
yyToknames[i] = s[1:2]
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"cmd/internal/obj"
|
"cmd/internal/obj"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
/// implements float arithmetic
|
/// implements float arithmetic
|
||||||
|
|
@ -154,30 +153,6 @@ func mpatoflt(a *Mpflt, as string) {
|
||||||
as = as[1:]
|
as = as[1:]
|
||||||
}
|
}
|
||||||
|
|
||||||
// The spec requires accepting exponents that fit in int32.
|
|
||||||
// Don't accept much more than that.
|
|
||||||
// Count digits in exponent and stop early if there are too many.
|
|
||||||
if i := strings.Index(as, "e"); i >= 0 {
|
|
||||||
i++
|
|
||||||
if i < len(as) && (as[i] == '-' || as[i] == '+') {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
for i < len(as) && as[i] == '0' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
// TODO(rsc): This should be > 10, because we're supposed
|
|
||||||
// to accept any signed 32-bit int as an exponent.
|
|
||||||
// But that's not working terribly well, so we deviate from the
|
|
||||||
// spec in order to make sure that what we accept works.
|
|
||||||
// We can remove this restriction once those larger exponents work.
|
|
||||||
// See golang.org/issue/11326 and test/fixedbugs/issue11326*.go.
|
|
||||||
if len(as)-i > 8 {
|
|
||||||
Yyerror("malformed constant: %s (exponent too large)", as)
|
|
||||||
a.Val.SetUint64(0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
f, ok := a.Val.SetString(as)
|
f, ok := a.Val.SetString(as)
|
||||||
if !ok {
|
if !ok {
|
||||||
// At the moment we lose precise error cause;
|
// At the moment we lose precise error cause;
|
||||||
|
|
|
||||||
|
|
@ -72,10 +72,7 @@ func dumpobj() {
|
||||||
|
|
||||||
fmt.Fprintf(bout, "\n!\n")
|
fmt.Fprintf(bout, "\n!\n")
|
||||||
|
|
||||||
var externs *NodeList
|
externs := len(externdcl)
|
||||||
if externdcl != nil {
|
|
||||||
externs = externdcl.End
|
|
||||||
}
|
|
||||||
|
|
||||||
dumpglobls()
|
dumpglobls()
|
||||||
dumptypestructs()
|
dumptypestructs()
|
||||||
|
|
@ -83,8 +80,8 @@ func dumpobj() {
|
||||||
// Dump extra globals.
|
// Dump extra globals.
|
||||||
tmp := externdcl
|
tmp := externdcl
|
||||||
|
|
||||||
if externs != nil {
|
if externdcl != nil {
|
||||||
externdcl = externs.Next
|
externdcl = externdcl[externs:]
|
||||||
}
|
}
|
||||||
dumpglobls()
|
dumpglobls()
|
||||||
externdcl = tmp
|
externdcl = tmp
|
||||||
|
|
@ -107,11 +104,8 @@ func dumpobj() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func dumpglobls() {
|
func dumpglobls() {
|
||||||
var n *Node
|
|
||||||
|
|
||||||
// add globals
|
// add globals
|
||||||
for l := externdcl; l != nil; l = l.Next {
|
for _, n := range externdcl {
|
||||||
n = l.N
|
|
||||||
if n.Op != ONAME {
|
if n.Op != ONAME {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -126,12 +120,10 @@ func dumpglobls() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
dowidth(n.Type)
|
dowidth(n.Type)
|
||||||
|
|
||||||
ggloblnod(n)
|
ggloblnod(n)
|
||||||
}
|
}
|
||||||
|
|
||||||
for l := funcsyms; l != nil; l = l.Next {
|
for _, n := range funcsyms {
|
||||||
n = l.N
|
|
||||||
dsymptr(n.Sym, 0, n.Sym.Def.Func.Shortname.Sym, 0)
|
dsymptr(n.Sym, 0, n.Sym.Def.Func.Shortname.Sym, 0)
|
||||||
ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
|
ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
|
||||||
}
|
}
|
||||||
|
|
@ -187,10 +179,6 @@ func duint32(s *Sym, off int, v uint32) int {
|
||||||
return duintxx(s, off, uint64(v), 4)
|
return duintxx(s, off, uint64(v), 4)
|
||||||
}
|
}
|
||||||
|
|
||||||
func duint64(s *Sym, off int, v uint64) int {
|
|
||||||
return duintxx(s, off, v, 8)
|
|
||||||
}
|
|
||||||
|
|
||||||
func duintptr(s *Sym, off int, v uint64) int {
|
func duintptr(s *Sym, off int, v uint64) int {
|
||||||
return duintxx(s, off, v, Widthptr)
|
return duintxx(s, off, v, Widthptr)
|
||||||
}
|
}
|
||||||
|
|
@ -284,25 +272,6 @@ func slicebytes(nam *Node, s string, len int) {
|
||||||
duintxx(nam.Sym, off, uint64(len), Widthint)
|
duintxx(nam.Sym, off, uint64(len), Widthint)
|
||||||
}
|
}
|
||||||
|
|
||||||
func dstringptr(s *Sym, off int, str string) int {
|
|
||||||
off = int(Rnd(int64(off), int64(Widthptr)))
|
|
||||||
p := Thearch.Gins(obj.ADATA, nil, nil)
|
|
||||||
p.From.Type = obj.TYPE_MEM
|
|
||||||
p.From.Name = obj.NAME_EXTERN
|
|
||||||
p.From.Sym = Linksym(s)
|
|
||||||
p.From.Offset = int64(off)
|
|
||||||
p.From3 = new(obj.Addr)
|
|
||||||
p.From3.Type = obj.TYPE_CONST
|
|
||||||
p.From3.Offset = int64(Widthptr)
|
|
||||||
|
|
||||||
Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
|
|
||||||
p.To.Type = obj.TYPE_ADDR
|
|
||||||
p.To.Etype = Simtype[TINT]
|
|
||||||
off += Widthptr
|
|
||||||
|
|
||||||
return off
|
|
||||||
}
|
|
||||||
|
|
||||||
func Datastring(s string, a *obj.Addr) {
|
func Datastring(s string, a *obj.Addr) {
|
||||||
_, symdata := stringsym(s)
|
_, symdata := stringsym(s)
|
||||||
a.Type = obj.TYPE_MEM
|
a.Type = obj.TYPE_MEM
|
||||||
|
|
|
||||||
|
|
@ -165,6 +165,8 @@ func emitptrargsmap() {
|
||||||
ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
|
ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// cmpstackvarlt reports whether the stack variable a sorts before b.
|
||||||
|
//
|
||||||
// Sort the list of stack variables. Autos after anything else,
|
// Sort the list of stack variables. Autos after anything else,
|
||||||
// within autos, unused after used, within used, things with
|
// within autos, unused after used, within used, things with
|
||||||
// pointers first, zeroed things first, and then decreasing size.
|
// pointers first, zeroed things first, and then decreasing size.
|
||||||
|
|
@ -173,48 +175,48 @@ func emitptrargsmap() {
|
||||||
// really means, in memory, things with pointers needing zeroing at
|
// really means, in memory, things with pointers needing zeroing at
|
||||||
// the top of the stack and increasing in size.
|
// the top of the stack and increasing in size.
|
||||||
// Non-autos sort on offset.
|
// Non-autos sort on offset.
|
||||||
func cmpstackvar(a *Node, b *Node) int {
|
func cmpstackvarlt(a, b *Node) bool {
|
||||||
if a.Class != b.Class {
|
if a.Class != b.Class {
|
||||||
if a.Class == PAUTO {
|
if a.Class == PAUTO {
|
||||||
return +1
|
return false
|
||||||
}
|
}
|
||||||
return -1
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.Class != PAUTO {
|
if a.Class != PAUTO {
|
||||||
if a.Xoffset < b.Xoffset {
|
if a.Xoffset < b.Xoffset {
|
||||||
return -1
|
return true
|
||||||
}
|
}
|
||||||
if a.Xoffset > b.Xoffset {
|
if a.Xoffset > b.Xoffset {
|
||||||
return +1
|
return false
|
||||||
}
|
}
|
||||||
return 0
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.Used != b.Used {
|
if a.Used != b.Used {
|
||||||
return obj.Bool2int(b.Used) - obj.Bool2int(a.Used)
|
return a.Used
|
||||||
}
|
}
|
||||||
|
|
||||||
ap := obj.Bool2int(haspointers(a.Type))
|
ap := haspointers(a.Type)
|
||||||
bp := obj.Bool2int(haspointers(b.Type))
|
bp := haspointers(b.Type)
|
||||||
if ap != bp {
|
if ap != bp {
|
||||||
return bp - ap
|
return ap
|
||||||
}
|
}
|
||||||
|
|
||||||
ap = obj.Bool2int(a.Name.Needzero)
|
ap = a.Name.Needzero
|
||||||
bp = obj.Bool2int(b.Name.Needzero)
|
bp = b.Name.Needzero
|
||||||
if ap != bp {
|
if ap != bp {
|
||||||
return bp - ap
|
return ap
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.Type.Width < b.Type.Width {
|
if a.Type.Width < b.Type.Width {
|
||||||
return +1
|
return false
|
||||||
}
|
}
|
||||||
if a.Type.Width > b.Type.Width {
|
if a.Type.Width > b.Type.Width {
|
||||||
return -1
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
return stringsCompare(a.Sym.Name, b.Sym.Name)
|
return a.Sym.Name < b.Sym.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
// stkdelta records the stack offset delta for a node
|
// stkdelta records the stack offset delta for a node
|
||||||
|
|
@ -240,7 +242,7 @@ func allocauto(ptxt *obj.Prog) {
|
||||||
|
|
||||||
markautoused(ptxt)
|
markautoused(ptxt)
|
||||||
|
|
||||||
listsort(&Curfn.Func.Dcl, cmpstackvar)
|
listsort(&Curfn.Func.Dcl, cmpstackvarlt)
|
||||||
|
|
||||||
// Unused autos are at the end, chop 'em off.
|
// Unused autos are at the end, chop 'em off.
|
||||||
ll := Curfn.Func.Dcl
|
ll := Curfn.Func.Dcl
|
||||||
|
|
|
||||||
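cmpstackvar's three-way comparison becomes the predicate cmpstackvarlt, which listsort and the new tests below consume directly. A self-contained sketch of the same comparator-to-predicate shape using sort.Slice; the stackVar type and its ordering rules are simplified stand-ins for the compiler's Node fields, not the full cmpstackvarlt logic.

package main

import (
	"fmt"
	"sort"
)

// stackVar is a simplified stand-in for the compiler's Node; the ordering
// keeps only the shape of cmpstackvarlt (non-autos first, then wider slots
// first, then by name).
type stackVar struct {
	auto  bool
	width int64
	name  string
}

func stackVarLt(a, b stackVar) bool {
	if a.auto != b.auto {
		return !a.auto // non-autos sort before autos
	}
	if a.width != b.width {
		return a.width > b.width // larger slots first
	}
	return a.name < b.name
}

func main() {
	vars := []stackVar{
		{auto: true, width: 8, name: "x"},
		{auto: false, width: 4, name: "arg"},
		{auto: true, width: 16, name: "buf"},
	}
	sort.Slice(vars, func(i, j int) bool { return stackVarLt(vars[i], vars[j]) })
	fmt.Println(vars) // [{false 4 arg} {true 16 buf} {true 8 x}]
}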
176
src/cmd/compile/internal/gc/pgen_test.go
Normal file
|
|
@ -0,0 +1,176 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package gc
|
||||||
|
|
||||||
|
import (
	"reflect"
	"testing"
)

// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
	testdata := []struct {
		a, b Node
		lt   bool
	}{
		{
			Node{Class: PAUTO},
			Node{Class: PFUNC},
			false,
		},
		{
			Node{Class: PFUNC},
			Node{Class: PAUTO},
			true,
		},
		{
			Node{Class: PFUNC, Xoffset: 0},
			Node{Class: PFUNC, Xoffset: 10},
			true,
		},
		{
			Node{Class: PFUNC, Xoffset: 20},
			Node{Class: PFUNC, Xoffset: 10},
			false,
		},
		{
			Node{Class: PFUNC, Xoffset: 10},
			Node{Class: PFUNC, Xoffset: 10},
			false,
		},
		{
			Node{Class: PAUTO, Used: true},
			Node{Class: PAUTO, Used: false},
			true,
		},
		{
			Node{Class: PAUTO, Used: false},
			Node{Class: PAUTO, Used: true},
			false,
		},
		{
			Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false
			Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true
			false,
		},
		{
			Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true
			Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false
			true,
		},
		{
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
			true,
		},
		{
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
			false,
		},
		{
			Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
			Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
			false,
		},
		{
			Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
			Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
			true,
		},
		{
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
			true,
		},
		{
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
			false,
		},
		{
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
			false,
		},
	}
	for _, d := range testdata {
		got := cmpstackvarlt(&d.a, &d.b)
		if got != d.lt {
			t.Errorf("want %#v < %#v", d.a, d.b)
		}
	}
}

func slice2nodelist(s []*Node) *NodeList {
	var nl *NodeList
	for _, n := range s {
		nl = list(nl, n)
	}
	return nl
}

func nodelist2slice(nl *NodeList) []*Node {
	var s []*Node
	for l := nl; l != nil; l = l.Next {
		s = append(s, l.N)
	}
	return s
}

func TestListsort(t *testing.T) {
	inp := []*Node{
		{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
	}
	want := []*Node{
		{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
		{Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false
	}
	// haspointers updates Type.Haspointers as a side effect, so
	// exercise this function on all inputs so that reflect.DeepEqual
	// doesn't produce false positives.
	for i := range want {
		haspointers(want[i].Type)
		haspointers(inp[i].Type)
	}

	nl := slice2nodelist(inp)
	listsort(&nl, cmpstackvarlt)
	got := nodelist2slice(nl)
	if !reflect.DeepEqual(want, got) {
		t.Error("listsort failed")
		for i := range got {
			g := got[i]
			w := want[i]
			eq := reflect.DeepEqual(w, g)
			if !eq {
				t.Log(i, w, g)
			}
		}
	}
}
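Read together, the cases above pin down the intended ordering: PFUNC (argument) nodes sort before PAUTO locals and are ordered by Xoffset; among locals, used sorts before unused, pointer-bearing before pointer-free, needs-zeroing before not, wider before narrower, and ties break on symbol name. The comparator below is a sketch inferred only from this table; the function name and body are illustrative and are not the compiler's own cmpstackvarlt.

// Sketch inferred from the test table above; not the real cmpstackvarlt.
// Assumes the surrounding gc package declarations (Node, haspointers, ...).
func cmpstackvarltSketch(a, b *Node) bool {
	if (a.Class == PFUNC) != (b.Class == PFUNC) {
		return a.Class == PFUNC // arguments before locals
	}
	if a.Class == PFUNC {
		return a.Xoffset < b.Xoffset // arguments in offset order
	}
	if a.Used != b.Used {
		return a.Used // used locals first
	}
	ap, bp := haspointers(a.Type), haspointers(b.Type)
	if ap != bp {
		return ap // pointer-bearing locals first
	}
	if a.Name.Needzero != b.Name.Needzero {
		return a.Name.Needzero // needs-zeroing locals next
	}
	if a.Type.Width != b.Type.Width {
		return a.Type.Width > b.Type.Width // larger locals first
	}
	return a.Sym.Name < b.Sym.Name // finally, by name
}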
@@ -92,14 +92,6 @@ type Liveness struct {
 	livepointers []Bvec
 }
 
-func xmalloc(size uint32) interface{} {
-	result := (interface{})(make([]byte, size))
-	if result == nil {
-		Fatalf("malloc failed")
-	}
-	return result
-}
-
 // Constructs a new basic block containing a single instruction.
 func newblock(prog *obj.Prog) *BasicBlock {
 	if prog == nil {
@@ -115,13 +107,6 @@ func newblock(prog *obj.Prog) *BasicBlock {
 	return result
 }
 
-// Frees a basic block and all of its leaf data structures.
-func freeblock(bb *BasicBlock) {
-	if bb == nil {
-		Fatalf("freeblock: cannot free nil")
-	}
-}
-
 // Adds an edge between two basic blocks by making from a predecessor of to and
 // to a successor of from.
 func addedge(from *BasicBlock, to *BasicBlock) {
@@ -523,20 +523,15 @@ type TempVar struct {
 	merge *TempVar // merge var with this one
 	start int64    // smallest Prog.pc in live range
 	end   int64    // largest Prog.pc in live range
-	addr    uint8  // address taken - no accurate end
-	removed uint8  // removed from program
+	addr    bool   // address taken - no accurate end
+	removed bool   // removed from program
 }
 
+// startcmp sorts TempVars by start, then id, then symbol name.
 type startcmp []*TempVar
 
-func (x startcmp) Len() int {
-	return len(x)
-}
-
-func (x startcmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
+func (x startcmp) Len() int      { return len(x) }
+func (x startcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
 
 func (x startcmp) Less(i, j int) bool {
 	a := x[i]
 	b := x[j]
@@ -556,7 +551,7 @@ func (x startcmp) Less(i, j int) bool {
 		return int(a.def.Id-b.def.Id) < 0
 	}
 	if a.node != b.node {
-		return stringsCompare(a.node.Sym.Name, b.node.Sym.Name) < 0
+		return a.node.Sym.Name < b.node.Sym.Name
 	}
 	return false
 }
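With Len, Swap, and Less defined, startcmp satisfies sort.Interface, so the call sites later in this change can hand a []*TempVar straight to the standard library. A minimal usage sketch (slice names here are illustrative):

// Illustrative only; mirrors the sort.Sort(startcmp(bystart)) call made later in mergetemp.
byStart := make([]*TempVar, len(vars))
copy(byStart, vars)
sort.Sort(startcmp(byStart)) // ordered by start, then def Id, then symbol name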
@@ -577,22 +572,11 @@ func mergetemp(firstp *obj.Prog) {
 	}
 
 	// Build list of all mergeable variables.
-	nvar := 0
-	for l := Curfn.Func.Dcl; l != nil; l = l.Next {
-		if canmerge(l.N) {
-			nvar++
-		}
-	}
-
-	var_ := make([]TempVar, nvar)
-	nvar = 0
-	var n *Node
-	var v *TempVar
-	for l := Curfn.Func.Dcl; l != nil; l = l.Next {
-		n = l.N
-		if canmerge(n) {
-			v = &var_[nvar]
-			nvar++
+	var vars []*TempVar
+	for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+		if n := l.N; canmerge(n) {
+			v := &TempVar{}
+			vars = append(vars, v)
 			n.SetOpt(v)
 			v.node = n
 		}
 	}
@@ -607,8 +591,8 @@ func mergetemp(firstp *obj.Prog) {
 		if p.From.Node != nil && ((p.From.Node).(*Node)).Opt() != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt() != nil {
 			Fatalf("double node %v", p)
 		}
-		v = nil
-		n, _ = p.From.Node.(*Node)
+		var v *TempVar
+		n, _ := p.From.Node.(*Node)
 		if n != nil {
 			v, _ = n.Opt().(*TempVar)
 		}
@@ -625,7 +609,7 @@ func mergetemp(firstp *obj.Prog) {
 			f.Data = v.use
 			v.use = f
 			if n == p.From.Node && (p.Info.Flags&LeftAddr != 0) {
-				v.addr = 1
+				v.addr = true
 			}
 		}
 	}
@@ -637,9 +621,8 @@ func mergetemp(firstp *obj.Prog) {
 	nkill := 0
 
 	// Special case.
-	for i := 0; i < len(var_); i++ {
-		v = &var_[i]
-		if v.addr != 0 {
+	for _, v := range vars {
+		if v.addr {
 			continue
 		}
 
@@ -650,7 +633,7 @@ func mergetemp(firstp *obj.Prog) {
 		if p.To.Node == v.node && (p.Info.Flags&RightWrite != 0) && p.Info.Flags&RightRead == 0 {
 			p.As = obj.ANOP
 			p.To = obj.Addr{}
-			v.removed = 1
+			v.removed = true
 			if debugmerge > 0 && Debug['v'] != 0 {
 				fmt.Printf("drop write-only %v\n", v.node.Sym)
 			}
@@ -673,7 +656,7 @@ func mergetemp(firstp *obj.Prog) {
 		if p.From.Node == v.node && p1.To.Node == v.node && (p.Info.Flags&Move != 0) && (p.Info.Flags|p1.Info.Flags)&(LeftAddr|RightAddr) == 0 && p.Info.Flags&SizeAny == p1.Info.Flags&SizeAny {
 			p1.From = p.From
 			Thearch.Excise(f)
-			v.removed = 1
+			v.removed = true
 			if debugmerge > 0 && Debug['v'] != 0 {
 				fmt.Printf("drop immediate-use %v\n", v.node.Sym)
 			}
@@ -687,29 +670,25 @@ func mergetemp(firstp *obj.Prog) {
 	// Traverse live range of each variable to set start, end.
 	// Each flood uses a new value of gen so that we don't have
 	// to clear all the r->active words after each variable.
-	gen := int32(0)
+	gen := uint32(0)
 
-	for i := 0; i < len(var_); i++ {
-		v = &var_[i]
+	for _, v := range vars {
 		gen++
 		for f := v.use; f != nil; f = f.Data.(*Flow) {
-			mergewalk(v, f, uint32(gen))
+			mergewalk(v, f, gen)
 		}
-		if v.addr != 0 {
+		if v.addr {
 			gen++
 			for f := v.use; f != nil; f = f.Data.(*Flow) {
-				varkillwalk(v, f, uint32(gen))
+				varkillwalk(v, f, gen)
 			}
 		}
 	}
 
 	// Sort variables by start.
-	bystart := make([]*TempVar, len(var_))
-	for i := 0; i < len(var_); i++ {
-		bystart[i] = &var_[i]
-	}
-	sort.Sort(startcmp(bystart[:len(var_)]))
+	bystart := make([]*TempVar, len(vars))
+	copy(bystart, vars)
+	sort.Sort(startcmp(bystart))
 
 	// List of in-use variables, sorted by end, so that the ones that
 	// will last the longest are the earliest ones in the array.
|
||||||
// In theory we should use a sorted tree so that insertions are
|
// In theory we should use a sorted tree so that insertions are
|
||||||
// guaranteed O(log n) and then the loop is guaranteed O(n log n).
|
// guaranteed O(log n) and then the loop is guaranteed O(n log n).
|
||||||
// In practice, it doesn't really matter.
|
// In practice, it doesn't really matter.
|
||||||
inuse := make([]*TempVar, len(var_))
|
inuse := make([]*TempVar, len(bystart))
|
||||||
|
|
||||||
ninuse := 0
|
ninuse := 0
|
||||||
nfree := len(var_)
|
nfree := len(bystart)
|
||||||
var t *Type
|
for _, v := range bystart {
|
||||||
var v1 *TempVar
|
|
||||||
var j int
|
|
||||||
for i := 0; i < len(var_); i++ {
|
|
||||||
v = bystart[i]
|
|
||||||
if debugmerge > 0 && Debug['v'] != 0 {
|
if debugmerge > 0 && Debug['v'] != 0 {
|
||||||
fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
|
fmt.Printf("consider %v: removed=%t\n", Nconv(v.node, obj.FmtSharp), v.removed)
|
||||||
}
|
}
|
||||||
|
|
||||||
if v.removed != 0 {
|
if v.removed {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Expire no longer in use.
|
// Expire no longer in use.
|
||||||
for ninuse > 0 && inuse[ninuse-1].end < v.start {
|
for ninuse > 0 && inuse[ninuse-1].end < v.start {
|
||||||
ninuse--
|
ninuse--
|
||||||
v1 = inuse[ninuse]
|
|
||||||
nfree--
|
nfree--
|
||||||
inuse[nfree] = v1
|
inuse[nfree] = inuse[ninuse]
|
||||||
}
|
}
|
||||||
|
|
||||||
if debugmerge > 0 && Debug['v'] != 0 {
|
if debugmerge > 0 && Debug['v'] != 0 {
|
||||||
fmt.Printf("consider %v: removed=%d nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(var_))
|
fmt.Printf("consider %v: removed=%t nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(bystart))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find old temp to reuse if possible.
|
// Find old temp to reuse if possible.
|
||||||
t = v.node.Type
|
t := v.node.Type
|
||||||
|
|
||||||
for j = nfree; j < len(var_); j++ {
|
for j := nfree; j < len(inuse); j++ {
|
||||||
v1 = inuse[j]
|
v1 := inuse[j]
|
||||||
if debugmerge > 0 && Debug['v'] != 0 {
|
if debugmerge > 0 && Debug['v'] != 0 {
|
||||||
fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%v,%v\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), t, v1.node.Type, v.node.Addrtaken, v1.node.Addrtaken)
|
fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%v,%v\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), t, v1.node.Type, v.node.Addrtaken, v1.node.Addrtaken)
|
||||||
}
|
}
|
||||||
|
|
@ -774,7 +748,7 @@ func mergetemp(firstp *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort v into inuse.
|
// Sort v into inuse.
|
||||||
j = ninuse
|
j := ninuse
|
||||||
ninuse++
|
ninuse++
|
||||||
|
|
||||||
for j > 0 && inuse[j-1].end < v.end {
|
for j > 0 && inuse[j-1].end < v.end {
|
||||||
|
|
@ -786,16 +760,14 @@ func mergetemp(firstp *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if debugmerge > 0 && Debug['v'] != 0 {
|
if debugmerge > 0 && Debug['v'] != 0 {
|
||||||
fmt.Printf("%v [%d - %d]\n", Curfn.Func.Nname.Sym, len(var_), nkill)
|
fmt.Printf("%v [%d - %d]\n", Curfn.Func.Nname.Sym, len(vars), nkill)
|
||||||
var v *TempVar
|
for _, v := range vars {
|
||||||
for i := 0; i < len(var_); i++ {
|
|
||||||
v = &var_[i]
|
|
||||||
fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), v.node.Type, v.start, v.end)
|
fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), v.node.Type, v.start, v.end)
|
||||||
if v.addr != 0 {
|
if v.addr {
|
||||||
fmt.Printf(" addr=1")
|
fmt.Printf(" addr=true")
|
||||||
}
|
}
|
||||||
if v.removed != 0 {
|
if v.removed {
|
||||||
fmt.Printf(" dead=1")
|
fmt.Printf(" removed=true")
|
||||||
}
|
}
|
||||||
if v.merge != nil {
|
if v.merge != nil {
|
||||||
fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
|
fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
|
||||||
|
|
@ -814,16 +786,16 @@ func mergetemp(firstp *obj.Prog) {
|
||||||
// Update node references to use merged temporaries.
|
// Update node references to use merged temporaries.
|
||||||
for f := g.Start; f != nil; f = f.Link {
|
for f := g.Start; f != nil; f = f.Link {
|
||||||
p := f.Prog
|
p := f.Prog
|
||||||
n, _ = p.From.Node.(*Node)
|
n, _ := p.From.Node.(*Node)
|
||||||
if n != nil {
|
if n != nil {
|
||||||
v, _ = n.Opt().(*TempVar)
|
v, _ := n.Opt().(*TempVar)
|
||||||
if v != nil && v.merge != nil {
|
if v != nil && v.merge != nil {
|
||||||
p.From.Node = v.merge.node
|
p.From.Node = v.merge.node
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
n, _ = p.To.Node.(*Node)
|
n, _ = p.To.Node.(*Node)
|
||||||
if n != nil {
|
if n != nil {
|
||||||
v, _ = n.Opt().(*TempVar)
|
v, _ := n.Opt().(*TempVar)
|
||||||
if v != nil && v.merge != nil {
|
if v != nil && v.merge != nil {
|
||||||
p.To.Node = v.merge.node
|
p.To.Node = v.merge.node
|
||||||
}
|
}
|
||||||
|
|
@ -831,17 +803,16 @@ func mergetemp(firstp *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete merged nodes from declaration list.
|
// Delete merged nodes from declaration list.
|
||||||
var l *NodeList
|
|
||||||
for lp := &Curfn.Func.Dcl; ; {
|
for lp := &Curfn.Func.Dcl; ; {
|
||||||
l = *lp
|
l := *lp
|
||||||
if l == nil {
|
if l == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
Curfn.Func.Dcl.End = l
|
Curfn.Func.Dcl.End = l
|
||||||
n = l.N
|
n := l.N
|
||||||
v, _ = n.Opt().(*TempVar)
|
v, _ := n.Opt().(*TempVar)
|
||||||
if v != nil && (v.merge != nil || v.removed != 0) {
|
if v != nil && (v.merge != nil || v.removed) {
|
||||||
*lp = l.Next
|
*lp = l.Next
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -850,8 +821,8 @@ func mergetemp(firstp *obj.Prog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clear aux structures.
|
// Clear aux structures.
|
||||||
for i := 0; i < len(var_); i++ {
|
for _, v := range vars {
|
||||||
var_[i].node.SetOpt(nil)
|
v.node.SetOpt(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
Flowend(g)
|
Flowend(g)
|
||||||
|
|
|
||||||
|
|
@ -32,8 +32,8 @@ var noinst_pkgs = []string{"sync", "sync/atomic"}
|
||||||
|
|
||||||
func ispkgin(pkgs []string) bool {
|
func ispkgin(pkgs []string) bool {
|
||||||
if myimportpath != "" {
|
if myimportpath != "" {
|
||||||
for i := 0; i < len(pkgs); i++ {
|
for _, p := range pkgs {
|
||||||
if myimportpath == pkgs[i] {
|
if myimportpath == p {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -167,87 +167,10 @@ func walkrange(n *Node) {
|
||||||
default:
|
default:
|
||||||
Fatalf("walkrange")
|
Fatalf("walkrange")
|
||||||
|
|
||||||
// Lower n into runtime·memclr if possible, for
|
|
||||||
// fast zeroing of slices and arrays (issue 5373).
|
|
||||||
// Look for instances of
|
|
||||||
//
|
|
||||||
// for i := range a {
|
|
||||||
// a[i] = zero
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// in which the evaluation of a is side-effect-free.
|
|
||||||
case TARRAY:
|
case TARRAY:
|
||||||
if Debug['N'] == 0 {
|
if memclrrange(n, v1, v2, a) {
|
||||||
if flag_race == 0 {
|
lineno = int32(lno)
|
||||||
if v1 != nil {
|
return
|
||||||
if v2 == nil {
|
|
||||||
if n.Nbody != nil {
|
|
||||||
if n.Nbody.N != nil { // at least one statement in body
|
|
||||||
if n.Nbody.Next == nil { // at most one statement in body
|
|
||||||
tmp := n.Nbody.N // first statement of body
|
|
||||||
if tmp.Op == OAS {
|
|
||||||
if tmp.Left.Op == OINDEX {
|
|
||||||
if samesafeexpr(tmp.Left.Left, a) {
|
|
||||||
if samesafeexpr(tmp.Left.Right, v1) {
|
|
||||||
if t.Type.Width > 0 {
|
|
||||||
if iszero(tmp.Right) {
|
|
||||||
// Convert to
|
|
||||||
// if len(a) != 0 {
|
|
||||||
// hp = &a[0]
|
|
||||||
// hn = len(a)*sizeof(elem(a))
|
|
||||||
// memclr(hp, hn)
|
|
||||||
// i = len(a) - 1
|
|
||||||
// }
|
|
||||||
n.Op = OIF
|
|
||||||
|
|
||||||
n.Nbody = nil
|
|
||||||
n.Left = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
|
|
||||||
|
|
||||||
// hp = &a[0]
|
|
||||||
hp := temp(Ptrto(Types[TUINT8]))
|
|
||||||
|
|
||||||
tmp := Nod(OINDEX, a, Nodintconst(0))
|
|
||||||
tmp.Bounded = true
|
|
||||||
tmp = Nod(OADDR, tmp, nil)
|
|
||||||
tmp = Nod(OCONVNOP, tmp, nil)
|
|
||||||
tmp.Type = Ptrto(Types[TUINT8])
|
|
||||||
n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
|
|
||||||
|
|
||||||
// hn = len(a) * sizeof(elem(a))
|
|
||||||
hn := temp(Types[TUINTPTR])
|
|
||||||
|
|
||||||
tmp = Nod(OLEN, a, nil)
|
|
||||||
tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
|
|
||||||
tmp = conv(tmp, Types[TUINTPTR])
|
|
||||||
n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
|
|
||||||
|
|
||||||
// memclr(hp, hn)
|
|
||||||
fn := mkcall("memclr", nil, nil, hp, hn)
|
|
||||||
|
|
||||||
n.Nbody = list(n.Nbody, fn)
|
|
||||||
|
|
||||||
// i = len(a) - 1
|
|
||||||
v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
|
|
||||||
|
|
||||||
n.Nbody = list(n.Nbody, v1)
|
|
||||||
|
|
||||||
typecheck(&n.Left, Erv)
|
|
||||||
typechecklist(n.Nbody, Etop)
|
|
||||||
walkstmt(&n)
|
|
||||||
lineno = int32(lno)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// orderstmt arranged for a copy of the array/slice variable if needed.
|
// orderstmt arranged for a copy of the array/slice variable if needed.
|
||||||
|
|
@@ -404,3 +327,82 @@ func walkrange(n *Node) {
 
 	lineno = int32(lno)
 }
+
+// Lower n into runtime·memclr if possible, for
+// fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// 	a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkrange: "for v1, v2 = range a".
+func memclrrange(n, v1, v2, a *Node) bool {
+	if Debug['N'] != 0 || flag_race != 0 {
+		return false
+	}
+	if v1 == nil || v2 != nil {
+		return false
+	}
+	if n.Nbody == nil || n.Nbody.N == nil || n.Nbody.Next != nil {
+		return false
+	}
+	stmt := n.Nbody.N // only stmt in body
+	if stmt.Op != OAS || stmt.Left.Op != OINDEX {
+		return false
+	}
+	if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
+		return false
+	}
+	elemsize := n.Type.Type.Width
+	if elemsize <= 0 || !iszero(stmt.Right) {
+		return false
+	}
+
+	// Convert to
+	// if len(a) != 0 {
+	// 	hp = &a[0]
+	// 	hn = len(a)*sizeof(elem(a))
+	// 	memclr(hp, hn)
+	// 	i = len(a) - 1
+	// }
+	n.Op = OIF
+
+	n.Nbody = nil
+	n.Left = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
+
+	// hp = &a[0]
+	hp := temp(Ptrto(Types[TUINT8]))
+
+	tmp := Nod(OINDEX, a, Nodintconst(0))
+	tmp.Bounded = true
+	tmp = Nod(OADDR, tmp, nil)
+	tmp = Nod(OCONVNOP, tmp, nil)
+	tmp.Type = Ptrto(Types[TUINT8])
+	n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
+
+	// hn = len(a) * sizeof(elem(a))
+	hn := temp(Types[TUINTPTR])
+
+	tmp = Nod(OLEN, a, nil)
+	tmp = Nod(OMUL, tmp, Nodintconst(elemsize))
+	tmp = conv(tmp, Types[TUINTPTR])
+	n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
+
+	// memclr(hp, hn)
+	fn := mkcall("memclr", nil, nil, hp, hn)
+
+	n.Nbody = list(n.Nbody, fn)
+
+	// i = len(a) - 1
+	v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
+
+	n.Nbody = list(n.Nbody, v1)
+
+	typecheck(&n.Left, Erv)
+	typechecklist(n.Nbody, Etop)
+	walkstmt(&n)
+	return true
+}
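To make the recognized pattern concrete: memclrrange only fires on a range loop whose single-statement body zeroes the ranged slice or array element. A hypothetical user-level snippet (not part of this change) and a rough picture of the rewrite:

// Recognized shape: single-statement body that zeroes a[i].
a := make([]int, 1<<20)
for i := range a {
	a[i] = 0
}
// After the rewrite the loop is compiled roughly as (pseudocode, not literal output):
//
//	if len(a) != 0 {
//		memclr(&a[0], uintptr(len(a)) * sizeof(a[0]))
//		i = len(a) - 1
//	}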
@@ -9,6 +9,7 @@ import (
 	"cmd/internal/obj"
 	"fmt"
 	"os"
+	"sort"
 )
 
 /*
@@ -16,93 +17,30 @@ import (
 */
 var signatlist *NodeList
 
-func sigcmp(a *Sig, b *Sig) int {
-	i := stringsCompare(a.name, b.name)
-	if i != 0 {
-		return i
-	}
-	if a.pkg == b.pkg {
-		return 0
-	}
-	if a.pkg == nil {
-		return -1
-	}
-	if b.pkg == nil {
-		return +1
-	}
-	return stringsCompare(a.pkg.Path, b.pkg.Path)
-}
-
-func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
-	if l == nil || l.link == nil {
-		return l
-	}
-
-	l1 := l
-	l2 := l
-	for {
-		l2 = l2.link
-		if l2 == nil {
-			break
-		}
-		l2 = l2.link
-		if l2 == nil {
-			break
-		}
-		l1 = l1.link
-	}
-
-	l2 = l1.link
-	l1.link = nil
-	l1 = lsort(l, f)
-	l2 = lsort(l2, f)
-
-	/* set up lead element */
-	if f(l1, l2) < 0 {
-		l = l1
-		l1 = l1.link
-	} else {
-		l = l2
-		l2 = l2.link
-	}
-
-	le := l
-
-	for {
-		if l1 == nil {
-			for l2 != nil {
-				le.link = l2
-				le = l2
-				l2 = l2.link
-			}
-
-			le.link = nil
-			break
-		}
-
-		if l2 == nil {
-			for l1 != nil {
-				le.link = l1
-				le = l1
-				l1 = l1.link
-			}
-
-			break
-		}
-
-		if f(l1, l2) < 0 {
-			le.link = l1
-			le = l1
-			l1 = l1.link
-		} else {
-			le.link = l2
-			le = l2
-			l2 = l2.link
-		}
-	}
-
-	le.link = nil
-	return l
-}
+// byMethodNameAndPackagePath sorts method signatures by name, then package path.
+type byMethodNameAndPackagePath []*Sig
+
+func (x byMethodNameAndPackagePath) Len() int      { return len(x) }
+func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byMethodNameAndPackagePath) Less(i, j int) bool {
+	return siglt(x[i], x[j])
+}
+
+// siglt reports whether a < b
+func siglt(a, b *Sig) bool {
+	if a.name != b.name {
+		return a.name < b.name
+	}
+	if a.pkg == b.pkg {
+		return false
+	}
+	if a.pkg == nil {
+		return true
+	}
+	if b.pkg == nil {
+		return false
+	}
+	return a.pkg.Path < b.pkg.Path
+}
 
 // Builds a type representing a Bucket structure for
@ -335,11 +273,9 @@ func methodfunc(f *Type, receiver *Type) *Type {
|
||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
// methods returns the methods of the non-interface type t, sorted by name.
|
||||||
* return methods of non-interface type t, sorted by name.
|
// Generates stub functions as needed.
|
||||||
* generates stub functions as needed.
|
func methods(t *Type) []*Sig {
|
||||||
*/
|
|
||||||
func methods(t *Type) *Sig {
|
|
||||||
// method type
|
// method type
|
||||||
mt := methtype(t, 0)
|
mt := methtype(t, 0)
|
||||||
|
|
||||||
|
|
@ -357,11 +293,7 @@ func methods(t *Type) *Sig {
|
||||||
|
|
||||||
// make list of methods for t,
|
// make list of methods for t,
|
||||||
// generating code if necessary.
|
// generating code if necessary.
|
||||||
var a *Sig
|
var ms []*Sig
|
||||||
|
|
||||||
var this *Type
|
|
||||||
var b *Sig
|
|
||||||
var method *Sym
|
|
||||||
for f := mt.Xmethod; f != nil; f = f.Down {
|
for f := mt.Xmethod; f != nil; f = f.Down {
|
||||||
if f.Etype != TFIELD {
|
if f.Etype != TFIELD {
|
||||||
Fatalf("methods: not field %v", f)
|
Fatalf("methods: not field %v", f)
|
||||||
|
|
@ -376,7 +308,7 @@ func methods(t *Type) *Sig {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
method = f.Sym
|
method := f.Sym
|
||||||
if method == nil {
|
if method == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -385,7 +317,7 @@ func methods(t *Type) *Sig {
|
||||||
// if pointer receiver but non-pointer t and
|
// if pointer receiver but non-pointer t and
|
||||||
// this is not an embedded pointer inside a struct,
|
// this is not an embedded pointer inside a struct,
|
||||||
// method does not apply.
|
// method does not apply.
|
||||||
this = getthisx(f.Type).Type.Type
|
this := getthisx(f.Type).Type.Type
|
||||||
|
|
||||||
if Isptr[this.Etype] && this.Type == t {
|
if Isptr[this.Etype] && this.Type == t {
|
||||||
continue
|
continue
|
||||||
|
|
@ -394,55 +326,48 @@ func methods(t *Type) *Sig {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
b = new(Sig)
|
var sig Sig
|
||||||
b.link = a
|
ms = append(ms, &sig)
|
||||||
a = b
|
|
||||||
|
|
||||||
a.name = method.Name
|
sig.name = method.Name
|
||||||
if !exportname(method.Name) {
|
if !exportname(method.Name) {
|
||||||
if method.Pkg == nil {
|
if method.Pkg == nil {
|
||||||
Fatalf("methods: missing package")
|
Fatalf("methods: missing package")
|
||||||
}
|
}
|
||||||
a.pkg = method.Pkg
|
sig.pkg = method.Pkg
|
||||||
}
|
}
|
||||||
|
|
||||||
a.isym = methodsym(method, it, 1)
|
sig.isym = methodsym(method, it, 1)
|
||||||
a.tsym = methodsym(method, t, 0)
|
sig.tsym = methodsym(method, t, 0)
|
||||||
a.type_ = methodfunc(f.Type, t)
|
sig.type_ = methodfunc(f.Type, t)
|
||||||
a.mtype = methodfunc(f.Type, nil)
|
sig.mtype = methodfunc(f.Type, nil)
|
||||||
|
|
||||||
if a.isym.Flags&SymSiggen == 0 {
|
if sig.isym.Flags&SymSiggen == 0 {
|
||||||
a.isym.Flags |= SymSiggen
|
sig.isym.Flags |= SymSiggen
|
||||||
if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
|
if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
|
||||||
compiling_wrappers = 1
|
compiling_wrappers = 1
|
||||||
genwrapper(it, f, a.isym, 1)
|
genwrapper(it, f, sig.isym, 1)
|
||||||
compiling_wrappers = 0
|
compiling_wrappers = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.tsym.Flags&SymSiggen == 0 {
|
if sig.tsym.Flags&SymSiggen == 0 {
|
||||||
a.tsym.Flags |= SymSiggen
|
sig.tsym.Flags |= SymSiggen
|
||||||
if !Eqtype(this, t) {
|
if !Eqtype(this, t) {
|
||||||
compiling_wrappers = 1
|
compiling_wrappers = 1
|
||||||
genwrapper(t, f, a.tsym, 0)
|
genwrapper(t, f, sig.tsym, 0)
|
||||||
compiling_wrappers = 0
|
compiling_wrappers = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return lsort(a, sigcmp)
|
sort.Sort(byMethodNameAndPackagePath(ms))
|
||||||
|
return ms
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
// imethods returns the methods of the interface type t, sorted by name.
|
||||||
* return methods of interface type t, sorted by name.
|
func imethods(t *Type) []*Sig {
|
||||||
*/
|
var methods []*Sig
|
||||||
func imethods(t *Type) *Sig {
|
|
||||||
var a *Sig
|
|
||||||
var method *Sym
|
|
||||||
var isym *Sym
|
|
||||||
|
|
||||||
var all *Sig
|
|
||||||
var last *Sig
|
|
||||||
for f := t.Type; f != nil; f = f.Down {
|
for f := t.Type; f != nil; f = f.Down {
|
||||||
if f.Etype != TFIELD {
|
if f.Etype != TFIELD {
|
||||||
Fatalf("imethods: not field")
|
Fatalf("imethods: not field")
|
||||||
|
|
@ -450,29 +375,28 @@ func imethods(t *Type) *Sig {
|
||||||
if f.Type.Etype != TFUNC || f.Sym == nil {
|
if f.Type.Etype != TFUNC || f.Sym == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
method = f.Sym
|
method := f.Sym
|
||||||
a = new(Sig)
|
var sig = Sig{
|
||||||
a.name = method.Name
|
name: method.Name,
|
||||||
|
}
|
||||||
if !exportname(method.Name) {
|
if !exportname(method.Name) {
|
||||||
if method.Pkg == nil {
|
if method.Pkg == nil {
|
||||||
Fatalf("imethods: missing package")
|
Fatalf("imethods: missing package")
|
||||||
}
|
}
|
||||||
a.pkg = method.Pkg
|
sig.pkg = method.Pkg
|
||||||
}
|
}
|
||||||
|
|
||||||
a.mtype = f.Type
|
sig.mtype = f.Type
|
||||||
a.offset = 0
|
sig.offset = 0
|
||||||
a.type_ = methodfunc(f.Type, nil)
|
sig.type_ = methodfunc(f.Type, nil)
|
||||||
|
|
||||||
if last != nil && sigcmp(last, a) >= 0 {
|
if n := len(methods); n > 0 {
|
||||||
Fatalf("sigcmp vs sortinter %s %s", last.name, a.name)
|
last := methods[n-1]
|
||||||
|
if !(siglt(last, &sig)) {
|
||||||
|
Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if last == nil {
|
methods = append(methods, &sig)
|
||||||
all = a
|
|
||||||
} else {
|
|
||||||
last.link = a
|
|
||||||
}
|
|
||||||
last = a
|
|
||||||
|
|
||||||
// Compiler can only refer to wrappers for non-blank methods.
|
// Compiler can only refer to wrappers for non-blank methods.
|
||||||
if isblanksym(method) {
|
if isblanksym(method) {
|
||||||
|
|
@ -483,7 +407,7 @@ func imethods(t *Type) *Sig {
|
||||||
// IfaceType.Method is not in the reflect data.
|
// IfaceType.Method is not in the reflect data.
|
||||||
// Generate the method body, so that compiled
|
// Generate the method body, so that compiled
|
||||||
// code can refer to it.
|
// code can refer to it.
|
||||||
isym = methodsym(method, t, 0)
|
isym := methodsym(method, t, 0)
|
||||||
|
|
||||||
if isym.Flags&SymSiggen == 0 {
|
if isym.Flags&SymSiggen == 0 {
|
||||||
isym.Flags |= SymSiggen
|
isym.Flags |= SymSiggen
|
||||||
|
|
@ -491,7 +415,7 @@ func imethods(t *Type) *Sig {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return all
|
return methods
|
||||||
}
|
}
|
||||||
|
|
||||||
var dimportpath_gopkg *Pkg
|
var dimportpath_gopkg *Pkg
|
||||||
|
|
@ -559,7 +483,7 @@ func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
|
||||||
*/
|
*/
|
||||||
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
||||||
m := methods(t)
|
m := methods(t)
|
||||||
if t.Sym == nil && m == nil {
|
if t.Sym == nil && len(m) == 0 {
|
||||||
return off
|
return off
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -568,10 +492,8 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
||||||
|
|
||||||
dsymptr(sym, ptroff, sym, off)
|
dsymptr(sym, ptroff, sym, off)
|
||||||
|
|
||||||
n := 0
|
for _, a := range m {
|
||||||
for a := m; a != nil; a = a.link {
|
|
||||||
dtypesym(a.type_)
|
dtypesym(a.type_)
|
||||||
n++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ot := off
|
ot := off
|
||||||
|
|
@ -591,11 +513,12 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
||||||
// slice header
|
// slice header
|
||||||
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
||||||
|
|
||||||
|
n := len(m)
|
||||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||||
|
|
||||||
// methods
|
// methods
|
||||||
for a := m; a != nil; a = a.link {
|
for _, a := range m {
|
||||||
// method
|
// method
|
||||||
// ../../runtime/type.go:/method
|
// ../../runtime/type.go:/method
|
||||||
ot = dgostringptr(s, ot, a.name)
|
ot = dgostringptr(s, ot, a.name)
|
||||||
|
|
@ -943,10 +866,8 @@ func weaktypesym(t *Type) *Sym {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
// isreflexive reports whether t has a reflexive equality operator.
|
||||||
* Returns 1 if t has a reflexive equality operator.
|
// That is, if x==x for all x of type t.
|
||||||
* That is, if x==x for all x of type t.
|
|
||||||
*/
|
|
||||||
func isreflexive(t *Type) bool {
|
func isreflexive(t *Type) bool {
|
||||||
switch t.Etype {
|
switch t.Etype {
|
||||||
case TBOOL,
|
case TBOOL,
|
||||||
|
|
@ -987,7 +908,6 @@ func isreflexive(t *Type) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
|
|
||||||
default:
|
default:
|
||||||
|
|
@@ -996,6 +916,56 @@ func isreflexive(t *Type) bool {
 	}
 }
 
+// needkeyupdate reports whether map updates with t as a key
+// need the key to be updated.
+func needkeyupdate(t *Type) bool {
+	switch t.Etype {
+	case TBOOL,
+		TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TCHAN:
+		return false
+
+	case TFLOAT32, // floats can be +0/-0
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TINTER,
+		TSTRING: // strings might have smaller backing stores
+		return true
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatalf("slice can't be a map key: %v", t)
+		}
+		return needkeyupdate(t.Type)
+
+	case TSTRUCT:
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			if needkeyupdate(t1.Type) {
+				return true
+			}
+		}
+		return false
+
+	default:
+		Fatalf("bad type for map key: %v", t)
+		return true
+	}
+}
+
 func dtypesym(t *Type) *Sym {
 	// Replace byte, rune aliases with real type.
 	// They've been separate internally to make error messages
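A concrete illustration of the float case: +0 and -0 compare equal, so both spellings hit the same map entry, and needkeyupdate signals that the runtime should also overwrite the stored key on such an update. The snippet below is hypothetical user-level code, not part of this change; it only shows why float keys are the special case the comment mentions.

// Illustrative only; assumes nothing beyond the standard library.
package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]string{}
	m[+0.0] = "plus"
	negZero := math.Copysign(0, -1)
	m[negZero] = "minus" // -0.0 == +0.0, so this updates the existing entry
	fmt.Println(len(m))  // 1
}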
@ -1124,28 +1094,27 @@ ok:
|
||||||
|
|
||||||
case TINTER:
|
case TINTER:
|
||||||
m := imethods(t)
|
m := imethods(t)
|
||||||
n := 0
|
n := len(m)
|
||||||
for a := m; a != nil; a = a.link {
|
for _, a := range m {
|
||||||
dtypesym(a.type_)
|
dtypesym(a.type_)
|
||||||
n++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ../../runtime/type.go:/InterfaceType
|
// ../../../runtime/type.go:/InterfaceType
|
||||||
ot = dcommontype(s, ot, t)
|
ot = dcommontype(s, ot, t)
|
||||||
|
|
||||||
xt = ot - 2*Widthptr
|
xt = ot - 2*Widthptr
|
||||||
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
||||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||||
for a := m; a != nil; a = a.link {
|
for _, a := range m {
|
||||||
// ../../runtime/type.go:/imethod
|
// ../../../runtime/type.go:/imethod
|
||||||
ot = dgostringptr(s, ot, a.name)
|
ot = dgostringptr(s, ot, a.name)
|
||||||
|
|
||||||
ot = dgopkgpath(s, ot, a.pkg)
|
ot = dgopkgpath(s, ot, a.pkg)
|
||||||
ot = dsymptr(s, ot, dtypesym(a.type_), 0)
|
ot = dsymptr(s, ot, dtypesym(a.type_), 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ../../runtime/type.go:/MapType
|
// ../../../runtime/type.go:/MapType
|
||||||
case TMAP:
|
case TMAP:
|
||||||
s1 := dtypesym(t.Down)
|
s1 := dtypesym(t.Down)
|
||||||
|
|
||||||
|
|
@ -1176,6 +1145,7 @@ ok:
|
||||||
|
|
||||||
ot = duint16(s, ot, uint16(mapbucket(t).Width))
|
ot = duint16(s, ot, uint16(mapbucket(t).Width))
|
||||||
ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
|
ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
|
||||||
|
ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down))))
|
||||||
|
|
||||||
case TPTR32, TPTR64:
|
case TPTR32, TPTR64:
|
||||||
if t.Type.Etype == TANY {
|
if t.Type.Etype == TANY {
|
||||||
|
|
@ -1269,8 +1239,7 @@ func dumptypestructs() {
|
||||||
var n *Node
|
var n *Node
|
||||||
|
|
||||||
// copy types from externdcl list to signatlist
|
// copy types from externdcl list to signatlist
|
||||||
for l := externdcl; l != nil; l = l.Next {
|
for _, n := range externdcl {
|
||||||
n = l.N
|
|
||||||
if n.Op != OTYPE {
|
if n.Op != OTYPE {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
|
||||||
47 src/cmd/compile/internal/gc/reflect_test.go (new file)

@@ -0,0 +1,47 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"reflect"
	"sort"
	"testing"
)

func TestSortingByMethodNameAndPackagePath(t *testing.T) {
	data := []*Sig{
		&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
		&Sig{name: "b", pkg: nil},
		&Sig{name: "c", pkg: nil},
		&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
		&Sig{name: "c", pkg: nil},
		&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
		&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
		&Sig{name: "b", pkg: nil},
	}
	want := []*Sig{
		&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
		&Sig{name: "b", pkg: nil},
		&Sig{name: "b", pkg: nil},
		&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
		&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
		&Sig{name: "c", pkg: nil},
		&Sig{name: "c", pkg: nil},
		&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
	}
	if len(data) != len(want) {
		t.Fatal("want and data must match")
	}
	if reflect.DeepEqual(data, want) {
		t.Fatal("data must be shuffled")
	}
	sort.Sort(byMethodNameAndPackagePath(data))
	if !reflect.DeepEqual(data, want) {
		t.Logf("want: %#v", want)
		t.Logf("data: %#v", data)
		t.Errorf("sorting failed")
	}
}
@@ -170,7 +170,7 @@ func setaddrs(bit Bits) {
 
 	for bany(&bit) {
 		// convert each bit to a variable
-		i = bnum(bit)
+		i = bnum(&bit)
 
 		node = vars[i].node
 		n = int(vars[i].name)
@@ -1321,7 +1321,7 @@ loop2:
 			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
 		}
 		for bany(&bit) {
-			i = bnum(bit)
+			i = bnum(&bit)
 			change = 0
 			paint1(f, i)
 			biclr(&bit, uint(i))
@@ -1465,7 +1465,7 @@ func bany(a *Bits) bool {
 }
 
 // bnum reports the lowest index of a 1 bit in a.
-func bnum(a Bits) int {
+func bnum(a *Bits) int {
 	for i, x := range &a.b { // & to avoid making a copy of a.b
 		if x != 0 {
 			return 64*i + Bitno(x)
@@ -1541,7 +1541,7 @@ func (bits Bits) String() string {
 	var buf bytes.Buffer
 	sep := ""
 	for bany(&bits) {
-		i := bnum(bits)
+		i := bnum(&bits)
 		buf.WriteString(sep)
 		sep = " "
 		v := &vars[i]
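bnum scans the 64-bit words of the bit set and delegates the within-word search to Bitno. A standalone sketch of the same lowest-set-bit computation using the standard library; the helper name is invented and this is not the compiler's implementation:

// Illustrative only; mirrors the shape of bnum using math/bits.
package main

import (
	"fmt"
	"math/bits"
)

// lowestSetBit returns the index of the lowest 1 bit across a slice of
// 64-bit words, or -1 if every word is zero.
func lowestSetBit(words []uint64) int {
	for i, x := range words {
		if x != 0 {
			return 64*i + bits.TrailingZeros64(x)
		}
	}
	return -1
}

func main() {
	fmt.Println(lowestSetBit([]uint64{0, 0x10})) // 68
}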
@ -329,13 +329,13 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
||||||
// copy slice
|
// copy slice
|
||||||
a := inittemps[r]
|
a := inittemps[r]
|
||||||
|
|
||||||
n1 := *l
|
n := *l
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_array)
|
n.Xoffset = l.Xoffset + int64(Array_array)
|
||||||
gdata(&n1, Nod(OADDR, a, nil), Widthptr)
|
gdata(&n, Nod(OADDR, a, nil), Widthptr)
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_nel)
|
n.Xoffset = l.Xoffset + int64(Array_nel)
|
||||||
gdata(&n1, r.Right, Widthint)
|
gdata(&n, r.Right, Widthint)
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_cap)
|
n.Xoffset = l.Xoffset + int64(Array_cap)
|
||||||
gdata(&n1, r.Right, Widthint)
|
gdata(&n, r.Right, Widthint)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
fallthrough
|
fallthrough
|
||||||
|
|
@ -344,24 +344,21 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
||||||
case OSTRUCTLIT:
|
case OSTRUCTLIT:
|
||||||
p := initplans[r]
|
p := initplans[r]
|
||||||
|
|
||||||
n1 := *l
|
n := *l
|
||||||
var e *InitEntry
|
for i := range p.E {
|
||||||
var ll *Node
|
e := &p.E[i]
|
||||||
var rr *Node
|
n.Xoffset = l.Xoffset + e.Xoffset
|
||||||
for i := 0; i < len(p.E); i++ {
|
n.Type = e.Expr.Type
|
||||||
e = &p.E[i]
|
|
||||||
n1.Xoffset = l.Xoffset + e.Xoffset
|
|
||||||
n1.Type = e.Expr.Type
|
|
||||||
if e.Expr.Op == OLITERAL {
|
if e.Expr.Op == OLITERAL {
|
||||||
gdata(&n1, e.Expr, int(n1.Type.Width))
|
gdata(&n, e.Expr, int(n.Type.Width))
|
||||||
} else {
|
} else {
|
||||||
ll = Nod(OXXX, nil, nil)
|
ll := Nod(OXXX, nil, nil)
|
||||||
*ll = n1
|
*ll = n
|
||||||
ll.Orig = ll // completely separate copy
|
ll.Orig = ll // completely separate copy
|
||||||
if !staticassign(ll, e.Expr, out) {
|
if !staticassign(ll, e.Expr, out) {
|
||||||
// Requires computation, but we're
|
// Requires computation, but we're
|
||||||
// copying someone else's computation.
|
// copying someone else's computation.
|
||||||
rr = Nod(OXXX, nil, nil)
|
rr := Nod(OXXX, nil, nil)
|
||||||
|
|
||||||
*rr = *orig
|
*rr = *orig
|
||||||
rr.Orig = rr // completely separate copy
|
rr.Orig = rr // completely separate copy
|
||||||
|
|
@ -380,8 +377,6 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func staticassign(l *Node, r *Node, out **NodeList) bool {
|
func staticassign(l *Node, r *Node, out **NodeList) bool {
|
||||||
var n1 Node
|
|
||||||
|
|
||||||
for r.Op == OCONVNOP {
|
for r.Op == OCONVNOP {
|
||||||
r = r.Left
|
r = r.Left
|
||||||
}
|
}
|
||||||
|
|
@ -404,9 +399,9 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
||||||
case OADDR:
|
case OADDR:
|
||||||
var nam Node
|
var nam Node
|
||||||
if stataddr(&nam, r.Left) {
|
if stataddr(&nam, r.Left) {
|
||||||
n1 := *r
|
n := *r
|
||||||
n1.Left = &nam
|
n.Left = &nam
|
||||||
gdata(l, &n1, int(l.Type.Width))
|
gdata(l, &n, int(l.Type.Width))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
fallthrough
|
fallthrough
|
||||||
|
|
@ -448,13 +443,13 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
||||||
ta.Bound = Mpgetfix(r.Right.Val().U.(*Mpint))
|
ta.Bound = Mpgetfix(r.Right.Val().U.(*Mpint))
|
||||||
a := staticname(ta, 1)
|
a := staticname(ta, 1)
|
||||||
inittemps[r] = a
|
inittemps[r] = a
|
||||||
n1 = *l
|
n := *l
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_array)
|
n.Xoffset = l.Xoffset + int64(Array_array)
|
||||||
gdata(&n1, Nod(OADDR, a, nil), Widthptr)
|
gdata(&n, Nod(OADDR, a, nil), Widthptr)
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_nel)
|
n.Xoffset = l.Xoffset + int64(Array_nel)
|
||||||
gdata(&n1, r.Right, Widthint)
|
gdata(&n, r.Right, Widthint)
|
||||||
n1.Xoffset = l.Xoffset + int64(Array_cap)
|
n.Xoffset = l.Xoffset + int64(Array_cap)
|
||||||
gdata(&n1, r.Right, Widthint)
|
gdata(&n, r.Right, Widthint)
|
||||||
|
|
||||||
// Fall through to init underlying array.
|
// Fall through to init underlying array.
|
||||||
l = a
|
l = a
|
||||||
|
|
@ -466,19 +461,17 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
||||||
initplan(r)
|
initplan(r)
|
||||||
|
|
||||||
p := initplans[r]
|
p := initplans[r]
|
||||||
n1 = *l
|
n := *l
|
||||||
var e *InitEntry
|
for i := range p.E {
|
||||||
var a *Node
|
e := &p.E[i]
|
||||||
for i := 0; i < len(p.E); i++ {
|
n.Xoffset = l.Xoffset + e.Xoffset
|
||||||
e = &p.E[i]
|
n.Type = e.Expr.Type
|
||||||
n1.Xoffset = l.Xoffset + e.Xoffset
|
|
||||||
n1.Type = e.Expr.Type
|
|
||||||
if e.Expr.Op == OLITERAL {
|
if e.Expr.Op == OLITERAL {
|
||||||
gdata(&n1, e.Expr, int(n1.Type.Width))
|
gdata(&n, e.Expr, int(n.Type.Width))
|
||||||
} else {
|
} else {
|
||||||
setlineno(e.Expr)
|
setlineno(e.Expr)
|
||||||
a = Nod(OXXX, nil, nil)
|
a := Nod(OXXX, nil, nil)
|
||||||
*a = n1
|
*a = n
|
||||||
a.Orig = a // completely separate copy
|
a.Orig = a // completely separate copy
|
||||||
if !staticassign(a, e.Expr, out) {
|
if !staticassign(a, e.Expr, out) {
|
||||||
*out = list(*out, Nod(OAS, a, e.Expr))
|
*out = list(*out, Nod(OAS, a, e.Expr))
|
||||||
|
|
@ -569,9 +562,8 @@ func getdyn(n *Node, top int) int {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
var value *Node
|
|
||||||
for nl := n.List; nl != nil; nl = nl.Next {
|
for nl := n.List; nl != nil; nl = nl.Next {
|
||||||
value = nl.N.Right
|
value := nl.N.Right
|
||||||
mode |= getdyn(value, 0)
|
mode |= getdyn(value, 0)
|
||||||
if mode == MODEDYNAM|MODECONST {
|
if mode == MODEDYNAM|MODECONST {
|
||||||
break
|
break
|
||||||
|
|
@ -582,18 +574,15 @@ func getdyn(n *Node, top int) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
||||||
var r *Node
|
|
||||||
var a *Node
|
|
||||||
var index *Node
|
|
||||||
var value *Node
|
|
||||||
|
|
||||||
for nl := n.List; nl != nil; nl = nl.Next {
|
for nl := n.List; nl != nil; nl = nl.Next {
|
||||||
r = nl.N
|
r := nl.N
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("structlit: rhs not OKEY: %v", r)
|
Fatalf("structlit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
|
|
||||||
|
var a *Node
|
||||||
|
|
||||||
switch value.Op {
|
switch value.Op {
|
||||||
case OARRAYLIT:
|
case OARRAYLIT:
|
||||||
|
|
@ -650,18 +639,15 @@ func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
||||||
var r *Node
|
|
||||||
var a *Node
|
|
||||||
var index *Node
|
|
||||||
var value *Node
|
|
||||||
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
r = l.N
|
r := l.N
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("arraylit: rhs not OKEY: %v", r)
|
Fatalf("arraylit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
|
|
||||||
|
var a *Node
|
||||||
|
|
||||||
switch value.Op {
|
switch value.Op {
|
||||||
case OARRAYLIT:
|
case OARRAYLIT:
|
||||||
|
|
@ -828,17 +814,14 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
*init = list(*init, a)
|
*init = list(*init, a)
|
||||||
|
|
||||||
// put dynamics into slice (6)
|
// put dynamics into slice (6)
|
||||||
var value *Node
|
|
||||||
var r *Node
|
|
||||||
var index *Node
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
r = l.N
|
r := l.N
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("slicelit: rhs not OKEY: %v", r)
|
Fatalf("slicelit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
a = Nod(OINDEX, var_, index)
|
a := Nod(OINDEX, var_, index)
|
||||||
a.Bounded = true
|
a.Bounded = true
|
||||||
|
|
||||||
// TODO need to check bounds?
|
// TODO need to check bounds?
|
||||||
|
|
@ -872,10 +855,6 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
var r *Node
|
|
||||||
var index *Node
|
|
||||||
var value *Node
|
|
||||||
|
|
||||||
ctxt = 0
|
ctxt = 0
|
||||||
|
|
||||||
// make the map var
|
// make the map var
|
||||||
|
|
@ -889,13 +868,12 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
b := int64(0)
|
b := int64(0)
|
||||||
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
r = l.N
|
r := l.N
|
||||||
|
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
|
|
||||||
if isliteral(index) && isliteral(value) {
|
if isliteral(index) && isliteral(value) {
|
||||||
b++
|
b++
|
||||||
|
|
@ -936,17 +914,14 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
vstat := staticname(t, ctxt)
|
vstat := staticname(t, ctxt)
|
||||||
|
|
||||||
b := int64(0)
|
b := int64(0)
|
||||||
var index *Node
|
|
||||||
var r *Node
|
|
||||||
var value *Node
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
r = l.N
|
r := l.N
|
||||||
|
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
|
|
||||||
if isliteral(index) && isliteral(value) {
|
if isliteral(index) && isliteral(value) {
|
||||||
// build vstat[b].a = key;
|
// build vstat[b].a = key;
|
||||||
|
|
@ -981,13 +956,13 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
// for i = 0; i < len(vstat); i++ {
|
// for i = 0; i < len(vstat); i++ {
|
||||||
// map[vstat[i].a] = vstat[i].b
|
// map[vstat[i].a] = vstat[i].b
|
||||||
// }
|
// }
|
||||||
index = temp(Types[TINT])
|
index := temp(Types[TINT])
|
||||||
|
|
||||||
a = Nod(OINDEX, vstat, index)
|
a = Nod(OINDEX, vstat, index)
|
||||||
a.Bounded = true
|
a.Bounded = true
|
||||||
a = Nod(ODOT, a, newname(symb))
|
a = Nod(ODOT, a, newname(symb))
|
||||||
|
|
||||||
r = Nod(OINDEX, vstat, index)
|
r := Nod(OINDEX, vstat, index)
|
||||||
r.Bounded = true
|
r.Bounded = true
|
||||||
r = Nod(ODOT, r, newname(syma))
|
r = Nod(ODOT, r, newname(syma))
|
||||||
r = Nod(OINDEX, var_, r)
|
r = Nod(OINDEX, var_, r)
|
||||||
|
|
@ -1011,13 +986,13 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||||
|
|
||||||
var val *Node
|
var val *Node
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
r = l.N
|
r := l.N
|
||||||
|
|
||||||
if r.Op != OKEY {
|
if r.Op != OKEY {
|
||||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||||
}
|
}
|
||||||
index = r.Left
|
index := r.Left
|
||||||
value = r.Right
|
value := r.Right
|
||||||
|
|
||||||
if isliteral(index) && isliteral(value) {
|
if isliteral(index) && isliteral(value) {
|
||||||
continue
|
continue
|
||||||
|
|
@ -1291,9 +1266,8 @@ func initplan(n *Node) {
|
||||||
Fatalf("initplan")
|
Fatalf("initplan")
|
||||||
|
|
||||||
case OARRAYLIT:
|
case OARRAYLIT:
|
||||||
var a *Node
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
a = l.N
|
a := l.N
|
||||||
if a.Op != OKEY || !Smallintconst(a.Left) {
|
if a.Op != OKEY || !Smallintconst(a.Left) {
|
||||||
Fatalf("initplan arraylit")
|
Fatalf("initplan arraylit")
|
||||||
}
|
}
|
||||||
|
|
@ -1301,9 +1275,8 @@ func initplan(n *Node) {
|
||||||
}
|
}
|
||||||
|
|
||||||
case OSTRUCTLIT:
|
case OSTRUCTLIT:
|
||||||
var a *Node
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
a = l.N
|
a := l.N
|
||||||
if a.Op != OKEY || a.Left.Type == nil {
|
if a.Op != OKEY || a.Left.Type == nil {
|
||||||
Fatalf("initplan structlit")
|
Fatalf("initplan structlit")
|
||||||
}
|
}
|
||||||
|
|
@ -1311,9 +1284,8 @@ func initplan(n *Node) {
|
||||||
}
|
}
|
||||||
|
|
||||||
case OMAPLIT:
|
case OMAPLIT:
|
||||||
var a *Node
|
|
||||||
for l := n.List; l != nil; l = l.Next {
|
for l := n.List; l != nil; l = l.Next {
|
||||||
a = l.N
|
a := l.N
|
||||||
if a.Op != OKEY {
|
if a.Op != OKEY {
|
||||||
Fatalf("initplan maplit")
|
Fatalf("initplan maplit")
|
||||||
}
|
}
|
||||||
|
|
@ -1333,13 +1305,11 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
|
||||||
if isvaluelit(n) {
|
if isvaluelit(n) {
|
||||||
initplan(n)
|
initplan(n)
|
||||||
q := initplans[n]
|
q := initplans[n]
|
||||||
var e *InitEntry
|
for _, qe := range q.E {
|
||||||
for i := 0; i < len(q.E); i++ {
|
e := entry(p)
|
||||||
e = entry(p)
|
*e = qe
|
||||||
*e = q.E[i]
|
|
||||||
e.Xoffset += xoffset
|
e.Xoffset += xoffset
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -2120,10 +2120,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
 
 	// Set receiver (for interface calls)
 	if rcvr != nil {
-		var argStart int64
-		if HasLinkRegister() {
-			argStart += int64(Widthptr)
-		}
+		argStart := Ctxt.FixedFrameSize()
 		if k != callNormal {
 			argStart += int64(2 * Widthptr)
 		}
@@ -3737,6 +3734,12 @@ func (s *genState) genValue(v *ssa.Value) {
 		p.To.Type = obj.TYPE_ADDR
 		p.To.Sym = Linksym(Pkglookup("duffzero", Runtimepkg))
 		p.To.Offset = v.AuxInt
+	case ssa.OpAMD64MOVOconst:
+		if v.AuxInt != 0 {
+			v.Unimplementedf("MOVOconst can only do constant=0")
+		}
+		r := regnum(v)
+		opregreg(x86.AXORPS, r, r)
 	case ssa.OpCopy: // TODO: lower to MOVQ earlier?
 		if v.Type.IsMemory() {
@@ -59,26 +59,21 @@ func adderr(line int, format string, args ...interface{}) {
	})
}

+// errcmp sorts errors by line, then seq, then message.
type errcmp []Error

-func (x errcmp) Len() int {
-	return len(x)
-}
-
-func (x errcmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
+func (x errcmp) Len() int      { return len(x) }
+func (x errcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x errcmp) Less(i, j int) bool {
	a := &x[i]
	b := &x[j]
	if a.lineno != b.lineno {
-		return a.lineno-b.lineno < 0
+		return a.lineno < b.lineno
	}
	if a.seq != b.seq {
-		return a.seq-b.seq < 0
+		return a.seq < b.seq
	}
-	return stringsCompare(a.msg, b.msg) < 0
+	return a.msg < b.msg
}

func Flusherrors() {

@@ -86,7 +81,7 @@ func Flusherrors() {
	if len(errors) == 0 {
		return
	}
-	sort.Sort(errcmp(errors[:len(errors)]))
+	sort.Sort(errcmp(errors))
	for i := 0; i < len(errors); i++ {
		if i == 0 || errors[i].msg != errors[i-1].msg {
			fmt.Printf("%s", errors[i].msg)

@@ -127,7 +122,7 @@ func Yyerror(format string, args ...interface{}) {

	// An unexpected EOF caused a syntax error. Use the previous
	// line number since getc generated a fake newline character.
-	if curio.eofnl != 0 {
+	if curio.eofnl {
		lexlineno = prevlineno
	}

@@ -352,23 +347,6 @@ func importdot(opkg *Pkg, pack *Node) {
	}
}

-func gethunk() {
-	nh := int32(NHUNK)
-	if thunk >= 10*NHUNK {
-		nh = 10 * NHUNK
-	}
-	h := string(make([]byte, nh))
-	if h == "" {
-		Flusherrors()
-		Yyerror("out of memory")
-		errorexit()
-	}
-
-	hunk = h
-	nhunk = nh
-	thunk += nh
-}
-
func Nod(op int, nleft *Node, nright *Node) *Node {
	n := new(Node)
	n.Op = uint8(op)

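Editor's note: the errcmp hunk replaces stringsCompare-based ordering with direct field comparisons in a sort.Interface. A minimal, self-contained sketch of that pattern follows; the Error type, field names, and byPos are illustrative assumptions, not the compiler's own declarations.

package main

import (
	"fmt"
	"sort"
)

// Error is a stand-in for the compiler's error record (assumed fields).
type Error struct {
	lineno, seq int
	msg         string
}

type byPos []Error

func (x byPos) Len() int      { return len(x) }
func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byPos) Less(i, j int) bool {
	a, b := &x[i], &x[j]
	if a.lineno != b.lineno {
		return a.lineno < b.lineno
	}
	if a.seq != b.seq {
		return a.seq < b.seq
	}
	return a.msg < b.msg
}

func main() {
	errs := []Error{{10, 1, "b"}, {3, 0, "a"}, {10, 0, "c"}}
	sort.Sort(byPos(errs))
	fmt.Println(errs) // sorted by line, then seq, then message
}

Comparing the fields directly in Less avoids the extra three-way comparison helper and reads as ordinary Go.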
@@ -612,16 +590,11 @@ func typ(et int) *Type {
	return t
}

+// methcmp sorts by symbol, then by package path for unexported symbols.
type methcmp []*Type

-func (x methcmp) Len() int {
-	return len(x)
-}
-
-func (x methcmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
+func (x methcmp) Len() int      { return len(x) }
+func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x methcmp) Less(i, j int) bool {
	a := x[i]
	b := x[j]

@@ -632,16 +605,14 @@ func (x methcmp) Less(i, j int) bool {
		return true
	}
	if b.Sym == nil {
-		return 1 < 0
+		return false
	}
-	k := stringsCompare(a.Sym.Name, b.Sym.Name)
-	if k != 0 {
-		return k < 0
+	if a.Sym.Name != b.Sym.Name {
+		return a.Sym.Name < b.Sym.Name
	}
	if !exportname(a.Sym.Name) {
-		k := stringsCompare(a.Sym.Pkg.Path, b.Sym.Pkg.Path)
-		if k != 0 {
-			return k < 0
+		if a.Sym.Pkg.Path != b.Sym.Pkg.Path {
+			return a.Sym.Pkg.Path < b.Sym.Pkg.Path
		}
	}

@@ -653,24 +624,19 @@ func sortinter(t *Type) *Type {
		return t
	}

-	i := 0
-	for f := t.Type; f != nil; f = f.Down {
-		i++
-	}
-	a := make([]*Type, i)
-	i = 0
-	var f *Type
-	for f = t.Type; f != nil; f = f.Down {
-		a[i] = f
-		i++
-	}
-	sort.Sort(methcmp(a[:i]))
-	for i--; i >= 0; i-- {
-		a[i].Down = f
-		f = a[i]
-	}
-
-	t.Type = f
+	var a []*Type
+	for f := t.Type; f != nil; f = f.Down {
+		a = append(a, f)
+	}
+	sort.Sort(methcmp(a))
+
+	n := len(a) // n > 0 due to initial conditions.
+	for i := 0; i < n-1; i++ {
+		a[i].Down = a[i+1]
+	}
+	a[n-1].Down = nil
+
+	t.Type = a[0]
	return t
}

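Editor's note: sortinter now collects the linked method list into a slice, sorts the slice, and relinks the Down pointers from the sorted order. A small sketch of that collect-sort-relink pattern on a toy singly linked list follows; the node type, field names, and sortList helper are illustrative, not the compiler's, and sort.Slice is used for brevity.

package main

import (
	"fmt"
	"sort"
)

// node is an illustrative singly linked list cell.
type node struct {
	name string
	down *node
}

// sortList collects the list into a slice, sorts it, and relinks it.
func sortList(head *node) *node {
	if head == nil {
		return nil
	}
	var a []*node
	for n := head; n != nil; n = n.down {
		a = append(a, n)
	}
	sort.Slice(a, func(i, j int) bool { return a[i].name < a[j].name })

	for i := 0; i < len(a)-1; i++ {
		a[i].down = a[i+1]
	}
	a[len(a)-1].down = nil
	return a[0]
}

func main() {
	l := &node{"write", &node{"close", &node{"read", nil}}}
	for n := sortList(l); n != nil; n = n.down {
		fmt.Println(n.name) // close, read, write
	}
}

Appending to a slice avoids the two-pass count-then-fill dance of the old code, at the cost of a few reallocations.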
@@ -1618,33 +1584,32 @@ func Ptrto(t *Type) *Type {
}

func frame(context int) {
-	var l *NodeList
-
	if context != 0 {
		fmt.Printf("--- external frame ---\n")
-		l = externdcl
-	} else if Curfn != nil {
-		fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
-		l = Curfn.Func.Dcl
-	} else {
+		for _, n := range externdcl {
+			printframenode(n)
+		}
		return
	}

-	var n *Node
-	var w int64
-	for ; l != nil; l = l.Next {
-		n = l.N
-		w = -1
-		if n.Type != nil {
-			w = n.Type.Width
-		}
-		switch n.Op {
-		case ONAME:
-			fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Name.Vargen, n.Type, w)
-
-		case OTYPE:
-			fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
-		}
+	if Curfn != nil {
+		fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
+		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+			printframenode(l.N)
+		}
+	}
+}
+
+func printframenode(n *Node) {
+	w := int64(-1)
+	if n.Type != nil {
+		w = n.Type.Width
+	}
+	switch n.Op {
+	case ONAME:
+		fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Name.Vargen, n.Type, w)
+	case OTYPE:
+		fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
	}
}

@@ -1983,19 +1948,6 @@ func cheapexpr(n *Node, init **NodeList) *Node {
	return copyexpr(n, n.Type, init)
}

-/*
- * return n in a local variable of type t if it is not already.
- * the value is guaranteed not to change except by direct
- * assignment to it.
- */
-func localexpr(n *Node, t *Type, init **NodeList) *Node {
-	if n.Op == ONAME && (!n.Addrtaken || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
-		return n
-	}
-
-	return copyexpr(n, t, init)
-}
-
func Setmaxarg(t *Type, extra int32) {
	dowidth(t)
	w := t.Argwid

@@ -2163,17 +2115,17 @@ func adddot(n *Node) *Node {
 */
type Symlink struct {
	field *Type
-	good      uint8
-	followptr uint8
	link  *Symlink
+	good      bool
+	followptr bool
}

var slist *Symlink

-func expand0(t *Type, followptr int) {
+func expand0(t *Type, followptr bool) {
	u := t
	if Isptr[u.Etype] {
-		followptr = 1
+		followptr = true
		u = u.Type
	}

@@ -2187,7 +2139,7 @@ func expand0(t *Type, followptr bool) {
			sl = new(Symlink)
			sl.field = f
			sl.link = slist
-			sl.followptr = uint8(followptr)
+			sl.followptr = followptr
			slist = sl
		}

@@ -2205,13 +2157,13 @@ func expand0(t *Type, followptr bool) {
			sl = new(Symlink)
			sl.field = f
			sl.link = slist
-			sl.followptr = uint8(followptr)
+			sl.followptr = followptr
			slist = sl
		}
	}
}

-func expand1(t *Type, d int, followptr int) {
+func expand1(t *Type, d int, followptr bool) {
	if t.Trecur != 0 {
		return
	}

@@ -2226,7 +2178,7 @@ func expand1(t *Type, d int, followptr bool) {

	u := t
	if Isptr[u.Etype] {
-		followptr = 1
+		followptr = true
		u = u.Type
	}

@@ -2263,7 +2215,7 @@ func expandmeth(t *Type) {
	// generate all reachable methods
	slist = nil

-	expand1(t, len(dotlist)-1, 0)
+	expand1(t, len(dotlist)-1, false)

	// check each method to be uniquely reachable
	var c int

@@ -2278,7 +2230,7 @@ func expandmeth(t *Type) {
		if c == 1 {
			// addot1 may have dug out arbitrary fields, we only want methods.
			if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
-				sl.good = 1
+				sl.good = true
				sl.field = f
			}
		}

@@ -2293,13 +2245,13 @@ func expandmeth(t *Type) {

	t.Xmethod = t.Method
	for sl := slist; sl != nil; sl = sl.link {
-		if sl.good != 0 {
+		if sl.good {
			// add it to the base type method list
			f = typ(TFIELD)

			*f = *sl.field
			f.Embedded = 1 // needs a trampoline
-			if sl.followptr != 0 {
+			if sl.followptr {
				f.Embedded = 2
			}
			f.Down = t.Xmethod

@@ -2616,21 +2568,6 @@ func genhash(sym *Sym, t *Type) {
		colasdefn(n.List, n)
		ni = n.List.N

-		// TODO: with aeshash we don't need these shift/mul parts
-
-		// h = h<<3 | h>>61
-		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OOR, Nod(OLSH, nh, Nodintconst(3)), Nod(ORSH, nh, Nodintconst(int64(Widthptr)*8-3)))))
-
-		// h *= mul
-		// Same multipliers as in runtime.memhash.
-		var mul int64
-		if Widthptr == 4 {
-			mul = 3267000013
-		} else {
-			mul = 23344194077549503
-		}
-		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
-
		// h = hashel(&p[i], h)
		call := Nod(OCALL, hashel, nil)

@@ -2968,8 +2905,8 @@ func geneq(sym *Sym, t *Type) {
	safemode = old_safemode
}

-func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
-	*followptr = 0
+func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase int) *Type {
+	*followptr = false

	if t == nil {
		return nil

@@ -2988,7 +2925,7 @@ func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase int) *Type {
	if c == 1 {
		for i = 0; i < d; i++ {
			if Isptr[dotlist[i].field.Type.Etype] {
-				*followptr = 1
+				*followptr = true
				break
			}
		}

@@ -3046,9 +2983,12 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
	}
	var tm *Type
	var imtype *Type
-	var followptr int
+	var followptr bool
	var rcvr *Type
	for im := iface.Type; im != nil; im = im.Down {
+		if im.Broke {
+			continue
+		}
		imtype = methodfunc(im.Type, nil)
		tm = ifacelookdot(im.Sym, t, &followptr, 0)
		if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {

@@ -3065,7 +3005,7 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
		// the method does not exist for value types.
		rcvr = getthisx(tm.Type).Type.Type

-		if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && followptr == 0 && !isifacemethod(tm.Type) {
+		if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && !followptr && !isifacemethod(tm.Type) {
			if false && Debug['r'] != 0 {
				Yyerror("interface pointer mismatch")
			}

@@ -3420,7 +3360,6 @@ func ngotype(n *Node) *Sym {
 * only in the last segment of the path, and it makes for happier
 * users if we escape that as little as possible.
 *
- * If you edit this, edit ../ld/lib.c:/^pathtoprefix too.
 * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
 */
func pathtoprefix(s string) string {

@@ -3492,17 +3431,13 @@ func isbadimport(path string) bool {
		return true
	}

-	for i := 0; i < len(reservedimports); i++ {
-		if path == reservedimports[i] {
+	for _, ri := range reservedimports {
+		if path == ri {
			Yyerror("import path %q is reserved and cannot be used", path)
			return true
		}
	}

-	var s string
-	_ = s
-	var r uint
-	_ = r
	for _, r := range path {
		if r == utf8.RuneError {
			Yyerror("import path contains invalid UTF-8 sequence: %q", path)

@@ -6,7 +6,6 @@ package gc

import (
	"cmd/internal/obj"
-	"fmt"
	"sort"
	"strconv"
)

@@ -779,7 +778,13 @@ func exprcmp(c1, c2 *caseClause) int {
	if len(a) > len(b) {
		return +1
	}
-	return stringsCompare(a, b)
+	if a == b {
+		return 0
+	}
+	if a < b {
+		return -1
+	}
+	return +1
	}

	return 0

@@ -806,43 +811,3 @@ func (x caseClauseByType) Less(i, j int) bool {
	// sort by ordinal
	return c1.ordinal < c2.ordinal
}

-func dumpcase(cc []*caseClause) {
-	for _, c := range cc {
-		switch c.typ {
-		case caseKindDefault:
-			fmt.Printf("case-default\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-
-		case caseKindExprConst:
-			fmt.Printf("case-exprconst\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-
-		case caseKindExprVar:
-			fmt.Printf("case-exprvar\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
-
-		case caseKindTypeNil:
-			fmt.Printf("case-typenil\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-
-		case caseKindTypeConst:
-			fmt.Printf("case-typeconst\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-			fmt.Printf("\thash=%x\n", c.hash)
-
-		case caseKindTypeVar:
-			fmt.Printf("case-typevar\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-
-		default:
-			fmt.Printf("case-???\n")
-			fmt.Printf("\tord=%d\n", c.ordinal)
-			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
-			fmt.Printf("\thash=%x\n", c.hash)
-		}
-	}
-
-	fmt.Printf("\n")
-}

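Editor's note: exprcmp now spells out the three-way string comparison inline instead of calling the removed stringsCompare helper. For reference, a tiny self-contained sketch of that three-way compare as a standalone helper; the name cmp3 is illustrative, and strings.Compare in the standard library has the same behavior.

package main

import "fmt"

// cmp3 returns -1, 0, or +1, mirroring the comparison exprcmp now inlines.
func cmp3(a, b string) int {
	if a == b {
		return 0
	}
	if a < b {
		return -1
	}
	return +1
}

func main() {
	fmt.Println(cmp3("ab", "abc"), cmp3("abc", "abc"), cmp3("xyz", "abc")) // -1 0 1
}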
144  src/cmd/compile/internal/gc/swt_test.go  (new file)
@@ -0,0 +1,144 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package gc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmd/compile/internal/big"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestExprcmp(t *testing.T) {
|
||||||
|
testdata := []struct {
|
||||||
|
a, b caseClause
|
||||||
|
want int
|
||||||
|
}{
|
||||||
|
// Non-constants.
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprVar},
|
||||||
|
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprVar},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
// Type switches
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, Nodbool(true), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, Nodbool(true), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 0}}, nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 0}}, nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
// Constant values.
|
||||||
|
// CTFLT
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.2)}}), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.2)}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
// CTINT
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
// CTRUNE
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('a'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('a'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
// CTSTR
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"ab"}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"xyz"}), nil), typ: caseKindExprConst},
|
||||||
|
-1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"ab"}), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"xyz"}), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||||
|
+1,
|
||||||
|
},
|
||||||
|
// Everything else should compare equal.
|
||||||
|
{
|
||||||
|
caseClause{node: Nod(OXXX, nodnil(), nil), typ: caseKindExprConst},
|
||||||
|
caseClause{node: Nod(OXXX, nodnil(), nil), typ: caseKindExprConst},
|
||||||
|
0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for i, d := range testdata {
|
||||||
|
got := exprcmp(&d.a, &d.b)
|
||||||
|
if d.want != got {
|
||||||
|
t.Errorf("%d: exprcmp(a, b) = %d; want %d", i, got, d.want)
|
||||||
|
t.Logf("\ta = caseClause{node: %#v, typ: %#v}", d.a.node, d.a.typ)
|
||||||
|
t.Logf("\tb = caseClause{node: %#v, typ: %#v}", d.b.node, d.b.typ)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@@ -409,9 +409,10 @@ func list(l *NodeList, n *Node) *NodeList {
	return concat(l, list1(n))
}

-// listsort sorts *l in place according to the 3-way comparison function f.
+// listsort sorts *l in place according to the comparison function lt.
+// The algorithm expects lt(a, b) to be equivalent to a < b.
// The algorithm is mergesort, so it is guaranteed to be O(n log n).
-func listsort(l **NodeList, f func(*Node, *Node) int) {
+func listsort(l **NodeList, lt func(*Node, *Node) bool) {
	if *l == nil || (*l).Next == nil {
		return
	}

@@ -436,10 +437,10 @@ func listsort(l **NodeList, lt func(*Node, *Node) bool) {
	(*l).End = l1

	l1 = *l
-	listsort(&l1, f)
-	listsort(&l2, f)
+	listsort(&l1, lt)
+	listsort(&l2, lt)

-	if f(l1.N, l2.N) < 0 {
+	if lt(l1.N, l2.N) {
		*l = l1
	} else {
		*l = l2

@@ -451,7 +452,7 @@ func listsort(l **NodeList, lt func(*Node, *Node) bool) {

	var le *NodeList
	for (l1 != nil) && (l2 != nil) {
-		for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
+		for (l1.Next != nil) && lt(l1.Next.N, l2.N) {
			l1 = l1.Next
		}

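Editor's note: listsort's comparator changed from a 3-way function returning an int to a boolean lt that must behave like "a < b". A compact, self-contained mergesort over a toy singly linked list using the same style of lt callback follows; the item type is a simplification for illustration, not the compiler's NodeList.

package main

import "fmt"

// item is a toy singly linked list cell.
type item struct {
	val  int
	next *item
}

// mergesort sorts the list; lt must behave like "a < b".
func mergesort(head *item, lt func(a, b *item) bool) *item {
	if head == nil || head.next == nil {
		return head
	}
	// Split the list in two with slow/fast pointers.
	slow, fast := head, head.next
	for fast != nil && fast.next != nil {
		slow, fast = slow.next, fast.next.next
	}
	second := slow.next
	slow.next = nil

	a := mergesort(head, lt)
	b := mergesort(second, lt)

	// Merge the sorted halves; taking from a on ties keeps the sort stable.
	var out, tail *item
	push := func(n *item) {
		if out == nil {
			out = n
		} else {
			tail.next = n
		}
		tail = n
	}
	for a != nil && b != nil {
		if lt(b, a) {
			n := b
			b = b.next
			push(n)
		} else {
			n := a
			a = a.next
			push(n)
		}
	}
	if a != nil {
		push(a) // attaches the whole remaining chain
	}
	if b != nil {
		push(b)
	}
	return out
}

func main() {
	l := &item{3, &item{1, &item{2, nil}}}
	lt := func(a, b *item) bool { return a.val < b.val }
	for n := mergesort(l, lt); n != nil; n = n.next {
		fmt.Print(n.val, " ") // 1 2 3
	}
	fmt.Println()
}

A boolean less-than is all mergesort needs, so the int-returning comparator was an unnecessary generality.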
||||||
|
|
@ -18,7 +18,7 @@ import (
|
||||||
* marks variables that escape the local frame.
|
* marks variables that escape the local frame.
|
||||||
* rewrites n->op to be more specific in some cases.
|
* rewrites n->op to be more specific in some cases.
|
||||||
*/
|
*/
|
||||||
var typecheckdefstack *NodeList
|
var typecheckdefstack []*Node
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* resolve ONONAME to definition, if any.
|
* resolve ONONAME to definition, if any.
|
||||||
|
|
@ -1026,11 +1026,11 @@ OpSwitch:
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if Isconst(n.Right, CTINT) {
|
if !n.Bounded && Isconst(n.Right, CTINT) {
|
||||||
x := Mpgetfix(n.Right.Val().U.(*Mpint))
|
x := Mpgetfix(n.Right.Val().U.(*Mpint))
|
||||||
if x < 0 {
|
if x < 0 {
|
||||||
Yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
|
Yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
|
||||||
} else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
|
} else if Isfixedarray(t) && x >= t.Bound {
|
||||||
Yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.Bound)
|
Yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.Bound)
|
||||||
} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val().U.(string))) {
|
} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val().U.(string))) {
|
||||||
Yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val().U.(string)))
|
Yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val().U.(string)))
|
||||||
|
|
@ -1160,16 +1160,16 @@ OpSwitch:
|
||||||
}
|
}
|
||||||
|
|
||||||
lo := n.Right.Left
|
lo := n.Right.Left
|
||||||
if lo != nil && checksliceindex(l, lo, tp) < 0 {
|
if lo != nil && !checksliceindex(l, lo, tp) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hi := n.Right.Right
|
hi := n.Right.Right
|
||||||
if hi != nil && checksliceindex(l, hi, tp) < 0 {
|
if hi != nil && !checksliceindex(l, hi, tp) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if checksliceconst(lo, hi) < 0 {
|
if !checksliceconst(lo, hi) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1227,21 +1227,21 @@ OpSwitch:
|
||||||
}
|
}
|
||||||
|
|
||||||
lo := n.Right.Left
|
lo := n.Right.Left
|
||||||
if lo != nil && checksliceindex(l, lo, tp) < 0 {
|
if lo != nil && !checksliceindex(l, lo, tp) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
mid := n.Right.Right.Left
|
mid := n.Right.Right.Left
|
||||||
if mid != nil && checksliceindex(l, mid, tp) < 0 {
|
if mid != nil && !checksliceindex(l, mid, tp) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
hi := n.Right.Right.Right
|
hi := n.Right.Right.Right
|
||||||
if hi != nil && checksliceindex(l, hi, tp) < 0 {
|
if hi != nil && !checksliceindex(l, hi, tp) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if checksliceconst(lo, hi) < 0 || checksliceconst(lo, mid) < 0 || checksliceconst(mid, hi) < 0 {
|
if !checksliceconst(lo, hi) || !checksliceconst(lo, mid) || !checksliceconst(mid, hi) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1300,7 +1300,7 @@ OpSwitch:
|
||||||
|
|
||||||
n.Op = OCONV
|
n.Op = OCONV
|
||||||
n.Type = l.Type
|
n.Type = l.Type
|
||||||
if onearg(n, "conversion to %v", l.Type) < 0 {
|
if !onearg(n, "conversion to %v", l.Type) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1388,7 +1388,7 @@ OpSwitch:
|
||||||
|
|
||||||
case OCAP, OLEN, OREAL, OIMAG:
|
case OCAP, OLEN, OREAL, OIMAG:
|
||||||
ok |= Erv
|
ok |= Erv
|
||||||
if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
|
if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1484,7 +1484,7 @@ OpSwitch:
|
||||||
l = t.Nname
|
l = t.Nname
|
||||||
r = t.Down.Nname
|
r = t.Down.Nname
|
||||||
} else {
|
} else {
|
||||||
if twoarg(n) < 0 {
|
if !twoarg(n) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1538,7 +1538,7 @@ OpSwitch:
|
||||||
break OpSwitch
|
break OpSwitch
|
||||||
|
|
||||||
case OCLOSE:
|
case OCLOSE:
|
||||||
if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
|
if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1837,9 +1837,7 @@ OpSwitch:
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
et := obj.Bool2int(checkmake(t, "len", l) < 0)
|
if !checkmake(t, "len", l) || r != nil && !checkmake(t, "cap", r) {
|
||||||
et |= obj.Bool2int(r != nil && checkmake(t, "cap", r) < 0)
|
|
||||||
if et != 0 {
|
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1863,7 +1861,7 @@ OpSwitch:
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if checkmake(t, "size", l) < 0 {
|
if !checkmake(t, "size", l) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1884,7 +1882,7 @@ OpSwitch:
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if checkmake(t, "buffer", l) < 0 {
|
if !checkmake(t, "buffer", l) {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -1947,7 +1945,7 @@ OpSwitch:
|
||||||
|
|
||||||
case OPANIC:
|
case OPANIC:
|
||||||
ok |= Etop
|
ok |= Etop
|
||||||
if onearg(n, "panic") < 0 {
|
if !onearg(n, "panic") {
|
||||||
n.Type = nil
|
n.Type = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -2228,42 +2226,42 @@ OpSwitch:
|
||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
func checksliceindex(l *Node, r *Node, tp *Type) int {
|
func checksliceindex(l *Node, r *Node, tp *Type) bool {
|
||||||
t := r.Type
|
t := r.Type
|
||||||
if t == nil {
|
if t == nil {
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
if !Isint[t.Etype] {
|
if !Isint[t.Etype] {
|
||||||
Yyerror("invalid slice index %v (type %v)", r, t)
|
Yyerror("invalid slice index %v (type %v)", r, t)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Op == OLITERAL {
|
if r.Op == OLITERAL {
|
||||||
if Mpgetfix(r.Val().U.(*Mpint)) < 0 {
|
if Mpgetfix(r.Val().U.(*Mpint)) < 0 {
|
||||||
Yyerror("invalid slice index %v (index must be non-negative)", r)
|
Yyerror("invalid slice index %v (index must be non-negative)", r)
|
||||||
return -1
|
return false
|
||||||
} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val().U.(*Mpint)) > tp.Bound {
|
} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val().U.(*Mpint)) > tp.Bound {
|
||||||
Yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.Bound)
|
Yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.Bound)
|
||||||
return -1
|
return false
|
||||||
} else if Isconst(l, CTSTR) && Mpgetfix(r.Val().U.(*Mpint)) > int64(len(l.Val().U.(string))) {
|
} else if Isconst(l, CTSTR) && Mpgetfix(r.Val().U.(*Mpint)) > int64(len(l.Val().U.(string))) {
|
||||||
Yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val().U.(string)))
|
Yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val().U.(string)))
|
||||||
return -1
|
return false
|
||||||
} else if Mpcmpfixfix(r.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
} else if Mpcmpfixfix(r.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
||||||
Yyerror("invalid slice index %v (index too large)", r)
|
Yyerror("invalid slice index %v (index too large)", r)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func checksliceconst(lo *Node, hi *Node) int {
|
func checksliceconst(lo *Node, hi *Node) bool {
|
||||||
if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val().U.(*Mpint), hi.Val().U.(*Mpint)) > 0 {
|
if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val().U.(*Mpint), hi.Val().U.(*Mpint)) > 0 {
|
||||||
Yyerror("invalid slice index: %v > %v", lo, hi)
|
Yyerror("invalid slice index: %v > %v", lo, hi)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkdefergo(n *Node) {
|
func checkdefergo(n *Node) {
|
||||||
|
|
@ -2341,14 +2339,14 @@ func implicitstar(nn **Node) {
|
||||||
*nn = n
|
*nn = n
|
||||||
}
|
}
|
||||||
|
|
||||||
func onearg(n *Node, f string, args ...interface{}) int {
|
func onearg(n *Node, f string, args ...interface{}) bool {
|
||||||
if n.Left != nil {
|
if n.Left != nil {
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
if n.List == nil {
|
if n.List == nil {
|
||||||
p := fmt.Sprintf(f, args...)
|
p := fmt.Sprintf(f, args...)
|
||||||
Yyerror("missing argument to %s: %v", p, n)
|
Yyerror("missing argument to %s: %v", p, n)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if n.List.Next != nil {
|
if n.List.Next != nil {
|
||||||
|
|
@ -2356,39 +2354,39 @@ func onearg(n *Node, f string, args ...interface{}) int {
|
||||||
Yyerror("too many arguments to %s: %v", p, n)
|
Yyerror("too many arguments to %s: %v", p, n)
|
||||||
n.Left = n.List.N
|
n.Left = n.List.N
|
||||||
n.List = nil
|
n.List = nil
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Left = n.List.N
|
n.Left = n.List.N
|
||||||
n.List = nil
|
n.List = nil
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func twoarg(n *Node) int {
|
func twoarg(n *Node) bool {
|
||||||
if n.Left != nil {
|
if n.Left != nil {
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
if n.List == nil {
|
if n.List == nil {
|
||||||
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Left = n.List.N
|
n.Left = n.List.N
|
||||||
if n.List.Next == nil {
|
if n.List.Next == nil {
|
||||||
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
||||||
n.List = nil
|
n.List = nil
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if n.List.Next.Next != nil {
|
if n.List.Next.Next != nil {
|
||||||
Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), n)
|
Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), n)
|
||||||
n.List = nil
|
n.List = nil
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Right = n.List.Next.N
|
n.Right = n.List.Next.N
|
||||||
n.List = nil
|
n.List = nil
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
|
func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
|
||||||
|
|
@ -2849,20 +2847,25 @@ func keydup(n *Node, hash map[uint32][]*Node) {
|
||||||
for _, a := range hash[h] {
|
for _, a := range hash[h] {
|
||||||
cmp.Op = OEQ
|
cmp.Op = OEQ
|
||||||
cmp.Left = n
|
cmp.Left = n
|
||||||
b := uint32(0)
|
b := false
|
||||||
if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
|
if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
|
||||||
if Eqtype(a.Left.Type, n.Type) {
|
if Eqtype(a.Left.Type, n.Type) {
|
||||||
cmp.Right = a.Left
|
cmp.Right = a.Left
|
||||||
evconst(&cmp)
|
evconst(&cmp)
|
||||||
b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
|
if cmp.Op == OLITERAL {
|
||||||
|
// Sometimes evconst fails. See issue 12536.
|
||||||
|
b = cmp.Val().U.(bool)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else if Eqtype(a.Type, n.Type) {
|
} else if Eqtype(a.Type, n.Type) {
|
||||||
cmp.Right = a
|
cmp.Right = a
|
||||||
evconst(&cmp)
|
evconst(&cmp)
|
||||||
b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
|
if cmp.Op == OLITERAL {
|
||||||
|
b = cmp.Val().U.(bool)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if b != 0 {
|
if b {
|
||||||
Yyerror("duplicate key %v in map literal", n)
|
Yyerror("duplicate key %v in map literal", n)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -3539,7 +3542,7 @@ var mapqueue *NodeList
|
||||||
func copytype(n *Node, t *Type) {
|
func copytype(n *Node, t *Type) {
|
||||||
if t.Etype == TFORW {
|
if t.Etype == TFORW {
|
||||||
// This type isn't computed yet; when it is, update n.
|
// This type isn't computed yet; when it is, update n.
|
||||||
t.Copyto = list(t.Copyto, n)
|
t.Copyto = append(t.Copyto, n)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -3564,8 +3567,8 @@ func copytype(n *Node, t *Type) {
|
||||||
t.Copyto = nil
|
t.Copyto = nil
|
||||||
|
|
||||||
// Update nodes waiting on this type.
|
// Update nodes waiting on this type.
|
||||||
for ; l != nil; l = l.Next {
|
for _, n := range l {
|
||||||
copytype(l.N, t)
|
copytype(n, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Double-check use of type as embedded type.
|
// Double-check use of type as embedded type.
|
||||||
|
|
@ -3674,16 +3677,13 @@ func typecheckdef(n *Node) *Node {
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
l := new(NodeList)
|
typecheckdefstack = append(typecheckdefstack, n)
|
||||||
l.N = n
|
|
||||||
l.Next = typecheckdefstack
|
|
||||||
typecheckdefstack = l
|
|
||||||
|
|
||||||
if n.Walkdef == 2 {
|
if n.Walkdef == 2 {
|
||||||
Flusherrors()
|
Flusherrors()
|
||||||
fmt.Printf("typecheckdef loop:")
|
fmt.Printf("typecheckdef loop:")
|
||||||
for l := typecheckdefstack; l != nil; l = l.Next {
|
for i := len(typecheckdefstack) - 1; i >= 0; i-- {
|
||||||
fmt.Printf(" %v", l.N.Sym)
|
n := typecheckdefstack[i]
|
||||||
|
fmt.Printf(" %v", n.Sym)
|
||||||
}
|
}
|
||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
Fatalf("typecheckdef loop")
|
Fatalf("typecheckdef loop")
|
||||||
|
|
@ -3819,37 +3819,38 @@ ret:
|
||||||
if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
|
if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
|
||||||
Fatalf("got %v for %v", n.Type, n)
|
Fatalf("got %v for %v", n.Type, n)
|
||||||
}
|
}
|
||||||
if typecheckdefstack.N != n {
|
last := len(typecheckdefstack) - 1
|
||||||
|
if typecheckdefstack[last] != n {
|
||||||
Fatalf("typecheckdefstack mismatch")
|
Fatalf("typecheckdefstack mismatch")
|
||||||
}
|
}
|
||||||
l = typecheckdefstack
|
typecheckdefstack[last] = nil
|
||||||
typecheckdefstack = l.Next
|
typecheckdefstack = typecheckdefstack[:last]
|
||||||
|
|
||||||
lineno = int32(lno)
|
lineno = int32(lno)
|
||||||
n.Walkdef = 1
|
n.Walkdef = 1
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkmake(t *Type, arg string, n *Node) int {
|
func checkmake(t *Type, arg string, n *Node) bool {
|
||||||
if n.Op == OLITERAL {
|
if n.Op == OLITERAL {
|
||||||
switch n.Val().Ctype() {
|
switch n.Val().Ctype() {
|
||||||
case CTINT, CTRUNE, CTFLT, CTCPLX:
|
case CTINT, CTRUNE, CTFLT, CTCPLX:
|
||||||
n.SetVal(toint(n.Val()))
|
n.SetVal(toint(n.Val()))
|
||||||
if mpcmpfixc(n.Val().U.(*Mpint), 0) < 0 {
|
if mpcmpfixc(n.Val().U.(*Mpint), 0) < 0 {
|
||||||
Yyerror("negative %s argument in make(%v)", arg, t)
|
Yyerror("negative %s argument in make(%v)", arg, t)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if Mpcmpfixfix(n.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
if Mpcmpfixfix(n.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
||||||
Yyerror("%s argument too large in make(%v)", arg, t)
|
Yyerror("%s argument too large in make(%v)", arg, t)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delay defaultlit until after we've checked range, to avoid
|
// Delay defaultlit until after we've checked range, to avoid
|
||||||
// a redundant "constant NNN overflows int" error.
|
// a redundant "constant NNN overflows int" error.
|
||||||
defaultlit(&n, Types[TINT])
|
defaultlit(&n, Types[TINT])
|
||||||
|
|
||||||
return 0
|
return true
|
||||||
|
|
||||||
default:
|
default:
|
||||||
break
|
break
|
||||||
|
|
@ -3858,13 +3859,13 @@ func checkmake(t *Type, arg string, n *Node) int {
|
||||||
|
|
||||||
if !Isint[n.Type.Etype] && n.Type.Etype != TIDEAL {
|
if !Isint[n.Type.Etype] && n.Type.Etype != TIDEAL {
|
||||||
Yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
|
Yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
|
||||||
return -1
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Defaultlit still necessary for non-constant: n might be 1<<k.
|
// Defaultlit still necessary for non-constant: n might be 1<<k.
|
||||||
defaultlit(&n, Types[TINT])
|
defaultlit(&n, Types[TINT])
|
||||||
|
|
||||||
return 0
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func markbreak(n *Node, implicit *Node) {
|
func markbreak(n *Node, implicit *Node) {
|
||||||
|
|
|
||||||
|
|
@@ -5,7 +5,6 @@ import (
	"runtime"
	"runtime/pprof"
	"strconv"
-	"strings"
)

func (n *Node) Line() string {

@@ -18,41 +17,6 @@ func atoi(s string) int {
	return int(n)
}

-func isalnum(c int) bool {
-	return isalpha(c) || isdigit(c)
-}
-
-func isalpha(c int) bool {
-	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
-}
-
-func isdigit(c int) bool {
-	return '0' <= c && c <= '9'
-}
-
-func plan9quote(s string) string {
-	if s == "" {
-		return "'" + strings.Replace(s, "'", "''", -1) + "'"
-	}
-	for i := 0; i < len(s); i++ {
-		if s[i] <= ' ' || s[i] == '\'' {
-			return "'" + strings.Replace(s, "'", "''", -1) + "'"
-		}
-	}
-	return s
-}
-
-// strings.Compare, introduced in Go 1.5.
-func stringsCompare(a, b string) int {
-	if a == b {
-		return 0
-	}
-	if a < b {
-		return -1
-	}
-	return +1
-}
-
var atExitFuncs []func()

func AtExit(f func()) {

|
|
|
||||||
|
|
@@ -1356,7 +1356,7 @@ func walkexpr(np **Node, init **NodeList) {
	}

	// s + "badgerbadgerbadger" == "badgerbadgerbadger"
-	if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+	if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
		r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
		typecheck(&r, Erv)
		walkexpr(&r, init)

@@ -2225,8 +2225,6 @@ func needwritebarrier(l *Node, r *Node) bool {

// TODO(rsc): Perhaps componentgen should run before this.

-var applywritebarrier_bv Bvec
-
func applywritebarrier(n *Node, init **NodeList) *Node {
	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
		if Debug_wb > 1 {

|
|
@ -160,7 +160,7 @@ func fixlbrace(lbr int) {
|
||||||
// set up for another one now that we're done.
|
// set up for another one now that we're done.
|
||||||
// See comment in lex.C about loophack.
|
// See comment in lex.C about loophack.
|
||||||
if lbr == LBODY {
|
if lbr == LBODY {
|
||||||
loophack = 1
|
loophack = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -545,30 +545,18 @@ hard:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func intLiteral(n *gc.Node) (x int64, ok bool) {
|
|
||||||
switch {
|
|
||||||
case n == nil:
|
|
||||||
return
|
|
||||||
case gc.Isconst(n, gc.CTINT):
|
|
||||||
return n.Int(), true
|
|
||||||
case gc.Isconst(n, gc.CTBOOL):
|
|
||||||
return int64(obj.Bool2int(n.Bool())), true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// gins is called by the front end.
|
// gins is called by the front end.
|
||||||
// It synthesizes some multiple-instruction sequences
|
// It synthesizes some multiple-instruction sequences
|
||||||
// so the front end can stay simpler.
|
// so the front end can stay simpler.
|
||||||
func gins(as int, f, t *gc.Node) *obj.Prog {
|
func gins(as int, f, t *gc.Node) *obj.Prog {
|
||||||
if as >= obj.A_ARCHSPECIFIC {
|
if as >= obj.A_ARCHSPECIFIC {
|
||||||
if x, ok := intLiteral(f); ok {
|
if x, ok := f.IntLiteral(); ok {
|
||||||
ginscon(as, x, t)
|
ginscon(as, x, t)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if as == ppc64.ACMP || as == ppc64.ACMPU {
|
if as == ppc64.ACMP || as == ppc64.ACMPU {
|
||||||
if x, ok := intLiteral(t); ok {
|
if x, ok := t.IntLiteral(); ok {
|
||||||
ginscon2(as, f, x)
|
ginscon2(as, f, x)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -69,13 +69,10 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
|
||||||
}
|
}
|
||||||
if cnt < int64(4*gc.Widthptr) {
|
if cnt < int64(4*gc.Widthptr) {
|
||||||
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
|
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
|
||||||
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
|
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
|
||||||
}
|
}
|
||||||
// TODO(dfc): https://golang.org/issue/12108
|
} else if cnt <= int64(128*gc.Widthptr) {
|
||||||
// If DUFFZERO is used inside a tail call (see genwrapper) it will
|
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
|
||||||
// overwrite the link register.
|
|
||||||
} else if false && cnt <= int64(128*gc.Widthptr) {
|
|
||||||
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
|
|
||||||
p.Reg = ppc64.REGSP
|
p.Reg = ppc64.REGSP
|
||||||
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
|
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
|
||||||
f := gc.Sysfunc("duffzero")
|
f := gc.Sysfunc("duffzero")
|
||||||
|
|
@ -83,7 +80,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
|
||||||
gc.Afunclit(&p.To, f)
|
gc.Afunclit(&p.To, f)
|
||||||
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
|
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
|
||||||
} else {
|
} else {
|
||||||
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
|
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
|
||||||
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
|
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
|
||||||
p.Reg = ppc64.REGSP
|
p.Reg = ppc64.REGSP
|
||||||
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
|
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
|
||||||
|
|
@ -443,10 +440,7 @@ func clearfat(nl *gc.Node) {
|
||||||
|
|
||||||
// The loop leaves R3 on the last zeroed dword
|
// The loop leaves R3 on the last zeroed dword
|
||||||
boff = 8
|
boff = 8
|
||||||
// TODO(dfc): https://golang.org/issue/12108
|
} else if q >= 4 {
|
||||||
// If DUFFZERO is used inside a tail call (see genwrapper) it will
|
|
||||||
// overwrite the link register.
|
|
||||||
} else if false && q >= 4 {
|
|
||||||
p := gins(ppc64.ASUB, nil, &dst)
|
p := gins(ppc64.ASUB, nil, &dst)
|
||||||
p.From.Type = obj.TYPE_CONST
|
p.From.Type = obj.TYPE_CONST
|
||||||
p.From.Offset = 8
|
p.From.Offset = 8
|
||||||
|
|
|
||||||
|
|
@ -545,30 +545,18 @@ hard:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func intLiteral(n *gc.Node) (x int64, ok bool) {
|
|
||||||
switch {
|
|
||||||
case n == nil:
|
|
||||||
return
|
|
||||||
case gc.Isconst(n, gc.CTINT):
|
|
||||||
return n.Int(), true
|
|
||||||
case gc.Isconst(n, gc.CTBOOL):
|
|
||||||
return int64(obj.Bool2int(n.Bool())), true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// gins is called by the front end.
|
// gins is called by the front end.
|
||||||
// It synthesizes some multiple-instruction sequences
|
// It synthesizes some multiple-instruction sequences
|
||||||
// so the front end can stay simpler.
|
// so the front end can stay simpler.
|
||||||
func gins(as int, f, t *gc.Node) *obj.Prog {
|
func gins(as int, f, t *gc.Node) *obj.Prog {
|
||||||
if as >= obj.A_ARCHSPECIFIC {
|
if as >= obj.A_ARCHSPECIFIC {
|
||||||
if x, ok := intLiteral(f); ok {
|
if x, ok := f.IntLiteral(); ok {
|
||||||
ginscon(as, x, t)
|
ginscon(as, x, t)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if as == ppc64.ACMP || as == ppc64.ACMPU {
|
if as == ppc64.ACMP || as == ppc64.ACMPU {
|
||||||
if x, ok := intLiteral(t); ok {
|
if x, ok := t.IntLiteral(); ok {
|
||||||
ginscon2(as, f, x)
|
ginscon2(as, f, x)
|
||||||
return nil // caller must not use
|
return nil // caller must not use
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -183,89 +183,89 @@ func proginfo(p *obj.Prog) {
|
||||||
// for the "base" form of each instruction. On the first call to
|
// for the "base" form of each instruction. On the first call to
|
||||||
// as2variant or variant2as, we'll add the variants to the table.
|
// as2variant or variant2as, we'll add the variants to the table.
|
||||||
var varianttable = [ppc64.ALAST][4]int{
|
var varianttable = [ppc64.ALAST][4]int{
|
||||||
ppc64.AADD: [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
|
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
|
||||||
-ppc64.AADDC: [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
-ppc64.AADDE: [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
-ppc64.AADDME: [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+ppc64.AADDME: {ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
-ppc64.AADDZE: [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+ppc64.AADDZE: {ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
-ppc64.AAND: [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+ppc64.AAND: {ppc64.AAND, ppc64.AANDCC, 0, 0},
-ppc64.AANDN: [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+ppc64.AANDN: {ppc64.AANDN, ppc64.AANDNCC, 0, 0},
-ppc64.ACNTLZD: [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+ppc64.ACNTLZD: {ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
-ppc64.ACNTLZW: [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+ppc64.ACNTLZW: {ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
-ppc64.ADIVD: [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+ppc64.ADIVD: {ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
-ppc64.ADIVDU: [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+ppc64.ADIVDU: {ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
-ppc64.ADIVW: [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+ppc64.ADIVW: {ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
-ppc64.ADIVWU: [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+ppc64.ADIVWU: {ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
-ppc64.AEQV: [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+ppc64.AEQV: {ppc64.AEQV, ppc64.AEQVCC, 0, 0},
-ppc64.AEXTSB: [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+ppc64.AEXTSB: {ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
-ppc64.AEXTSH: [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+ppc64.AEXTSH: {ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
-ppc64.AEXTSW: [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+ppc64.AEXTSW: {ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
-ppc64.AFABS: [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+ppc64.AFABS: {ppc64.AFABS, ppc64.AFABSCC, 0, 0},
-ppc64.AFADD: [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+ppc64.AFADD: {ppc64.AFADD, ppc64.AFADDCC, 0, 0},
-ppc64.AFADDS: [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+ppc64.AFADDS: {ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
-ppc64.AFCFID: [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+ppc64.AFCFID: {ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
-ppc64.AFCTID: [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+ppc64.AFCTID: {ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
-ppc64.AFCTIDZ: [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+ppc64.AFCTIDZ: {ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
-ppc64.AFCTIW: [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+ppc64.AFCTIW: {ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
-ppc64.AFCTIWZ: [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+ppc64.AFCTIWZ: {ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
-ppc64.AFDIV: [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+ppc64.AFDIV: {ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
-ppc64.AFDIVS: [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+ppc64.AFDIVS: {ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
-ppc64.AFMADD: [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+ppc64.AFMADD: {ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
-ppc64.AFMADDS: [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+ppc64.AFMADDS: {ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
-ppc64.AFMOVD: [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+ppc64.AFMOVD: {ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
-ppc64.AFMSUB: [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+ppc64.AFMSUB: {ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
-ppc64.AFMSUBS: [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+ppc64.AFMSUBS: {ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
-ppc64.AFMUL: [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+ppc64.AFMUL: {ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
-ppc64.AFMULS: [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+ppc64.AFMULS: {ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
-ppc64.AFNABS: [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+ppc64.AFNABS: {ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
-ppc64.AFNEG: [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+ppc64.AFNEG: {ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
-ppc64.AFNMADD: [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+ppc64.AFNMADD: {ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
-ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+ppc64.AFNMADDS: {ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
-ppc64.AFNMSUB: [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+ppc64.AFNMSUB: {ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
-ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+ppc64.AFNMSUBS: {ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
-ppc64.AFRES: [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+ppc64.AFRES: {ppc64.AFRES, ppc64.AFRESCC, 0, 0},
-ppc64.AFRSP: [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+ppc64.AFRSP: {ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
-ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+ppc64.AFRSQRTE: {ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
-ppc64.AFSEL: [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+ppc64.AFSEL: {ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
-ppc64.AFSQRT: [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+ppc64.AFSQRT: {ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
-ppc64.AFSQRTS: [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+ppc64.AFSQRTS: {ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
-ppc64.AFSUB: [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+ppc64.AFSUB: {ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
-ppc64.AFSUBS: [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+ppc64.AFSUBS: {ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
-ppc64.AMTFSB0: [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+ppc64.AMTFSB0: {ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
-ppc64.AMTFSB1: [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+ppc64.AMTFSB1: {ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
-ppc64.AMULHD: [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+ppc64.AMULHD: {ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
-ppc64.AMULHDU: [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+ppc64.AMULHDU: {ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
-ppc64.AMULHW: [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+ppc64.AMULHW: {ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
-ppc64.AMULHWU: [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+ppc64.AMULHWU: {ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
-ppc64.AMULLD: [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+ppc64.AMULLD: {ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
-ppc64.AMULLW: [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+ppc64.AMULLW: {ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
-ppc64.ANAND: [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+ppc64.ANAND: {ppc64.ANAND, ppc64.ANANDCC, 0, 0},
-ppc64.ANEG: [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+ppc64.ANEG: {ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
-ppc64.ANOR: [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+ppc64.ANOR: {ppc64.ANOR, ppc64.ANORCC, 0, 0},
-ppc64.AOR: [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+ppc64.AOR: {ppc64.AOR, ppc64.AORCC, 0, 0},
-ppc64.AORN: [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+ppc64.AORN: {ppc64.AORN, ppc64.AORNCC, 0, 0},
-ppc64.AREM: [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+ppc64.AREM: {ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
-ppc64.AREMD: [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+ppc64.AREMD: {ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
-ppc64.AREMDU: [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+ppc64.AREMDU: {ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
-ppc64.AREMU: [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+ppc64.AREMU: {ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
-ppc64.ARLDC: [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+ppc64.ARLDC: {ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
-ppc64.ARLDCL: [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+ppc64.ARLDCL: {ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
-ppc64.ARLDCR: [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+ppc64.ARLDCR: {ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
-ppc64.ARLDMI: [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+ppc64.ARLDMI: {ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
-ppc64.ARLWMI: [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+ppc64.ARLWMI: {ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
-ppc64.ARLWNM: [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+ppc64.ARLWNM: {ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
-ppc64.ASLD: [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+ppc64.ASLD: {ppc64.ASLD, ppc64.ASLDCC, 0, 0},
-ppc64.ASLW: [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+ppc64.ASLW: {ppc64.ASLW, ppc64.ASLWCC, 0, 0},
-ppc64.ASRAD: [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+ppc64.ASRAD: {ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
-ppc64.ASRAW: [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+ppc64.ASRAW: {ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
-ppc64.ASRD: [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+ppc64.ASRD: {ppc64.ASRD, ppc64.ASRDCC, 0, 0},
-ppc64.ASRW: [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+ppc64.ASRW: {ppc64.ASRW, ppc64.ASRWCC, 0, 0},
-ppc64.ASUB: [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+ppc64.ASUB: {ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
-ppc64.ASUBC: [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+ppc64.ASUBC: {ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
-ppc64.ASUBE: [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+ppc64.ASUBE: {ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
-ppc64.ASUBME: [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+ppc64.ASUBME: {ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
-ppc64.ASUBZE: [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+ppc64.ASUBZE: {ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
-ppc64.AXOR: [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+ppc64.AXOR: {ppc64.AXOR, ppc64.AXORCC, 0, 0},
 }

 var initvariants_initialized int
@@ -111,7 +111,7 @@ func regnames(n *int) []string {

 func excludedregs() uint64 {
 // Exclude registers with fixed functions
-regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))

 // Also exclude floating point registers with fixed constants
 regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
@@ -602,8 +602,10 @@
 (MOVQstore destptr (MOVQconst [0]) mem))))

 // Medium zeroing uses a duff device.
-(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 ->
-(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVQconst [0]) mem)
+(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 ->
+(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 ->
+(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)

 // Large zeroing uses REP STOSQ.
 (Zero [size] destptr mem) && size > 1024 && size%8 == 0 ->
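Read together, the two medium-size rules first peel an unaligned 8-byte head off the block and then hand a 16-byte-aligned remainder to the Duff's device. As a rough illustration of how they compose (assuming no earlier small-size rule claims the value first, and with later arguments elided as "..."), a 40-byte zero rewrites as:

(Zero [40] destptr mem)
-> (Zero [32] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))   // 40%8 == 0 but 40%16 != 0
-> (DUFFZERO [duffStart(32)] (ADDQconst [duffAdj(32)] ...) (MOVOconst [0]) ...)  // 32%16 == 0

so DUFFZERO itself only ever clears 16-byte multiples, which is what allows it to be fed by a zeroed X0 register instead of the old 8-byte MOVQconst.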
@@ -388,10 +388,11 @@ func init() {
 {
 name: "DUFFZERO",
 reg: regInfo{
-inputs: []regMask{buildReg("DI"), buildReg("AX")},
+inputs: []regMask{buildReg("DI"), buildReg("X0")},
 clobbers: buildReg("DI FLAGS"),
 },
 },
+{name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Float64"},

 // arg0 = address of memory to zero
 // arg1 = # of 8-byte words to zero
@@ -261,6 +261,7 @@ const (
 OpAMD64MOVQstore
 OpAMD64MOVQstoreidx8
 OpAMD64DUFFZERO
+OpAMD64MOVOconst
 OpAMD64REPSTOSQ
 OpAMD64CALLstatic
 OpAMD64CALLclosure
@@ -3039,12 +3040,20 @@ var opcodeTable = [...]opInfo{
 name: "DUFFZERO",
 reg: regInfo{
 inputs: []inputInfo{
 {0, 128}, // .DI
-{1, 1}, // .AX
+{1, 65536}, // .X0
 },
 clobbers: 8589934720, // .DI .FLAGS
 },
 },
+{
+name: "MOVOconst",
+reg: regInfo{
+outputs: []regMask{
+4294901760, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
+},
+},
+},
 {
 name: "REPSTOSQ",
 reg: regInfo{
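For orientation, the register masks in this generated table are bit sets over the backend's AMD64 register numbering, and the new values decode consistently with their comments: 65536 is 1<<16, the bit for X0; 4294901760 is 0xFFFF0000, bits 16 through 31, i.e. X0 through X15; and the unchanged clobber mask 8589934720 is (1<<7)|(1<<33), matching the .DI .FLAGS annotation.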
@@ -179,23 +179,21 @@ func f2i(f float64) int64 {
 return int64(math.Float64bits(f))
 }

-// DUFFZERO consists of repeated blocks of 4 MOVs + ADD,
-// with 4 STOSQs at the very end.
-// The trailing STOSQs prevent the need for a DI preadjustment
-// for small numbers of words to clear.
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
 // See runtime/mkduff.go.
 const (
-dzBlocks = 31 // number of MOV/ADD blocks
+dzBlocks = 16 // number of MOV/ADD blocks
 dzBlockLen = 4 // number of clears per block
 dzBlockSize = 19 // size of instructions in a single block
 dzMovSize = 4 // size of single MOV instruction w/ offset
 dzAddSize = 4 // size of single ADD instruction
-dzDIStep = 8 // number of bytes cleared by each MOV instruction
+dzClearStep = 16 // number of bytes cleared by each MOV instruction

 dzTailLen = 4 // number of final STOSQ instructions
 dzTailSize = 2 // size of single STOSQ instruction

-dzSize = dzBlocks*dzBlockSize + dzTailLen*dzTailSize // total size of DUFFZERO routine
+dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
+dzSize = dzBlocks * dzBlockSize
 )

 func duffStart(size int64) int64 {
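With the new constants the sizes line up with the rewrite rules above: one block clears dzClearLen = dzClearStep * dzBlockLen = 16 * 4 = 64 bytes, so the 16 blocks cover at most 16 * 64 = 1024 bytes, exactly the upper bound the medium-zeroing rule enforces, and the routine itself occupies dzSize = dzBlocks * dzBlockSize = 16 * 19 = 304 bytes of code, so jumping part of the way into it runs only the tail needed for a given size.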
@@ -210,20 +208,19 @@ func duffAdj(size int64) int64 {
 // duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
 // required to use the duffzero mechanism for a block of the given size.
 func duff(size int64) (int64, int64) {
-if size < 32 || size > 1024 || size%8 != 0 {
+if size < 32 || size > 1024 || size%dzClearStep != 0 {
 panic("bad duffzero size")
 }
 // TODO: arch-dependent
-off := int64(dzSize)
-off -= dzTailLen * dzTailSize
-size -= dzTailLen * dzDIStep
-q := size / dzDIStep
-blocks, singles := q/dzBlockLen, q%dzBlockLen
-off -= dzBlockSize * blocks
+steps := size / dzClearStep
+blocks := steps / dzBlockLen
+steps %= dzBlockLen
+off := dzBlockSize * (dzBlocks - blocks)
 var adj int64
-if singles > 0 {
-off -= dzAddSize + dzMovSize*singles
-adj -= dzDIStep * (dzBlockLen - singles)
+if steps != 0 {
+off -= dzAddSize
+off -= dzMovSize * steps
+adj -= dzClearStep * (dzBlockLen - steps)
 }
 return off, adj
 }
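To make the new arithmetic concrete, here is a small standalone check of duff() (a sketch for illustration, not compiler code, with the constants copied from the hunks above) that prints the offset/adjust pairs for a few legal sizes:

package main

import "fmt"

// Constants copied from the rewritten compiler source above.
const (
	dzBlocks    = 16 // number of MOV/ADD blocks
	dzBlockLen  = 4  // number of clears per block
	dzBlockSize = 19 // size of instructions in a single block
	dzMovSize   = 4  // size of single MOV instruction w/ offset
	dzAddSize   = 4  // size of single ADD instruction
	dzClearStep = 16 // number of bytes cleared by each MOV instruction
)

// duff mirrors the rewritten helper, purely for illustration.
func duff(size int64) (off, adj int64) {
	if size < 32 || size > 1024 || size%dzClearStep != 0 {
		panic("bad duffzero size")
	}
	steps := size / dzClearStep
	blocks := steps / dzBlockLen
	steps %= dzBlockLen
	off = dzBlockSize * (dzBlocks - blocks)
	if steps != 0 {
		off -= dzAddSize
		off -= dzMovSize * steps
		adj -= dzClearStep * (dzBlockLen - steps)
	}
	return off, adj
}

func main() {
	for _, size := range []int64{32, 48, 64, 1024} {
		off, adj := duff(size)
		fmt.Printf("size=%4d  offset=%3d  adjust=%4d\n", size, off, adj)
	}
}

It prints, for example, duff(1024) = (0, 0), i.e. run the whole routine; duff(64) = (285, 0), i.e. skip the first 15 of the 16 blocks; and duff(48) = (288, -16), where the negative adjustment backs the destination pointer up by dzClearStep bytes to compensate for the one clear of the partially used block that is skipped.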
@@ -10635,14 +10635,48 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
 end282b5e36693f06e2cd1ac563e0d419b5:
 ;
 // match: (Zero [size] destptr mem)
-// cond: size <= 1024 && size%8 == 0
-// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVQconst [0]) mem)
+// cond: size <= 1024 && size%8 == 0 && size%16 != 0
+// result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
 {
 size := v.AuxInt
 destptr := v.Args[0]
 mem := v.Args[1]
-if !(size <= 1024 && size%8 == 0) {
-goto endfae59ebc96f670276efea844c3b302ac
+if !(size <= 1024 && size%8 == 0 && size%16 != 0) {
+goto end240266449c3e493db1c3b38a78682ff0
+}
+v.Op = OpZero
+v.AuxInt = 0
+v.Aux = nil
+v.resetArgs()
+v.AuxInt = size - 8
+v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, TypeInvalid)
+v0.AuxInt = 8
+v0.AddArg(destptr)
+v0.Type = config.fe.TypeUInt64()
+v.AddArg(v0)
+v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeInvalid)
+v1.AddArg(destptr)
+v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
+v2.AuxInt = 0
+v2.Type = config.fe.TypeUInt64()
+v1.AddArg(v2)
+v1.AddArg(mem)
+v1.Type = TypeMem
+v.AddArg(v1)
+return true
+}
+goto end240266449c3e493db1c3b38a78682ff0
+end240266449c3e493db1c3b38a78682ff0:
+;
+// match: (Zero [size] destptr mem)
+// cond: size <= 1024 && size%16 == 0
+// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
+{
+size := v.AuxInt
+destptr := v.Args[0]
+mem := v.Args[1]
+if !(size <= 1024 && size%16 == 0) {
+goto endf508bb887eee9119069b22c23dbca138
 }
 v.Op = OpAMD64DUFFZERO
 v.AuxInt = 0
@@ -10654,15 +10688,15 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
 v0.AddArg(destptr)
 v0.Type = config.fe.TypeUInt64()
 v.AddArg(v0)
-v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
+v1 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInvalid)
 v1.AuxInt = 0
-v1.Type = config.fe.TypeUInt64()
+v1.Type = config.fe.TypeFloat64()
 v.AddArg(v1)
 v.AddArg(mem)
 return true
 }
-goto endfae59ebc96f670276efea844c3b302ac
-endfae59ebc96f670276efea844c3b302ac:
+goto endf508bb887eee9119069b22c23dbca138
+endf508bb887eee9119069b22c23dbca138:
 ;
 // match: (Zero [size] destptr mem)
 // cond: size > 1024 && size%8 == 0
src/cmd/dist/build.go (vendored, 120 changed lines)
@@ -11,7 +11,9 @@ import (
 "os"
 "os/exec"
 "path/filepath"
+"sort"
 "strings"
+"sync"
 )

 // Initialization for any invocation.
@@ -487,9 +489,20 @@ var gentab = []struct {
 {"anames9.c", nil},
 }

+// installed maps from a dir name (as given to install) to a chan
+// closed when the dir's package is installed.
+var installed = make(map[string]chan struct{})
+
 // install installs the library, package, or binary associated with dir,
 // which is relative to $GOROOT/src.
 func install(dir string) {
+if ch, ok := installed[dir]; ok {
+defer close(ch)
+}
+for _, dep := range builddeps[dir] {
+<-installed[dep]
+}
+
 if vflag > 0 {
 if goos != gohostos || goarch != gohostarch {
 errprintf("%s (%s/%s)\n", dir, goos, goarch)
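The additions above give install a simple dependency gate: every package directory gets a channel that is closed when its build completes, and install blocks on the channel of each entry in builddeps[dir] before doing any work, so independent packages can build concurrently. A minimal standalone sketch of the same pattern (hypothetical package names standing in for the real builddeps data) looks like this:

package main

import (
	"fmt"
	"sync"
)

// deps is a hypothetical dependency graph standing in for builddeps.
var deps = map[string][]string{
	"a": nil,
	"b": {"a"},
	"c": {"a", "b"},
}

// done maps a package name to a channel closed once that package is built,
// mirroring the installed map added in the hunk above.
var done = make(map[string]chan struct{})

func build(name string) {
	defer close(done[name]) // release everyone waiting on this package
	for _, d := range deps[name] {
		<-done[d] // block until each dependency has been built
	}
	fmt.Println("built", name)
}

func main() {
	for name := range deps {
		done[name] = make(chan struct{}) // create all channels before starting any build
	}
	var wg sync.WaitGroup
	for name := range deps {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			build(name)
		}(name)
	}
	wg.Wait()
}

Closing the channel acts as a broadcast: every goroutine blocked on <-done[d] is released at once, and any later receive on an already-closed channel returns immediately.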
@@ -498,6 +511,9 @@ func install(dir string) {
 }
 }

+workdir := pathf("%s/%s", workdir, dir)
+xmkdirall(workdir)
+
 var clean []string
 defer func() {
 for _, name := range clean {
@@ -610,6 +626,8 @@ func install(dir string) {
 pathf("%s/src/runtime/textflag.h", goroot), 0)
 copyfile(pathf("%s/pkg/include/funcdata.h", goroot),
 pathf("%s/src/runtime/funcdata.h", goroot), 0)
+copyfile(pathf("%s/pkg/include/asm_ppc64x.h", goroot),
+pathf("%s/src/runtime/asm_ppc64x.h", goroot), 0)
 }

 // Generate any missing files; regenerate existing ones.
@@ -673,6 +691,7 @@ func install(dir string) {
 run(path, CheckExit|ShowOutput, compile...)

 // Compile the files.
+var wg sync.WaitGroup
 for _, p := range files {
 if !strings.HasSuffix(p, ".s") {
 continue
@@ -695,14 +714,14 @@ func install(dir string) {
 // Change the last character of the output file (which was c or s).
 b = b[:len(b)-1] + "o"
 compile = append(compile, "-o", b, p)
-bgrun(path, compile...)
+bgrun(&wg, path, compile...)

 link = append(link, b)
 if doclean {
 clean = append(clean, b)
 }
 }
-bgwait()
+bgwait(&wg)

 if ispackcmd {
 xremove(link[targ])
@@ -839,62 +858,19 @@ func dopack(dst, src string, extra []string) {
 writefile(bdst.String(), dst, 0)
 }

-// buildorder records the order of builds for the 'go bootstrap' command.
-// The Go packages and commands must be in dependency order,
-// maintained by hand, but the order doesn't change often.
-var buildorder = []string{
-// Go libraries and programs for bootstrap.
-"runtime",
-"errors",
-"sync/atomic",
-"sync",
-"internal/singleflight",
-"io",
-"unicode",
-"unicode/utf8",
-"unicode/utf16",
-"bytes",
-"math",
-"strings",
-"strconv",
-"bufio",
-"sort",
-"container/heap",
-"encoding/base64",
-"syscall",
-"internal/syscall/windows/registry",
-"time",
-"internal/syscall/windows",
-"os",
-"reflect",
-"fmt",
-"encoding",
-"encoding/binary",
-"encoding/json",
-"flag",
-"path/filepath",
-"path",
-"io/ioutil",
-"log",
-"regexp/syntax",
-"regexp",
-"go/token",
-"go/scanner",
-"go/ast",
-"go/parser",
-"os/exec",
-"os/signal",
-"net/url",
-"text/template/parse",
-"text/template",
-"go/doc",
-"go/build",
-"hash",
-"crypto",
-"crypto/sha1",
-"debug/dwarf",
-"debug/elf",
-"cmd/go",
+// builddeps records the build dependencies for the 'go bootstrap' command.
+// It is a map[string][]string and generated by mkdeps.bash into deps.go.
+
+// buildlist is the list of directories being built, sorted by name.
+var buildlist = makeBuildlist()
+
+func makeBuildlist() []string {
+var all []string
+for dir := range builddeps {
+all = append(all, dir)
+}
+sort.Strings(all)
+return all
 }

 var runtimegen = []string{
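Since the order is no longer hand-maintained, the only ordering makeBuildlist imposes is alphabetical. Purely as a hypothetical illustration (the real contents of builddeps are generated by mkdeps.bash into deps.go), if builddeps were map[string][]string{"errors": nil, "runtime": nil, "sync": {"sync/atomic"}, "sync/atomic": nil}, then buildlist would come out as []string{"errors", "runtime", "sync", "sync/atomic"}; correctness no longer depends on that order, because install waits on the installed channels of each directory's dependencies.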
@@ -903,7 +879,7 @@ var runtimegen = []string{
 }

 func clean() {
-for _, name := range buildorder {
+for _, name := range buildlist {
 path := pathf("%s/src/%s", goroot, name)
 // Remove generated files.
 for _, elem := range xreaddir(path) {
@@ -1044,19 +1020,30 @@ func cmdbootstrap() {
 // than in a standard release like Go 1.4, so don't do this rebuild by default.
 if false {
 xprintf("##### Building Go toolchain using itself.\n")
-for _, dir := range buildorder {
-if dir == "cmd/go" {
-break
-}
-install(dir)
+for _, dir := range buildlist {
+installed[dir] = make(chan struct{})
 }
+var wg sync.WaitGroup
+for _, dir := range builddeps["cmd/go"] {
+wg.Add(1)
+dir := dir
+go func() {
+defer wg.Done()
+install(dir)
+}()
+}
+wg.Wait()
 xprintf("\n")
 }

 xprintf("##### Building go_bootstrap for host, %s/%s.\n", gohostos, gohostarch)
-for _, dir := range buildorder {
-install(dir)
+for _, dir := range buildlist {
+installed[dir] = make(chan struct{})
 }
+for _, dir := range buildlist {
+go install(dir)
+}
+<-installed["cmd/go"]

 goos = oldgoos
 goarch = oldgoarch
@@ -1065,6 +1052,7 @@ func cmdbootstrap() {

 // Build runtime for actual goos/goarch too.
 if goos != gohostos || goarch != gohostarch {
+installed["runtime"] = make(chan struct{})
 install("runtime")
 }
 }
Some files were not shown because too many files have changed in this diff.